changeset 661:5d8798772cca

Merge branch smartgit-4.5 (no actual changes, merely to denote branch is inactive)
author Artem Tikhomirov <tikhomirov.artem@gmail.com>
date Wed, 10 Jul 2013 11:48:55 +0200
parents 4fd317a2fecf (diff) 49f0749307a0 (current diff)
children af5223b86dd3
files src/org/tmatesoft/hg/repo/HgManifest.java src/org/tmatesoft/hg/repo/HgRepositoryLock.java src/org/tmatesoft/hg/util/Path.java
diffstat 153 files changed, 8756 insertions(+), 2864 deletions(-) [+]
line wrap: on
line diff
--- a/.hgtags	Thu Jun 06 14:21:11 2013 +0200
+++ b/.hgtags	Wed Jul 10 11:48:55 2013 +0200
@@ -6,3 +6,7 @@
 3ca4ae7bdd3890b8ed89bfea1b42af593e04b373 v1.0.0
 2103388d4010bff6dcf8d2e4c42a67b9d95aa646 v1.1m2
 32453f30de07efe9d7b386c084ebd607dbeaba2b v1.1m3
+f41dd9a3b8af1a5f74b533cd9f00b7d77423cc04 v1.1m4
+5afc7eedb3dd109f75e5f5a02dd88c9c4e7b7f3b v1.1rc1
+54e16ab771ec03d69cb05e38622ebdf9c3302c8c v1.1rc2
+2f33f102a8fa59274a27ebbe1c2903cecac6c5d5 v1.1.0
--- a/COPYING	Thu Jun 06 14:21:11 2013 +0200
+++ b/COPYING	Wed Jul 10 11:48:55 2013 +0200
@@ -1,4 +1,4 @@
-Copyright (C) 2010-2012 TMate Software Ltd
+Copyright (C) 2010-2013 TMate Software Ltd
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
--- a/build.gradle	Thu Jun 06 14:21:11 2013 +0200
+++ b/build.gradle	Wed Jul 10 11:48:55 2013 +0200
@@ -1,9 +1,22 @@
 /*
- * Copyright (c) 2012 TMate Software Ltd
+ * Copyright (c) 2012-2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
  */
 def isRelease = false
 
-  version = '1.1.0-SNAPSHOT'
+  version = '1.2.0-SNAPSHOT'
   description = 'Pure Java API and Toolkit for Mercurial DVCS'
   group = 'org.tmatesoft.hg4j'
   
--- a/build.xml	Thu Jun 06 14:21:11 2013 +0200
+++ b/build.xml	Wed Jul 10 11:48:55 2013 +0200
@@ -27,7 +27,7 @@
 
 	<property name="junit.jar" value="lib/junit-4.8.2.jar" />
 	<property name="ver.qualifier" value="" />
-	<property name="version.lib" value="1.1.0" />
+	<property name="version.lib" value="1.2" />
 	<property name="version.jar" value="${version.lib}${ver.qualifier}" />
 	<property name="compile-with-debug" value="yes"/>
 
@@ -84,12 +84,14 @@
 			<test name="org.tmatesoft.hg.test.TestIntMap" />
 			<test name="org.tmatesoft.hg.test.TestAuxUtilities" />
 			<test name="org.tmatesoft.hg.test.TestConfigFileParser" />
+			<test name="org.tmatesoft.hg.test.TestInflaterDataAccess" />
 			<test name="org.tmatesoft.hg.test.TestHistory" />
 			<test name="org.tmatesoft.hg.test.TestManifest" />
 			<test name="org.tmatesoft.hg.test.TestStatus" />
 			<test name="org.tmatesoft.hg.test.TestStorePath" />
 			<test name="org.tmatesoft.hg.test.TestNewlineFilter" />
 			<test name="org.tmatesoft.hg.test.TestIgnore" />
+			<test name="org.tmatesoft.hg.test.TestConfigFiles" />
 			<test name="org.tmatesoft.hg.test.TestDirstate" />
 			<test name="org.tmatesoft.hg.test.TestBranches" />
 			<test name="org.tmatesoft.hg.test.TestByteChannel" />
@@ -107,6 +109,11 @@
 			<test name="org.tmatesoft.hg.test.TestCommit" />
 			<test name="org.tmatesoft.hg.test.TestBlame" />
 			<test name="org.tmatesoft.hg.test.TestDiffHelper" />
+			<test name="org.tmatesoft.hg.test.TestRepositoryLock" />
+			<test name="org.tmatesoft.hg.test.TestRevisionSet" />
+			<test name="org.tmatesoft.hg.test.TestRevisionMaps" />
+			<test name="org.tmatesoft.hg.test.TestPush" />
+			<test name="org.tmatesoft.hg.test.ComplexTest" />
 		</junit>
 	</target>
 
@@ -134,7 +141,7 @@
 
 	<target name="build-lib">
 		<mkdir dir="bin" />
-		<javac srcdir="src" destdir="bin" debug="${compile-with-debug}" includeantruntime="no"/>
+		<javac srcdir="src" destdir="bin" debug="${compile-with-debug}" includeantruntime="no" source="1.5" encoding="UTF-8"/>
 		<jar destfile="${hg4j.jar}">
 			<fileset dir="bin/">
 				<include name="org/tmatesoft/hg/core/**" />
@@ -148,7 +155,7 @@
 
 	<target name="build-tests" depends="build-lib">
 		<mkdir dir="bin" />
-		<javac srcdir="test" destdir="bin" debug="${compile-with-debug}" >
+		<javac srcdir="test" destdir="bin" debug="${compile-with-debug}" includeantruntime="no" source="1.5" encoding="UTF-8">
 			<classpath>
 				<pathelement location="${hg4j.jar}"/>
 				<pathelement location="${junit.jar}"/>
@@ -164,7 +171,7 @@
 
 	<target name="build-cmdline" depends="build-lib">
 		<mkdir dir="bin" />
-		<javac srcdir="cmdline" destdir="bin" debug="${compile-with-debug}">
+		<javac srcdir="cmdline" destdir="bin" debug="${compile-with-debug}" includeantruntime="no" source="1.5" encoding="UTF-8">
 			<classpath>
 				<pathelement location="${hg4j.jar}"/>
 				<pathelement location="${junit.jar}"/>
--- a/cmdline/org/tmatesoft/hg/console/Bundle.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/cmdline/org/tmatesoft/hg/console/Bundle.java	Wed Jul 10 11:48:55 2013 +0200
@@ -29,6 +29,7 @@
 import org.tmatesoft.hg.repo.HgBundle.GroupElement;
 import org.tmatesoft.hg.repo.HgBundle.Inspector;
 import org.tmatesoft.hg.repo.HgChangelog.RawChangeset;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 
 
 /**
@@ -60,7 +61,7 @@
 		hgBundle.changes(hgRepo, new HgChangelog.Inspector() {
 			private final HgChangelog changelog = hgRepo.getChangelog();
 			
-			public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) {
+			public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) throws HgRuntimeException {
 				if (changelog.isKnown(nodeid)) {
 					System.out.print("+");
 				} else {
@@ -99,7 +100,7 @@
 
  */
 
-	public static void dump(HgBundle hgBundle) throws HgException {
+	public static void dump(HgBundle hgBundle) throws HgException, HgRuntimeException {
 		Dump dump = new Dump();
 		hgBundle.inspectAll(dump);
 		System.out.println("Total files:" + dump.names.size());
--- a/cmdline/org/tmatesoft/hg/console/ChangesetDumpHandler.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/cmdline/org/tmatesoft/hg/console/ChangesetDumpHandler.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -26,6 +26,7 @@
 import org.tmatesoft.hg.core.HgFileRevision;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Path;
 
 /**
@@ -43,7 +44,7 @@
 	private final HgRepository repo;
 	private final int tip;
 
-	public ChangesetDumpHandler(HgRepository hgRepo) {
+	public ChangesetDumpHandler(HgRepository hgRepo) throws HgRuntimeException {
 		repo = hgRepo;
 		tip = hgRepo.getChangelog().getLastRevision();
 	}
@@ -63,7 +64,7 @@
 		return this;
 	}
 
-	public void cset(HgChangeset changeset) {
+	public void cset(HgChangeset changeset) throws HgRuntimeException {
 		try {
 			final String s = print(changeset);
 			if (reverseOrder) {
@@ -89,7 +90,7 @@
 		l.clear();
 	}
 
-	private String print(HgChangeset cset) throws HgException {
+	private String print(HgChangeset cset) throws HgException, HgRuntimeException {
 		StringBuilder sb = new StringBuilder();
 		Formatter f = new Formatter(sb);
 		final Nodeid csetNodeid = cset.getNodeid();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cmdline/org/tmatesoft/hg/console/Commit.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.console;
+
+import java.util.Collections;
+
+import org.tmatesoft.hg.core.HgCommitCommand;
+import org.tmatesoft.hg.core.HgRepoFacade;
+import org.tmatesoft.hg.util.Outcome;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class Commit {
+
+	public static void main(String[] args) throws Exception {
+		Options cmdLineOpts = Options.parse(args, Collections.<String>emptySet());
+		HgRepoFacade repo = new HgRepoFacade();
+		if (!repo.init(cmdLineOpts.findRepository())) {
+			System.err.printf("Can't find repository in: %s\n", repo.getRepository().getLocation());
+			return;
+		}
+		String message = cmdLineOpts.getSingle("-m", "--message");
+		if (message == null) {
+			System.err.println("Need a commit message");
+			return;
+		}
+		HgCommitCommand cmd = repo.createCommitCommand();
+		cmd.message(message);
+		Outcome o = cmd.execute();
+		if (!o.isOk()) {
+			System.err.println(o.getMessage());
+			return;
+		}
+		System.out.printf("New changeset: %s\n", cmd.getCommittedRevision().shortNotation());
+	}
+}
--- a/cmdline/org/tmatesoft/hg/console/Log.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/cmdline/org/tmatesoft/hg/console/Log.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2012 TMate Software Ltd
+ * Copyright (c) 2010-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -26,6 +26,7 @@
 import org.tmatesoft.hg.core.HgLogCommand;
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.CancelSupport;
 import org.tmatesoft.hg.util.ProgressSupport;
 
@@ -124,7 +125,7 @@
 	private static final class Dump extends ChangesetDumpHandler implements HgChangesetHandler.WithCopyHistory {
 		private final RenameDumpHandler renameHandlerDelegate;
 
-		public Dump(HgRepository hgRepo) {
+		public Dump(HgRepository hgRepo) throws HgRuntimeException {
 			super(hgRepo);
 			renameHandlerDelegate = new RenameDumpHandler();
 		}
--- a/cmdline/org/tmatesoft/hg/console/Main.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/cmdline/org/tmatesoft/hg/console/Main.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -57,6 +57,7 @@
 import org.tmatesoft.hg.repo.HgDirstate.Record;
 import org.tmatesoft.hg.repo.HgIgnore;
 import org.tmatesoft.hg.repo.HgInternals;
+import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.repo.HgManifest;
 import org.tmatesoft.hg.repo.HgManifest.Flags;
 import org.tmatesoft.hg.repo.HgMergeState;
@@ -102,12 +103,11 @@
 
 	public static void main(String[] args) throws Exception {
 		Main m = new Main(args);
-		m.checkFileSneakerPerformance();
+//		m.checkFileSneakerPerformance();
 //		m.testRevert();
 //		m.testCheckout();
 //		m.tryExtensions();
 //		m.dumpBookmarks();
-//		m.readConfigFile();
 //		m.dumpCommitLastMessage();
 //		m.buildFileLog();
 //		m.testConsoleLog();
@@ -119,7 +119,7 @@
 //		m.testEffectiveFileLog();
 //		m.testMergeState();
 //		m.testFileStatus();
-//		m.dumpBranches();
+		m.dumpBranches();
 //		m.inflaterLengthException();
 //		m.dumpIgnored();
 //		m.dumpDirstate();
@@ -210,19 +210,6 @@
 		}
 	}
 
-	// TODO as test
-	private void readConfigFile() throws Exception {
-		ConfigFile configFile = new ConfigFile(hgRepo.getSessionContext());
-		configFile.addLocation(new File(System.getProperty("user.home"), "test-cfg/aaa/config1"));
-		for (String s : configFile.getSectionNames()) {
-			System.out.printf("[%s]\n", s);
-			for (Map.Entry<String, String> e : configFile.getSection(s).entrySet()) {
-				System.out.printf("%s = %s\n", e.getKey(), e.getValue());
-			}
-		}
-		
-	}
-
 	private void dumpCommitLastMessage() throws Exception {
 		System.out.println(hgRepo.getCommitLastMessage());
 	}
@@ -233,7 +220,7 @@
 		cmd.file("a2.txt", true, false);
 		final int[] count = new int[] { 0 };
 		class MyHandler implements HgChangesetTreeHandler, Adaptable {
-			public void treeElement(HgChangesetTreeHandler.TreeElement entry) {
+			public void treeElement(HgChangesetTreeHandler.TreeElement entry) throws HgRuntimeException {
 				StringBuilder sb = new StringBuilder();
 				HashSet<Nodeid> test = new HashSet<Nodeid>(entry.childRevisions());
 				for (HgChangeset cc : entry.children()) {
@@ -533,7 +520,7 @@
 		System.out.println(bac.toArray().length);
 	}
 	
-	private void dumpIgnored() {
+	private void dumpIgnored() throws HgInvalidControlFileException {
 		String[] toCheck = new String[] {"design.txt", "src/com/tmate/hgkit/ll/Changelog.java", "src/Extras.java", "bin/com/tmate/hgkit/ll/Changelog.class"};
 		HgIgnore ignore = hgRepo.getIgnore();
 		for (int i = 0; i < toCheck.length; i++) {
@@ -623,7 +610,7 @@
 			public void dir(Path p) {
 				System.out.println(p);
 			}
-			public void file(HgFileRevision fileRevision) {
+			public void file(HgFileRevision fileRevision) throws HgRuntimeException {
 				System.out.print(fileRevision.getRevision());;
 				System.out.print("   ");
 				System.out.printf("%s %s", fileRevision.getParents().first().shortNotation(), fileRevision.getParents().second().shortNotation());
@@ -686,7 +673,7 @@
 	}
 
 
-	private void testStatusInternals() throws HgException {
+	private void testStatusInternals() throws HgException, HgRuntimeException {
 		HgDataFile n = hgRepo.getFileNode(Path.create("design.txt"));
 		for (String s : new String[] {"011dfd44417c72bd9e54cf89b82828f661b700ed", "e5529faa06d53e06a816e56d218115b42782f1ba", "c18e7111f1fc89a80a00f6a39d51288289a382fc"}) {
 			// expected: 359, 2123, 3079
--- a/cmdline/org/tmatesoft/hg/console/Manifest.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/cmdline/org/tmatesoft/hg/console/Manifest.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2012 TMate Software Ltd
+ * Copyright (c) 2010-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -27,6 +27,7 @@
 import org.tmatesoft.hg.repo.HgInvalidRevisionException;
 import org.tmatesoft.hg.repo.HgManifest;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Path;
 
 
@@ -52,7 +53,7 @@
 			}
 			public void dir(Path p) {
 			}
-			public void file(HgFileRevision fileRevision) {
+			public void file(HgFileRevision fileRevision) throws HgRuntimeException {
 				try {
 					if (debug) {
 						System.out.print(fileRevision.getRevision());;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cmdline/org/tmatesoft/hg/console/Push.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.console;
+
+import java.util.Collections;
+
+import org.tmatesoft.hg.core.HgPushCommand;
+import org.tmatesoft.hg.core.HgRepoFacade;
+import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgRemoteRepository;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class Push {
+
+	public static void main(String[] args) throws Exception {
+		Options cmdLineOpts = Options.parse(args, Collections.<String>emptySet());
+		HgRepoFacade hgRepo = new HgRepoFacade();
+		if (!hgRepo.init(cmdLineOpts.findRepository())) {
+			System.err.printf("Can't find repository in: %s\n", hgRepo.getRepository().getLocation());
+			return;
+		}
+		// XXX perhaps, HgRepoFacade shall get detectRemote() analog (to get remote server with respect of facade's repo)
+		HgRemoteRepository hgRemote = new HgLookup().detectRemote(cmdLineOpts.getSingle(""), hgRepo.getRepository());
+		if (hgRemote.isInvalid()) {
+			System.err.printf("Remote repository %s is not valid", hgRemote.getLocation());
+			return;
+		}
+		HgPushCommand cmd = hgRepo.createPushCommand();
+		cmd.destination(hgRemote);
+		cmd.execute();
+		System.out.printf("Added %d changesets\n", cmd.getPushedRevisions().size());
+	}
+}
--- a/src/org/tmatesoft/hg/core/ChangesetTransformer.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/ChangesetTransformer.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgChangelog.RawChangeset;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.repo.HgStatusCollector;
 import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.util.Adaptable;
@@ -63,7 +64,7 @@
 		lifecycleBridge = new LifecycleBridge(ps, cs);
 	}
 	
-	public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) {
+	public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) throws HgRuntimeException {
 		if (branches != null && !branches.contains(cset.branch())) {
 			return;
 		}
--- a/src/org/tmatesoft/hg/core/HgAddRemoveCommand.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgAddRemoveCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -18,12 +18,14 @@
 
 import java.util.LinkedHashSet;
 
+import org.tmatesoft.hg.internal.COWTransaction;
 import org.tmatesoft.hg.internal.DirstateBuilder;
 import org.tmatesoft.hg.internal.DirstateReader;
-import org.tmatesoft.hg.internal.Experimental;
 import org.tmatesoft.hg.internal.Internals;
+import org.tmatesoft.hg.internal.Transaction;
 import org.tmatesoft.hg.repo.HgManifest.Flags;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRepositoryLock;
 import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.CancelSupport;
 import org.tmatesoft.hg.util.CancelledException;
@@ -31,8 +33,6 @@
 import org.tmatesoft.hg.util.ProgressSupport;
 
 /**
- * WORK IN PROGRESS
- * 
  * Schedule files for addition and removal 
  * XXX and, perhaps, forget() functionality shall be here as well?
  * 
@@ -40,7 +40,6 @@
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-@Experimental(reason="Work in progress")
 public class HgAddRemoveCommand extends HgAbstractCommand<HgAddRemoveCommand> {
 	
 	private final HgRepository repo;
@@ -98,9 +97,12 @@
 	 * Perform scheduled addition/removal
 	 * 
 	 * @throws HgException subclass thereof to indicate specific issue with the command arguments or repository state
+	 * @throws HgRepositoryLockException if failed to lock the repo for modifications
 	 * @throws CancelledException if execution of the command was cancelled
 	 */
-	public void execute() throws HgException, CancelledException {
+	public void execute() throws HgException, HgRepositoryLockException, CancelledException {
+		final HgRepositoryLock wdLock = repo.getWorkingDirLock();
+		wdLock.acquire();
 		try {
 			final ProgressSupport progress = getProgressSupport(null);
 			final CancelSupport cancellation = getCancelSupport(null, true);
@@ -121,11 +123,24 @@
 				progress.worked(1);
 				cancellation.checkCancelled();
 			}
-			dirstateBuilder.serialize();
+			Transaction.Factory trFactory = new COWTransaction.Factory();
+			Transaction tr = trFactory.create(repo);
+			try {
+				dirstateBuilder.serialize(tr);
+				tr.commit();
+			} catch (RuntimeException ex) {
+				tr.rollback();
+				throw ex;
+			} catch (HgException ex) {
+				tr.rollback();
+				throw ex;
+			}
 			progress.worked(1);
 			progress.done();
 		} catch (HgRuntimeException ex) {
 			throw new HgLibraryFailureException(ex);
+		} finally {
+			wdLock.release();
 		}
 	}
 }
--- a/src/org/tmatesoft/hg/core/HgAnnotateCommand.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgAnnotateCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -20,30 +20,27 @@
 
 import java.util.Arrays;
 
+import org.tmatesoft.hg.core.HgBlameInspector.BlockData;
 import org.tmatesoft.hg.internal.Callback;
 import org.tmatesoft.hg.internal.CsetParamKeeper;
-import org.tmatesoft.hg.internal.Experimental;
 import org.tmatesoft.hg.internal.FileAnnotation;
 import org.tmatesoft.hg.internal.FileAnnotation.LineDescriptor;
 import org.tmatesoft.hg.internal.FileAnnotation.LineInspector;
-import org.tmatesoft.hg.repo.HgBlameFacility.BlockData;
-import org.tmatesoft.hg.repo.HgBlameFacility;
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.CancelSupport;
 import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.Path;
 import org.tmatesoft.hg.util.ProgressSupport;
 
 /**
- * WORK IN PROGRESS. UNSTABLE API
- * 
  * 'hg annotate' counterpart, report origin revision and file line-by-line 
  * 
+ * @since 1.1
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-@Experimental(reason="Work in progress. Unstable API")
 public class HgAnnotateCommand extends HgAbstractCommand<HgAnnotateCommand> {
 	
 	private final HgRepository repo;
@@ -90,7 +87,7 @@
 		return this;
 	}
 	
-	// TODO [1.1] set encoding and provide String line content from LineInfo
+	// TODO [post-1.1] set encoding and provide String line content from LineInfo
 
 	/**
 	 * Annotate selected file
@@ -111,28 +108,34 @@
 		final CancelSupport cancellation = getCancelSupport(inspector, true);
 		cancellation.checkCancelled();
 		progress.start(2);
-		HgDataFile df = repo.getFileNode(file);
-		if (!df.exists()) {
-			return;
+		try {
+			HgDataFile df = repo.getFileNode(file);
+			if (!df.exists()) {
+				return;
+			}
+			final int changesetStart = followRename ? 0 : df.getChangesetRevisionIndex(0);
+			Collector c = new Collector(cancellation);
+			FileAnnotation fa = new FileAnnotation(c);
+			HgDiffCommand cmd = new HgDiffCommand(repo);
+			cmd.file(df).order(HgIterateDirection.NewToOld);
+			cmd.range(changesetStart, annotateRevision.get());
+			cmd.executeAnnotate(fa);
+			progress.worked(1);
+			c.throwIfCancelled();
+			cancellation.checkCancelled();
+			ProgressSupport.Sub subProgress = new ProgressSupport.Sub(progress, 1);
+			subProgress.start(c.lineRevisions.length);
+			LineImpl li = new LineImpl();
+			for (int i = 0; i < c.lineRevisions.length; i++) {
+				li.init(i+1, c.lineRevisions[i], c.line(i));
+				inspector.next(li);
+				subProgress.worked(1);
+				cancellation.checkCancelled();
+			}
+			subProgress.done();
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
 		}
-		final int changesetStart = followRename ? 0 : df.getChangesetRevisionIndex(0);
-		Collector c = new Collector(cancellation);
-		FileAnnotation fa = new FileAnnotation(c);
-		HgBlameFacility af = new HgBlameFacility(df);
-		af.annotate(changesetStart, annotateRevision.get(), fa, HgIterateDirection.NewToOld);
-		progress.worked(1);
-		c.throwIfCancelled();
-		cancellation.checkCancelled();
-		ProgressSupport.Sub subProgress = new ProgressSupport.Sub(progress, 1);
-		subProgress.start(c.lineRevisions.length);
-		LineImpl li = new LineImpl();
-		for (int i = 0; i < c.lineRevisions.length; i++) {
-			li.init(i+1, c.lineRevisions[i], c.line(i));
-			inspector.next(li);
-			subProgress.worked(1);
-			cancellation.checkCancelled();
-		}
-		subProgress.done();
 		progress.done();
 	}
 	
@@ -157,7 +160,8 @@
 		byte[] getContent();
 	}
 
-	// FIXME there's no need in FileAnnotation.LineInspector, merge it here
+	// TODO [post-1.1] there's no need in FileAnnotation.LineInspector, merge it here
+	// ok for 1.1 as this LineInspector is internal class
 	private static class Collector implements LineInspector {
 		private int[] lineRevisions;
 		private byte[][] lines;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/core/HgBlameInspector.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.core;
+
+import org.tmatesoft.hg.core.HgCallbackTargetException;
+import org.tmatesoft.hg.internal.Callback;
+import org.tmatesoft.hg.internal.Experimental;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.util.Adaptable;
+
+/**
+ * Client's sink for revision differences, diff/annotate functionality.
+ * 
+ * When implemented, clients shall not expect new {@link Block blocks} instances in each call.
+ * 
+ * In case more information about annotated revision is needed, inspector instances may supply 
+ * {@link RevisionDescriptor.Recipient} through {@link Adaptable}.  
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ * @since 1.1
+ */
+@Callback
+@Experimental(reason="Unstable API")
+public interface HgBlameInspector {
+
+	void same(EqualBlock block) throws HgCallbackTargetException;
+	void added(AddBlock block) throws HgCallbackTargetException;
+	void changed(ChangeBlock block) throws HgCallbackTargetException;
+	void deleted(DeleteBlock block) throws HgCallbackTargetException;
+	
+	/**
+	 * Represents content of a block, either as a sequence of bytes or a 
+	 * sequence of smaller blocks (lines), if appropriate (according to usage context).
+	 * 
+	 * This approach allows line-by-line access to content data along with complete byte sequence for the whole block, i.e.
+	 * <pre>
+	 *    BlockData bd = addBlock.addedLines()
+	 *    // bd describes data from the addition completely.
+	 *    // elements of the BlockData are lines
+	 *    bd.elementCount() == addBlock.totalAddedLines();
+	 *    // one cat obtain complete addition with
+	 *    byte[] everythingAdded = bd.asArray();
+	 *    // or iterate line by line
+	 *    for (int i = 0; i < bd.elementCount(); i++) {
+	 *    	 byte[] lineContent = bd.elementAt(i);
+	 *       String line = new String(lineContent, fileEncodingCharset);
+	 *    }
+	 *    where bd.elementAt(0) is the line at index addBlock.firstAddedLine() 
+	 * </pre> 
+	 * 
+	 * LineData or ChunkData? 
+	 */
+	public interface BlockData {
+		BlockData elementAt(int index);
+		int elementCount();
+		byte[] asArray();
+	}
+	
+	/**
+	 * {@link HgBlameInspector} may optionally request extra information about revisions
+	 * being inspected, denoting itself as a {@link RevisionDescriptor.Recipient}. This class 
+	 * provides complete information about file revision under annotation now. 
+	 */
+	public interface RevisionDescriptor {
+		/**
+		 * @return complete source of the diff origin, never <code>null</code>
+		 */
+		BlockData origin();
+		/**
+		 * @return complete source of the diff target, never <code>null</code>
+		 */
+		BlockData target();
+		/**
+		 * @return changeset revision index of original file, or {@link HgRepository#NO_REVISION} if it's the very first revision
+		 */
+		int originChangesetIndex();
+		/**
+		 * @return changeset revision index of the target file
+		 */
+		int targetChangesetIndex();
+		/**
+		 * @return <code>true</code> if this revision is merge
+		 */
+		boolean isMerge();
+		/**
+		 * @return changeset revision index of the second, merged parent
+		 */
+		int mergeChangesetIndex();
+		/**
+		 * @return revision index of the change in target file's revlog
+		 */
+		int fileRevisionIndex();
+
+		/**
+		 * @return file object under blame (target file)
+		 */
+		HgDataFile file();
+
+		/**
+		 * Implement to indicate interest in {@link RevisionDescriptor}.
+		 * 
+		 * Note, instance of {@link RevisionDescriptor} is the same for 
+		 * {@link #start(RevisionDescriptor)} and {@link #done(RevisionDescriptor)} 
+		 * methods, and not necessarily a new one (i.e. <code>==</code>) for the next
+		 * revision announced.
+		 */
+		@Callback
+		public interface Recipient {
+			/**
+			 * Comes prior to any change {@link Block blocks}
+			 */
+			void start(RevisionDescriptor revisionDescription) throws HgCallbackTargetException;
+			/**
+			 * Comes after all change {@link Block blocks} were dispatched
+			 */
+			void done(RevisionDescriptor revisionDescription) throws HgCallbackTargetException;
+		}
+	}
+	
+	/**
+	 * Each change block comes from a single origin, blocks that are result of a merge
+	 * have {@link #originChangesetIndex()} equal to {@link RevisionDescriptor#mergeChangesetIndex()}.
+	 */
+	public interface Block {
+		int originChangesetIndex();
+		int targetChangesetIndex();
+	}
+	
+	public interface EqualBlock extends Block {
+		int originStart();
+		int targetStart();
+		int length();
+		BlockData content();
+	}
+	
+	public interface AddBlock extends Block {
+		/**
+		 * @return line index in the origin where this block is inserted
+		 */
+		int insertedAt();  
+		/**
+		 * @return line index of the first added line in the target revision
+		 */
+		int firstAddedLine();
+		/**
+		 * @return number of added lines in this block
+		 */
+		int totalAddedLines();
+		/**
+		 * @return content of added lines
+		 */
+		BlockData addedLines();
+	}
+	public interface DeleteBlock extends Block {
+		/**
+		 * @return line index in the target revision where this deleted block would be
+		 */
+		int removedAt();
+		/**
+		 * @return line index of the first removed line in the original revision
+		 */
+		int firstRemovedLine();
+		/**
+		 * @return number of deleted lines in this block
+		 */
+		int totalRemovedLines();
+		/**
+		 * @return content of deleted lines
+		 */
+		BlockData removedLines();
+	}
+	public interface ChangeBlock extends AddBlock, DeleteBlock {
+	}
+}
--- a/src/org/tmatesoft/hg/core/HgChangesetHandler.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgChangesetHandler.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -17,6 +17,7 @@
 package org.tmatesoft.hg.core;
 
 import org.tmatesoft.hg.internal.Callback;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Adaptable;
 import org.tmatesoft.hg.util.Path;
 
@@ -32,8 +33,9 @@
 	/**
 	 * @param changeset descriptor of a change, not necessarily a distinct instance each time, {@link HgChangeset#clone() clone()} if need a copy.
 	 * @throws HgCallbackTargetException wrapper for any exception user code may produce 
+	 * @throws HgRuntimeException propagates library issues. <em>Runtime exception</em>
 	 */
-	void cset(HgChangeset changeset) throws HgCallbackTargetException;
+	void cset(HgChangeset changeset) throws HgCallbackTargetException, HgRuntimeException;
 
 
 	/**
--- a/src/org/tmatesoft/hg/core/HgChangesetTreeHandler.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgChangesetTreeHandler.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -20,6 +20,7 @@
 
 import org.tmatesoft.hg.internal.Callback;
 import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Pair;
 
 /**
@@ -36,16 +37,18 @@
 	 * @param entry access to various pieces of information about current tree node. Instances might be 
 	 * reused across calls and shall not be kept by client's code
 	 * @throws HgCallbackTargetException wrapper for any exception user code may produce 
+	 * @throws HgRuntimeException propagates library issues. <em>Runtime exception</em>
 	 */
-	public void treeElement(HgChangesetTreeHandler.TreeElement entry) throws HgCallbackTargetException;
+	public void treeElement(HgChangesetTreeHandler.TreeElement entry) throws HgCallbackTargetException, HgRuntimeException;
 
 	interface TreeElement {
 		/**
 		 * Revision of the revlog being iterated. For example, when walking file history, return value represents file revisions.
 		 * 
 		 * @return revision of the revlog being iterated.
+		 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 		 */
-		public Nodeid fileRevision();
+		public Nodeid fileRevision() throws HgRuntimeException;
 		
 		/**
 		 * File node, provided revlog being iterated is a {@link HgDataFile}; {@link #fileRevision()} 
@@ -55,19 +58,22 @@
 		 * file name for particular revision in the history.
 		 * 
 		 * @return instance of the file being walked, or <code>null</code> if it's not a file but other revlog.
+		 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 		 */
-		public HgDataFile file();
+		public HgDataFile file() throws HgRuntimeException;
 
 		/**
 		 * @return changeset associated with the current file revision
+		 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 		 */
-		public HgChangeset changeset();
+		public HgChangeset changeset() throws HgRuntimeException;
 
 		/**
 		 * Lightweight alternative to {@link #changeset()}, identifies changeset in which current file node has been modified 
 		 * @return changeset {@link Nodeid revision} 
+		 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 		 */
-		public Nodeid changesetRevision();
+		public Nodeid changesetRevision() throws HgRuntimeException;
 
 		/**
 		 * Identifies parent changes, changesets where file/revlog in question was modified prior to change being visited.
@@ -91,25 +97,29 @@
 		 * then this {@link #parents()} call would return pair with single element only, pointing to <code>D</code>
 		 * 
 		 * @return changesets that correspond to parents of the current file node, either pair element may be <code>null</code>.
+		 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 		 */
-		public Pair<HgChangeset, HgChangeset> parents();
+		public Pair<HgChangeset, HgChangeset> parents() throws HgRuntimeException;
 		
 		/**
 		 * Lightweight alternative to {@link #parents()}, give {@link Nodeid nodeids} only
 		 * @return two values, neither is <code>null</code>, use {@link Nodeid#isNull()} to identify parent not set
+		 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 		 */
-		public Pair<Nodeid, Nodeid> parentRevisions();
+		public Pair<Nodeid, Nodeid> parentRevisions() throws HgRuntimeException;
 
 		/**
 		 * Changes that originate from the given change and bear it as their parent. 
 		 * @return collection (possibly empty) of immediate children of the change
+		 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 		 */
-		public Collection<HgChangeset> children();
+		public Collection<HgChangeset> children() throws HgRuntimeException;
 
 		/**
 		 * Lightweight alternative to {@link #children()}.
 		 * @return never <code>null</code>
+		 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 		 */
-		public Collection<Nodeid> childRevisions();
+		public Collection<Nodeid> childRevisions() throws HgRuntimeException;
 	}
 }
\ No newline at end of file
--- a/src/org/tmatesoft/hg/core/HgCheckoutCommand.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgCheckoutCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -28,7 +28,6 @@
 import org.tmatesoft.hg.internal.CsetParamKeeper;
 import org.tmatesoft.hg.internal.DirstateBuilder;
 import org.tmatesoft.hg.internal.EncodingHelper;
-import org.tmatesoft.hg.internal.Experimental;
 import org.tmatesoft.hg.internal.Internals;
 import org.tmatesoft.hg.internal.WorkingDirFileWriter;
 import org.tmatesoft.hg.repo.HgDataFile;
@@ -46,8 +45,6 @@
 import org.tmatesoft.hg.util.ProgressSupport;
 
 /**
- * WORK IN PROGRESS.
- * 
  * Update working directory to specific state, 'hg checkout' counterpart.
  * For the time being, only 'clean' checkout is supported ('hg co --clean')
  * 
@@ -55,7 +52,6 @@
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-@Experimental(reason="Work in progress")
 public class HgCheckoutCommand extends HgAbstractCommand<HgCheckoutCommand>{
 
 	private final HgRepository repo;
--- a/src/org/tmatesoft/hg/core/HgCloneCommand.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgCloneCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -30,7 +30,6 @@
 
 import org.tmatesoft.hg.internal.ByteArrayDataAccess;
 import org.tmatesoft.hg.internal.DataAccess;
-import org.tmatesoft.hg.internal.DataAccessProvider;
 import org.tmatesoft.hg.internal.DataSerializer;
 import org.tmatesoft.hg.internal.DigestHelper;
 import org.tmatesoft.hg.internal.FNCacheFile;
@@ -91,7 +90,6 @@
 	 * @throws HgRepositoryNotFoundException
 	 * @throws HgException
 	 * @throws CancelledException
-	 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 	 */
 	public HgRepository execute() throws HgException, CancelledException {
 		if (destination == null) {
@@ -115,23 +113,27 @@
 		// if cloning remote repo, which can stream and no revision is specified -
 		// can use 'stream_out' wireproto
 		//
-		// pull all changes from the very beginning
-		// XXX consult getContext() if by any chance has a bundle ready, if not, then read and register
-		HgBundle completeChanges = srcRepo.getChanges(Collections.singletonList(NULL));
-		cancel.checkCancelled();
-		WriteDownMate mate = new WriteDownMate(srcRepo.getSessionContext(), destination, progress, cancel);
 		try {
-			// instantiate new repo in the destdir
-			mate.initEmptyRepository();
-			// pull changes
-			completeChanges.inspectAll(mate);
-			mate.checkFailure();
-			mate.complete();
-		} catch (IOException ex) {
-			throw new HgInvalidFileException(getClass().getName(), ex);
-		} finally {
-			completeChanges.unlink();
-			progress.done();
+			// pull all changes from the very beginning
+			// XXX consult getContext() if by any chance has a bundle ready, if not, then read and register
+			HgBundle completeChanges = srcRepo.getChanges(Collections.singletonList(NULL));
+			cancel.checkCancelled();
+			WriteDownMate mate = new WriteDownMate(srcRepo.getSessionContext(), destination, progress, cancel);
+			try {
+				// instantiate new repo in the destdir
+				mate.initEmptyRepository();
+				// pull changes
+				completeChanges.inspectAll(mate);
+				mate.checkFailure();
+				mate.complete();
+			} catch (IOException ex) {
+				throw new HgInvalidFileException(getClass().getName(), ex);
+			} finally {
+				completeChanges.unlink();
+				progress.done();
+			}
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
 		}
 		return new HgLookup().detect(destination);
 	}
@@ -148,6 +150,7 @@
 		private final SessionContext ctx;
 		private final Path.Source pathFactory;
 		private FileOutputStream indexFile;
+		private File currentFile;
 		private String filename; // human-readable name of the file being written, for log/exception purposes 
 
 		private final TreeMap<Nodeid, Integer> changelogIndexes = new TreeMap<Nodeid, Integer>();
@@ -170,7 +173,7 @@
 			ctx = sessionCtx;
 			hgDir = new File(destDir, ".hg");
 			repoInit = new RepoInitializer();
-			repoInit.setRequires(STORE | FNCACHE | DOTENCODE);
+			repoInit.setRequires(REVLOGV1 | STORE | FNCACHE | DOTENCODE);
 			storagePathHelper = repoInit.buildDataFilesHelper(sessionCtx);
 			progressSupport = progress;
 			cancelSupport = cancel;
@@ -178,28 +181,23 @@
 			pathFactory = ctx.getPathFactory();
 		}
 
-		public void initEmptyRepository() throws IOException {
+		public void initEmptyRepository() throws HgIOException, HgRepositoryNotFoundException {
 			repoInit.initEmptyRepository(hgDir);
-			try {
-				assert (repoInit.getRequires() & FNCACHE) != 0;
-				fncacheFile = new FNCacheFile(Internals.getInstance(new HgLookup(ctx).detect(hgDir)));
-			} catch (HgRepositoryNotFoundException ex) {
-				// SHALL NOT HAPPEN provided we initialized empty repository successfully
-				// TODO perhaps, with WriteDownMate moving to a more appropriate location,
-				// we could instantiate HgRepository (or Internals) by other means, without exception?
-				throw new IOException("Can't access fncache for newly created repository", ex);
-			}
+			assert (repoInit.getRequires() & FNCACHE) != 0;
+			// XXX perhaps, with WriteDownMate moving to a more appropriate location,
+			// we could instantiate HgRepository (or Internals) by other means, without exception?
+			fncacheFile = new FNCacheFile(Internals.getInstance(new HgLookup(ctx).detect(hgDir)));
 		}
 
 		public void complete() throws IOException {
 			fncacheFile.write();
 		}
 
-		public void changelogStart() {
+		public void changelogStart() throws HgInvalidControlFileException {
 			try {
 				revlogHeader.offset(0).baseRevision(-1);
 				revisionSequence.clear();
-				indexFile = new FileOutputStream(new File(hgDir, filename = "store/00changelog.i"));
+				indexFile = new FileOutputStream(currentFile = new File(hgDir, filename = "store/00changelog.i"));
 				collectChangelogIndexes = true;
 			} catch (IOException ex) {
 				throw new HgInvalidControlFileException("Failed to write changelog", ex, new File(hgDir, filename));
@@ -207,7 +205,7 @@
 			stopIfCancelled();
 		}
 
-		public void changelogEnd() {
+		public void changelogEnd() throws HgInvalidControlFileException {
 			try {
 				clearPreviousContent();
 				collectChangelogIndexes = false;
@@ -219,18 +217,18 @@
 			stopIfCancelled();
 		}
 
-		public void manifestStart() {
+		public void manifestStart() throws HgInvalidControlFileException {
 			try {
 				revlogHeader.offset(0).baseRevision(-1);
 				revisionSequence.clear();
-				indexFile = new FileOutputStream(new File(hgDir, filename = "store/00manifest.i"));
+				indexFile = new FileOutputStream(currentFile = new File(hgDir, filename = "store/00manifest.i"));
 			} catch (IOException ex) {
 				throw new HgInvalidControlFileException("Failed to write manifest", ex, new File(hgDir, filename));
 			}
 			stopIfCancelled();
 		}
 
-		public void manifestEnd() {
+		public void manifestEnd() throws HgInvalidControlFileException {
 			try {
 				clearPreviousContent();
 				closeIndexFile();
@@ -241,14 +239,13 @@
 			stopIfCancelled();
 		}
 		
-		public void fileStart(String name) {
+		public void fileStart(String name) throws HgInvalidControlFileException {
 			try {
 				revlogHeader.offset(0).baseRevision(-1);
 				revisionSequence.clear();
-				fncacheFile.add(pathFactory.path(name)); 
 				File file = new File(hgDir, filename = storagePathHelper.rewrite(name).toString());
 				file.getParentFile().mkdirs();
-				indexFile = new FileOutputStream(file);
+				indexFile = new FileOutputStream(currentFile = file);
 			} catch (IOException ex) {
 				String m = String.format("Failed to write file %s", filename);
 				throw new HgInvalidControlFileException(m, ex, new File(filename));
@@ -256,8 +253,9 @@
 			stopIfCancelled();
 		}
 
-		public void fileEnd(String name) {
+		public void fileEnd(String name) throws HgInvalidControlFileException {
 			try {
+				fncacheFile.addIndex(pathFactory.path(name)); 
 				clearPreviousContent();
 				closeIndexFile();
 			} catch (IOException ex) {
@@ -279,9 +277,10 @@
 			indexFile.close();
 			indexFile = null;
 			filename = null;
+			currentFile = null;
 		}
 
-		private int knownRevision(Nodeid p) {
+		private int knownRevision(Nodeid p) throws HgInvalidControlFileException {
 			if (p.isNull()) {
 				return -1;
 			} else {
@@ -295,7 +294,7 @@
 			throw new HgInvalidControlFileException(m, null, new File(hgDir, filename)).setRevision(p);
 		}
 		
-		public boolean element(GroupElement ge) {
+		public boolean element(GroupElement ge) throws HgRuntimeException {
 			try {
 				assert indexFile != null;
 				boolean writeComplete = false;
@@ -367,11 +366,15 @@
 				revlogHeader.length(content.length, compressedLen);
 				
 				// XXX may be wise not to create DataSerializer for each revision, but for a file
-				DataAccessProvider.StreamDataSerializer sds = new DataAccessProvider.StreamDataSerializer(ctx.getLog(), indexFile) {
+				DataSerializer sds = new DataSerializer() {
 					@Override
-					public void done() {
-						// override parent behavior not to close stream in use
-					}
+						public void write(byte[] data, int offset, int length) throws HgIOException {
+							try {
+								indexFile.write(data, offset, length);
+							} catch (IOException ex) {
+								throw new HgIOException("Write failure", ex, currentFile);
+							}
+						}
 				};
 				revlogHeader.serialize(sds);
 
@@ -389,9 +392,12 @@
 				revisionSequence.add(node);
 				prevRevContent.done();
 				prevRevContent = new ByteArrayDataAccess(content);
+			} catch (HgIOException ex) {
+				String m = String.format("Failed to write revision %s of file %s", ge.node().shortNotation(), filename);
+				throw new HgInvalidControlFileException(m, ex, currentFile);
 			} catch (IOException ex) {
 				String m = String.format("Failed to write revision %s of file %s", ge.node().shortNotation(), filename);
-				throw new HgInvalidControlFileException(m, ex, new File(hgDir, filename));
+				throw new HgInvalidControlFileException(m, ex, currentFile);
 			}
 			return cancelException == null;
 		}
--- a/src/org/tmatesoft/hg/core/HgCommitCommand.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgCommitCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -19,16 +19,18 @@
 import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
 
-import org.tmatesoft.hg.internal.ByteArrayChannel;
-import org.tmatesoft.hg.internal.Experimental;
+import org.tmatesoft.hg.internal.COWTransaction;
+import org.tmatesoft.hg.internal.CommitFacility;
+import org.tmatesoft.hg.internal.CompleteRepoLock;
 import org.tmatesoft.hg.internal.FileContentSupplier;
-import org.tmatesoft.hg.repo.CommitFacility;
+import org.tmatesoft.hg.internal.Internals;
+import org.tmatesoft.hg.internal.Transaction;
+import org.tmatesoft.hg.internal.WorkingCopyContent;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgInternals;
+import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.repo.HgRepository;
 import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.repo.HgStatusCollector.Record;
@@ -40,14 +42,12 @@
 import org.tmatesoft.hg.util.Path;
 
 /**
- * WORK IN PROGRESS. UNSTABLE API
- * 
  * 'hg commit' counterpart, commit changes
  * 
+ * @since 1.1
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-@Experimental(reason="Work in progress. Unstable API")
 public class HgCommitCommand extends HgAbstractCommand<HgCommitCommand> {
 
 	private final HgRepository repo;
@@ -75,16 +75,22 @@
 	 * Tell if changes in the working directory constitute merge commit. May be invoked prior to (and independently from) {@link #execute()}
 	 * 
 	 * @return <code>true</code> if working directory changes are result of a merge
-	 * @throws HgException subclass thereof to indicate specific issue with the repository
+	 * @throws HgLibraryFailureException to indicate unexpected issue with the repository
+	 * @throws HgException subclass thereof to indicate other specific issue with repository state
 	 */
 	public boolean isMergeCommit() throws HgException {
-		int[] parents = new int[2];
-		detectParentFromDirstate(parents);
-		return parents[0] != NO_REVISION && parents[1] != NO_REVISION; 
+		try {
+			int[] parents = new int[2];
+			detectParentFromDirstate(parents);
+			return parents[0] != NO_REVISION && parents[1] != NO_REVISION;
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
+		}
 	}
 
 	/**
 	 * @throws HgException subclass thereof to indicate specific issue with the command arguments or repository state
+	 * @throws HgRepositoryLockException if failed to lock the repo for modifications
 	 * @throws IOException propagated IO errors from status walker over working directory
 	 * @throws CancelledException if execution of the command was cancelled
 	 */
@@ -92,6 +98,8 @@
 		if (message == null) {
 			throw new HgBadArgumentException("Shall supply commit message", null);
 		}
+		final CompleteRepoLock repoLock = new CompleteRepoLock(repo);
+		repoLock.acquire();
 		try {
 			int[] parentRevs = new int[2];
 			detectParentFromDirstate(parentRevs);
@@ -104,21 +112,18 @@
 				newRevision = Nodeid.NULL;
 				return new Outcome(Kind.Failure, "nothing to add");
 			}
-			CommitFacility cf = new CommitFacility(repo, parentRevs[0], parentRevs[1]);
+			CommitFacility cf = new CommitFacility(Internals.getInstance(repo), parentRevs[0], parentRevs[1]);
 			for (Path m : status.getModified()) {
 				HgDataFile df = repo.getFileNode(m);
 				cf.add(df, new WorkingCopyContent(df));
 			}
-			ArrayList<FileContentSupplier> toClear = new ArrayList<FileContentSupplier>();
 			for (Path a : status.getAdded()) {
 				HgDataFile df = repo.getFileNode(a); // TODO need smth explicit, like repo.createNewFileNode(Path) here
 				// XXX might be an interesting exercise not to demand a content supplier, but instead return a "DataRequester"
 				// object, that would indicate interest in data, and this code would "push" it to requester, so that any exception
 				// is handled here, right away, and won't need to travel supplier and CommitFacility. (although try/catch inside
 				// supplier.read (with empty throws declaration)
-				FileContentSupplier fcs = new FileContentSupplier(repo, a);
-				cf.add(df, fcs);
-				toClear.add(fcs);
+				cf.add(df, new FileContentSupplier(repo, a));
 			}
 			for (Path r : status.getRemoved()) {
 				HgDataFile df = repo.getFileNode(r); 
@@ -126,14 +131,23 @@
 			}
 			cf.branch(detectBranch());
 			cf.user(detectUser());
-			newRevision = cf.commit(message);
-			// TODO toClear list is awful
-			for (FileContentSupplier fcs : toClear) {
-				fcs.done();
+			Transaction.Factory trFactory = new COWTransaction.Factory();
+			Transaction tr = trFactory.create(repo);
+			try {
+				newRevision = cf.commit(message, tr);
+				tr.commit();
+			} catch (RuntimeException ex) {
+				tr.rollback();
+				throw ex;
+			} catch (HgException ex) {
+				tr.rollback();
+				throw ex;
 			}
 			return new Outcome(Kind.Success, "Commit ok");
 		} catch (HgRuntimeException ex) {
 			throw new HgLibraryFailureException(ex);
+		} finally {
+			repoLock.release();
 		}
 	}
 
@@ -144,7 +158,7 @@
 		return newRevision;
 	}
 
-	private String detectBranch() {
+	private String detectBranch() throws HgInvalidControlFileException {
 		return repo.getWorkingCopyBranchName();
 	}
 	
@@ -156,50 +170,10 @@
 		return new HgInternals(repo).getNextCommitUsername();
 	}
 
-	private void detectParentFromDirstate(int[] parents) {
+	private void detectParentFromDirstate(int[] parents) throws HgRuntimeException {
 		Pair<Nodeid, Nodeid> pn = repo.getWorkingCopyParents();
 		HgChangelog clog = repo.getChangelog();
 		parents[0] = pn.first().isNull() ? NO_REVISION : clog.getRevisionIndex(pn.first());
 		parents[1] = pn.second().isNull() ? NO_REVISION : clog.getRevisionIndex(pn.second());
 	}
-
-	private static class WorkingCopyContent implements CommitFacility.ByteDataSupplier {
-		private final HgDataFile file;
-		private ByteBuffer fileContent; 
-
-		public WorkingCopyContent(HgDataFile dataFile) {
-			file = dataFile;
-			if (!dataFile.exists()) {
-				throw new IllegalArgumentException();
-			}
-		}
-
-		public int read(ByteBuffer dst) {
-			if (fileContent == null) {
-				try {
-					ByteArrayChannel sink = new ByteArrayChannel();
-					// TODO desperately need partial read here
-					file.workingCopy(sink);
-					fileContent = ByteBuffer.wrap(sink.toArray());
-				} catch (CancelledException ex) {
-					// ByteArrayChannel doesn't cancel, never happens
-					assert false;
-				}
-			}
-			if (fileContent.remaining() == 0) {
-				return -1;
-			}
-			int dstCap = dst.remaining();
-			if (fileContent.remaining() > dstCap) {
-				// save actual limit, and pretend we've got exactly desired amount of bytes
-				final int lim = fileContent.limit();
-				fileContent.limit(dstCap);
-				dst.put(fileContent);
-				fileContent.limit(lim);
-			} else {
-				dst.put(fileContent);
-			}
-			return dstCap - dst.remaining();
-		}
-	}
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/core/HgDiffCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.core;
+
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
+import static org.tmatesoft.hg.repo.HgRepository.TIP;
+
+import org.tmatesoft.hg.internal.BlameHelper;
+import org.tmatesoft.hg.internal.CsetParamKeeper;
+import org.tmatesoft.hg.internal.FileHistory;
+import org.tmatesoft.hg.internal.FileRevisionHistoryChunk;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.CancelSupport;
+import org.tmatesoft.hg.util.CancelledException;
+import org.tmatesoft.hg.util.Path;
+import org.tmatesoft.hg.util.ProgressSupport;
+
+/**
+ * 'hg diff' counterpart, with similar, although not identical, functionality.
+ * Despite both 'hg diff' and this command are diff-based, implementation
+ * peculiarities may lead to slightly different diff results. Either is valid
+ * as there's no strict diff specification. 
+ * 
+ * <p>
+ * <strong>Note</strong>, at the moment this command annotates single file only. Diff over
+ * complete repository (all the file changed in a given changeset) might
+ * be added later.
+ * 
+ * @since 1.1
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class HgDiffCommand extends HgAbstractCommand<HgDiffCommand> {
+
+	private final HgRepository repo;
+	private HgDataFile df;
+	private final CsetParamKeeper clogRevIndexStart, clogRevIndexEnd;
+	private HgIterateDirection iterateDirection = HgIterateDirection.NewToOld;
+
+	public HgDiffCommand(HgRepository hgRepo) {
+		repo = hgRepo;
+		clogRevIndexStart = new CsetParamKeeper(hgRepo);
+		clogRevIndexEnd = new CsetParamKeeper(hgRepo);
+	}
+	
+	public HgDiffCommand file(Path file) {
+		df = repo.getFileNode(file);
+		return this;
+	}
+
+	/**
+	 * Selects the file which history to blame, mandatory.
+	 * 
+	 * @param file repository file
+	 * @return <code>this</code> for convenience
+	 */
+	public HgDiffCommand file(HgDataFile file) {
+		df = file;
+		return this;
+	}
+
+	/**
+	 * Select range of file's history for {@link #executeDiff(HgBlameInspector)}
+	 * and {@link #executeAnnotate(HgBlameInspector)}.
+	 * <p>
+	 * {@link #executeDiff(HgBlameInspector) diff} uses these as revisions to diff against each other, while 
+	 * {@link #executeAnnotate(HgBlameInspector) annotate} walks the range. 
+	 * 
+	 * @param changelogRevIndexStart index of changelog revision, left range boundary
+	 * @param changelogRevIndexEnd index of changelog revision, right range boundary
+	 * @return <code>this</code> for convenience
+	 * @throws HgBadArgumentException if failed to find any of supplied changeset 
+	 */
+	public HgDiffCommand range(int changelogRevIndexStart, int changelogRevIndexEnd) throws HgBadArgumentException {
+		clogRevIndexStart.set(changelogRevIndexStart);
+		clogRevIndexEnd.set(changelogRevIndexEnd);
+		return this;
+	}
+	
+	/**
+	 * Selects revision for {@link #executeParentsAnnotate(HgBlameInspector)}, the one 
+	 * to diff against its parents. 
+	 * 
+	 * Besides, it is handy when range of interest spans up to the very beginning of the file history 
+	 * (and thus is equivalent to <code>range(0, changelogRevIndex)</code>)
+	 * 
+	 * @param changelogRevIndex index of changelog revision
+	 * @return <code>this</code> for convenience
+	 * @throws HgBadArgumentException if failed to find supplied changeset 
+	 */
+	public HgDiffCommand changeset(int changelogRevIndex) throws HgBadArgumentException {
+		clogRevIndexStart.set(0);
+		clogRevIndexEnd.set(changelogRevIndex);
+		return this;
+	}
+
+	/**
+	 * Revision differences are reported in selected order when 
+	 * annotating {@link #range(int, int) range} of changesets with
+	 * {@link #executeAnnotate(HgBlameInspector)}.
+	 * <p>
+	 * This method doesn't affect {@link #executeParentsAnnotate(HgBlameInspector)} and
+	 * {@link #executeDiff(HgBlameInspector)}
+	 * 
+	 * @param order desired iteration order 
+	 * @return <code>this</code> for convenience
+	 */
+	public HgDiffCommand order(HgIterateDirection order) {
+		iterateDirection = order;
+		return this;
+	}
+	
+	/**
+	 * Diff two revisions selected with {@link #range(int, int)} against each other.
+	 * <p>mimics 'hg diff -r clogRevIndex1 -r clogRevIndex2'
+	 * 
+ 	 * @throws HgCallbackTargetException propagated exception from the handler
+	 * @throws CancelledException if execution of the command was cancelled
+	 * @throws HgException subclass thereof to indicate specific issue with the command arguments or repository state
+	 */
+	public void executeDiff(HgBlameInspector insp) throws HgCallbackTargetException, CancelledException, HgException {
+		checkFile();
+		final ProgressSupport progress = getProgressSupport(insp);
+		progress.start(2);
+		try {
+			final CancelSupport cancel = getCancelSupport(insp, true);
+			int fileRevIndex1 = fileRevIndex(df, clogRevIndexStart.get());
+			int fileRevIndex2 = fileRevIndex(df, clogRevIndexEnd.get());
+			BlameHelper bh = new BlameHelper(insp);
+			bh.prepare(df, clogRevIndexStart.get(), clogRevIndexEnd.get());
+			progress.worked(1);
+			cancel.checkCancelled();
+			bh.diff(fileRevIndex1, clogRevIndexStart.get(), fileRevIndex2, clogRevIndexEnd.get());
+			progress.worked(1);
+			cancel.checkCancelled();
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
+		} finally {
+			progress.done();
+		}
+	}
+
+	/**
+	 * Walk file history {@link #range(int, int) range} and report changes (diff) for each revision
+	 * 
+ 	 * @throws HgCallbackTargetException propagated exception from the handler
+	 * @throws CancelledException if execution of the command was cancelled
+	 * @throws HgException subclass thereof to indicate specific issue with the command arguments or repository state
+	 */
+	public void executeAnnotate(HgBlameInspector insp) throws HgCallbackTargetException, CancelledException, HgException {
+		checkFile();
+		ProgressSupport progress = null;
+		try {
+			if (!df.exists()) {
+				return;
+			}
+			final CancelSupport cancel = getCancelSupport(insp, true);
+			BlameHelper bh = new BlameHelper(insp);
+			FileHistory fileHistory = bh.prepare(df, clogRevIndexStart.get(), clogRevIndexEnd.get());
+			//
+			cancel.checkCancelled();
+			int totalWork = 0;
+			for (FileRevisionHistoryChunk fhc : fileHistory.iterate(iterateDirection)) {
+				totalWork += fhc.revisionCount();
+			}
+			progress = getProgressSupport(insp);
+			progress.start(totalWork + 1);
+			progress.worked(1); // BlameHelper.prepare
+			//
+			int[] fileClogParentRevs = new int[2];
+			int[] fileParentRevs = new int[2];
+			for (FileRevisionHistoryChunk fhc : fileHistory.iterate(iterateDirection)) {
+				for (int fri : fhc.fileRevisions(iterateDirection)) {
+					int clogRevIndex = fhc.changeset(fri);
+					// the way we built fileHistory ensures we won't walk past [changelogRevIndexStart..changelogRevIndexEnd]
+					assert clogRevIndex >= clogRevIndexStart.get();
+					assert clogRevIndex <= clogRevIndexEnd.get();
+					fhc.fillFileParents(fri, fileParentRevs);
+					fhc.fillCsetParents(fri, fileClogParentRevs);
+					bh.annotateChange(fri, clogRevIndex, fileParentRevs, fileClogParentRevs);
+					progress.worked(1);
+					cancel.checkCancelled();
+				}
+			}
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
+		} finally {
+			if (progress != null) {
+				progress.done();
+			}
+		}
+	}
+
+	/**
+	 * Annotates changes of the file against its parent(s). 
+	 * Unlike {@link #annotate(HgDataFile, int, Inspector, HgIterateDirection)}, doesn't
+	 * walk file history, looks at the specified revision only. Handles both parents (if merge revision).
+	 * 
+ 	 * @throws HgCallbackTargetException propagated exception from the handler
+	 * @throws CancelledException if execution of the command was cancelled
+	 * @throws HgException subclass thereof to indicate specific issue with the command arguments or repository state
+	 */
+	public void executeParentsAnnotate(HgBlameInspector insp) throws HgCallbackTargetException, CancelledException, HgException {
+		checkFile();
+		final ProgressSupport progress = getProgressSupport(insp);
+		progress.start(2);
+		try {
+			final CancelSupport cancel = getCancelSupport(insp, true);
+			int changelogRevisionIndex = clogRevIndexEnd.get();
+			// TODO detect if file is text/binary (e.g. looking for chars < ' ' and not \t\r\n\f
+			int fileRevIndex = fileRevIndex(df, changelogRevisionIndex);
+			int[] fileRevParents = new int[2];
+			df.parents(fileRevIndex, fileRevParents, null, null);
+			if (changelogRevisionIndex == TIP) {
+				changelogRevisionIndex = df.getChangesetRevisionIndex(fileRevIndex);
+			}
+			int[] fileClogParentRevs = new int[2];
+			fileClogParentRevs[0] = fileRevParents[0] == NO_REVISION ? NO_REVISION : df.getChangesetRevisionIndex(fileRevParents[0]);
+			fileClogParentRevs[1] = fileRevParents[1] == NO_REVISION ? NO_REVISION : df.getChangesetRevisionIndex(fileRevParents[1]);
+			BlameHelper bh = new BlameHelper(insp);
+			int clogIndexStart = fileClogParentRevs[0] == NO_REVISION ? (fileClogParentRevs[1] == NO_REVISION ? 0 : fileClogParentRevs[1]) : fileClogParentRevs[0];
+			bh.prepare(df, clogIndexStart, changelogRevisionIndex);
+			progress.worked(1);
+			cancel.checkCancelled();
+			bh.annotateChange(fileRevIndex, changelogRevisionIndex, fileRevParents, fileClogParentRevs);
+			progress.worked(1);
+			cancel.checkCancelled();
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
+		} finally {
+			progress.done();
+		}
+	}
+
+	private void checkFile() {
+		if (df == null) {
+			throw new IllegalArgumentException("File is not set");
+		}
+	}
+
+	private static int fileRevIndex(HgDataFile df, int csetRevIndex) throws HgRuntimeException {
+		Nodeid fileRev = df.getRepo().getManifest().getFileRevision(csetRevIndex, df.getPath());
+		return df.getRevisionIndex(fileRev);
+	}
+}
--- a/src/org/tmatesoft/hg/core/HgFileRenameHandlerMixin.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgFileRenameHandlerMixin.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 TMate Software Ltd
+ * Copyright (c) 2012-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,6 +16,7 @@
  */
 package org.tmatesoft.hg.core;
 
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Adaptable;
 
 /**
@@ -34,6 +35,7 @@
 
 	/**
 	 * @throws HgCallbackTargetException wrapper object for any exception user code may produce 
+	 * @throws HgRuntimeException propagates library issues. <em>Runtime exception</em>
 	 */
-	void copy(HgFileRevision from, HgFileRevision to) throws HgCallbackTargetException;
+	void copy(HgFileRevision from, HgFileRevision to) throws HgCallbackTargetException, HgRuntimeException;
 }
--- a/src/org/tmatesoft/hg/core/HgFileRevision.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgFileRevision.java	Wed Jul 10 11:48:55 2013 +0200
@@ -107,7 +107,11 @@
 		return flags;
 	}
 
-	public boolean wasCopied() throws HgException {
+	/**
+	 * @return <code>true</code> if this file revision was created as a result of a copy/rename
+	 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
+	 */
+	public boolean wasCopied() throws HgRuntimeException {
 		if (isCopy == null) {
 			checkCopy();
 		}
@@ -115,8 +119,9 @@
 	}
 	/**
 	 * @return <code>null</code> if {@link #wasCopied()} is <code>false</code>, name of the copy source otherwise.
+	 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 	 */
-	public Path getOriginIfCopy() throws HgException {
+	public Path getOriginIfCopy() throws HgRuntimeException {
 		if (wasCopied()) {
 			return origin;
 		}
@@ -145,7 +150,13 @@
 		return parents;
 	}
 
-	public void putContentTo(ByteChannel sink) throws HgException, CancelledException {
+	/**
+	 * Pipe content of this file revision into the sink
+	 * @param sink accepts file revision content
+	 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
+	 * @throws CancelledException if execution of the operation was cancelled
+	 */
+	public void putContentTo(ByteChannel sink) throws HgRuntimeException, CancelledException {
 		HgDataFile fn = repo.getFileNode(path);
 		int revisionIndex = fn.getRevisionIndex(revision);
 		fn.contentWithFilters(revisionIndex, sink);
@@ -156,7 +167,7 @@
 		return String.format("HgFileRevision(%s, %s)", getPath().toString(), revision.shortNotation());
 	}
 
-	private void checkCopy() throws HgException {
+	private void checkCopy() throws HgRuntimeException {
 		HgDataFile fn = repo.getFileNode(path);
 		if (fn.isCopy()) {
 			if (fn.getRevision(0).equals(revision)) {
--- a/src/org/tmatesoft/hg/core/HgIOException.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgIOException.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 TMate Software Ltd
+ * Copyright (c) 2012-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -40,11 +40,14 @@
 	 * @param cause root cause for the error, likely {@link IOException} or its subclass, but not necessarily, and may be omitted. 
 	 * @param troubleFile file we tried to deal with, never <code>null</code>
 	 */
-	public HgIOException(String message, Exception cause, File troubleFile) {
+	public HgIOException(String message, Throwable cause, File troubleFile) {
 		super(message, cause);
 		file = troubleFile;
 	}
 
+	/**
+	 * @return file that causes trouble, may be <code>null</code>
+	 */
 	public File getFile() {
 		return file;
 	}
--- a/src/org/tmatesoft/hg/core/HgIncomingCommand.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgIncomingCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -31,12 +31,11 @@
 import org.tmatesoft.hg.repo.HgBundle;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgChangelog.RawChangeset;
-import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
+import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.repo.HgRemoteRepository;
 import org.tmatesoft.hg.repo.HgRepository;
 import org.tmatesoft.hg.repo.HgRuntimeException;
-import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.ProgressSupport;
 
@@ -138,10 +137,10 @@
 		if (handler == null) {
 			throw new IllegalArgumentException("Delegate can't be null");
 		}
-		final List<Nodeid> common = getCommon();
-		HgBundle changegroup = remoteRepo.getChanges(common);
 		final ProgressSupport ps = getProgressSupport(handler);
 		try {
+			final List<Nodeid> common = getCommon();
+			HgBundle changegroup = remoteRepo.getChanges(common);
 			final ChangesetTransformer transformer = new ChangesetTransformer(localRepo, handler, getParentHelper(), ps, getCancelSupport(handler, true));
 			transformer.limitBranches(branches);
 			changegroup.changes(localRepo, new HgChangelog.Inspector() {
@@ -154,7 +153,7 @@
 					localIndex = localRepo.getChangelog().getRevisionCount();
 				}
 				
-				public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) {
+				public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) throws HgRuntimeException {
 					if (parentHelper.knownNode(nodeid)) {
 						if (!common.contains(nodeid)) {
 							throw new HgInvalidStateException("Bundle shall not report known nodes other than roots we've supplied");
@@ -172,7 +171,7 @@
 		}
 	}
 
-	private RepositoryComparator getComparator() throws HgInvalidControlFileException, CancelledException {
+	private RepositoryComparator getComparator() throws CancelledException, HgRuntimeException {
 		if (remoteRepo == null) {
 			throw new IllegalArgumentException("Shall specify remote repository to compare against", null);
 		}
@@ -183,7 +182,7 @@
 		return comparator;
 	}
 	
-	private HgParentChildMap<HgChangelog> getParentHelper() throws HgInvalidControlFileException {
+	private HgParentChildMap<HgChangelog> getParentHelper() throws HgRuntimeException {
 		if (parentHelper == null) {
 			parentHelper = new HgParentChildMap<HgChangelog>(localRepo.getChangelog());
 			parentHelper.init();
@@ -191,14 +190,14 @@
 		return parentHelper;
 	}
 	
-	private List<BranchChain> getMissingBranches() throws HgRemoteConnectionException, HgInvalidControlFileException, CancelledException {
+	private List<BranchChain> getMissingBranches() throws HgRemoteConnectionException, CancelledException, HgRuntimeException {
 		if (missingBranches == null) {
 			missingBranches = getComparator().calculateMissingBranches();
 		}
 		return missingBranches;
 	}
 
-	private List<Nodeid> getCommon() throws HgRemoteConnectionException, HgInvalidControlFileException, CancelledException {
+	private List<Nodeid> getCommon() throws HgRemoteConnectionException, CancelledException, HgRuntimeException {
 //		return getComparator(context).getCommon();
 		final LinkedHashSet<Nodeid> common = new LinkedHashSet<Nodeid>();
 		// XXX common can be obtained from repoCompare, but at the moment it would almost duplicate work of calculateMissingBranches
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/core/HgInitCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.core;
+
+import static org.tmatesoft.hg.internal.RequiresFile.*;
+
+import java.io.File;
+
+import org.tmatesoft.hg.internal.RepoInitializer;
+import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.util.CancelledException;
+
+/**
+ * Initialize empty local repository. 
+ * <p>
+ * Two predefined alternatives are available, {@link #revlogV0() old} and {@link #revlogV1() new} mercurial format respectively.
+ * <p>
+ * Specific requirements may be turned off/on as needed if you know what you're doing.
+ * 
+ * @see http://mercurial.selenic.com/wiki/RequiresFile
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class HgInitCommand extends HgAbstractCommand<HgInitCommand> {
+	private static final int V1_DEFAULT = REVLOGV1 | STORE | FNCACHE | DOTENCODE;
+	
+	private final HgLookup hgLookup;
+	private File location;
+	private int requiresFlags;
+	
+	public HgInitCommand() {
+		this(null);
+	}
+
+	public HgInitCommand(HgLookup lookupEnv) {
+		hgLookup = lookupEnv;
+		requiresFlags = V1_DEFAULT;
+	}
+	
+	public HgInitCommand location(File repoLoc) {
+		location = repoLoc;
+		return this;
+	}
+	
+	public HgInitCommand revlogV0() {
+		requiresFlags = REVLOGV0;
+		return this;
+	}
+	
+	public HgInitCommand revlogV1() {
+		requiresFlags = V1_DEFAULT;
+		return this;
+	}
+	
+	public HgInitCommand store(boolean enable) {
+		return switchFlag(STORE, enable);
+	}
+	
+	public HgInitCommand fncache(boolean enable) {
+		return switchFlag(FNCACHE, enable);
+	}
+	
+	public HgInitCommand dotencode(boolean enable) {
+		return switchFlag(DOTENCODE, enable);
+	}
+
+	public HgRepository execute() throws HgRepositoryNotFoundException, HgException, CancelledException {
+		if (location == null) {
+			throw new IllegalArgumentException();
+		}
+		File repoDir;
+		if (".hg".equals(location.getName())) {
+			repoDir = location;
+		} else {
+			repoDir = new File(location, ".hg");
+		}
+		new RepoInitializer().setRequires(requiresFlags).initEmptyRepository(repoDir);
+		return getNewRepository();
+	}
+	
+	public HgRepository getNewRepository() throws HgRepositoryNotFoundException {
+		HgLookup l = hgLookup == null ? new HgLookup() : hgLookup;
+		return l.detect(location);
+	}
+	
+	private HgInitCommand switchFlag(int flag, boolean enable) {
+		if (enable) {
+			requiresFlags |= flag;
+		} else {
+			requiresFlags &= ~flag;
+		}
+		return this;
+	}
+}
--- a/src/org/tmatesoft/hg/core/HgLogCommand.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgLogCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -30,7 +30,6 @@
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.ListIterator;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -42,10 +41,10 @@
 import org.tmatesoft.hg.internal.Internals;
 import org.tmatesoft.hg.internal.Lifecycle;
 import org.tmatesoft.hg.internal.LifecycleProxy;
+import org.tmatesoft.hg.internal.ReverseIterator;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgChangelog.RawChangeset;
 import org.tmatesoft.hg.repo.HgDataFile;
-import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
 import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.repo.HgRepository;
@@ -297,20 +296,20 @@
 		if (csetTransform != null) {
 			throw new ConcurrentModificationException();
 		}
-		if (repo.getChangelog().getRevisionCount() == 0) {
-			return;
-		}
-		final int lastCset = endRev == TIP ? repo.getChangelog().getLastRevision() : endRev;
-		// XXX pretty much like HgInternals.checkRevlogRange
-		if (lastCset < 0 || lastCset > repo.getChangelog().getLastRevision()) {
-			throw new HgBadArgumentException(String.format("Bad value %d for end revision", lastCset), null);
-		}
-		if (startRev < 0 || startRev > lastCset) {
-			throw new HgBadArgumentException(String.format("Bad value %d for start revision for range [%1$d..%d]", startRev, lastCset), null);
-		}
 		final ProgressSupport progressHelper = getProgressSupport(handler);
-		final int BATCH_SIZE = 100;
 		try {
+			if (repo.getChangelog().getRevisionCount() == 0) {
+				return;
+			}
+			final int lastCset = endRev == TIP ? repo.getChangelog().getLastRevision() : endRev;
+			// XXX pretty much like HgInternals.checkRevlogRange
+			if (lastCset < 0 || lastCset > repo.getChangelog().getLastRevision()) {
+				throw new HgBadArgumentException(String.format("Bad value %d for end revision", lastCset), null);
+			}
+			if (startRev < 0 || startRev > lastCset) {
+				throw new HgBadArgumentException(String.format("Bad value %d for start revision for range [%1$d..%d]", startRev, lastCset), null);
+			}
+			final int BATCH_SIZE = 100;
 			count = 0;
 			HgParentChildMap<HgChangelog> pw = getParentHelper(file == null); // leave it uninitialized unless we iterate whole repo
 			// ChangesetTransfrom creates a blank PathPool, and #file(String, boolean) above 
@@ -446,12 +445,7 @@
 		}
 		
 		public Iterable<BatchRecord> iterate(final boolean reverse) {
-			return new Iterable<BatchRecord>() {
-				
-				public Iterator<BatchRecord> iterator() {
-					return reverse ? new ReverseIterator<BatchRecord>(batch) : batch.iterator();
-				}
-			};
+			return reverse ? ReverseIterator.reversed(batch) : batch;
 		}
 		
 		// alternative would be dispatch(HgChangelog.Inspector) and dispatchReverse()
@@ -522,65 +516,51 @@
 		final CancelSupport cancelHelper = getCancelSupport(handler, true);
 		final HgFileRenameHandlerMixin renameHandler = Adaptable.Factory.getAdapter(handler, HgFileRenameHandlerMixin.class, null);
 
-		
-		// XXX rename. dispatcher is not a proper name (most of the job done - managing history chunk interconnection)
-		final HandlerDispatcher dispatcher = new HandlerDispatcher() {
-
-			@Override
-			protected void once(HistoryNode n) throws HgCallbackTargetException, CancelledException {
-				handler.treeElement(ei.init(n, currentFileNode));
-				cancelHelper.checkCancelled();
-			}
-		};
+		try {
 
-		// renamed files in the queue are placed with respect to #iterateDirection
-		// i.e. if we iterate from new to old, recent filenames come first
-		FileRenameQueueBuilder frqBuilder = new FileRenameQueueBuilder();
-		List<Pair<HgDataFile, Nodeid>> fileRenamesQueue = frqBuilder.buildFileRenamesQueue();
-		// XXX perhaps, makes sense to look at selected file's revision when followAncestry is true
-		// to ensure file we attempt to trace is in the WC's parent. Native hg aborts if not.
-		progressHelper.start(4 * fileRenamesQueue.size());
-		for (int namesIndex = 0, renamesQueueSize = fileRenamesQueue.size(); namesIndex < renamesQueueSize; namesIndex++) {
- 
-			final Pair<HgDataFile, Nodeid> renameInfo = fileRenamesQueue.get(namesIndex);
-			dispatcher.prepare(progressHelper, renameInfo);
-			cancelHelper.checkCancelled();
-			if (namesIndex > 0) {
-				dispatcher.connectWithLastJunctionPoint(renameInfo, fileRenamesQueue.get(namesIndex - 1));
-			}
-			if (namesIndex + 1 < renamesQueueSize) {
-				// there's at least one more name we are going to look at
-				dispatcher.updateJunctionPoint(renameInfo, fileRenamesQueue.get(namesIndex+1), renameHandler != null);
-			} else {
-				dispatcher.clearJunctionPoint();
-			}
-			dispatcher.dispatchAllChanges();
-			if (renameHandler != null && namesIndex + 1 < renamesQueueSize) {
-				dispatcher.reportRenames(renameHandler);
-			}
-		} // for fileRenamesQueue;
-		frqBuilder.reportRenameIfNotInQueue(fileRenamesQueue, renameHandler);
+			// XXX rename. dispatcher is not a proper name (most of the job done - managing history chunk interconnection)
+			final HandlerDispatcher dispatcher = new HandlerDispatcher() {
+	
+				@Override
+				protected void once(HistoryNode n) throws HgCallbackTargetException, CancelledException, HgRuntimeException {
+					handler.treeElement(ei.init(n, currentFileNode));
+					cancelHelper.checkCancelled();
+				}
+			};
+	
+			// renamed files in the queue are placed with respect to #iterateDirection
+			// i.e. if we iterate from new to old, recent filenames come first
+			FileRenameQueueBuilder frqBuilder = new FileRenameQueueBuilder();
+			List<Pair<HgDataFile, Nodeid>> fileRenamesQueue = frqBuilder.buildFileRenamesQueue();
+			// XXX perhaps, makes sense to look at selected file's revision when followAncestry is true
+			// to ensure file we attempt to trace is in the WC's parent. Native hg aborts if not.
+			progressHelper.start(4 * fileRenamesQueue.size());
+			for (int namesIndex = 0, renamesQueueSize = fileRenamesQueue.size(); namesIndex < renamesQueueSize; namesIndex++) {
+	 
+				final Pair<HgDataFile, Nodeid> renameInfo = fileRenamesQueue.get(namesIndex);
+				dispatcher.prepare(progressHelper, renameInfo);
+				cancelHelper.checkCancelled();
+				if (namesIndex > 0) {
+					dispatcher.connectWithLastJunctionPoint(renameInfo, fileRenamesQueue.get(namesIndex - 1));
+				}
+				if (namesIndex + 1 < renamesQueueSize) {
+					// there's at least one more name we are going to look at
+					dispatcher.updateJunctionPoint(renameInfo, fileRenamesQueue.get(namesIndex+1), renameHandler != null);
+				} else {
+					dispatcher.clearJunctionPoint();
+				}
+				dispatcher.dispatchAllChanges();
+				if (renameHandler != null && namesIndex + 1 < renamesQueueSize) {
+					dispatcher.reportRenames(renameHandler);
+				}
+			} // for fileRenamesQueue;
+			frqBuilder.reportRenameIfNotInQueue(fileRenamesQueue, renameHandler);
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
+		}
 		progressHelper.done();
 	}
 	
-	private static class ReverseIterator<E> implements Iterator<E> {
-		private final ListIterator<E> listIterator;
-		
-		public ReverseIterator(List<E> list) {
-			listIterator = list.listIterator(list.size());
-		}
-
-		public boolean hasNext() {
-			return listIterator.hasPrevious();
-		}
-		public E next() {
-			return listIterator.previous();
-		}
-		public void remove() {
-			listIterator.remove();
-		}
-	}
-
 	/**
 	 * Utility to build sequence of file renames
 	 */
@@ -601,8 +581,9 @@
 		 * and possibly reuse this functionality
 		 * 
 		 * @return list of file renames, ordered with respect to {@link #iterateDirection}
+		 * @throws HgRuntimeException 
 		 */
-		public List<Pair<HgDataFile, Nodeid>> buildFileRenamesQueue() throws HgPathNotFoundException {
+		public List<Pair<HgDataFile, Nodeid>> buildFileRenamesQueue() throws HgPathNotFoundException, HgRuntimeException {
 			LinkedList<Pair<HgDataFile, Nodeid>> rv = new LinkedList<Pair<HgDataFile, Nodeid>>();
 			Nodeid startRev = null;
 			HgDataFile fileNode = repo.getFileNode(file);
@@ -636,11 +617,11 @@
 			return rv;
 		}
 		
-		public boolean hasOrigin(Pair<HgDataFile, Nodeid> p) {
+		public boolean hasOrigin(Pair<HgDataFile, Nodeid> p) throws HgRuntimeException {
 			return p.first().isCopy();
 		}
 
-		public Pair<HgDataFile, Nodeid> origin(Pair<HgDataFile, Nodeid> p) {
+		public Pair<HgDataFile, Nodeid> origin(Pair<HgDataFile, Nodeid> p) throws HgRuntimeException {
 			HgDataFile fileNode = p.first();
 			assert fileNode.isCopy();
 			Path fp = fileNode.getCopySourceName();
@@ -656,7 +637,7 @@
 		 * @param queue value from {@link #buildFileRenamesQueue()}
 		 * @param renameHandler may be <code>null</code>
 		 */
-		public void reportRenameIfNotInQueue(List<Pair<HgDataFile, Nodeid>> queue, HgFileRenameHandlerMixin renameHandler) throws HgCallbackTargetException {
+		public void reportRenameIfNotInQueue(List<Pair<HgDataFile, Nodeid>> queue, HgFileRenameHandlerMixin renameHandler) throws HgCallbackTargetException, HgRuntimeException {
 			if (renameHandler != null && !followRenames) {
 				// If followRenames is true, all the historical names were in the queue and are processed already.
 				// Hence, shall process origin explicitly only when renameHandler is present but followRenames is not requested.
@@ -700,12 +681,12 @@
 			completeHistory[revisionNumber] = new HistoryNode(commitRevisions[revisionNumber], revision, p1, p2);
 		}
 		
-		HistoryNode one(HgDataFile fileNode, Nodeid fileRevision) throws HgInvalidControlFileException {
+		HistoryNode one(HgDataFile fileNode, Nodeid fileRevision) throws HgRuntimeException {
 			int fileRevIndexToVisit = fileNode.getRevisionIndex(fileRevision);
 			return one(fileNode, fileRevIndexToVisit);
 		}
 
-		HistoryNode one(HgDataFile fileNode, int fileRevIndexToVisit) throws HgInvalidControlFileException {
+		HistoryNode one(HgDataFile fileNode, int fileRevIndexToVisit) throws HgRuntimeException {
 			resultHistory = null;
 			if (fileRevIndexToVisit == HgRepository.TIP) {
 				fileRevIndexToVisit = fileNode.getLastRevision();
@@ -731,7 +712,7 @@
 		 * @return list of history elements, from oldest to newest. In case {@link #followAncestry} is <code>true</code>, the list
 		 * is modifiable (to further augment with last/first elements of renamed file histories)
 		 */
-		List<HistoryNode> go(HgDataFile fileNode, Nodeid fileLastRevisionToVisit) throws HgInvalidControlFileException {
+		List<HistoryNode> go(HgDataFile fileNode, Nodeid fileLastRevisionToVisit) throws HgRuntimeException {
 			resultHistory = null;
 			int fileLastRevIndexToVisit = fileLastRevisionToVisit == null ? fileNode.getLastRevision() : fileNode.getRevisionIndex(fileLastRevisionToVisit);
 			completeHistory = new HistoryNode[fileLastRevIndexToVisit+1];
@@ -828,7 +809,7 @@
 		private HgFileRevision copiedFrom, copiedTo; 
 
 		// parentProgress shall be initialized with 4 XXX refactor all this stuff with parentProgress 
-		public void prepare(ProgressSupport parentProgress, Pair<HgDataFile, Nodeid> renameInfo) {
+		public void prepare(ProgressSupport parentProgress, Pair<HgDataFile, Nodeid> renameInfo) throws HgRuntimeException {
 			// if we don't followAncestry, take complete history
 			// XXX treeBuildInspector knows followAncestry, perhaps the logic 
 			// whether to take specific revision or the last one shall be there?
@@ -857,7 +838,7 @@
 			switchTo(renameInfo.first());
 		}
 		
-		public void updateJunctionPoint(Pair<HgDataFile, Nodeid> curRename, Pair<HgDataFile, Nodeid> nextRename, boolean needCopyFromTo) {
+		public void updateJunctionPoint(Pair<HgDataFile, Nodeid> curRename, Pair<HgDataFile, Nodeid> nextRename, boolean needCopyFromTo) throws HgRuntimeException {
 			copiedFrom = copiedTo = null;
 			//
 			// A (old) renamed to B(new).  A(0..k..n) -> B(0..m). If followAncestry, k == n
@@ -899,7 +880,7 @@
 			}
 		}
 		
-		public void reportRenames(HgFileRenameHandlerMixin renameHandler) throws HgCallbackTargetException {
+		public void reportRenames(HgFileRenameHandlerMixin renameHandler) throws HgCallbackTargetException, HgRuntimeException {
 			if (renameHandler != null) { // shall report renames
 				assert copiedFrom != null;
 				assert copiedTo != null;
@@ -954,9 +935,9 @@
 			throw new HgInvalidStateException(String.format("For change history (cset[%d..%d]) could not find node for file change %s", csetStart, csetEnd, fileRevision.shortNotation()));
 		}
 
-		protected abstract void once(HistoryNode n) throws HgCallbackTargetException, CancelledException;
+		protected abstract void once(HistoryNode n) throws HgCallbackTargetException, CancelledException, HgRuntimeException;
 		
-		public void dispatchAllChanges() throws HgCallbackTargetException, CancelledException {
+		public void dispatchAllChanges() throws HgCallbackTargetException, CancelledException, HgRuntimeException {
 			// XXX shall sort changeHistory according to changeset numbers?
 			Iterator<HistoryNode> it;
 			if (iterateDirection == HgIterateDirection.OldToNew) {
@@ -1006,7 +987,7 @@
 			}
 		}
 
-		public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) {
+		public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) throws HgRuntimeException {
 			if (limit > 0 && count >= limit) {
 				return;
 			}
@@ -1045,7 +1026,7 @@
 		}
 	}
 
-	private HgParentChildMap<HgChangelog> getParentHelper(boolean create) throws HgInvalidControlFileException {
+	private HgParentChildMap<HgChangelog> getParentHelper(boolean create) throws HgRuntimeException {
 		if (parentHelper == null && create) {
 			parentHelper = new HgParentChildMap<HgChangelog>(repo.getChangelog());
 			parentHelper.init();
@@ -1143,11 +1124,11 @@
 			return fileNode;
 		}
 
-		public HgChangeset changeset() {
+		public HgChangeset changeset() throws HgRuntimeException {
 			return get(historyNode.changeset)[0];
 		}
 
-		public Pair<HgChangeset, HgChangeset> parents() {
+		public Pair<HgChangeset, HgChangeset> parents() throws HgRuntimeException {
 			if (parents != null) {
 				return parents;
 			}
@@ -1167,7 +1148,7 @@
 			return parents = new Pair<HgChangeset, HgChangeset>(r[0], r[1]);
 		}
 
-		public Collection<HgChangeset> children() {
+		public Collection<HgChangeset> children() throws HgRuntimeException {
 			if (children != null) {
 				return children;
 			}
@@ -1188,7 +1169,7 @@
 			cachedChangesets.put(cs.getRevisionIndex(), cs);
 		}
 		
-		private HgChangeset[] get(int... changelogRevisionIndex) {
+		private HgChangeset[] get(int... changelogRevisionIndex) throws HgRuntimeException {
 			HgChangeset[] rv = new HgChangeset[changelogRevisionIndex.length];
 			IntVector misses = new IntVector(changelogRevisionIndex.length, -1);
 			for (int i = 0; i < changelogRevisionIndex.length; i++) {
@@ -1210,8 +1191,7 @@
 				for (int changeset2read : changesets2read) {
 					HgChangeset cs = cachedChangesets.get(changeset2read);
 					if (cs == null) {
-						HgInvalidStateException t = new HgInvalidStateException(String.format("Can't get changeset for revision %d", changeset2read));
-						throw t.setRevisionIndex(changeset2read);
+						throw new HgInvalidStateException(String.format("Can't get changeset for revision %d", changeset2read));
 					}
 					// HgChangelog.range may reorder changesets according to their order in the changelog
 					// thus need to find original index
@@ -1244,14 +1224,14 @@
 			populate(cs.clone());
 		}
 
-		public Nodeid changesetRevision() {
+		public Nodeid changesetRevision() throws HgRuntimeException {
 			if (changesetRevision == null) {
 				changesetRevision = getRevision(historyNode.changeset);
 			}
 			return changesetRevision;
 		}
 
-		public Pair<Nodeid, Nodeid> parentRevisions() {
+		public Pair<Nodeid, Nodeid> parentRevisions() throws HgRuntimeException {
 			if (parentRevisions == null) {
 				HistoryNode p;
 				final Nodeid p1, p2;
@@ -1270,7 +1250,7 @@
 			return parentRevisions;
 		}
 
-		public Collection<Nodeid> childRevisions() {
+		public Collection<Nodeid> childRevisions() throws HgRuntimeException {
 			if (childRevisions != null) {
 				return childRevisions;
 			}
@@ -1287,7 +1267,7 @@
 		}
 		
 		// reading nodeid involves reading index only, guess, can afford not to optimize multiple reads
-		private Nodeid getRevision(int changelogRevisionNumber) {
+		private Nodeid getRevision(int changelogRevisionNumber) throws HgRuntimeException {
 			// TODO post-1.0 pipe through pool
 			HgChangeset cs = cachedChangesets.get(changelogRevisionNumber);
 			if (cs != null) {
--- a/src/org/tmatesoft/hg/core/HgManifestCommand.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgManifestCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -190,7 +190,7 @@
 			}
 		}
 	
-		public boolean begin(int manifestRevision, Nodeid nid, int changelogRevision) {
+		public boolean begin(int manifestRevision, Nodeid nid, int changelogRevision) throws HgRuntimeException {
 			if (needDirs && manifestContent == null) {
 				manifestContent = new LinkedList<HgFileRevision>();
 			}
@@ -206,7 +206,7 @@
 				return false;
 			}
 		}
-		public boolean end(int revision) {
+		public boolean end(int revision) throws HgRuntimeException {
 			try {
 				if (needDirs) {
 					LinkedHashMap<Path, LinkedList<HgFileRevision>> breakDown = new LinkedHashMap<Path, LinkedList<HgFileRevision>>();
@@ -243,7 +243,7 @@
 			}
 		}
 		
-		public boolean next(Nodeid nid, Path fname, Flags flags) {
+		public boolean next(Nodeid nid, Path fname, Flags flags) throws HgRuntimeException {
 			if (matcher != null && !matcher.accept(fname)) {
 				return true;
 			}
--- a/src/org/tmatesoft/hg/core/HgManifestHandler.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgManifestHandler.java	Wed Jul 10 11:48:55 2013 +0200
@@ -17,6 +17,7 @@
 package org.tmatesoft.hg.core;
 
 import org.tmatesoft.hg.internal.Callback;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Path;
 
 /**
@@ -33,8 +34,9 @@
 	 * 
 	 * @param manifestRevision unique identifier of the manifest revision
 	 * @throws HgCallbackTargetException wrapper for any exception user code may produce
+	 * @throws HgRuntimeException propagates library issues. <em>Runtime exception</em>
 	 */
-	void begin(Nodeid manifestRevision) throws HgCallbackTargetException;
+	void begin(Nodeid manifestRevision) throws HgCallbackTargetException, HgRuntimeException;
 
 	/**
 	 * If walker is configured to spit out directories, indicates files from specified directories are about to be reported.
@@ -42,16 +44,18 @@
 	 * 
 	 * @param path directory known in the manifest
 	 * @throws HgCallbackTargetException wrapper for any exception user code may produce
+	 * @throws HgRuntimeException propagates library issues. <em>Runtime exception</em>
 	 */
-	void dir(Path path) throws HgCallbackTargetException; 
+	void dir(Path path) throws HgCallbackTargetException, HgRuntimeException; 
 
 	/**
 	 * Reports a file revision entry in the manifest
 	 * 
 	 * @param fileRevision description of the file revision
 	 * @throws HgCallbackTargetException wrapper for any exception user code may produce
+	 * @throws HgRuntimeException propagates library issues. <em>Runtime exception</em>
 	 */
-	void file(HgFileRevision fileRevision) throws HgCallbackTargetException;
+	void file(HgFileRevision fileRevision) throws HgCallbackTargetException, HgRuntimeException;
 
 	/**
 	 * Indicates all files from the manifest revision have been reported.
@@ -59,6 +63,7 @@
 	 * 
 	 * @param manifestRevision unique identifier of the manifest revision 
 	 * @throws HgCallbackTargetException wrapper for any exception user code may produce
+	 * @throws HgRuntimeException propagates library issues. <em>Runtime exception</em>
 	 */
-	void end(Nodeid manifestRevision) throws HgCallbackTargetException;
+	void end(Nodeid manifestRevision) throws HgCallbackTargetException, HgRuntimeException;
 }
\ No newline at end of file
--- a/src/org/tmatesoft/hg/core/HgOutgoingCommand.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgOutgoingCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -21,13 +21,14 @@
 import java.util.TreeSet;
 
 import org.tmatesoft.hg.internal.Internals;
+import org.tmatesoft.hg.internal.PhasesHelper;
 import org.tmatesoft.hg.internal.RepositoryComparator;
+import org.tmatesoft.hg.internal.RevisionSet;
 import org.tmatesoft.hg.repo.HgChangelog;
-import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.repo.HgRemoteRepository;
 import org.tmatesoft.hg.repo.HgRepository;
 import org.tmatesoft.hg.repo.HgRuntimeException;
-import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.util.CancelSupport;
 import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.ProgressSupport;
@@ -104,8 +105,7 @@
 	public List<Nodeid> executeLite() throws HgRemoteConnectionException, HgException, CancelledException {
 		final ProgressSupport ps = getProgressSupport(null);
 		try {
-			ps.start(10);
-			return getComparator(new ProgressSupport.Sub(ps, 5), getCancelSupport(null, true)).getLocalOnlyRevisions();
+			return getOutgoingRevisions(ps, getCancelSupport(null, true));
 		} catch (HgRuntimeException ex) {
 			throw new HgLibraryFailureException(ex);
 		} finally {
@@ -129,10 +129,16 @@
 		final ProgressSupport ps = getProgressSupport(handler);
 		final CancelSupport cs = getCancelSupport(handler, true);
 		try {
-			ps.start(-1);
-			ChangesetTransformer inspector = new ChangesetTransformer(localRepo, handler, getParentHelper(), ps, cs);
+			ps.start(200);
+			ChangesetTransformer inspector = new ChangesetTransformer(localRepo, handler, getParentHelper(), new ProgressSupport.Sub(ps, 100), cs);
 			inspector.limitBranches(branches);
-			getComparator(new ProgressSupport.Sub(ps, 1), cs).visitLocalOnlyRevisions(inspector);
+			List<Nodeid> out = getOutgoingRevisions(new ProgressSupport.Sub(ps, 100), cs);
+			int[] outRevIndex = new int[out.size()];
+			int i = 0;
+			for (Nodeid o : out) {
+				outRevIndex[i++] = localRepo.getChangelog().getRevisionIndex(o);
+			}
+			localRepo.getChangelog().range(inspector, outRevIndex);
 			inspector.checkFailure();
 		} catch (HgRuntimeException ex) {
 			throw new HgLibraryFailureException(ex);
@@ -141,7 +147,7 @@
 		}
 	}
 
-	private RepositoryComparator getComparator(ProgressSupport ps, CancelSupport cs) throws HgRemoteConnectionException, HgInvalidControlFileException, CancelledException {
+	private RepositoryComparator getComparator(ProgressSupport ps, CancelSupport cs) throws HgRemoteConnectionException, CancelledException, HgRuntimeException {
 		if (remoteRepo == null) {
 			throw new IllegalArgumentException("Shall specify remote repository to compare against");
 		}
@@ -152,7 +158,7 @@
 		return comparator;
 	}
 	
-	private HgParentChildMap<HgChangelog> getParentHelper() throws HgInvalidControlFileException {
+	private HgParentChildMap<HgChangelog> getParentHelper() throws HgRuntimeException {
 		if (parentHelper == null) {
 			parentHelper = new HgParentChildMap<HgChangelog>(localRepo.getChangelog());
 			parentHelper.init();
@@ -160,4 +166,17 @@
 		return parentHelper;
 	}
 
+	
+	private List<Nodeid> getOutgoingRevisions(ProgressSupport ps, CancelSupport cs) throws HgRemoteConnectionException, HgException, CancelledException {
+		ps.start(10);
+		final RepositoryComparator c = getComparator(new ProgressSupport.Sub(ps, 5), cs);
+		List<Nodeid> local = c.getLocalOnlyRevisions();
+		ps.worked(3);
+		PhasesHelper phaseHelper = new PhasesHelper(Internals.getInstance(localRepo));
+		if (phaseHelper.isCapableOfPhases() && phaseHelper.withSecretRoots()) {
+			local = new RevisionSet(local).subtract(phaseHelper.allSecret()).asList();
+		}
+		ps.worked(2);
+		return local;
+	}
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/core/HgPullCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.core;
+
+import java.util.List;
+
+import org.tmatesoft.hg.internal.AddRevInspector;
+import org.tmatesoft.hg.internal.COWTransaction;
+import org.tmatesoft.hg.internal.Internals;
+import org.tmatesoft.hg.internal.PhasesHelper;
+import org.tmatesoft.hg.internal.RepositoryComparator;
+import org.tmatesoft.hg.internal.RevisionSet;
+import org.tmatesoft.hg.internal.Transaction;
+import org.tmatesoft.hg.repo.HgBundle;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgInternals;
+import org.tmatesoft.hg.repo.HgParentChildMap;
+import org.tmatesoft.hg.repo.HgRemoteRepository;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.CancelledException;
+import org.tmatesoft.hg.util.ProgressSupport;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class HgPullCommand extends HgAbstractCommand<HgPullCommand> {
+
+	private final HgRepository repo;
+	private HgRemoteRepository remote;
+
+	public HgPullCommand(HgRepository hgRepo) {
+		repo = hgRepo;
+	}
+
+	public HgPullCommand source(HgRemoteRepository hgRemote) {
+		remote = hgRemote;
+		return this;
+	}
+
+	public void execute() throws HgRemoteConnectionException, HgIOException, HgLibraryFailureException, CancelledException {
+		final ProgressSupport progress = getProgressSupport(null);
+		try {
+			progress.start(100);
+			// TODO refactor same code in HgIncomingCommand #getComparator and #getParentHelper
+			final HgChangelog clog = repo.getChangelog();
+			final HgParentChildMap<HgChangelog> parentHelper = new HgParentChildMap<HgChangelog>(clog);
+			parentHelper.init();
+			final RepositoryComparator comparator = new RepositoryComparator(parentHelper, remote);
+			// get incoming revisions
+			comparator.compare(new ProgressSupport.Sub(progress, 50), getCancelSupport(null, true));
+			final List<Nodeid> common = comparator.getCommon();
+			// get bundle with changes from remote
+			HgBundle incoming = remote.getChanges(common);
+			//
+			// add revisions to changelog, manifest, files
+			final Internals implRepo = HgInternals.getImplementationRepo(repo);
+			final AddRevInspector insp;
+			Transaction.Factory trFactory = new COWTransaction.Factory();
+			Transaction tr = trFactory.create(repo);
+			try {
+				incoming.inspectAll(insp = new AddRevInspector(implRepo, tr));
+				tr.commit();
+			} catch (HgRuntimeException ex) {
+				tr.rollback();
+				throw ex;
+			} catch (RuntimeException ex) {
+				tr.rollback();
+				throw ex;
+			}
+			progress.worked(45);
+			RevisionSet added = insp.addedChangesets();
+			
+			// get remote phases, update local phases to match that of remote
+			final PhasesHelper phaseHelper = new PhasesHelper(implRepo, parentHelper);
+			if (phaseHelper.isCapableOfPhases()) {
+				RevisionSet rsCommon = new RevisionSet(common);
+				HgRemoteRepository.Phases remotePhases = remote.getPhases();
+				if (remotePhases.isPublishingServer()) {
+					final RevisionSet knownPublic = rsCommon.union(added);
+					RevisionSet newDraft = phaseHelper.allDraft().subtract(knownPublic);
+					RevisionSet newSecret = phaseHelper.allSecret().subtract(knownPublic);
+					phaseHelper.updateRoots(newDraft.asList(), newSecret.asList());
+				} else {
+					// FIXME refactor reuse from HgPushCommand
+				}
+			}
+			progress.worked(5);
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
+		} finally {
+			progress.done();
+		}
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/core/HgPushCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.core;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+import org.tmatesoft.hg.internal.BundleGenerator;
+import org.tmatesoft.hg.internal.Internals;
+import org.tmatesoft.hg.internal.PhasesHelper;
+import org.tmatesoft.hg.internal.RepositoryComparator;
+import org.tmatesoft.hg.internal.RevisionSet;
+import org.tmatesoft.hg.repo.HgBookmarks;
+import org.tmatesoft.hg.repo.HgBundle;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgInternals;
+import org.tmatesoft.hg.repo.HgInvalidStateException;
+import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgParentChildMap;
+import org.tmatesoft.hg.repo.HgPhase;
+import org.tmatesoft.hg.repo.HgRemoteRepository;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.CancelledException;
+import org.tmatesoft.hg.util.LogFacility.Severity;
+import org.tmatesoft.hg.util.Outcome;
+import org.tmatesoft.hg.util.Pair;
+import org.tmatesoft.hg.util.ProgressSupport;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class HgPushCommand extends HgAbstractCommand<HgPushCommand> {
+	
+	private final HgRepository repo;
+	private HgRemoteRepository remoteRepo;
+	private RevisionSet outgoing;
+
+	public HgPushCommand(HgRepository hgRepo) {
+		repo = hgRepo;
+	}
+	
+	public HgPushCommand destination(HgRemoteRepository hgRemote) {
+		remoteRepo = hgRemote;
+		return this;
+	}
+
+	public void execute() throws HgRemoteConnectionException, HgIOException, CancelledException, HgLibraryFailureException {
+		final ProgressSupport progress = getProgressSupport(null);
+		try {
+			progress.start(100);
+			//
+			// find out missing
+			// TODO refactor same code in HgOutgoingCommand #getComparator and #getParentHelper
+			final HgChangelog clog = repo.getChangelog();
+			final HgParentChildMap<HgChangelog> parentHelper = new HgParentChildMap<HgChangelog>(clog);
+			parentHelper.init();
+			final Internals implRepo = HgInternals.getImplementationRepo(repo);
+			final PhasesHelper phaseHelper = new PhasesHelper(implRepo, parentHelper);
+			final RepositoryComparator comparator = new RepositoryComparator(parentHelper, remoteRepo);
+			comparator.compare(new ProgressSupport.Sub(progress, 50), getCancelSupport(null, true));
+			List<Nodeid> l = comparator.getLocalOnlyRevisions();
+			if (phaseHelper.isCapableOfPhases() && phaseHelper.withSecretRoots()) {
+				RevisionSet secret = phaseHelper.allSecret();
+				outgoing = new RevisionSet(l).subtract(secret);
+			} else {
+				outgoing = new RevisionSet(l);
+			}
+			//
+			// prepare bundle
+			BundleGenerator bg = new BundleGenerator(implRepo);
+			File bundleFile = bg.create(outgoing.asList());
+			progress.worked(20);
+			HgBundle b = new HgLookup(repo.getSessionContext()).loadBundle(bundleFile);
+			//
+			// send changes
+			remoteRepo.unbundle(b, comparator.getRemoteHeads());
+			progress.worked(20);
+			//
+			// update phase information
+			if (phaseHelper.isCapableOfPhases()) {
+				RevisionSet presentSecret = phaseHelper.allSecret();
+				RevisionSet presentDraft = phaseHelper.allDraft();
+				RevisionSet secretLeft, draftLeft;
+				HgRemoteRepository.Phases remotePhases = remoteRepo.getPhases();
+				RevisionSet remoteDrafts = knownRemoteDrafts(remotePhases, parentHelper, outgoing, presentSecret);
+				if (remotePhases.isPublishingServer()) {
+					// although it's unlikely outgoing would affect secret changesets,
+					// it doesn't hurt to check secret roots along with draft ones
+					secretLeft = presentSecret.subtract(outgoing);
+					draftLeft = presentDraft.subtract(outgoing);
+				} else {
+					// shall merge local and remote phase states
+					// revisions that cease to be secret (gonna become Public), e.g. someone else pushed them
+					RevisionSet secretGone = presentSecret.intersect(remoteDrafts);
+					// parents of those remote drafts are public, mark them as public locally, too
+					RevisionSet remotePublic = presentSecret.ancestors(secretGone, parentHelper);
+					secretLeft = presentSecret.subtract(secretGone).subtract(remotePublic);
+					/*
+					 * Revisions grow from left to right (parents to the left, children to the right)
+					 * 
+					 * I: Set of local is subset of remote
+					 * 
+					 *               local draft 
+					 * --o---r---o---l---o--
+					 *       remote draft
+					 * 
+					 * Remote draft roots shall be updated
+					 *
+					 *
+					 * II: Set of local is superset of remote
+					 * 
+					 *       local draft 
+					 * --o---l---o---r---o--
+					 *               remote draft 
+					 *               
+					 * Local draft roots shall be updated
+					 */
+					RevisionSet sharedDraft = presentDraft.intersect(remoteDrafts); // (I: ~presentDraft; II: ~remoteDraft
+					// XXX do I really need sharedDrafts here? why not ancestors(remoteDrafts)?
+					RevisionSet localDraftRemotePublic = presentDraft.ancestors(sharedDraft, parentHelper); // I: 0; II: those treated public on remote
+					// remoteDrafts are local revisions known as draft@remote
+					// remoteDraftsLocalPublic - revisions that would cease to be listed as draft on remote
+					RevisionSet remoteDraftsLocalPublic = remoteDrafts.ancestors(sharedDraft, parentHelper);
+					RevisionSet remoteDraftsLeft = remoteDrafts.subtract(remoteDraftsLocalPublic);
+					// forget those deemed public by remote (drafts shared by both remote and local are ok to stay)
+					RevisionSet combinedDraft = presentDraft.union(remoteDraftsLeft);
+					draftLeft = combinedDraft.subtract(localDraftRemotePublic);
+				}
+				final RevisionSet newDraftRoots = draftLeft.roots(parentHelper);
+				final RevisionSet newSecretRoots = secretLeft.roots(parentHelper);
+				phaseHelper.updateRoots(newDraftRoots.asList(), newSecretRoots.asList());
+				//
+				// if there's a remote draft root that points to revision we know is public
+				RevisionSet remoteDraftsLocalPublic = remoteDrafts.subtract(draftLeft).subtract(secretLeft);
+				if (!remoteDraftsLocalPublic.isEmpty()) {
+					// foreach remoteDraftsLocallyPublic.heads() do push Draft->Public
+					for (Nodeid n : remoteDraftsLocalPublic.heads(parentHelper)) {
+						try {
+							Outcome upo = remoteRepo.updatePhase(HgPhase.Draft, HgPhase.Public, n);
+							if (!upo.isOk()) {
+								implRepo.getLog().dump(getClass(), Severity.Info, "Failed to update remote phase, reason: %s", upo.getMessage());
+							}
+						} catch (HgRemoteConnectionException ex) {
+							implRepo.getLog().dump(getClass(), Severity.Error, ex, String.format("Failed to update phase of %s", n.shortNotation()));
+						}
+					}
+				}
+			}
+			progress.worked(5);
+			//
+			// update bookmark information
+			HgBookmarks localBookmarks = repo.getBookmarks();
+			if (!localBookmarks.getAllBookmarks().isEmpty()) {
+				for (Pair<String,Nodeid> bm : remoteRepo.getBookmarks()) {
+					Nodeid localRevision = localBookmarks.getRevision(bm.first());
+					if (localRevision == null || !parentHelper.knownNode(bm.second())) {
+						continue;
+					}
+					// we know both localRevision and revision of remote bookmark,
+					// need to make sure we don't push an older revision than the one at the server
+					if (parentHelper.isChild(bm.second(), localRevision)) {
+						remoteRepo.updateBookmark(bm.first(), bm.second(), localRevision);
+					}
+				}
+			}
+			// XXX WTF is obsolete in namespaces key??
+			progress.worked(5);
+		} catch (IOException ex) {
+			throw new HgIOException(ex.getMessage(), null); // XXX not a nice idea to throw IOException from BundleGenerator#create
+		} catch (HgRepositoryNotFoundException ex) {
+			final HgInvalidStateException e = new HgInvalidStateException("Failed to load a just-created bundle");
+			e.initCause(ex);
+			throw new HgLibraryFailureException(e);
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
+		} finally {
+			progress.done();
+		}
+	}
+	
+	public Collection<Nodeid> getPushedRevisions() {
+		return outgoing == null ? Collections.<Nodeid>emptyList() : outgoing.asList();
+	}
+	
+	private RevisionSet knownRemoteDrafts(HgRemoteRepository.Phases remotePhases, HgParentChildMap<HgChangelog> parentHelper, RevisionSet outgoing, RevisionSet localSecret) {
+		ArrayList<Nodeid> knownRemoteDraftRoots = new ArrayList<Nodeid>();
+		for (Nodeid rdr : remotePhases.draftRoots()) {
+			if (parentHelper.knownNode(rdr)) {
+				knownRemoteDraftRoots.add(rdr);
+			}
+		}
+		// knownRemoteDraftRoots + childrenOf(knownRemoteDraftRoots) is everything remote may treat as Draft
+		RevisionSet remoteDrafts = new RevisionSet(knownRemoteDraftRoots);
+		RevisionSet localChildren = remoteDrafts.children(parentHelper);
+		// we didn't send any local secret revision
+		localChildren = localChildren.subtract(localSecret);
+		// draft roots are among remote drafts
+		remoteDrafts = remoteDrafts.union(localChildren);
+		// 1) outgoing.children gives all local revisions accessible from outgoing.
+		// 2) outgoing.roots.children is equivalent with a smaller intermediate set, but the way we build
+		// childrenOf doesn't really benefit from that.
+		RevisionSet localChildrenNotSent = outgoing.children(parentHelper).subtract(outgoing);
+		// remote shall know only what we've sent, so subtract revisions we didn't actually send
+		remoteDrafts = remoteDrafts.subtract(localChildrenNotSent);
+		return remoteDrafts;
+	}
+}
--- a/src/org/tmatesoft/hg/core/HgRepoFacade.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgRepoFacade.java	Wed Jul 10 11:48:55 2013 +0200
@@ -101,6 +101,14 @@
 	public SessionContext getSessionContext() {
 		return context;
 	}
+	
+	/**
+	 * This factory method doesn't need this facade to be initialized with a repository.
+	 * @return command instance, never <code>null</code>
+	 */
+	public HgInitCommand createInitCommand() {
+		return new HgInitCommand(new HgLookup(context));
+	}
 
 	public HgLogCommand createLogCommand() {
 		return new HgLogCommand(repo/*, getCommandContext()*/);
@@ -153,4 +161,16 @@
 	public HgCommitCommand createCommitCommand() {
 		return new HgCommitCommand(repo);
 	}
+	
+	public HgDiffCommand createDiffCommand() {
+		return new HgDiffCommand(repo);
+	}
+
+	public HgPushCommand createPushCommand() {
+		return new HgPushCommand(repo);
+	}
+	
+	public HgPullCommand createPullCommand() {
+		return new HgPullCommand(repo);
+	}
 }
--- a/src/org/tmatesoft/hg/core/HgRepositoryLockException.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgRepositoryLockException.java	Wed Jul 10 11:48:55 2013 +0200
@@ -16,7 +16,6 @@
  */
 package org.tmatesoft.hg.core;
 
-import org.tmatesoft.hg.internal.Experimental;
 import org.tmatesoft.hg.repo.HgRepositoryLock;
 
 /**
@@ -26,7 +25,6 @@
  * @author TMate Software Ltd.
  */
 @SuppressWarnings("serial")
-@Experimental(reason="Work in progress")
 public class HgRepositoryLockException extends HgException {
 	
 	public HgRepositoryLockException(String message) {
--- a/src/org/tmatesoft/hg/core/HgRevertCommand.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgRevertCommand.java	Wed Jul 10 11:48:55 2013 +0200
@@ -21,14 +21,16 @@
 import java.util.LinkedHashSet;
 import java.util.Set;
 
+import org.tmatesoft.hg.internal.COWTransaction;
 import org.tmatesoft.hg.internal.CsetParamKeeper;
 import org.tmatesoft.hg.internal.DirstateBuilder;
 import org.tmatesoft.hg.internal.DirstateReader;
-import org.tmatesoft.hg.internal.Experimental;
 import org.tmatesoft.hg.internal.Internals;
+import org.tmatesoft.hg.internal.Transaction;
 import org.tmatesoft.hg.repo.HgManifest;
 import org.tmatesoft.hg.repo.HgManifest.Flags;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRepositoryLock;
 import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.CancelSupport;
 import org.tmatesoft.hg.util.CancelledException;
@@ -36,14 +38,12 @@
 import org.tmatesoft.hg.util.ProgressSupport;
 
 /**
- * WORK IN PROGRESS.
- * 
  * Restore files to their checkout state, 'hg revert' counterpart.
  * 
+ * @since 1.1
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-@Experimental(reason="Work in progress")
 public class HgRevertCommand extends HgAbstractCommand<HgRevertCommand> {
 
 	private final HgRepository repo;
@@ -102,6 +102,8 @@
 	 * @throws CancelledException if execution of the command was cancelled
 	 */
 	public void execute() throws HgException, CancelledException {
+		final HgRepositoryLock wdLock = repo.getWorkingDirLock();
+		wdLock.acquire();
 		try {
 			final ProgressSupport progress = getProgressSupport(null);
 			final CancelSupport cancellation = getCancelSupport(null, true);
@@ -158,11 +160,25 @@
 				progress.worked(1);
 				cancellation.checkCancelled();
 			}
-			dirstateBuilder.serialize();
+			Transaction.Factory trFactory = new COWTransaction.Factory();
+			Transaction tr = trFactory.create(repo);
+			try {
+				// TODO same code in HgAddRemoveCommand and similar in HgCommitCommand
+				dirstateBuilder.serialize(tr);
+				tr.commit();
+			} catch (RuntimeException ex) {
+				tr.rollback();
+				throw ex;
+			} catch (HgException ex) {
+				tr.rollback();
+				throw ex;
+			}
 			progress.worked(1);
 			progress.done();
 		} catch (HgRuntimeException ex) {
 			throw new HgLibraryFailureException(ex);
+		} finally {
+			wdLock.release();
 		}
 	}
 }
--- a/src/org/tmatesoft/hg/core/HgStatus.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgStatus.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
 
 import org.tmatesoft.hg.internal.ChangelogHelper;
 import org.tmatesoft.hg.repo.HgChangelog.RawChangeset;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Path;
 
 /**
@@ -70,8 +71,9 @@
 
 	/**
 	 * @return <code>null</code> if author for the change can't be deduced (e.g. for clean files it's senseless)
+	 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 	 */
-	public String getModificationAuthor() {
+	public String getModificationAuthor() throws HgRuntimeException {
 		RawChangeset cset = logHelper.findLatestChangeWith(path);
 		if (cset == null) {
 			if (kind == Kind.Modified || kind == Kind.Added || kind == Kind.Removed /*&& RightBoundary is TIP*/) {
@@ -84,15 +86,20 @@
 		return null;
 	}
 
-	public Date getModificationDate() {
+	/**
+	 * @return date when the file was last modified, never <code>null</code>. Either date of changeset the file was modified at
+	 * or timestamp of local file, if present
+	 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
+	 */
+	public Date getModificationDate() throws HgRuntimeException {
 		RawChangeset cset = logHelper.findLatestChangeWith(path);
 		if (cset == null) {
 			File localFile = new File(logHelper.getRepo().getWorkingDir(), path.toString());
 			if (localFile.canRead()) {
 				return new Date(localFile.lastModified());
 			}
-			// TODO post-1.0 find out what to do in this case, perhaps, throw an exception?
-			// perhaps check dirstate and/or local file for tstamp
+			// TODO post-1.1 find out what to do in this case, perhaps, throw an exception?
+			// perhaps check the dirstate for the timestamp
 			return new Date(); // what's correct? 
 		} else {
 			return cset.date();
--- a/src/org/tmatesoft/hg/core/Nodeid.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/core/Nodeid.java	Wed Jul 10 11:48:55 2013 +0200
@@ -33,11 +33,21 @@
  *
  */
 public final class Nodeid implements Comparable<Nodeid> {
-	
+
+	/**
+	 * Length of the nodeid in bytes
+	 */
+	public static final int SIZE = 20;
+
+	/**
+	 * Length of nodeid string representation, in bytes
+	 */
+	public static final int SIZE_ASCII = 40;
+
 	/**
 	 * <b>nullid</b>, empty root revision.
 	 */
-	public static final Nodeid NULL = new Nodeid(new byte[20], false);
+	public static final Nodeid NULL = new Nodeid(new byte[SIZE], false);
 
 	private final byte[] binaryData; 
 
@@ -49,7 +59,7 @@
 	public Nodeid(byte[] binaryRepresentation, boolean shallClone) {
 		// 5 int fields => 32 bytes
 		// byte[20] => 48 bytes (16 bytes is Nodeid with one field, 32 bytes for byte[20] 
-		if (binaryRepresentation == null || binaryRepresentation.length != 20) {
+		if (binaryRepresentation == null || binaryRepresentation.length != SIZE) {
 			throw new HgBadNodeidFormatException(String.format("Bad value: %s", String.valueOf(binaryRepresentation)));
 		}
 		/*
@@ -69,8 +79,18 @@
 
 	@Override
 	public int hashCode() {
+		return hashCode(binaryData);
+	}
+	
+	/**
+	 * Handy alternative to calculate hashcode without need to get {@link Nodeid} instance
+	 * @param binaryNodeid array of exactly 20 bytes
+	 * @return same value as <code>new Nodeid(binaryNodeid, false).hashCode()</code>
+	 */
+	public static int hashCode(byte[] binaryNodeid) {
+		assert binaryNodeid.length == SIZE;
 		// digest (part thereof) seems to be nice candidate for the hashCode
-		byte[] b = binaryData;
+		byte[] b = binaryNodeid;
 		return b[0] << 24 | (b[1] & 0xFF) << 16 | (b[2] & 0xFF) << 8 | (b[3] & 0xFF);
 	}
 	
@@ -93,7 +113,7 @@
 		if (this == o) {
 			return 0;
 		}
-		for (int i = 0; i < 20; i++) {
+		for (int i = 0; i < SIZE; i++) {
 			if (binaryData[i] != o.binaryData[i]) {
 				// if we need truly ascending sort, need to respect byte sign 
 				// return (binaryData[i] & 0xFF) < (o.binaryData[i] & 0xFF) ? -1 : 1;
@@ -121,7 +141,7 @@
 		if (this == NULL) {
 			return true;
 		}
-		for (int i = 0; i < 20; i++) {
+		for (int i = 0; i < SIZE; i++) {
 			if (this.binaryData[i] != 0) {
 				return false;
 			}
@@ -143,19 +163,19 @@
 	 * @throws HgBadNodeidFormatException custom {@link IllegalArgumentException} subclass when arguments don't select 20 bytes
 	 */
 	public static Nodeid fromBinary(byte[] binaryRepresentation, int offset) {
-		if (binaryRepresentation == null || binaryRepresentation.length - offset < 20) {
+		if (binaryRepresentation == null || binaryRepresentation.length - offset < SIZE) {
 			throw new HgBadNodeidFormatException(String.format("Bad value: %s", String.valueOf(binaryRepresentation)));
 		}
 		int i = 0;
-		while (i < 20 && binaryRepresentation[offset+i] == 0) i++;
-		if (i == 20) {
+		while (i < SIZE && binaryRepresentation[offset+i] == 0) i++;
+		if (i == SIZE) {
 			return NULL;
 		}
-		if (offset == 0 && binaryRepresentation.length == 20) {
+		if (offset == 0 && binaryRepresentation.length == SIZE) {
 			return new Nodeid(binaryRepresentation, true);
 		}
-		byte[] b = new byte[20]; // create new instance if no other reasonable guesses possible
-		System.arraycopy(binaryRepresentation, offset, b, 0, 20);
+		byte[] b = new byte[SIZE]; // create new instance if no other reasonable guesses possible
+		System.arraycopy(binaryRepresentation, offset, b, 0, SIZE);
 		return new Nodeid(b, false);
 	}
 
@@ -167,11 +187,11 @@
 	 * @throws HgBadNodeidFormatException custom {@link IllegalArgumentException} subclass when argument doesn't match encoded form of 20-bytes sha1 digest. 
 	 */
 	public static Nodeid fromAscii(String asciiRepresentation) throws HgBadNodeidFormatException {
-		if (asciiRepresentation.length() != 40) {
+		if (asciiRepresentation.length() != SIZE_ASCII) {
 			throw new HgBadNodeidFormatException(String.format("Bad value: %s", asciiRepresentation));
 		}
 		// XXX is better impl for String possible?
-		return fromAscii(asciiRepresentation.toCharArray(), 0, 40);
+		return fromAscii(asciiRepresentation.toCharArray(), 0, SIZE_ASCII);
 	}
 	
 	/**
@@ -179,11 +199,11 @@
 	 * @throws HgBadNodeidFormatException custom {@link IllegalArgumentException} subclass when bytes are not hex digits or number of bytes != 40 (160 bits) 
 	 */
 	public static Nodeid fromAscii(byte[] asciiRepresentation, int offset, int length) throws HgBadNodeidFormatException {
-		if (length != 40) {
-			throw new HgBadNodeidFormatException(String.format("Expected 40 hex characters for nodeid, not %d", length));
+		if (length != SIZE_ASCII) {
+			throw new HgBadNodeidFormatException(String.format("Expected %d hex characters for nodeid, not %d", SIZE_ASCII, length));
 		}
 		try {
-			byte[] data = new byte[20];
+			byte[] data = new byte[SIZE];
 			boolean zeroBytes = DigestHelper.ascii2bin(asciiRepresentation, offset, length, data);
 			if (zeroBytes) {
 				return NULL;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/AddRevInspector.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.util.HashMap;
+import java.util.Set;
+
+import org.tmatesoft.hg.core.HgIOException;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgBundle;
+import org.tmatesoft.hg.repo.HgBundle.GroupElement;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.Pair;
+
+/**
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class AddRevInspector implements HgBundle.Inspector {
+	private final Internals repo;
+	private final Transaction tr;
+	private Set<Nodeid> added;
+	private RevlogStreamWriter revlog;
+	private RevMap clogRevs;
+	private RevMap revlogRevs;
+
+	public AddRevInspector(Internals implRepo, Transaction transaction) {
+		repo = implRepo;
+		tr = transaction;
+	}
+
+	public void changelogStart() throws HgRuntimeException {
+		// TODO Auto-generated method stub
+		RevlogStream rs = repo.getImplAccess().getChangelogStream();
+		revlog = new RevlogStreamWriter(repo, rs, tr);
+		revlogRevs = clogRevs = new RevMap(rs);
+	}
+
+	public void changelogEnd() throws HgRuntimeException {
+		revlog = null;
+		revlogRevs = null;
+		added = clogRevs.added();
+	}
+
+	public void manifestStart() throws HgRuntimeException {
+		RevlogStream rs = repo.getImplAccess().getManifestStream();
+		revlog = new RevlogStreamWriter(repo, rs, tr);
+		revlogRevs = new RevMap(rs);
+	}
+
+	public void manifestEnd() throws HgRuntimeException {
+		revlog = null;
+		revlogRevs = null;
+	}
+
+	public void fileStart(String name) throws HgRuntimeException {
+		HgDataFile df = repo.getRepo().getFileNode(name);
+		RevlogStream rs = repo.getImplAccess().getStream(df);
+		revlog = new RevlogStreamWriter(repo, rs, tr);
+		revlogRevs = new RevMap(rs);
+		// FIXME collect new files and update fncache
+	}
+
+	public void fileEnd(String name) throws HgRuntimeException {
+		revlog = null;
+		revlogRevs = null;
+	}
+
+	public boolean element(GroupElement ge) throws HgRuntimeException {
+		assert clogRevs != null;
+		assert revlogRevs != null;
+		try {
+			Pair<Integer, Nodeid> newRev = revlog.addPatchRevision(ge, clogRevs, revlogRevs);
+			revlogRevs.update(newRev.first(), newRev.second());
+			return true;
+		} catch (HgIOException ex) {
+			throw new HgInvalidControlFileException(ex, true);
+		}
+	}
+
+	public RevisionSet addedChangesets() {
+		return new RevisionSet(added);
+	}
+
+	private static class RevMap implements RevlogStreamWriter.RevisionToIndexMap {
+		
+		private final RevlogStream revlog;
+		private HashMap<Nodeid, Integer> added = new HashMap<Nodeid, Integer>();
+
+		public RevMap(RevlogStream revlogStream) {
+			revlog = revlogStream;
+		}
+
+		public int revisionIndex(Nodeid revision) {
+			Integer a = added.get(revision);
+			if (a != null) {
+				return a;
+			}
+			int f = revlog.findRevisionIndex(revision);
+			return f == HgRepository.BAD_REVISION ? HgRepository.NO_REVISION : f;
+		}
+		
+		public void update(Integer revIndex, Nodeid rev) {
+			added.put(rev, revIndex);
+		}
+		
+		Set<Nodeid> added() {
+			return added.keySet();
+		}
+	}
+}
\ No newline at end of file
--- a/src/org/tmatesoft/hg/internal/ArrayHelper.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/ArrayHelper.java	Wed Jul 10 11:48:55 2013 +0200
@@ -16,40 +16,106 @@
  */
 package org.tmatesoft.hg.internal;
 
+import java.util.Arrays;
+
 /**
  * Internal alternative to Arrays.sort to build reversed index along with sorting
+ * and to perform lookup (binary search) without sorted array, using reversed index.
  * 
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-public class ArrayHelper {
-	private int[] reverse;
+public final class ArrayHelper<T extends Comparable<T>> {
+	private int[] reverse; // aka sorted2natural
+	private final T[] data;
+	private T[] sorted;
+	
+	public ArrayHelper(T[] _data) {
+		assert _data != null;
+		data = _data;
+	}
 
-	@SuppressWarnings("unchecked")
-	public void sort(Comparable<?>[] a) {
-//		Object[] aux = (Object[]) a.clone();
-		reverse = new int[a.length];
-		sort1((Comparable<Object>[])a, 0, a.length);
+	/**
+	 * Sort data this helper wraps, possibly using supplied array (optional)
+	 * to keep sorted elements
+	 * @param sortDest array to keep sorted values at, or <code>null</code>
+	 * @param sortDestIsEmpty <code>false</code> when sortDest already contains copy of data to be sorted
+	 * @param keepSorted <code>true</code> to save sorted array for future use (e.g. in
+	 */
+	public void sort(T[] sortDest, boolean sortDestIsEmpty, boolean keepSorted) {
+		if (sortDest != null) {
+			assert sortDest.length >= data.length;
+			if (sortDestIsEmpty) {
+				System.arraycopy(data, 0, sortDest, 0, data.length);
+			}
+			sorted = sortDest;
+		} else {
+			sorted = data.clone();
+		}
+		reverse = new int[data.length];
 		for (int i = 0; i < reverse.length; i++) {
-			// element that was not moved don't have an index in reverse.
-			// perhaps, can do it inside sort alg?
-			// Alternatively, may start with filling reverse[] array with initial indexes and
-			// avoid != 0 comparisons in #swap altogether?
-			if (reverse[i] == 0) {
-				reverse[i] = i+1;
-			}
+			// initial reverse indexes, so that elements that do
+			// not move during sort got correct indexes
+			reverse[i] = i;
 		}
+		sort1(0, data.length);
+		if (!keepSorted) {
+			sorted = null;
+		}
+	}
+
+	/**
+	 * @return all reverse indexes
+	 */
+	public int[] getReverseIndexes() {
+		return reverse;
+	}
+	
+	public int getReverseIndex(int sortedIndex) {
+		return reverse[sortedIndex];
+	}
+	
+	public T get(int index) {
+		return data[index];
+	}
+	
+	public T[] getData() {
+		return data;
+	}
+
+	/**
+	 * Look up sorted index of the value, using sort information 
+	 * @return same value as {@link Arrays#binarySearch(Object[], Object)} does
+	 */
+	public int binarySearchSorted(T value) {
+		if (sorted != null) {
+			return Arrays.binarySearch(sorted, 0, data.length, value);
+		}
+		return binarySearchWithReverse(0, data.length, value);
+	}
+
+	/**
+	 * Look up index of the value in the original array.
+	 * @return index in original data, or <code>defaultValue</code> if value not found
+	 */
+	public int binarySearch(T value, int defaultValue) {
+		int x = binarySearchSorted(value);
+		if (x < 0) {
+			return defaultValue;
+		}
+		return reverse[x];
 	}
 
 	/**
 	 * Slightly modified version of Arrays.sort1(int[], int, int) quicksort alg (just to deal with Object[])
 	 */
-    private void sort1(Comparable<Object> x[], int off, int len) {
+    private void sort1(int off, int len) {
+		Comparable<Object>[] x = comparableSorted();
     	// Insertion sort on smallest arrays
     	if (len < 7) {
     	    for (int i=off; i<len+off; i++)
     			for (int j=i; j>off && x[j-1].compareTo(x[j]) > 0; j--)
-    			    swap(x, j, j-1);
+    			    swap(j, j-1);
     	    return;
     	}
 
@@ -60,11 +126,11 @@
     	    int n = off + len - 1;
     	    if (len > 40) {        // Big arrays, pseudomedian of 9
     			int s = len/8;
-	    		l = med3(x, l,     l+s, l+2*s);
-	    		m = med3(x, m-s,   m,   m+s);
-	    		n = med3(x, n-2*s, n-s, n);
+	    		l = med3(l,     l+s, l+2*s);
+	    		m = med3(m-s,   m,   m+s);
+	    		n = med3(n-2*s, n-s, n);
     	    }
-    	    m = med3(x, l, m, n); // Mid-size, med of 3
+    	    m = med3(l, m, n); // Mid-size, med of 3
     	}
     	Comparable<Object> v = x[m];
 
@@ -73,67 +139,94 @@
     	while(true) {
     	    while (b <= c && x[b].compareTo(v) <= 0) {
     			if (x[b] == v)
-    			    swap(x, a++, b);
+    			    swap(a++, b);
     			b++;
     	    }
     	    while (c >= b && x[c].compareTo(v) >= 0) {
     			if (x[c] == v)
-    			    swap(x, c, d--);
+    			    swap(c, d--);
     			c--;
     	    }
     	    if (b > c)
     			break;
-    	    swap(x, b++, c--);
+    	    swap(b++, c--);
     	}
 
     	// Swap partition elements back to middle
     	int s, n = off + len;
-    	s = Math.min(a-off, b-a  );  vecswap(x, off, b-s, s);
-    	s = Math.min(d-c,   n-d-1);  vecswap(x, b,   n-s, s);
+    	s = Math.min(a-off, b-a  );  vecswap(off, b-s, s);
+    	s = Math.min(d-c,   n-d-1);  vecswap(b,   n-s, s);
 
     	// Recursively sort non-partition-elements
     	if ((s = b-a) > 1)
-    	    sort1(x, off, s);
+    	    sort1(off, s);
     	if ((s = d-c) > 1)
-    	    sort1(x, n-s, s);
+    	    sort1(n-s, s);
     }
 
     /**
      * Swaps x[a .. (a+n-1)] with x[b .. (b+n-1)].
      */
-    private void vecswap(Object[] x, int a, int b, int n) {
+    private void vecswap(int a, int b, int n) {
 		for (int i=0; i<n; i++, a++, b++) {
-		    swap(x, a, b);
+		    swap(a, b);
 		}
     }
 
     /**
      * Returns the index of the median of the three indexed integers.
      */
-    private static int med3(Comparable<Object>[] x, int a, int b, int c) {
-	return (x[a].compareTo(x[b]) < 0 ?
-		(x[b].compareTo(x[c]) < 0 ? b : x[a].compareTo(x[c]) < 0 ? c : a) :
-		(x[b].compareTo(x[c]) > 0 ? b : x[a].compareTo(x[c]) > 0 ? c : a));
+    private int med3(int a, int b, int c) {
+		Comparable<Object>[] x = comparableSorted();
+		return (x[a].compareTo(x[b]) < 0 ?
+			(x[b].compareTo(x[c]) < 0 ? b : x[a].compareTo(x[c]) < 0 ? c : a) :
+			(x[b].compareTo(x[c]) > 0 ? b : x[a].compareTo(x[c]) > 0 ? c : a));
+    }
+    
+    private Comparable<Object>[] comparableSorted() {
+    	// Comparable<Object>[] x = (Comparable<Object>[]) sorted
+		// eclipse compiler is ok with the line above, while javac doesn't understand it:
+		// inconvertible types found : T[] required: java.lang.Comparable<java.lang.Object>[]
+    	// so need to add another step
+    	Comparable<?>[] oo = sorted;
+		@SuppressWarnings("unchecked")
+		Comparable<Object>[] x = (Comparable<Object>[]) oo;
+		return x;
     }
 
-
-	/**
-	 * @return the reverse
-	 */
-	public int[] getReverse() {
-		return reverse;
-	}
-
-	/**
+    /**
 	 * Swaps x[a] with x[b].
 	 */
-	private void swap(Object[] x, int a, int b) {
+	private void swap(int a, int b) {
+		Object[] x = sorted;
 		Object t = x[a];
 		x[a] = x[b];
 		x[b] = t;
-		int z1 = reverse[a] != 0 ? reverse[a] : a+1;
-		int z2 = reverse[b] != 0 ? reverse[b] : b+1;
+		int z1 = reverse[a];
+		int z2 = reverse[b];
 		reverse[b] = z1;
 		reverse[a] = z2;
 	}
+
+	// copied from Arrays.binarySearch0, update to be instance method and to use reverse indexes
+	private int binarySearchWithReverse(int fromIndex, int toIndex, T key) {
+		int low = fromIndex;
+		int high = toIndex - 1;
+
+		while (low <= high) {
+			int mid = (low + high) >>> 1;
+			// data[reverse[x]] gives sorted value at index x
+			T midVal = data[reverse[mid]];
+			int cmp = midVal.compareTo(key);
+
+			if (cmp < 0)
+				low = mid + 1;
+			else if (cmp > 0)
+				high = mid - 1;
+			else
+				return mid; // key found
+		}
+		return -(low + 1);  // key not found.
+	}
+
 }
--- a/src/org/tmatesoft/hg/internal/BlameHelper.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/BlameHelper.java	Wed Jul 10 11:48:55 2013 +0200
@@ -16,6 +16,7 @@
  */
 package org.tmatesoft.hg.internal;
 
+import static org.tmatesoft.hg.core.HgIterateDirection.OldToNew;
 import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
 
 import java.util.LinkedList;
@@ -24,44 +25,62 @@
 import org.tmatesoft.hg.core.HgCallbackTargetException;
 import org.tmatesoft.hg.internal.DiffHelper.LineSequence;
 import org.tmatesoft.hg.internal.DiffHelper.LineSequence.ByteChain;
-import org.tmatesoft.hg.repo.HgBlameFacility.Block;
-import org.tmatesoft.hg.repo.HgBlameFacility.BlockData;
-import org.tmatesoft.hg.repo.HgBlameFacility.ChangeBlock;
-import org.tmatesoft.hg.repo.HgBlameFacility.EqualBlock;
-import org.tmatesoft.hg.repo.HgBlameFacility.Inspector;
-import org.tmatesoft.hg.repo.HgBlameFacility.RevisionDescriptor;
-import org.tmatesoft.hg.repo.HgBlameFacility.RevisionDescriptor.Recipient;
-import org.tmatesoft.hg.repo.HgBlameFacility;
+import org.tmatesoft.hg.core.HgBlameInspector;
+import org.tmatesoft.hg.core.HgBlameInspector.*;
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Adaptable;
 import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.Pair;
 
 /**
  * Blame implementation
- * @see HgBlameFacility
+ * @see HgBlameInspector
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
 public class BlameHelper {
 	
-	private final Inspector insp;
+	private final HgBlameInspector insp;
 	private FileLinesCache linesCache;
 
-	// FIXME exposing internals (use of FileLinesCache through cons arg and #useFileUpTo) smells bad, refactor!
-
-	public BlameHelper(Inspector inspector, int cacheHint) {
+	public BlameHelper(HgBlameInspector inspector) {
 		insp = inspector;
-		linesCache = new FileLinesCache(cacheHint);
 	}
-	
-	public void useFileUpTo(HgDataFile df, int clogRevIndex) {
-		linesCache.useFileUpTo(df, clogRevIndex);
+
+	/**
+	 * Build history of the file for the specified range (follow renames if necessary). This history
+	 * is used to access various file revision data during subsequent {@link #diff(int, int, int, int)} and
+	 * {@link #annotateChange(int, int, int[], int[])} calls. Callers can use the returned history for their
+	 * own approaches to iteration over file history.
+
+	 * <p>NOTE, clogRevIndexEnd has to list the name of the supplied file in the corresponding manifest,
+	 * as it's not possible to trace rename history otherwise.
+	 */
+	public FileHistory prepare(HgDataFile df, int clogRevIndexStart, int clogRevIndexEnd) throws HgRuntimeException {
+		assert clogRevIndexStart <= clogRevIndexEnd;
+		FileHistory fileHistory = new FileHistory(df, clogRevIndexStart, clogRevIndexEnd);
+		fileHistory.build();
+		int cacheHint = 5; // cache comes useful when we follow merge branches and don't want to
+		// parse base revision twice. There's no easy way to determine max(distance(all(base,merge))),
+		// hence the heuristics to use the longest history chunk:
+		for (FileRevisionHistoryChunk c : fileHistory.iterate(OldToNew)) {
+			// iteration order is not important here
+			if (c.revisionCount() > cacheHint) {
+				cacheHint = c.revisionCount();
+			}
+		}
+		linesCache = new FileLinesCache(cacheHint);
+		for (FileRevisionHistoryChunk fhc : fileHistory.iterate(OldToNew)) {
+			// iteration order is not important here
+			linesCache.useFileUpTo(fhc.getFile(), fhc.getEndChangeset());
+		}
+		return fileHistory;
 	}
 	
 	// NO_REVISION is not allowed as any argument
-	public void diff(int fileRevIndex1, int clogRevIndex1, int fileRevIndex2, int clogRevIndex2) throws HgCallbackTargetException {
+	public void diff(int fileRevIndex1, int clogRevIndex1, int fileRevIndex2, int clogRevIndex2) throws HgCallbackTargetException, HgRuntimeException {
 		HgDataFile targetFile = linesCache.getFile(clogRevIndex2);
 		LineSequence c1 = linesCache.lines(clogRevIndex1, fileRevIndex1);
 		LineSequence c2 = linesCache.lines(clogRevIndex2, fileRevIndex2);
@@ -72,7 +91,7 @@
 		bbi.checkErrors();
 	}
 
-	public void annotateChange(int fileRevIndex, int csetRevIndex, int[] fileParentRevs, int[] fileParentClogRevs) throws HgCallbackTargetException {
+	public void annotateChange(int fileRevIndex, int csetRevIndex, int[] fileParentRevs, int[] fileParentClogRevs) throws HgCallbackTargetException, HgRuntimeException {
 		HgDataFile targetFile = linesCache.getFile(csetRevIndex);
 		final LineSequence fileRevLines = linesCache.lines(csetRevIndex, fileRevIndex);
 		if (fileParentClogRevs[0] != NO_REVISION && fileParentClogRevs[1] != NO_REVISION) {
@@ -117,6 +136,9 @@
 		private final int limit;
 		private final LinkedList<Pair<Integer, HgDataFile>> files; // TODO in fact, need sparse array 
 
+		/**
+		 * @param lruLimit how many parsed file revisions to keep
+		 */
 		public FileLinesCache(int lruLimit) {
 			limit = lruLimit;
 			lruCache = new LinkedList<Pair<Integer, LineSequence>>();
@@ -150,7 +172,7 @@
 			throw new HgInvalidStateException(String.format("Got %d file-changelog mappings, but no luck for revision %d.", files.size(), clogRevIndex));
 		}
 
-		public LineSequence lines(int clogRevIndex, int fileRevIndex) {
+		public LineSequence lines(int clogRevIndex, int fileRevIndex) throws HgRuntimeException {
 			Pair<Integer, LineSequence> cached = checkCache(clogRevIndex);
 			if (cached != null) {
 				return cached.second();
@@ -192,7 +214,7 @@
 	}
 
 	private static class BlameBlockInspector extends DiffHelper.DeltaInspector<LineSequence> {
-		private final Inspector insp;
+		private final HgBlameInspector insp;
 		private final int csetOrigin;
 		private final int csetTarget;
 		private EqualBlocksCollector p2MergeCommon;
@@ -201,7 +223,7 @@
 		private final AnnotateRev annotatedRevision;
 		private HgCallbackTargetException error;
 
-		public BlameBlockInspector(HgDataFile df, int fileRevIndex, Inspector inspector, int originCset, int targetCset) {
+		public BlameBlockInspector(HgDataFile df, int fileRevIndex, HgBlameInspector inspector, int originCset, int targetCset) {
 			assert inspector != null;
 			insp = inspector;
 			annotatedRevision = new AnnotateRev();
@@ -226,7 +248,7 @@
 			ContentBlock targetContent = new ContentBlock(s2);
 			annotatedRevision.set(originContent, targetContent);
 			annotatedRevision.set(csetOrigin, csetTarget, p2MergeCommon != null ? csetMergeParent : NO_REVISION);
-			Recipient curious = Adaptable.Factory.getAdapter(insp, Recipient.class, null);
+			RevisionDescriptor.Recipient curious = Adaptable.Factory.getAdapter(insp, RevisionDescriptor.Recipient.class, null);
 			if (curious != null) {
 				try {
 					curious.start(annotatedRevision);
@@ -242,7 +264,7 @@
 			if (shallStop()) {
 				return;
 			}
-			Recipient curious = Adaptable.Factory.getAdapter(insp, Recipient.class, null);
+			RevisionDescriptor.Recipient curious = Adaptable.Factory.getAdapter(insp, RevisionDescriptor.Recipient.class, null);
 			if (curious != null) {
 				try {
 					curious.done(annotatedRevision);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/BundleGenerator.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
+import static org.tmatesoft.hg.repo.HgRepository.TIP;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.tmatesoft.hg.core.HgIOException;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.DataSerializer.OutputStreamSerializer;
+import org.tmatesoft.hg.internal.Patch.PatchDataSource;
+import org.tmatesoft.hg.repo.HgBundle;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgChangelog.RawChangeset;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgInternals;
+import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgManifest;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+
+/**
+ * @see http://mercurial.selenic.com/wiki/BundleFormat
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class BundleGenerator {
+
+	private final Internals repo;
+
+	public BundleGenerator(Internals hgRepo) {
+		repo = hgRepo;
+	}
+	
+	public File create(List<Nodeid> changesets) throws HgIOException, IOException {
+		final HgChangelog clog = repo.getRepo().getChangelog();
+		final HgManifest manifest = repo.getRepo().getManifest();
+		IntVector clogRevsVector = new IntVector(changesets.size(), 0);
+		for (Nodeid n : changesets) {
+			clogRevsVector.add(clog.getRevisionIndex(n));
+		}
+		clogRevsVector.sort(true);
+		final int[] clogRevs = clogRevsVector.toArray();
+		final IntMap<Nodeid> clogMap = new IntMap<Nodeid>(changesets.size());
+		final IntVector manifestRevs = new IntVector(changesets.size(), 0);
+		final List<HgDataFile> files = new ArrayList<HgDataFile>();
+		clog.range(new HgChangelog.Inspector() {
+			private Set<String> seenFiles = new HashSet<String>();
+			public void next(int revisionIndex, Nodeid nodeid, RawChangeset cset) throws HgRuntimeException {
+				clogMap.put(revisionIndex, nodeid);
+				manifestRevs.add(manifest.getRevisionIndex(cset.manifest()));
+				for (String f : cset.files()) {
+					if (seenFiles.contains(f)) {
+						continue;
+					}
+					seenFiles.add(f);
+					HgDataFile df = repo.getRepo().getFileNode(f);
+					files.add(df);
+				}
+			}
+		}, clogRevs);
+		manifestRevs.sort(true);
+		//
+		final File bundleFile = File.createTempFile("hg4j-", ".bundle");
+		final FileOutputStream osBundle = new FileOutputStream(bundleFile);
+		final OutputStreamSerializer outRaw = new OutputStreamSerializer(osBundle);
+		outRaw.write("HG10UN".getBytes(), 0, 6);
+		//
+		RevlogStream clogStream = repo.getImplAccess().getChangelogStream();
+		new ChunkGenerator(outRaw, clogMap).iterate(clogStream, clogRevs);
+		outRaw.writeInt(0); // null chunk for changelog group
+		//
+		RevlogStream manifestStream = repo.getImplAccess().getManifestStream();
+		new ChunkGenerator(outRaw, clogMap).iterate(manifestStream, manifestRevs.toArray(true));
+		outRaw.writeInt(0); // null chunk for manifest group
+		//
+		for (HgDataFile df : sortedByName(files)) {
+			RevlogStream s = repo.getImplAccess().getStream(df);
+			final IntVector fileRevs = new IntVector();
+			s.iterate(0, TIP, false, new RevlogStream.Inspector() {
+				
+				public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) throws HgRuntimeException {
+					if (Arrays.binarySearch(clogRevs, linkRevision) >= 0) {
+						fileRevs.add(revisionIndex);
+					}
+				}
+			});
+			fileRevs.sort(true);
+			if (!fileRevs.isEmpty()) {
+				// although BundleFormat page says "filename length, filename" for a file,
+				// in fact there's a sort of 'filename chunk', i.e. filename length field includes
+				// not only length of filename, but also length of the field itself, i.e. filename.length+sizeof(int)
+				byte[] fnameBytes = df.getPath().toString().getBytes(); // FIXME check encoding in native hg (and fix accordingly in HgBundle)
+				outRaw.writeInt(fnameBytes.length + 4);
+				outRaw.writeByte(fnameBytes);
+				new ChunkGenerator(outRaw, clogMap).iterate(s, fileRevs.toArray(true));
+				outRaw.writeInt(0); // null chunk for file group
+			}
+		}
+		outRaw.writeInt(0); // null chunk to indicate no more files (although BundleFormat page doesn't mention this)
+		outRaw.done();
+		osBundle.flush();
+		osBundle.close();
+		//return new HgBundle(repo.getSessionContext(), repo.getDataAccess(), bundleFile);
+		return bundleFile;
+	}
+	
+	private static Collection<HgDataFile> sortedByName(List<HgDataFile> files) {
+		Collections.sort(files, new Comparator<HgDataFile>() {
+
+			public int compare(HgDataFile o1, HgDataFile o2) {
+				return o1.getPath().compareTo(o2.getPath());
+			}
+		});
+		return files;
+	}
+	
+	
+	public static void main(String[] args) throws Exception {
+		final HgLookup hgLookup = new HgLookup();
+		HgRepository hgRepo = hgLookup.detectFromWorkingDir();
+		BundleGenerator bg = new BundleGenerator(HgInternals.getImplementationRepo(hgRepo));
+		ArrayList<Nodeid> l = new ArrayList<Nodeid>();
+		l.add(Nodeid.fromAscii("9ef1fab9f5e3d51d70941121dc27410e28069c2d")); // 640
+		l.add(Nodeid.fromAscii("2f33f102a8fa59274a27ebbe1c2903cecac6c5d5")); // 639
+		l.add(Nodeid.fromAscii("d074971287478f69ab0a64176ce2284d8c1e91c3")); // 638
+		File bundleFile = bg.create(l);
+		HgBundle b = hgLookup.loadBundle(bundleFile);
+//		Bundle.dump(b); // FIXME dependency from dependent code
+	}
+
+	private static class ChunkGenerator implements RevlogStream.Inspector {
+		
+		private final DataSerializer ds;
+		private final IntMap<Nodeid> parentMap;
+		private final IntMap<Nodeid> clogMap;
+		private byte[] prevContent;
+		private int startParent;
+
+		public ChunkGenerator(DataSerializer dataSerializer, IntMap<Nodeid> clogNodeidMap) {
+			ds = dataSerializer;
+			parentMap = new IntMap<Nodeid>(clogNodeidMap.size());
+			clogMap = clogNodeidMap;
+		}
+		
+		public void iterate(RevlogStream s, int[] revisions) throws HgRuntimeException {
+			int[] p = s.parents(revisions[0], new int[2]);
+			startParent = p[0];
+			int[] revs2read;
+			if (startParent == NO_REVISION) {
+				revs2read = revisions;
+				prevContent = new byte[0];
+			} else {
+				revs2read = new int[revisions.length + 1];
+				revs2read[0] = startParent;
+				System.arraycopy(revisions, 0, revs2read, 1, revisions.length);
+			}
+			// FIXME this is a hack to fill parentMap with 
+			// parents of elements that we are not going to meet with regular
+			// iteration, e.g. changes from a different branch (with some older parent),
+			// scenario: two revisions added to two different branches
+			// revisions[10, 11], parents(10) == 9, parents(11) == 7
+			// revs2read == [9,10,11], and parentMap lacks entry for parent rev7.
+			fillMissingParentsMap(s, revisions);
+			s.iterate(revs2read, true, this);
+		}
+		
+		private void fillMissingParentsMap(RevlogStream s, int[] revisions) throws HgRuntimeException {
+			int[] p = new int[2];
+			for (int i = 1; i < revisions.length; i++) {
+				s.parents(revisions[i], p);
+				if (p[0] != NO_REVISION && Arrays.binarySearch(revisions, p[0]) < 0) {
+					parentMap.put(p[0], Nodeid.fromBinary(s.nodeid(p[0]), 0));
+				}
+				if (p[1] != NO_REVISION && Arrays.binarySearch(revisions, p[1]) < 0) {
+					parentMap.put(p[1], Nodeid.fromBinary(s.nodeid(p[1]), 0));
+				}
+			}
+		}
+		
+		public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) throws HgRuntimeException {
+			try {
+				parentMap.put(revisionIndex, Nodeid.fromBinary(nodeid, 0));
+				byte[] nextContent = data.byteArray();
+				data.done();
+				if (revisionIndex == startParent) {
+					prevContent = nextContent;
+					return;
+				}
+				Patch p = GeneratePatchInspector.delta(prevContent, nextContent);
+				prevContent = nextContent;
+				nextContent = null;
+				PatchDataSource pds = p.new PatchDataSource();
+				int len = pds.serializeLength() + 84;
+				ds.writeInt(len);
+				ds.write(nodeid, 0, Nodeid.SIZE);
+				// TODO assert parents match those in previous group elements
+				if (parent1Revision != NO_REVISION) {
+					ds.writeByte(parentMap.get(parent1Revision).toByteArray());
+				} else {
+					ds.writeByte(Nodeid.NULL.toByteArray());
+				}
+				if (parent2Revision != NO_REVISION) {
+					ds.writeByte(parentMap.get(parent2Revision).toByteArray());
+				} else {
+					ds.writeByte(Nodeid.NULL.toByteArray());
+				}
+				ds.writeByte(clogMap.get(linkRevision).toByteArray());
+				pds.serialize(ds);
+			} catch (IOException ex) {
+				// XXX odd to have object with IOException to use where no checked exception is allowed 
+				throw new HgInvalidControlFileException(ex.getMessage(), ex, null); 
+			} catch (HgIOException ex) {
+				throw new HgInvalidControlFileException(ex, true); // XXX any way to refactor ChunkGenerator not to get checked exception here?
+			}
+		}
+	}
+}
--- a/src/org/tmatesoft/hg/internal/ByteArrayChannel.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/ByteArrayChannel.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -23,7 +23,8 @@
 import org.tmatesoft.hg.util.ByteChannel;
 
 /**
- *
+ * {@link ByteChannel} implementation that serializes data into a byte array
+ * 
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
@@ -48,7 +49,10 @@
 		}
 	}
 
-	// TODO document what happens on write after toArray() in each case
+	/*
+	 * {@link #toArray()} calls do not clear data collected so far, subsequent {@link #write(ByteBuffer)}  
+	 * augment collected content.
+	 */
 	public int write(ByteBuffer buffer) {
 		int rv = buffer.remaining();
 		if (buffers == null) {
@@ -58,9 +62,13 @@
 			copy.put(buffer);
 			buffers.add(copy);
 		}
+		result = null;
 		return rv;
 	}
 
+	/**
+	 * @return content accumulated so far
+	 */
 	public byte[] toArray() {
 		if (result != null) {
 			return result;
@@ -84,7 +92,6 @@
 				bb.get(result, off, bb.limit());
 				off += bb.limit();
 			}
-			buffers.clear();
 			return result;
 		}
 	}
--- a/src/org/tmatesoft/hg/internal/ByteArrayDataAccess.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/ByteArrayDataAccess.java	Wed Jul 10 11:48:55 2013 +0200
@@ -54,9 +54,32 @@
 		if (len > (this.length - pos)) {
 			throw new IOException();
 		}
-		System.arraycopy(data, pos, buf, off, len);
+		System.arraycopy(data, offset+pos, buf, off, len);
 		pos += len;
 	}
+	@Override
+	public int readInt() throws IOException {
+		// overridden not to create an intermediate array
+		if (length - pos < 4) {
+			throw new IOException();
+		}
+		int x = offset + pos;
+		int rv = data[x++] << 24 | (data[x++] & 0xFF) << 16 | (data[x++] & 0xFF) << 8 | (data[x] & 0xFF);
+		pos += 4;
+		return rv;
+	}
+	@Override
+	public long readLong() throws IOException {
+		// overridden not to create an intermediate array
+		if (length - pos < 8) {
+			throw new IOException();
+		}
+		int x = offset + pos;
+		int i1 = data[x++] << 24 | (data[x++] & 0xFF) << 16 | (data[x++] & 0xFF) << 8 | (data[x++] & 0xFF);
+		int i2 = data[x++] << 24 | (data[x++] & 0xFF) << 16 | (data[x++] & 0xFF) << 8 | (data[x] & 0xFF);
+		pos += 8;
+		return ((long) i1) << 32 | ((long) i2 & 0x0FFFFFFFFl);
+	}
 
 	@Override
 	public ByteArrayDataAccess reset() {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/COWTransaction.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.tmatesoft.hg.core.HgIOException;
+import org.tmatesoft.hg.core.SessionContext;
+
+/**
+ * This transaction strategy makes a copy of original file and breaks origin hard links, if any.
+ * Changes are directed to actual repository files.
+ * 
+ * On commit, remove all backup copies
+ * On rollback, move all backup files in place of original
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class COWTransaction extends Transaction {
+	
+	private final FileUtils fileHelper;
+	private final List<RollbackEntry> entries = new LinkedList<RollbackEntry>();
+	
+	public COWTransaction(SessionContext.Source ctxSource) {
+		fileHelper = new FileUtils(ctxSource.getSessionContext().getLog(), this);
+	}
+
+	@Override
+	public File prepare(File f) throws HgIOException {
+		if (known(f)) {
+			return f;
+		}
+		if (!f.exists()) {
+			return recordNonExistent(f);
+		}
+		final File parentDir = f.getParentFile();
+		assert parentDir.canWrite();
+		File copy = new File(parentDir, f.getName() + ".hg4j.copy");
+		fileHelper.copy(f, copy);
+		final long lm = f.lastModified();
+		copy.setLastModified(lm);
+		File backup = new File(parentDir, f.getName() + ".hg4j.orig");
+		if (backup.exists()) {
+			backup.delete();
+		}
+		if (!f.renameTo(backup)) {
+			throw new HgIOException(String.format("Failed to backup %s to %s", f.getName(), backup.getName()), backup);
+		}
+		if (!copy.renameTo(f)) {
+			throw new HgIOException(String.format("Failed to bring on-write copy in place (%s to %s)", copy.getName(), f.getName()), copy);
+		}
+		f.setLastModified(lm);
+		record(f, backup);
+		return f;
+	}
+
+	@Override
+	public File prepare(File origin, File backup) throws HgIOException {
+		if (known(origin)) {
+			return origin;
+		}
+		if (!origin.exists()) {
+			return recordNonExistent(origin);
+		}
+		fileHelper.copy(origin, backup);
+		final RollbackEntry e = record(origin, backup);
+		e.keepBackup = true;
+		return origin;
+	}
+
+	@Override
+	public void done(File f) throws HgIOException {
+		find(f).success = true;
+	}
+
+	@Override
+	public void failure(File f, IOException ex) {
+		find(f).failure = ex;
+	}
+
+	// XXX custom exception for commit and rollback to hold information about files rolled back
+	
+	@Override
+	public void commit() throws HgIOException {
+		for (Iterator<RollbackEntry> it = entries.iterator(); it.hasNext();) {
+			RollbackEntry e = it.next();
+			assert e.success;
+			if (e.failure != null) {
+				throw new HgIOException("Can't close transaction with a failure.", e.failure, e.origin);
+			}
+			if (!e.keepBackup && e.backup != null) {
+				e.backup.delete();
+			}
+			it.remove();
+		}
+	}
+
+	@Override
+	public void rollback() throws HgIOException {
+		LinkedList<RollbackEntry> success = new LinkedList<RollbackEntry>();
+		for (Iterator<RollbackEntry> it = entries.iterator(); it.hasNext();) {
+			RollbackEntry e = it.next();
+			e.origin.delete();
+			if (e.backup != null) {
+				if (!e.backup.renameTo(e.origin)) {
+					String msg = String.format("Transaction rollback failed, could not rename backup %s back to %s", e.backup.getName(), e.origin.getName());
+					throw new HgIOException(msg, e.origin);
+				}
+				// renameTo() doesn't update timestamp, while the rest of the code relies
+				// on file timestamp to detect revlog changes. Rollback *is* a change,
+				// even if it brings the old state.
+				e.origin.setLastModified(System.currentTimeMillis());
+			}
+			success.add(e);
+			it.remove();
+		}
+	}
+
+	private File recordNonExistent(File f) throws HgIOException {
+		record(f, null);
+		try {
+			f.getParentFile().mkdirs();
+			f.createNewFile();
+			return f;
+		} catch (IOException ex) {
+			throw new HgIOException("Failed to create new file", ex, f);
+		}
+	}
+	
+	private RollbackEntry record(File origin, File backup) {
+		final RollbackEntry e = new RollbackEntry(origin, backup);
+		entries.add(e);
+		return e;
+	}
+
+	private boolean known(File f) {
+		RollbackEntry e = lookup(f);
+		return e != null;
+	}
+
+	private RollbackEntry find(File f) {
+		RollbackEntry e = lookup(f);
+		if (e != null) {
+			return e;
+		}
+		assert false;
+		return new RollbackEntry(f,f);
+	}
+	
+	private RollbackEntry lookup(File f) {
+		for (RollbackEntry e : entries) {
+			if (e.origin.equals(f)) {
+				return e;
+			}
+		}
+		return null;
+	}
+	
+	private static class RollbackEntry {
+		public final File origin;
+		public final File backup; // may be null to indicate file didn't exist
+		public boolean success = false;
+		public IOException failure = null;
+		public boolean keepBackup = false;
+		
+		public RollbackEntry(File o, File b) {
+			origin = o;
+			backup = b;
+		}
+	}
+	
+	public static class Factory implements Transaction.Factory {
+
+		public Transaction create(SessionContext.Source ctxSource) {
+			return new COWTransaction(ctxSource);
+		}
+		
+	}
+}
--- a/src/org/tmatesoft/hg/internal/ChangelogEntryBuilder.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/ChangelogEntryBuilder.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 TMate Software Ltd
+ * Copyright (c) 2012-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,6 +16,8 @@
  */
 package org.tmatesoft.hg.internal;
 
+import java.io.ByteArrayOutputStream;
+import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -23,24 +25,29 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.TimeZone;
 import java.util.Map.Entry;
+import java.util.TimeZone;
 
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.DataSerializer.DataSource;
+import org.tmatesoft.hg.repo.HgInvalidStateException;
 import org.tmatesoft.hg.util.Path;
 
 /**
- *
+ * Builds changelog entry
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-public class ChangelogEntryBuilder {
+public class ChangelogEntryBuilder implements DataSource {
 
 	private String user;
 	private List<Path> modifiedFiles;
 	private final Map<String, String> extrasMap = new LinkedHashMap<String, String>();
 	private Integer tzOffset;
 	private Long csetTime;
+	private Nodeid manifestRev;
+	private CharSequence comment;
 	
 	public ChangelogEntryBuilder user(String username) {
 		user = username;
@@ -89,6 +96,93 @@
 		return this;
 	}
 	
+	public ChangelogEntryBuilder manifest(Nodeid manifestRevision) {
+		manifestRev = manifestRevision;
+		return this;
+	}
+	
+	public ChangelogEntryBuilder comment(CharSequence commentString) {
+		comment = commentString;
+		return this;
+	}
+
+	public void serialize(DataSerializer out) throws HgIOException {
+		byte[] b = build();
+		out.write(b, 0, b.length);
+	}
+
+	public int serializeLength() {
+		return -1;
+	}
+
+	public byte[] build() {
+		try {
+			ByteArrayOutputStream out = new ByteArrayOutputStream();
+			final int LF = '\n';
+			CharSequence extras = buildExtras();
+			CharSequence files = buildFiles();
+			byte[] manifestRevision = manifestRev.toString().getBytes();
+			byte[] username = user().getBytes(EncodingHelper.getUTF8().name()); // XXX Java 1.5
+			out.write(manifestRevision, 0, manifestRevision.length);
+			out.write(LF);
+			out.write(username, 0, username.length);
+			out.write(LF);
+			final long csetDate = csetTime();
+			byte[] date = String.format("%d %d", csetDate, csetTimezone(csetDate)).getBytes();
+			out.write(date, 0, date.length);
+			if (extras.length() > 0) {
+				out.write(' ');
+				byte[] b = extras.toString().getBytes();
+				out.write(b, 0, b.length);
+			}
+			out.write(LF);
+			byte[] b = files.toString().getBytes();
+			out.write(b, 0, b.length);
+			out.write(LF);
+			out.write(LF);
+			byte[] cmt = comment.toString().getBytes(EncodingHelper.getUTF8().name()); // XXX Java 1.5
+			out.write(cmt, 0, cmt.length);
+			return out.toByteArray();
+		} catch (UnsupportedEncodingException ex) {
+			throw new HgInvalidStateException(ex.getMessage()); // Can't happen, UTF8 is always there
+		}
+	}
+
+	private CharSequence buildExtras() {
+		StringBuilder extras = new StringBuilder();
+		for (Iterator<Entry<String, String>> it = extrasMap.entrySet().iterator(); it.hasNext();) {
+			final Entry<String, String> next = it.next();
+			extras.append(encodeExtrasPair(next.getKey()));
+			extras.append(':');
+			extras.append(encodeExtrasPair(next.getValue()));
+			if (it.hasNext()) {
+				extras.append('\00');
+			}
+		}
+		return extras;
+	}
+
+	private CharSequence buildFiles() {
+		StringBuilder files = new StringBuilder();
+		if (modifiedFiles != null) {
+			Collections.sort(modifiedFiles);
+			for (Iterator<Path> it = modifiedFiles.iterator(); it.hasNext(); ) {
+				files.append(it.next());
+				if (it.hasNext()) {
+					files.append('\n');
+				}
+			}
+		}
+		return files;
+	}
+
+	private final static CharSequence encodeExtrasPair(String s) {
+		if (s != null) {
+			return s.replace("\\", "\\\\").replace("\n", "\\n").replace("\r", "\\r").replace("\00", "\\0");
+		}
+		return s;
+	}
+
 	private long csetTime() {
 		if (csetTime != null) { 
 			return csetTime;
@@ -102,37 +196,4 @@
 		}
 		return -(TimeZone.getDefault().getOffset(time) / 1000);
 	}
-
-	public byte[] build(Nodeid manifestRevision, String comment) {
-		String f = "%s\n%s\n%d %d %s\n%s\n\n%s";
-		StringBuilder extras = new StringBuilder();
-		for (Iterator<Entry<String, String>> it = extrasMap.entrySet().iterator(); it.hasNext();) {
-			final Entry<String, String> next = it.next();
-			extras.append(encodeExtrasPair(next.getKey()));
-			extras.append(':');
-			extras.append(encodeExtrasPair(next.getValue()));
-			if (it.hasNext()) {
-				extras.append('\00');
-			}
-		}
-		StringBuilder files = new StringBuilder();
-		if (modifiedFiles != null) {
-			for (Iterator<Path> it = modifiedFiles.iterator(); it.hasNext(); ) {
-				files.append(it.next());
-				if (it.hasNext()) {
-					files.append('\n');
-				}
-			}
-		}
-		final long date = csetTime();
-		final int tz = csetTimezone(date);
-		return String.format(f, manifestRevision.toString(), user(), date, tz, extras, files, comment).getBytes();
-	}
-
-	private final static CharSequence encodeExtrasPair(String s) {
-		if (s != null) {
-			return s.replace("\\", "\\\\").replace("\n", "\\n").replace("\r", "\\r").replace("\00", "\\0");
-		}
-		return s;
-	}
 }
--- a/src/org/tmatesoft/hg/internal/ChangelogHelper.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/ChangelogHelper.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -19,8 +19,8 @@
 import org.tmatesoft.hg.repo.HgChangelog.RawChangeset;
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgInternals;
-import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Path;
 
 /**
@@ -56,7 +56,7 @@
 	 * @param file
 	 * @return changeset where specified file is mentioned among affected files, or <code>null</code> if none found up to leftBoundary
 	 */
-	public RawChangeset findLatestChangeWith(Path file) throws HgInvalidControlFileException {
+	public RawChangeset findLatestChangeWith(Path file) throws HgRuntimeException {
 		HgDataFile df = repo.getFileNode(file);
 		if (!df.exists()) {
 			return null;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/ChangelogMonitor.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.core.SessionContext;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+
+/**
+ * Track changes to a repository based on recent changelog revision.
+ * TODO shall be merged with {@link RevlogChangeMonitor} and {@link FileChangeMonitor} into 
+ * a single facility available from {@link SessionContext}
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class ChangelogMonitor {
+	private final HgRepository repo;
+	private int changelogRevCount = -1;
+	private Nodeid changelogLastRev = null;
+	
+	public ChangelogMonitor(HgRepository hgRepo) {
+		repo = hgRepo;
+	}
+	
+	// memorize the state of the repository's changelog
+	public void touch() throws HgRuntimeException {
+		changelogRevCount = repo.getChangelog().getRevisionCount();
+		changelogLastRev = safeGetRevision(changelogRevCount-1);
+	}
+	
+	// if present state doesn't match the one we remember
+	public boolean isChanged() throws HgRuntimeException {
+		int rc = repo.getChangelog().getRevisionCount();
+		if (rc != changelogRevCount) {
+			return true;
+		}
+		Nodeid r = safeGetRevision(rc-1);
+		return !r.equals(changelogLastRev);
+	}
+	
+	// handles empty repository case
+	private Nodeid safeGetRevision(int revIndex) throws HgRuntimeException {
+		if (revIndex >= 0) {
+			return repo.getChangelog().getRevision(revIndex);
+		}
+		return Nodeid.NULL;
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/CommitFacility.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.repo.HgRepository.DEFAULT_BRANCH_NAME;
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
+import static org.tmatesoft.hg.repo.HgRepositoryFiles.*;
+import static org.tmatesoft.hg.repo.HgRepositoryFiles.Branch;
+import static org.tmatesoft.hg.repo.HgRepositoryFiles.UndoBranch;
+import static org.tmatesoft.hg.util.LogFacility.Severity.Error;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.tmatesoft.hg.core.HgCommitCommand;
+import org.tmatesoft.hg.core.HgIOException;
+import org.tmatesoft.hg.core.HgRepositoryLockException;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.DataSerializer.DataSource;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgPhase;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.Pair;
+import org.tmatesoft.hg.util.Path;
+
+/**
+ * Name: CommitObject, FutureCommit or PendingCommit
+ * The only public API now: {@link HgCommitCommand}.
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class CommitFacility {
+	private final Internals repo;
+	private final int p1Commit, p2Commit;
+	private Map<Path, Pair<HgDataFile, DataSource>> files = new LinkedHashMap<Path, Pair<HgDataFile, DataSource>>();
+	private Set<Path> removals = new TreeSet<Path>();
+	private String branch, user;
+
+	public CommitFacility(Internals hgRepo, int parentCommit) {
+		this(hgRepo, parentCommit, NO_REVISION);
+	}
+	
+	public CommitFacility(Internals hgRepo, int parent1Commit, int parent2Commit) {
+		repo = hgRepo;
+		p1Commit = parent1Commit;
+		p2Commit = parent2Commit;
+		if (parent1Commit != NO_REVISION && parent1Commit == parent2Commit) {
+			throw new IllegalArgumentException("Merging same revision is dubious");
+		}
+	}
+
+	public boolean isMerge() {
+		return p1Commit != NO_REVISION && p2Commit != NO_REVISION;
+	}
+
+	public void add(HgDataFile dataFile, DataSource content) {
+		if (content == null) {
+			throw new IllegalArgumentException();
+		}
+		removals.remove(dataFile.getPath());
+		files.put(dataFile.getPath(), new Pair<HgDataFile, DataSource>(dataFile, content));
+	}
+
+	public void forget(HgDataFile dataFile) {
+		files.remove(dataFile.getPath());
+		removals.add(dataFile.getPath());
+	}
+	
+	public void branch(String branchName) {
+		branch = branchName;
+	}
+	
+	public void user(String userName) {
+		user = userName;
+	}
+	
+	// this method doesn't roll transaction back in case of failure, caller's responsibility
+	// this method expects repository to be locked, if needed
+	public Nodeid commit(String message, Transaction transaction) throws HgIOException, HgRepositoryLockException, HgRuntimeException {
+		final HgChangelog clog = repo.getRepo().getChangelog();
+		final int clogRevisionIndex = clog.getRevisionCount();
+		ManifestRevision c1Manifest = new ManifestRevision(null, null);
+		ManifestRevision c2Manifest = new ManifestRevision(null, null);
+		final Nodeid p1Cset = p1Commit == NO_REVISION ? null : clog.getRevision(p1Commit);
+		final Nodeid p2Cset = p2Commit == NO_REVISION ? null : clog.getRevision(p2Commit);
+		if (p1Commit != NO_REVISION) {
+			repo.getRepo().getManifest().walk(p1Commit, p1Commit, c1Manifest);
+		}
+		if (p2Commit != NO_REVISION) {
+			repo.getRepo().getManifest().walk(p2Commit, p2Commit, c2Manifest);
+		}
+//		Pair<Integer, Integer> manifestParents = getManifestParents();
+		Pair<Integer, Integer> manifestParents = new Pair<Integer, Integer>(c1Manifest.revisionIndex(), c2Manifest.revisionIndex());
+		TreeMap<Path, Nodeid> newManifestRevision = new TreeMap<Path, Nodeid>();
+		HashMap<Path, Pair<Integer, Integer>> fileParents = new HashMap<Path, Pair<Integer,Integer>>();
+		for (Path f : c1Manifest.files()) {
+			HgDataFile df = repo.getRepo().getFileNode(f);
+			Nodeid fileKnownRev1 = c1Manifest.nodeid(f), fileKnownRev2;
+			final int fileRevIndex1 = df.getRevisionIndex(fileKnownRev1);
+			final int fileRevIndex2;
+			if ((fileKnownRev2 = c2Manifest.nodeid(f)) != null) {
+				// merged files
+				fileRevIndex2 = df.getRevisionIndex(fileKnownRev2);
+			} else {
+				fileRevIndex2 = NO_REVISION;
+			}
+				
+			fileParents.put(f, new Pair<Integer, Integer>(fileRevIndex1, fileRevIndex2));
+			newManifestRevision.put(f, fileKnownRev1);
+		}
+		//
+		// Forget removed
+		for (Path p : removals) {
+			newManifestRevision.remove(p);
+		}
+		//
+		saveCommitMessage(message);
+		//
+		// Register new/changed
+		LinkedHashMap<Path, RevlogStream> newlyAddedFiles = new LinkedHashMap<Path, RevlogStream>();
+		ArrayList<Path> touchInDirstate = new ArrayList<Path>();
+		for (Pair<HgDataFile, DataSource> e : files.values()) {
+			HgDataFile df = e.first();
+			DataSource bds = e.second();
+			Pair<Integer, Integer> fp = fileParents.get(df.getPath());
+			if (fp == null) {
+				// NEW FILE
+				fp = new Pair<Integer, Integer>(NO_REVISION, NO_REVISION);
+			}
+			RevlogStream contentStream = repo.getImplAccess().getStream(df);
+			if (!df.exists()) {
+				newlyAddedFiles.put(df.getPath(), contentStream);
+			}
+			RevlogStreamWriter fileWriter = new RevlogStreamWriter(repo, contentStream, transaction);
+			Nodeid fileRev = fileWriter.addRevision(bds, clogRevisionIndex, fp.first(), fp.second()).second();
+			newManifestRevision.put(df.getPath(), fileRev);
+			touchInDirstate.add(df.getPath());
+		}
+		//
+		// Manifest
+		final ManifestEntryBuilder manifestBuilder = new ManifestEntryBuilder(repo.buildFileNameEncodingHelper());
+		for (Map.Entry<Path, Nodeid> me : newManifestRevision.entrySet()) {
+			manifestBuilder.add(me.getKey().toString(), me.getValue());
+		}
+		RevlogStreamWriter manifestWriter = new RevlogStreamWriter(repo, repo.getImplAccess().getManifestStream(), transaction);
+		Nodeid manifestRev = manifestWriter.addRevision(manifestBuilder, clogRevisionIndex, manifestParents.first(), manifestParents.second()).second();
+		//
+		// Changelog
+		final ChangelogEntryBuilder changelogBuilder = new ChangelogEntryBuilder();
+		changelogBuilder.setModified(files.keySet());
+		changelogBuilder.branch(branch == null ? DEFAULT_BRANCH_NAME : branch);
+		changelogBuilder.user(String.valueOf(user));
+		changelogBuilder.manifest(manifestRev).comment(message);
+		RevlogStreamWriter changelogWriter = new RevlogStreamWriter(repo, repo.getImplAccess().getChangelogStream(), transaction);
+		Nodeid changesetRev = changelogWriter.addRevision(changelogBuilder, clogRevisionIndex, p1Commit, p2Commit).second();
+		// TODO move fncache update to an external facility, along with dirstate and bookmark update
+		if (!newlyAddedFiles.isEmpty() && repo.fncacheInUse()) {
+			FNCacheFile fncache = new FNCacheFile(repo);
+			for (Path p : newlyAddedFiles.keySet()) {
+				fncache.addIndex(p);
+				if (!newlyAddedFiles.get(p).isInlineData()) {
+					fncache.addData(p);
+				}
+			}
+			try {
+				fncache.write();
+			} catch (IOException ex) {
+				// see comment above for fncache.read()
+				repo.getLog().dump(getClass(), Error, ex, "Failed to write fncache, error ignored");
+			}
+		}
+		String oldBranchValue = DirstateReader.readBranch(repo);
+		String newBranchValue = branch == null ? DEFAULT_BRANCH_NAME : branch;
+		if (!oldBranchValue.equals(newBranchValue)) {
+			// prepare undo.branch as described in http://mercurial.selenic.com/wiki/FileFormats#undo..2A
+			File branchFile = transaction.prepare(repo.getRepositoryFile(Branch), repo.getRepositoryFile(UndoBranch));
+			FileOutputStream fos = null;
+			try {
+				fos = new FileOutputStream(branchFile);
+				fos.write(newBranchValue.getBytes(EncodingHelper.getUTF8().name())); // XXX Java 1.5
+				fos.flush();
+				fos.close();
+				fos = null;
+				transaction.done(branchFile);
+			} catch (IOException ex) {
+				transaction.failure(branchFile, ex);
+				repo.getLog().dump(getClass(), Error, ex, "Failed to write branch information, error ignored");
+			} finally {
+				try {
+					if (fos != null) {
+						fos.close();
+					}
+				} catch (IOException ex) {
+					repo.getLog().dump(getClass(), Error, ex, null);
+				}
+			}
+		}
+		// bring dirstate up to commit state, TODO share this code with HgAddRemoveCommand
+		final DirstateBuilder dirstateBuilder = new DirstateBuilder(repo);
+		dirstateBuilder.fillFrom(new DirstateReader(repo, new Path.SimpleSource()));
+		for (Path p : removals) {
+			dirstateBuilder.recordRemoved(p);
+		}
+		for (Path p : touchInDirstate) {
+			dirstateBuilder.recordUncertain(p);
+		}
+		dirstateBuilder.parents(changesetRev, Nodeid.NULL);
+		dirstateBuilder.serialize(transaction);
+		// update bookmarks
+		if (p1Commit != NO_REVISION || p2Commit != NO_REVISION) {
+			repo.getRepo().getBookmarks().updateActive(p1Cset, p2Cset, changesetRev);
+		}
+		PhasesHelper phaseHelper = new PhasesHelper(repo);
+		HgPhase newCommitPhase = HgPhase.parse(repo.getRepo().getConfiguration().getStringValue("phases", "new-commit", HgPhase.Draft.mercurialString()));
+		phaseHelper.newCommitNode(changesetRev, newCommitPhase);
+		// TODO Revisit: might be reasonable to send out a "Repo changed" notification, to clear
+		// e.g. cached branch, tags and so on, not to rely on file change detection methods?
+		// The same notification might come useful once Pull is implemented
+		return changesetRev;
+	}
+	
+	private void saveCommitMessage(String message) throws HgIOException {
+		File lastMessage = repo.getRepositoryFile(LastMessage);
+		// do not attempt to write if we are going to fail anyway
+		if ((lastMessage.isFile() && !lastMessage.canWrite()) || !lastMessage.getParentFile().canWrite()) {
+			return;
+		}
+		FileWriter w = null;
+		try {
+			w = new FileWriter(lastMessage);
+			w.write(message == null ? new String() : message);
+			w.flush();
+		} catch (IOException ex) {
+			throw new HgIOException("Failed to save last commit message", ex, lastMessage);
+		} finally {
+			new FileUtils(repo.getLog(), this).closeQuietly(w, lastMessage);
+		}
+	}
+/*
+	private Pair<Integer, Integer> getManifestParents() {
+		return new Pair<Integer, Integer>(extractManifestRevisionIndex(p1Commit), extractManifestRevisionIndex(p2Commit));
+	}
+
+	private int extractManifestRevisionIndex(int clogRevIndex) {
+		if (clogRevIndex == NO_REVISION) {
+			return NO_REVISION;
+		}
+		RawChangeset commitObject = repo.getChangelog().range(clogRevIndex, clogRevIndex).get(0);
+		Nodeid manifestRev = commitObject.manifest();
+		if (manifestRev.isNull()) {
+			return NO_REVISION;
+		}
+		return repo.getManifest().getRevisionIndex(manifestRev);
+	}
+*/
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/CompleteRepoLock.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.util.LogFacility.Severity.Error;
+
+import org.tmatesoft.hg.core.HgRepositoryLockException;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRepositoryLock;
+import org.tmatesoft.hg.util.LogFacility;
+
+/**
+ * Helper to lock both storage and working directory
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class CompleteRepoLock {
+
+	private final HgRepository repo;
+	private HgRepositoryLock wdLock, storeLock;
+
+	public CompleteRepoLock(HgRepository hgRepo) {
+		repo = hgRepo;
+	}
+
+	public void acquire() throws HgRepositoryLockException {
+		wdLock = repo.getWorkingDirLock();
+		storeLock = repo.getStoreLock();
+		wdLock.acquire();
+		try {
+			storeLock.acquire();
+		} catch (HgRepositoryLockException ex) {
+			try {
+				wdLock.release();
+			} catch (HgRepositoryLockException e2) {
+				final LogFacility log = repo.getSessionContext().getLog();
+				log.dump(getClass(), Error, e2, "Nested exception ignored once failed to acquire store lock");
+			}
+			throw ex;
+		}
+
+	}
+	
+	public void release() throws HgRepositoryLockException {
+		try {
+			storeLock.release();
+		} catch (HgRepositoryLockException ex) {
+			try {
+				wdLock.release();
+			} catch (HgRepositoryLockException e2) {
+				final LogFacility log = repo.getSessionContext().getLog();
+				log.dump(getClass(), Error, e2, "Nested exception ignored when releasing working directory lock");
+			}
+			throw ex;
+		}
+		wdLock.release();
+	}
+}
--- a/src/org/tmatesoft/hg/internal/ConfigFile.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/ConfigFile.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -31,8 +31,8 @@
 import java.util.List;
 import java.util.Map;
 
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.SessionContext;
-import org.tmatesoft.hg.repo.HgInvalidFileException;
 import org.tmatesoft.hg.util.LogFacility;
 
 /**
@@ -50,7 +50,7 @@
 		sessionContext = ctx;
 	}
 
-	public void addLocation(File path) throws HgInvalidFileException {
+	public void addLocation(File path) throws HgIOException {
 		read(path);
 	}
 	
@@ -125,7 +125,7 @@
 		}
 	}
 	
-	private void read(File f) throws HgInvalidFileException {
+	private void read(File f) throws HgIOException {
 		if (f == null || !f.canRead()) {
 			return;
 		}
@@ -183,7 +183,7 @@
 		private Map<String,String> section = new LinkedHashMap<String, String>();
 		private File contextFile;
 
-		// TODO "" and lists
+		// TODO [post-1.1] "" and lists
 		// XXX perhaps, single string to keep whole section with substrings for keys/values to minimize number of arrays (String.value)
 		public boolean consume(String line, ConfigFile cfg) throws IOException {
 			int x;
@@ -227,7 +227,7 @@
 			return true;
 		}
 		
-		public void go(File f, ConfigFile cfg) throws HgInvalidFileException {
+		public void go(File f, ConfigFile cfg) throws HgIOException {
 			contextFile = f;
 			LineReader lr = new LineReader(f, cfg.sessionContext.getLog());
 			lr.ignoreLineComments("#");
@@ -237,7 +237,7 @@
 		// include failure doesn't propagate
 		private void processInclude(String includeValue, ConfigFile cfg) {
 			File f; 
-			// TODO handle environment variable expansion
+			// TODO [post-1.1] handle environment variable expansion
 			if (includeValue.startsWith("~/")) {
 				f = new File(System.getProperty("user.home"), includeValue.substring(2));
 			} else {
@@ -250,7 +250,7 @@
 					LogFacility lf = cfg.sessionContext.getLog();
 					lf.dump(ConfigFile.class, LogFacility.Severity.Debug, "Can't read file to  include: %s", f);
 				}
-			} catch (HgInvalidFileException ex) {
+			} catch (HgIOException ex) {
 				LogFacility lf = cfg.sessionContext.getLog();
 				lf.dump(ConfigFile.class, LogFacility.Severity.Warn, "Can't include %s (%s)", f, includeValue);
 			}
--- a/src/org/tmatesoft/hg/internal/CsetParamKeeper.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/CsetParamKeeper.java	Wed Jul 10 11:48:55 2013 +0200
@@ -23,6 +23,7 @@
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgInvalidRevisionException;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 
 /**
  * Common code to keep changelog revision and to perform boundary check.
@@ -43,19 +44,25 @@
 			set(repo.getChangelog().getRevisionIndex(changeset));
 		} catch (HgInvalidRevisionException ex) {
 			throw new HgBadArgumentException("Can't find revision", ex).setRevision(changeset);
+		} catch (HgRuntimeException ex) {
+			throw new HgBadArgumentException(String.format("Can't initialize with revision %s", changeset.shortNotation()), ex);
 		}
 		return this;
 	}
 	
 	public CsetParamKeeper set(int changelogRevIndex) throws HgBadArgumentException {
-		int lastCsetIndex = repo.getChangelog().getLastRevision();
-		if (changelogRevIndex == HgRepository.TIP) {
-			changelogRevIndex = lastCsetIndex;
+		try {
+			int lastCsetIndex = repo.getChangelog().getLastRevision();
+			if (changelogRevIndex == HgRepository.TIP) {
+				changelogRevIndex = lastCsetIndex;
+			}
+			if (changelogRevIndex < 0 || changelogRevIndex > lastCsetIndex) {
+				throw new HgBadArgumentException(String.format("Bad revision index %d, value from [0..%d] expected", changelogRevIndex, lastCsetIndex), null).setRevisionIndex(changelogRevIndex);
+			}
+			doSet(changelogRevIndex);
+		} catch (HgRuntimeException ex) {
+			throw new HgBadArgumentException(String.format("Can't initialize with revision index %d", changelogRevIndex), ex);
 		}
-		if (changelogRevIndex < 0 || changelogRevIndex > lastCsetIndex) {
-			throw new HgBadArgumentException(String.format("Bad revision index %d, value from [0..%d] expected", changelogRevIndex, lastCsetIndex), null).setRevisionIndex(changelogRevIndex);
-		}
-		doSet(changelogRevIndex);
 		return this;
 	}
 	
@@ -74,7 +81,7 @@
 	 * @param defaultRevisionIndex value to return when no revision was set, may be {@link HgRepository#TIP} which gets translated to real index if used
 	 * @return changelog revision index if set, or defaultRevisionIndex value otherwise
 	 */
-	public int get(int defaultRevisionIndex) {
+	public int get(int defaultRevisionIndex) throws HgRuntimeException {
 		// XXX perhaps, shall translate other predefined constants (like WORKING COPY) here, too (e.g. for HgRevertCommand)
 		if (changelogRevisionIndex != BAD_REVISION || changelogRevisionIndex != TIP) {
 			return changelogRevisionIndex;
--- a/src/org/tmatesoft/hg/internal/DataAccess.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/DataAccess.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2012 TMate Software Ltd
+ * Copyright (c) 2010-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -99,9 +99,10 @@
 		}
 		throw new IOException(String.format("No data, can't read %d bytes", length));
 	}
-	// reads bytes into ByteBuffer, up to its limit or total data length, whichever smaller
-	// TODO post-1.0 perhaps, in DataAccess paradigm (when we read known number of bytes, we shall pass specific byte count to read)
-	// for 1.0, it's ok as it's our internal class
+	/**
+	 * reads bytes into ByteBuffer, up to its limit or total data length, whichever smaller.
+	 * XXX perhaps, in DataAccess paradigm (when we read known number of bytes, we shall pass specific byte count to read)
+	 */
 	public void readBytes(ByteBuffer buf) throws IOException {
 //		int toRead = Math.min(buf.remaining(), (int) length());
 //		if (buf.hasArray()) {
@@ -111,7 +112,7 @@
 //			readBytes(bb, 0, bb.length);
 //			buf.put(bb);
 //		}
-		// TODO post-1.0 optimize to read as much as possible at once
+		// TODO [post-1.1] optimize to read as much as possible at once
 		while (!isEmpty() && buf.hasRemaining()) {
 			buf.put(readByte());
 		}
@@ -120,8 +121,14 @@
 		throw new UnsupportedOperationException();
 	}
 
-	// XXX decide whether may or may not change position in the DataAccess
-	// TODO REVISIT exception handling may not be right, initially just for the sake of quick test
+	/**
+	 * Content of this DataAccess as byte array.
+	 * Note, likely changes position in the DataAccess.
+	 * Might provide direct access to underlying data structure in certain cases, do not alter.
+	 * 
+	 * @return byte array of {@link #length()} size, filled with data   
+	 * @throws IOException
+	 */
 	public byte[] byteArray() throws IOException {
 		reset();
 		byte[] rv = new byte[length()];
--- a/src/org/tmatesoft/hg/internal/DataAccessProvider.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/DataAccessProvider.java	Wed Jul 10 11:48:55 2013 +0200
@@ -21,14 +21,13 @@
 
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.OutputStream;
 import java.nio.ByteBuffer;
 import java.nio.MappedByteBuffer;
 import java.nio.channels.FileChannel;
 
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.SessionContext;
 import org.tmatesoft.hg.util.LogFacility;
 
@@ -54,8 +53,6 @@
 	private final int mapioMagicBoundary;
 	private final int bufferSize, mapioBufSize;
 	private final SessionContext context;
-	// not the right place for the property, but DAP is the only place currently available to RevlogStream to get the value
-	private final boolean shallMergePatches;
 	
 	public DataAccessProvider(SessionContext ctx) {
 		context = ctx;
@@ -63,7 +60,6 @@
 		mapioMagicBoundary = mapioBoundaryValue(pm.getInt(CFG_PROPERTY_MAPIO_LIMIT, DEFAULT_MAPIO_LIMIT));
 		bufferSize = pm.getInt(CFG_PROPERTY_FILE_BUFFER_SIZE, DEFAULT_FILE_BUFFER);
 		mapioBufSize = pm.getInt(CFG_PROPERTY_MAPIO_BUFFER_SIZE, DEFAULT_MAPIO_BUFFER);
-		shallMergePatches = pm.getBoolean(Internals.CFG_PROPERTY_PATCH_MERGE, false);
 	}
 	
 	public DataAccessProvider(SessionContext ctx, int mapioBoundary, int regularBufferSize, int mapioBufferSize) {
@@ -71,36 +67,30 @@
 		mapioMagicBoundary = mapioBoundaryValue(mapioBoundary);
 		bufferSize = regularBufferSize;
 		mapioBufSize = mapioBufferSize;
-		shallMergePatches = new PropertyMarshal(ctx).getBoolean(Internals.CFG_PROPERTY_PATCH_MERGE, false);
 	}
 	
-	// TODO [post-1.1] find a better place for this option, it's unrelated to the DAP
-	public boolean shallMergePatches() {
-		return shallMergePatches;
-	}
-
 	// ensure contract of CFG_PROPERTY_MAPIO_LIMIT, for mapioBoundary == 0 use MAX_VALUE so that no file is memmap-ed
 	private static int mapioBoundaryValue(int mapioBoundary) {
 		return mapioBoundary == 0 ? Integer.MAX_VALUE : mapioBoundary;
 	}
 
-	public DataAccess createReader(File f) {
+	public DataAccess createReader(File f, boolean shortRead) {
 		if (!f.exists()) {
 			return new DataAccess();
 		}
 		try {
-			FileChannel fc = new FileInputStream(f).getChannel();
-			long flen = fc.size();
-			if (flen > mapioMagicBoundary) {
+			FileInputStream fis = new FileInputStream(f);
+			long flen = f.length();
+			if (!shortRead && flen > mapioMagicBoundary) {
 				// TESTS: bufLen of 1024 was used to test MemMapFileAccess
-				return new MemoryMapFileAccess(fc, flen, mapioBufSize, context.getLog());
+				return new MemoryMapFileAccess(fis, flen, mapioBufSize, context.getLog());
 			} else {
 				// XXX once implementation is more or less stable,
 				// may want to try ByteBuffer.allocateDirect() to see
 				// if there's any performance gain. 
 				boolean useDirectBuffer = false; // XXX might be another config option
 				// TESTS: bufferSize of 100 was used to check buffer underflow states when readBytes reads chunks bigger than bufSize
-				return new FileAccess(fc, flen, bufferSize, useDirectBuffer, context.getLog());
+				return new FileAccess(fis, flen, bufferSize, useDirectBuffer, context.getLog());
 			}
 		} catch (IOException ex) {
 			// unlikely to happen, we've made sure file exists.
@@ -109,23 +99,17 @@
 		return new DataAccess(); // non-null, empty.
 	}
 	
-	public DataSerializer createWriter(File f, boolean createNewIfDoesntExist) {
+	public DataSerializer createWriter(final Transaction tr, File f, boolean createNewIfDoesntExist) {
 		if (!f.exists() && !createNewIfDoesntExist) {
 			return new DataSerializer();
 		}
-		try {
-			return new StreamDataSerializer(context.getLog(), new FileOutputStream(f, true));
-		} catch (final FileNotFoundException ex) {
-			context.getLog().dump(getClass(), Error, ex, null);
-			return new DataSerializer() {
-				public void write(byte[] data, int offset, int length) throws IOException {
-					throw ex;
-				}
-			};
-		}
+		// TODO invert RevlogStreamWriter to send DataSource here instead of grabbing DataSerializer
+		// to control the moment transaction gets into play and whether it fails or not
+		return new TransactionAwareFileSerializer(tr, f);
 	}
 
 	private static class MemoryMapFileAccess extends DataAccess {
+		private FileInputStream fileStream;
 		private FileChannel fileChannel;
 		private long position = 0; // always points to buffer's absolute position in the file
 		private MappedByteBuffer buffer;
@@ -133,8 +117,9 @@
 		private final int memBufferSize;
 		private final LogFacility logFacility;
 
-		public MemoryMapFileAccess(FileChannel fc, long channelSize, int bufferSize, LogFacility log) {
-			fileChannel = fc;
+		public MemoryMapFileAccess(FileInputStream fis, long channelSize, int bufferSize, LogFacility log) {
+			fileStream = fis;
+			fileChannel = fis.getChannel();
 			size = channelSize;
 			logFacility = log;
 			memBufferSize = bufferSize > channelSize ? (int) channelSize : bufferSize; // no reason to waste memory more than there's data 
@@ -258,27 +243,26 @@
 		@Override
 		public void done() {
 			buffer = null;
-			if (fileChannel != null) {
-				try {
-					fileChannel.close();
-				} catch (IOException ex) {
-					logFacility.dump(getClass(), Warn, ex, null);
-				}
-				fileChannel = null;
+			if (fileStream != null) {
+				new FileUtils(logFacility, this).closeQuietly(fileStream);
+				fileStream = null;
+				fileChannel = null; // channel is closed together with stream
 			}
 		}
 	}
 
 	// (almost) regular file access - FileChannel and buffers.
 	private static class FileAccess extends DataAccess {
+		private FileInputStream fileStream;
 		private FileChannel fileChannel;
 		private ByteBuffer buffer;
 		private long bufferStartInFile = 0; // offset of this.buffer in the file.
 		private final long size;
 		private final LogFacility logFacility;
 
-		public FileAccess(FileChannel fc, long channelSize, int bufferSizeHint, boolean useDirect, LogFacility log) {
-			fileChannel = fc;
+		public FileAccess(FileInputStream fis, long channelSize, int bufferSizeHint, boolean useDirect, LogFacility log) {
+			fileStream = fis;
+			fileChannel = fis.getChannel();
 			size = channelSize;
 			logFacility = log;
 			final int capacity = size < bufferSizeHint ? (int) size : bufferSizeHint;
@@ -389,69 +373,66 @@
 
 		@Override
 		public void done() {
-			if (buffer != null) {
-				buffer = null;
-			}
-			if (fileChannel != null) {
-				try {
-					fileChannel.close();
-				} catch (IOException ex) {
-					logFacility.dump(getClass(), Warn, ex, null);
-				}
+			buffer = null;
+			if (fileStream != null) {
+				new FileUtils(logFacility, this).closeQuietly(fileStream);
+				fileStream = null;
 				fileChannel = null;
 			}
 		}
 	}
+	
+	/**
+	 * Appends serialized changes to the end of the file
+	 */
+	private static class TransactionAwareFileSerializer extends DataSerializer {
+		
+		private final Transaction transaction;
+		private final File file;
+		private FileOutputStream fos;
+		private File transactionFile;
+		private boolean writeFailed = false;
 
-	public/*XXX, private, once HgCloneCommand stops using it */ static class StreamDataSerializer extends DataSerializer {
-		private final OutputStream out;
-		private final LogFacility log;
-		private byte[] buffer;
-	
-		public StreamDataSerializer(LogFacility logFacility, OutputStream os) {
-			assert os != null;
-			out = os;
-			log = logFacility;
+		public TransactionAwareFileSerializer(Transaction tr, File f) {
+			transaction = tr;
+			file = f;
 		}
 		
 		@Override
-		public void write(byte[] data, int offset, int length) throws IOException {
-			out.write(data, offset, length);
-		}
-	
-		@Override
-		public void writeInt(int... values) throws IOException {
-			ensureBufferSize(4*values.length); // sizeof(int)
-			int idx = 0;
-			for (int v : values) {
-				DataSerializer.bigEndian(v, buffer, idx);
-				idx += 4;
+		public void write(byte[] data, int offset, int length) throws HgIOException {
+			try {
+				if (fos == null) {
+					transactionFile = transaction.prepare(file);
+					fos = new FileOutputStream(transactionFile, true);
+				}
+				fos.write(data, offset, length);
+				fos.flush();
+			} catch (IOException ex) {
+				writeFailed = true;
+				transaction.failure(transactionFile, ex);
+				throw new HgIOException("Write failure", ex, transactionFile);
 			}
-			out.write(buffer, 0, idx);
 		}
 		
 		@Override
-		public void writeByte(byte... values) throws IOException {
-			if (values.length == 1) {
-				out.write(values[0]);
-			} else {
-				out.write(values, 0, values.length);
-			}
-		}
-		
-		private void ensureBufferSize(int bytesNeeded) {
-			if (buffer == null || buffer.length < bytesNeeded) {
-				buffer = new byte[bytesNeeded];
-			}
-		}
-	
-		@Override
-		public void done() {
-			try {
-				out.flush();
-				out.close();
-			} catch (IOException ex) {
-				log.dump(getClass(), Error, ex, "Failure to close stream");
+		public void done() throws HgIOException {
+			if (fos != null) {
+				assert transactionFile != null;
+				try {
+					fos.close();
+					if (!writeFailed) {
+						// XXX, Transaction#done() assumes there's no error , but perhaps it's easier to 
+						// rely on #failure(), and call #done() always (or change #done() to #success()
+						transaction.done(transactionFile);
+					}
+					fos = null;
+				} catch (IOException ex) {
+					if (!writeFailed) {
+						// do not eclipse original exception
+						transaction.failure(transactionFile, ex);
+					}
+					throw new HgIOException("Write failure", ex, transactionFile);
+				}
 			}
 		}
 	}
--- a/src/org/tmatesoft/hg/internal/DataSerializer.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/DataSerializer.java	Wed Jul 10 11:48:55 2013 +0200
@@ -16,7 +16,12 @@
  */
 package org.tmatesoft.hg.internal;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
+
+import org.tmatesoft.hg.core.HgIOException;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 
 /**
  * Serialization friend of {@link DataAccess}
@@ -26,28 +31,36 @@
  */
 @Experimental(reason="Work in progress")
 public class DataSerializer {
+	private byte[] buffer;
 	
-	public void writeByte(byte... values) throws IOException {
+	public void writeByte(byte... values) throws HgIOException {
 		write(values, 0, values.length);
 	}
 
-	public void writeInt(int... values) throws IOException {
-		byte[] buf = new byte[4];
+	public void writeInt(int... values) throws HgIOException {
+		ensureBufferSize(4*values.length); // sizeof(int)
+		int idx = 0;
 		for (int v : values) {
-			bigEndian(v, buf, 0);
-			write(buf, 0, buf.length);
+			bigEndian(v, buffer, idx);
+			idx += 4;
+		}
+		write(buffer, 0, idx);
+	}
+
+	public void write(byte[] data, int offset, int length) throws HgIOException {
+		throw new HgIOException("Attempt to write to non-existent file", null);
+	}
+
+	public void done() throws HgIOException {
+		// no-op
+	}
+	
+	private void ensureBufferSize(int bytesNeeded) {
+		if (buffer == null || buffer.length < bytesNeeded) {
+			buffer = new byte[bytesNeeded];
 		}
 	}
 
-	public void write(byte[] data, int offset, int length) throws IOException {
-		throw new IOException("Attempt to write to non-existent file");
-	}
-
-	public void done() {
-		// FIXME perhaps, shall allow IOException, too
-		// no-op
-	}
-	
 	/**
 	 * Writes 4 bytes of supplied value into the buffer at given offset, big-endian. 
 	 */
@@ -63,14 +76,18 @@
 	 * Denotes an entity that wants to/could be serialized
 	 */
 	@Experimental(reason="Work in progress")
-	interface DataSource {
-		public void serialize(DataSerializer out) throws IOException;
+	public interface DataSource {
+		/**
+		 * Invoked once for a single write operation, 
+		 * although the source itself may get serialized several times
+		 */
+		public void serialize(DataSerializer out) throws HgIOException, HgRuntimeException;
 
 		/**
 		 * Hint of data length it would like to writes
 		 * @return -1 if can't answer
 		 */
-		public int serializeLength();
+		public int serializeLength() throws HgRuntimeException;
 	}
 	
 	public static class ByteArrayDataSource implements DataSource {
@@ -81,7 +98,7 @@
 			data = bytes;
 		}
 
-		public void serialize(DataSerializer out) throws IOException {
+		public void serialize(DataSerializer out) throws HgIOException {
 			if (data != null) {
 				out.write(data, 0, data.length);
 			}
@@ -90,6 +107,43 @@
 		public int serializeLength() {
 			return data == null ? 0 : data.length;
 		}
+	}
+	
+	/**
+	 * Serialize data to byte array
+	 */
+	public static class ByteArraySerializer extends DataSerializer {
+		private final ByteArrayOutputStream out = new ByteArrayOutputStream();
+
+		@Override
+		public void write(byte[] data, int offset, int length) {
+			out.write(data, offset, length);
+		}
 		
+		public byte[] toByteArray() {
+			return out.toByteArray();
+		}
+	}
+
+	/**
+	 * Bridge to the world of {@link java.io.OutputStream}.
+	 * Caller instantiates the stream and is responsible to close it as appropriate, 
+	 * {@link #done() DataSerializer.done()} doesn't close the stream. 
+	 */
+	public static class OutputStreamSerializer extends DataSerializer {
+		private final OutputStream out;
+
+		public OutputStreamSerializer(OutputStream outputStream) {
+			out = outputStream;
+		}
+
+		@Override
+		public void write(byte[] data, int offset, int length) throws HgIOException {
+			try {
+				out.write(data, offset, length);
+			} catch (IOException ex) {
+				throw new HgIOException(ex.getMessage(), ex, null);
+			}
+		}
 	}
 }
--- a/src/org/tmatesoft/hg/internal/DeflaterDataSerializer.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/DeflaterDataSerializer.java	Wed Jul 10 11:48:55 2013 +0200
@@ -16,10 +16,11 @@
  */
 package org.tmatesoft.hg.internal;
 
-import java.io.IOException;
 import java.util.zip.Deflater;
 import java.util.zip.DeflaterOutputStream;
 
+import org.tmatesoft.hg.core.HgIOException;
+
 /**
  * {@link DeflaterOutputStream} counterpart for {@link DataSerializer} API
  * 
@@ -43,7 +44,7 @@
 	}
 
 	@Override
-	public void writeInt(int... values) throws IOException {
+	public void writeInt(int... values) throws HgIOException {
 		for (int i = 0; i < values.length; i+= AUX_BUFFER_CAPACITY) {
 			int idx = 0;
 			for (int j = i, x = Math.min(values.length, i + AUX_BUFFER_CAPACITY); j < x; j++) {
@@ -58,7 +59,7 @@
 	}
 
 	@Override
-	public void write(byte[] data, int offset, int length) throws IOException {
+	public void write(byte[] data, int offset, int length) throws HgIOException {
 		// @see DeflaterOutputStream#write(byte[], int, int)
 		int stride = deflateOutBuffer.length;
 		for (int i = 0; i < length; i += stride) {
@@ -66,7 +67,7 @@
 		}
 	}
 	
-	private void internalWrite(byte[] data, int offset, int length) throws IOException {
+	private void internalWrite(byte[] data, int offset, int length) throws HgIOException {
 		deflater.setInput(data, offset, length);
 		while (!deflater.needsInput()) {
 			deflate();
@@ -74,11 +75,11 @@
 	}
 
 	@Override
-	public void done() {
+	public void done() throws HgIOException {
 		delegate.done();
 	}
 
-	public void finish() throws IOException {
+	public void finish() throws HgIOException {
 		if (!deflater.finished()) {
 			deflater.finish();
 			while (!deflater.finished()) {
@@ -87,7 +88,7 @@
 		}
 	}
 
-	protected void deflate() throws IOException {
+	protected void deflate() throws HgIOException {
 		int len = deflater.deflate(deflateOutBuffer, 0, deflateOutBuffer.length);
 		if (len > 0) {
 			delegate.write(deflateOutBuffer, 0, len);
--- a/src/org/tmatesoft/hg/internal/DiffHelper.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/DiffHelper.java	Wed Jul 10 11:48:55 2013 +0200
@@ -20,8 +20,6 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import org.tmatesoft.hg.repo.HgInvalidStateException;
-
 /**
  * Mercurial cares about changes only up to the line level, e.g. a simple file version dump in manifest looks like (RevlogDump output):
  * 
@@ -201,9 +199,7 @@
 				} else {
 					assert changeStartS2 == matchStartSeq2;
 					if (matchStartSeq1 > 0 || matchStartSeq2 > 0) {
-						// FIXME perhaps, exception is too much for the case
-						// once diff is covered with tests, replace with assert false : msg; 
-						throw new HgInvalidStateException(String.format("adjustent equal blocks %d, %d and %d,%d", changeStartS1, matchStartSeq1, changeStartS2, matchStartSeq2));
+						assert false : String.format("adjustent equal blocks %d, %d and %d,%d", changeStartS1, matchStartSeq1, changeStartS2, matchStartSeq2);
 					}
 				}
 			}
--- a/src/org/tmatesoft/hg/internal/DirstateBuilder.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/DirstateBuilder.java	Wed Jul 10 11:48:55 2013 +0200
@@ -16,6 +16,9 @@
  */
 package org.tmatesoft.hg.internal;
 
+import static org.tmatesoft.hg.repo.HgRepositoryFiles.Dirstate;
+import static org.tmatesoft.hg.repo.HgRepositoryFiles.UndoDirstate;
+
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -30,9 +33,9 @@
 import org.tmatesoft.hg.repo.HgDirstate;
 import org.tmatesoft.hg.repo.HgDirstate.EntryKind;
 import org.tmatesoft.hg.repo.HgDirstate.Record;
+import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
 import org.tmatesoft.hg.repo.HgManifest.Flags;
-import org.tmatesoft.hg.repo.HgRepositoryFiles;
 import org.tmatesoft.hg.util.Path;
 
 /**
@@ -149,18 +152,20 @@
 		}
 	}
 	
-	public void serialize() throws HgIOException {
-		File dirstateFile = hgRepo.getRepositoryFile(HgRepositoryFiles.Dirstate);
+	public void serialize(Transaction tr) throws HgIOException {
+		File dirstateFile = tr.prepare(hgRepo.getRepositoryFile(Dirstate), hgRepo.getRepositoryFile(UndoDirstate));
 		try {
 			FileChannel dirstate = new FileOutputStream(dirstateFile).getChannel();
 			serialize(dirstate);
 			dirstate.close();
+			tr.done(dirstateFile);
 		} catch (IOException ex) {
+			tr.failure(dirstateFile, ex);
 			throw new HgIOException("Can't write down new directory state", ex, dirstateFile);
 		}
 	}
 	
-	public void fillFrom(DirstateReader dirstate) {
+	public void fillFrom(DirstateReader dirstate) throws HgInvalidControlFileException {
 		// TODO preserve order, if reasonable and possible 
 		dirstate.readInto(new HgDirstate.Inspector() {
 			
--- a/src/org/tmatesoft/hg/internal/DirstateReader.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/DirstateReader.java	Wed Jul 10 11:48:55 2013 +0200
@@ -66,7 +66,7 @@
 		if (dirstateFile == null || !dirstateFile.exists()) {
 			return;
 		}
-		DataAccess da = repo.getDataAccess().createReader(dirstateFile);
+		DataAccess da = repo.getDataAccess().createReader(dirstateFile, false);
 		try {
 			if (da.isEmpty()) {
 				return;
@@ -102,7 +102,7 @@
 				} else if (state == 'm') {
 					target.next(EntryKind.Merged, r);
 				} else {
-					repo.getSessionContext().getLog().dump(getClass(), Severity.Warn, "Dirstate record for file %s (size: %d, tstamp:%d) has unknown state '%c'", r.name(), r.size(), r.modificationTime(), state);
+					repo.getLog().dump(getClass(), Severity.Warn, "Dirstate record for file %s (size: %d, tstamp:%d) has unknown state '%c'", r.name(), r.size(), r.modificationTime(), state);
 				}
 			}
 		} catch (IOException ex) {
@@ -142,7 +142,7 @@
 		if (dirstateFile == null || !dirstateFile.exists()) {
 			return new Pair<Nodeid,Nodeid>(NULL, NULL);
 		}
-		DataAccess da = internalRepo.getDataAccess().createReader(dirstateFile);
+		DataAccess da = internalRepo.getDataAccess().createReader(dirstateFile, false);
 		try {
 			if (da.isEmpty()) {
 				return new Pair<Nodeid,Nodeid>(NULL, NULL);
@@ -178,7 +178,7 @@
 				branch = b == null || b.length() == 0 ? HgRepository.DEFAULT_BRANCH_NAME : b;
 				r.close();
 			} catch (FileNotFoundException ex) {
-				internalRepo.getSessionContext().getLog().dump(HgDirstate.class, Debug, ex, null); // log verbose debug, exception might be legal here 
+				internalRepo.getLog().dump(HgDirstate.class, Debug, ex, null); // log verbose debug, exception might be legal here 
 				// IGNORE
 			} catch (IOException ex) {
 				throw new HgInvalidControlFileException("Error reading file with branch information", ex, branchFile);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/EncodeDirPathHelper.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.tmatesoft.hg.util.PathRewrite;
+
+/**
+ * <blockquote cite="http://mercurial.selenic.com/wiki/FileFormats#data.2F">Directory names ending in .i or .d have .hg appended</blockquote>
+ *  
+ * @see http://mercurial.selenic.com/wiki/FileFormats#data.2F
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+final class EncodeDirPathHelper implements PathRewrite {
+	private final Pattern suffix2replace;
+	
+	public EncodeDirPathHelper() {
+		suffix2replace = Pattern.compile("\\.([id]|hg)/");
+	}
+
+	public CharSequence rewrite(CharSequence p) {
+		Matcher suffixMatcher = suffix2replace.matcher(p);
+		CharSequence path;
+		// Matcher.replaceAll, but without extra toString
+		boolean found = suffixMatcher.find();
+		if (found) {
+			StringBuffer sb = new StringBuffer(p.length()  + 20);
+			do {
+				suffixMatcher.appendReplacement(sb, ".$1.hg/");
+			} while (found = suffixMatcher.find());
+			suffixMatcher.appendTail(sb);
+			path = sb;
+		} else {
+			path = p;
+		}
+		return path;
+	}
+
+}
--- a/src/org/tmatesoft/hg/internal/FNCacheFile.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/FNCacheFile.java	Wed Jul 10 11:48:55 2013 +0200
@@ -16,9 +16,14 @@
  */
 package org.tmatesoft.hg.internal;
 
+import static org.tmatesoft.hg.repo.HgRepositoryFiles.FNCache;
+
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.CharBuffer;
+import java.nio.channels.FileChannel;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
@@ -41,11 +46,16 @@
 	
 	private final Internals repo;
 //	private final List<Path> files;
-	private List<Path> added;
+	private final List<Path> addedDotI;
+	private final List<Path> addedDotD;
+	private final FNCachePathHelper pathHelper;
 
 	public FNCacheFile(Internals internalRepo) {
 		repo = internalRepo;
 //		files = new ArrayList<Path>();
+		pathHelper = new FNCachePathHelper();
+		addedDotI = new ArrayList<Path>(5);
+		addedDotD = new ArrayList<Path>(5);
 	}
 
 	/*
@@ -60,36 +70,48 @@
 		// names in fncache are in local encoding, shall translate to unicode
 		new LineReader(f, repo.getSessionContext().getLog(), repo.getFilenameEncoding()).read(new LineReader.SimpleLineCollector(), entries);
 		for (String e : entries) {
-			// FIXME plain wrong, need either to decode paths and strip off .i/.d or (if keep names as is) change write()
+			// XXX plain wrong, need either to decode paths and strip off .i/.d or (if keep names as is) change write()
 			files.add(pathFactory.path(e));
 		}
 	}
 	*/
 	
 	public void write() throws IOException {
-		if (added == null || added.isEmpty()) {
+		if (addedDotI.isEmpty() && addedDotD.isEmpty()) {
 			return;
 		}
-		File f = fncacheFile();
+		File f = repo.getRepositoryFile(FNCache);
 		f.getParentFile().mkdirs();
 		final Charset filenameEncoding = repo.getFilenameEncoding();
-		FileOutputStream fncacheFile = new FileOutputStream(f, true);
-		for (Path p : added) {
-			String s = "data/" + p.toString() + ".i"; // TODO post-1.0 this is plain wrong. (a) need .d files, too; (b) what about dh/ location? 
-			fncacheFile.write(s.getBytes(filenameEncoding));
-			fncacheFile.write(0x0A); // http://mercurial.selenic.com/wiki/fncacheRepoFormat
+		ArrayList<CharBuffer> added = new ArrayList<CharBuffer>();
+		for (Path p : addedDotI) {
+			added.add(CharBuffer.wrap(pathHelper.rewrite(p)));
+		}
+		for (Path p : addedDotD) {
+			// XXX FNCachePathHelper always return name of an index file, need to change it into a name of data file,
+			// although the approach (to replace last char) is depressingly awful
+			CharSequence cs = pathHelper.rewrite(p);
+			CharBuffer cb = CharBuffer.allocate(cs.length());
+			cb.append(cs);
+			cb.put(cs.length()-1, 'd');
+			cb.flip();
+			added.add(cb);
+		}
+		FileChannel fncacheFile = new FileOutputStream(f, true).getChannel();
+		ByteBuffer lf = ByteBuffer.wrap(new byte[] { 0x0A });
+		for (CharBuffer b : added) {
+			fncacheFile.write(filenameEncoding.encode(b));
+			fncacheFile.write(lf);
+			lf.rewind();
 		}
 		fncacheFile.close();
 	}
 
-	public void add(Path p) {
-		if (added == null) {
-			added = new ArrayList<Path>();
-		}
-		added.add(p);
+	public void addIndex(Path p) {
+		addedDotI.add(p);
 	}
 
-	private File fncacheFile() {
-		return repo.getFileFromStoreDir("fncache");
+	public void addData(Path p) {
+		addedDotD.add(p);
 	}
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/FNCachePathHelper.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.internal.StoragePathHelper.STR_DATA;
+
+import org.tmatesoft.hg.util.PathRewrite;
+
+/**
+ * Prepare filelog names to be written into fncache. 
+ * 
+ * @see http://mercurial.selenic.com/wiki/fncacheRepoFormat#The_fncache_file
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+final class FNCachePathHelper implements PathRewrite {
+
+	private final EncodeDirPathHelper dirPathRewrite;
+
+	
+	public FNCachePathHelper() {
+		dirPathRewrite = new EncodeDirPathHelper();
+	}
+
+	/**
+	 * Input: repository-relative path of a filelog, i.e. without 'data/' or 'dh/' prefix, and/or '.i'/'.d' suffix.
+	 * Output: path ready to be written into fncache file, alaways with '.i' suffix (caller is free to alter the suffix to '.d' as appropriate
+	 */
+	public CharSequence rewrite(CharSequence path) {
+		CharSequence p = dirPathRewrite.rewrite(path);
+		StringBuilder result = new StringBuilder(p.length() + STR_DATA.length() + ".i".length());
+		result.append(STR_DATA);
+		result.append(p);
+		result.append(".i");
+		return result;
+	}
+
+	/*
+	 * There's always 'data/' prefix, even if actual file resides under 'dh/':
+	 *  
+	 * $ cat .hg/store/fncache
+	 * data/very-long-directory-name-level-1/very-long-directory-name-level-2/very-long-directory-name-level-3/file-with-longest-name-i-am-not-lazy-to-type.txt.i
+	 * $ ls .hg/store/dh/very-lon/very-lon/very-lon/
+	 * file-with-longest-name-i-am-not-lazy-to-type.txtbbd4d3327f6364027211b0cd8ca499d3d6308849.i
+	 */
+}
--- a/src/org/tmatesoft/hg/internal/FileAnnotation.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/FileAnnotation.java	Wed Jul 10 11:48:55 2013 +0200
@@ -17,17 +17,9 @@
 package org.tmatesoft.hg.internal;
 
 
-import org.tmatesoft.hg.core.HgCallbackTargetException;
-import org.tmatesoft.hg.core.HgIterateDirection;
-import org.tmatesoft.hg.repo.HgBlameFacility;
+import org.tmatesoft.hg.core.HgBlameInspector;
+import org.tmatesoft.hg.core.HgBlameInspector.RevisionDescriptor;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
-import org.tmatesoft.hg.repo.HgBlameFacility.AddBlock;
-import org.tmatesoft.hg.repo.HgBlameFacility.BlockData;
-import org.tmatesoft.hg.repo.HgBlameFacility.ChangeBlock;
-import org.tmatesoft.hg.repo.HgBlameFacility.DeleteBlock;
-import org.tmatesoft.hg.repo.HgBlameFacility.EqualBlock;
-import org.tmatesoft.hg.repo.HgBlameFacility.RevisionDescriptor;
-import org.tmatesoft.hg.repo.HgDataFile;
 
 /**
  * Produce output like 'hg annotate' does
@@ -35,7 +27,7 @@
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-public class FileAnnotation implements HgBlameFacility.Inspector, RevisionDescriptor.Recipient {
+public class FileAnnotation implements HgBlameInspector, RevisionDescriptor.Recipient {
 
 	@Experimental(reason="The line-by-line inspector likely to become part of core/command API")
 	@Callback
@@ -50,18 +42,6 @@
 		int totalLines();
 	}
 
-	/**
-	 * Annotate file revision, line by line.
-	 */
-	public static void annotate(HgDataFile df, int changelogRevisionIndex, LineInspector insp) throws HgCallbackTargetException {
-		if (!df.exists()) {
-			return;
-		}
-		FileAnnotation fa = new FileAnnotation(insp);
-		HgBlameFacility af = new HgBlameFacility(df);
-		af.annotate(changelogRevisionIndex, fa, HgIterateDirection.NewToOld);
-	}
-
 	// keeps <startSeq1, startSeq2, len> of equal blocks, origin to target, from some previous step
 	private RangeSeq activeEquals;
 	// equal blocks of the current iteration, to be recalculated before next step
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/FileChangeMonitor.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.File;
+
+/**
+ * This shall become interface/abstract class accessible from SessionContext,
+ * with plugable implementations, e.g. Java7 (file monitoring facilities) based,
+ * or any other convenient means. It shall allow both "check at the moment asked" 
+ * and "collect changes and dispatch on demand" implementation approaches, so that
+ * implementors may use best available technology   
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class FileChangeMonitor {
+	private final File file;
+	private long lastModified;
+	private long length;
+	
+	/**
+	 * First round: support for 1-monitor-1-file only
+	 * Next round: 1-monitor-N files
+	 */
+	public FileChangeMonitor(File f) {
+		file = f;
+	}
+	
+	// shall work for files that do not exist
+	public void touch(Object source) {
+		lastModified = file.lastModified();
+		length = file.length();
+	}
+	
+	public void check(Object source, Action onChange) {
+		if (changed(source)) {
+			onChange.changed();
+		}
+	}
+
+	public boolean changed(Object source) {
+		if (file.lastModified() != lastModified) {
+			return true;
+		}
+		return file.length() != length; 
+	}
+	
+	public interface Action {
+		public void changed();
+	}
+}
--- a/src/org/tmatesoft/hg/internal/FileContentSupplier.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/FileContentSupplier.java	Wed Jul 10 11:48:55 2013 +0200
@@ -18,57 +18,55 @@
 
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 
 import org.tmatesoft.hg.core.HgIOException;
-import org.tmatesoft.hg.repo.CommitFacility;
+import org.tmatesoft.hg.core.SessionContext;
+import org.tmatesoft.hg.internal.DataSerializer.DataSource;
 import org.tmatesoft.hg.repo.HgRepository;
 import org.tmatesoft.hg.util.Path;
 
 /**
- * FIXME files are opened at the moment of instantiation, though the moment the data is requested might be distant
+ * {@link DataSource} that reads from regular files
  * 
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-public class FileContentSupplier implements CommitFacility.ByteDataSupplier {
-	private final FileChannel channel;
-	private IOException error;
+public class FileContentSupplier implements DataSource {
+	private final File file;
+	private final SessionContext ctx;
 	
-	public FileContentSupplier(HgRepository repo, Path file) throws HgIOException {
-		this(new File(repo.getWorkingDir(), file.toString()));
-	}
-
-	public FileContentSupplier(File f) throws HgIOException {
-		if (!f.canRead()) {
-			throw new HgIOException(String.format("Can't read file %s", f), f);
-		}
-		try {
-			channel = new FileInputStream(f).getChannel();
-		} catch (FileNotFoundException ex) {
-			throw new HgIOException("Can't open file", ex, f);
-		}
+	public FileContentSupplier(HgRepository repo, Path file) {
+		this(repo, new File(repo.getWorkingDir(), file.toString()));
 	}
 
-	public int read(ByteBuffer buf) {
-		if (error != null) {
-			return -1;
-		}
-		try {
-			return channel.read(buf);
-		} catch (IOException ex) {
-			error = ex;
-		}
-		return -1;
+	public FileContentSupplier(SessionContext.Source ctxSource, File f) {
+		ctx = ctxSource.getSessionContext();
+		file = f;
 	}
 	
-	public void done() throws IOException {
-		channel.close();
-		if (error != null) {
-			throw error;
+	public void serialize(DataSerializer out) throws HgIOException {
+		FileInputStream fis = null;
+		try {
+			fis = new FileInputStream(file);
+			FileChannel fc = fis.getChannel();
+			ByteBuffer buffer = ByteBuffer.allocate((int) Math.max(1, Math.min(100*1024, fc.size()))); // min 1: read into a zero-remaining buffer returns 0, never -1, hanging the loop on empty files
+			while (fc.read(buffer) != -1) {
+				buffer.flip();
+				// #allocate() above ensures backing array
+				out.write(buffer.array(), 0, buffer.limit());
+				buffer.clear();
+			}
+		} catch (IOException ex) {
+			throw new HgIOException("Failed to get content of the file", ex, file);
+		} finally {
+			new FileUtils(ctx.getLog(), this).closeQuietly(fis);
 		}
 	}
+	
+	public int serializeLength() {
+		return Internals.ltoi(file.length());
+	}
 }
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/FileHistory.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.core.HgIterateDirection.NewToOld;
+
+import java.util.Collections;
+import java.util.LinkedList;
+
+import org.tmatesoft.hg.core.HgIterateDirection;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+
+/**
+ * History of a file, with copy/renames, and corresponding revision information.
+ * Facility for file history iteration. 
+ * 
+ * TODO [post-1.1] Utilize in HgLogCommand and anywhere else we need to follow file history
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class FileHistory {
+	
+	private LinkedList<FileRevisionHistoryChunk> fileCompleteHistory = new LinkedList<FileRevisionHistoryChunk>();
+	private final HgDataFile df;
+	private final int csetTo;
+	private final int csetFrom;
+	
+	public FileHistory(HgDataFile file, int fromChangeset, int toChangeset) {
+		df = file;
+		csetFrom = fromChangeset;
+		csetTo = toChangeset;
+	}
+	
+	public int getStartChangeset() {
+		return csetFrom;
+	}
+	
+	public int getEndChangeset() {
+		return csetTo;
+	}
+
+	public void build() throws HgRuntimeException {
+		assert fileCompleteHistory.isEmpty();
+		HgDataFile currentFile = df;
+		final int changelogRevIndexEnd = csetTo;
+		final int changelogRevIndexStart = csetFrom;
+		int fileLastClogRevIndex = changelogRevIndexEnd;
+		FileRevisionHistoryChunk nextChunk = null;
+		fileCompleteHistory.clear(); // just in case, #build() is not expected to be called more than once
+		do {
+			FileRevisionHistoryChunk fileHistory = new FileRevisionHistoryChunk(currentFile);
+			fileHistory.init(fileLastClogRevIndex);
+			fileHistory.linkTo(nextChunk);
+			fileCompleteHistory.addFirst(fileHistory); // to get the list in old-to-new order
+			nextChunk = fileHistory;
+			if (fileHistory.changeset(0) > changelogRevIndexStart && currentFile.isCopy()) {
+				// fileHistory.changeset(0) is the earliest revision we know about so far,
+				// once we get to revisions earlier than the requested start, stop digging.
+				// The reason there's NO == (i.e. not >=) because:
+				// (easy): once it's equal, we've reached our intended start
+				// (hard): if changelogRevIndexStart happens to be exact start of one of renames in the 
+				// chain of renames (test-annotate2 repository, file1->file1a->file1b, i.e. points 
+				// to the very start of file1a or file1 history), presence of == would get us to the next 
+				// chunk and hence changed parents of present chunk's first element. Our annotate alg 
+				// relies on parents only (i.e. knows nothing about 'last iteration element') to find out 
+				// what to compare, and hence won't report all lines of 'last iteration element' (which is the
+				// first revision of the renamed file) as "added in this revision", leaving gaps in annotate
+				HgRepository repo = currentFile.getRepo();
+				Nodeid originLastRev = currentFile.getCopySourceRevision();
+				currentFile = repo.getFileNode(currentFile.getCopySourceName());
+				fileLastClogRevIndex = currentFile.getChangesetRevisionIndex(currentFile.getRevisionIndex(originLastRev));
+				// XXX perhaps, shall fail with meaningful exception if new file doesn't exist (.i/.d not found for whatever reason)
+				// or source revision is missing?
+			} else {
+				fileHistory.chopAtChangeset(changelogRevIndexStart);
+				currentFile = null; // stop iterating
+			}
+		} while (currentFile != null && fileLastClogRevIndex > changelogRevIndexStart);
+		// fileCompleteHistory is in (origin, intermediate target, ultimate target) order
+	}
+	
+	public Iterable<FileRevisionHistoryChunk> iterate(HgIterateDirection order) {
+		if (order == NewToOld) {
+			return ReverseIterator.reversed(fileCompleteHistory);
+		}
+		return Collections.unmodifiableList(fileCompleteHistory);
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/FileRevisionHistoryChunk.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.core.HgIterateDirection.OldToNew;
+import static org.tmatesoft.hg.repo.HgRepository.BAD_REVISION;
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
+
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.LinkedList;
+
+import org.tmatesoft.hg.core.HgIterateDirection;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+
+/**
+ * Piece of file history, identified by path, limited to file revisions from range [chop..init] of changesets, 
+ * can be linked to another piece.
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class FileRevisionHistoryChunk {
+	private final HgDataFile df;
+	// change ancestry, sequence of file revisions
+	private IntVector fileRevsToVisit;
+	// parent pairs of complete file history
+	private IntVector fileParentRevs;
+	// map file revision to changelog revision (sparse array, only file revisions to visit are set)
+	private int[] file2changelog;
+	private int originChangelogRev = BAD_REVISION, originFileRev = BAD_REVISION;
+	private int csetRangeStart = NO_REVISION, csetRangeEnd = BAD_REVISION; 
+	
+
+	public FileRevisionHistoryChunk(HgDataFile file) {
+		df = file;
+	}
+	
+	/**
+	 * @return file at this specific chunk of history (i.e. its path may be different from the paths of other chunks)
+	 */
+	public HgDataFile getFile() {
+		return df;
+	}
+	
+	/**
+	 * @return changeset this file history chunk was chopped at, or {@link HgRepository#NO_REVISION} if none specified
+	 */
+	public int getStartChangeset() {
+		return csetRangeStart;
+	}
+	
+	/**
+	 * @return changeset this file history chunk ends at
+	 */
+	public int getEndChangeset() {
+		return csetRangeEnd;
+	}
+	
+	public void init(int changelogRevisionIndex) throws HgRuntimeException {
+		csetRangeEnd = changelogRevisionIndex;
+		// XXX df.indexWalk(0, fileRevIndex, ) might be more effective
+		Nodeid fileRev = df.getRepo().getManifest().getFileRevision(changelogRevisionIndex, df.getPath());
+		int fileRevIndex = df.getRevisionIndex(fileRev);
+		int[] fileRevParents = new int[2];
+		fileParentRevs = new IntVector((fileRevIndex+1) * 2, 0);
+		fileParentRevs.add(NO_REVISION, NO_REVISION); // parents of fileRevIndex == 0
+		for (int i = 1; i <= fileRevIndex; i++) {
+			df.parents(i, fileRevParents, null, null);
+			fileParentRevs.add(fileRevParents[0], fileRevParents[1]);
+		}
+		// fileRevsToVisit keep file change ancestry from new to old
+		fileRevsToVisit = new IntVector(fileRevIndex + 1, 0);
+		// keep map of file revision to changelog revision
+		file2changelog = new int[fileRevIndex+1];
+		// only elements worth visit would get mapped, so there would be unfilled areas in the file2changelog,
+		// prevent from error (make it explicit) by bad value
+		Arrays.fill(file2changelog, BAD_REVISION);
+		LinkedList<Integer> queue = new LinkedList<Integer>();
+		BitSet seen = new BitSet(fileRevIndex + 1);
+		queue.add(fileRevIndex);
+		do {
+			int x = queue.removeFirst();
+			if (seen.get(x)) {
+				continue;
+			}
+			seen.set(x);
+			fileRevsToVisit.add(x);
+			file2changelog[x] = df.getChangesetRevisionIndex(x);
+			int p1 = fileParentRevs.get(2*x);
+			int p2 = fileParentRevs.get(2*x + 1);
+			if (p1 != NO_REVISION) {
+				queue.addLast(p1);
+			}
+			if (p2 != NO_REVISION) {
+				queue.addLast(p2);
+			}
+		} while (!queue.isEmpty());
+		// make sure no child is processed before we handled all (grand-)parents of the element
+		fileRevsToVisit.sort(false);
+	}
+	
+	public void linkTo(FileRevisionHistoryChunk target) {
+		// assume that target.init() has been called already 
+		if (target == null) {
+			return;
+		}
+		target.originFileRev = fileRevsToVisit.get(0); // files to visit are new to old
+		target.originChangelogRev = changeset(target.originFileRev);
+	}
+
+	/**
+	 * Mark revision closest(ceil) to specified as the very first one (no parents) 
+	 */
+	public void chopAtChangeset(int firstChangelogRevOfInterest) {
+		csetRangeStart = firstChangelogRevOfInterest;
+		if (firstChangelogRevOfInterest == 0) {
+			return; // nothing to do
+		}
+		int i = 0, x = fileRevsToVisit.size(), fileRev = BAD_REVISION;
+		// fileRevsToVisit is new to old, greater numbers to smaller
+		while (i < x && changeset(fileRev = fileRevsToVisit.get(i)) >= firstChangelogRevOfInterest) {
+			i++;
+		}
+		assert fileRev != BAD_REVISION; // there's at least 1 revision in fileRevsToVisit
+		if (i == x && changeset(fileRev) != firstChangelogRevOfInterest) {
+			assert false : "Requested changeset shall belong to the chunk";
+			return;
+		}
+		fileRevsToVisit.trimTo(i); // no need to iterate more
+		// pretend fileRev got no parents
+		fileParentRevs.set(fileRev * 2, NO_REVISION);
+		fileParentRevs.set(fileRev * 2 + 1, NO_REVISION); // was set(fileRev, ...): wrong slot, clobbered p1 of revision fileRev/2 instead of clearing p2
+	}
+
+	public int[] fileRevisions(HgIterateDirection iterateOrder) {
+		// fileRevsToVisit is { r10, r7, r6, r5, r0 }, new to old
+		int[] rv = fileRevsToVisit.toArray();
+		if (iterateOrder == OldToNew) {
+			// reverse return value
+			for (int a = 0, b = rv.length-1; a < b; a++, b--) {
+				int t = rv[b];
+				rv[b] = rv[a];
+				rv[a] = t;
+			}
+		}
+		return rv;
+	}
+	
+	/**
+	 * @return number of file revisions in this chunk of its history
+	 */
+	public int revisionCount() {
+		return fileRevsToVisit.size();
+	}
+	
+	public int changeset(int fileRevIndex) {
+		return file2changelog[fileRevIndex];
+	}
+	
+	public void fillFileParents(int fileRevIndex, int[] fileParents) {
+		if (fileRevIndex == 0 && originFileRev != BAD_REVISION) {
+			// this chunk continues another file
+			assert originFileRev != NO_REVISION;
+			fileParents[0] = originFileRev;
+			fileParents[1] = NO_REVISION;
+			return;
+		}
+		fileParents[0] = fileParentRevs.get(fileRevIndex * 2);
+		fileParents[1] = fileParentRevs.get(fileRevIndex * 2 + 1);
+	}
+	
+	public void fillCsetParents(int fileRevIndex, int[] csetParents) {
+		if (fileRevIndex == 0 && originFileRev != BAD_REVISION) {
+			assert originFileRev != NO_REVISION;
+			csetParents[0] = originChangelogRev;
+			csetParents[1] = NO_REVISION; // I wonder if possible to start a copy with two parents?
+			return;
+		}
+		int fp1 = fileParentRevs.get(fileRevIndex * 2);
+		int fp2 = fileParentRevs.get(fileRevIndex * 2 + 1);
+		csetParents[0] = fp1 == NO_REVISION ? NO_REVISION : changeset(fp1);
+		csetParents[1] = fp2 == NO_REVISION ? NO_REVISION : changeset(fp2);
+	}
+}
\ No newline at end of file
--- a/src/org/tmatesoft/hg/internal/FileSystemHelper.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/FileSystemHelper.java	Wed Jul 10 11:48:55 2013 +0200
@@ -63,7 +63,9 @@
 		try {
 			execHelper.exec(command);
 		} catch (InterruptedException ex) {
-			throw new IOException(ex);
+			IOException e = new IOException();
+			e.initCause(ex); // XXX Java 1.5; was ex.initCause(ex) — self-cause throws IllegalArgumentException and drops the cause
+			throw e;
 		}
 	}
 	
@@ -77,7 +79,9 @@
 		try {
 			execHelper.exec(command);
 		} catch (InterruptedException ex) {
-			throw new IOException(ex);
+			IOException e = new IOException();
+			e.initCause(ex); // XXX Java 1.5; was ex.initCause(ex) — self-cause throws IllegalArgumentException and drops the cause
+			throw e;
 		}
 	}
 
@@ -90,12 +94,14 @@
 		String result = null;
 		try {
 			result = execHelper.exec(command).toString().trim();
-			if (result.isEmpty()) {
+			if (result.length() == 0) { // XXX Java 1.5 isEmpty()
 				return defaultValue;
 			}
 			return Integer.parseInt(result, 8);
 		} catch (InterruptedException ex) {
-			throw new IOException(ex);
+			IOException e = new IOException();
+			e.initCause(ex); // XXX Java 1.5; was ex.initCause(ex) — self-cause throws IllegalArgumentException and drops the cause
+			throw e;
 		} catch (NumberFormatException ex) {
 			ctx.getLog().dump(getClass(), Warn, ex, String.format("Bad value for access rights:%s", result));
 			return defaultValue;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/FileUtils.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.util.LogFacility.Severity.Debug;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+
+import org.tmatesoft.hg.core.HgIOException;
+import org.tmatesoft.hg.util.LogFacility;
+import org.tmatesoft.hg.util.LogFacility.Severity;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class FileUtils {
+	
+	private final LogFacility log;
+	private final Class<?> troublemaker;
+	
+	public static void copyFile(File from, File to) throws HgIOException {
+		new FileUtils(new StreamLogFacility(Debug, true, System.err), FileUtils.class).copy(from, to);
+	}
+
+	public FileUtils(LogFacility logFacility, Object troubleSource) {
+		log = logFacility;
+		if (troubleSource == null) {
+			troublemaker = null;
+		} else {
+			troublemaker = troubleSource instanceof Class ? (Class<?>) troubleSource : troubleSource.getClass();
+		}
+	}
+
+	public void copy(File from, File to) throws HgIOException {
+		FileInputStream fis = null;
+		FileOutputStream fos = null;
+		try {
+			fis = new FileInputStream(from);
+			fos = new FileOutputStream(to);
+			FileChannel input = fis.getChannel();
+			FileChannel output = fos.getChannel();
+			long count = input.size();
+			long pos = 0;
+			int zeroCopied = 0; // flag to prevent hang-up
+			do {
+				long c = input.transferTo(pos, count, output);
+				pos += c;
+				count -= c;
+				if (c == 0) {
+					if (++zeroCopied == 3) {
+						String m = String.format("Can't copy %s to %s, transferTo copies 0 bytes. Position: %d, bytes left:%d", from.getName(), to.getName(), pos, count);
+						throw new IOException(m);
+					}
+				} else {
+					// reset
+					zeroCopied = 0;
+				}
+			} while (count > 0);
+			fos.close();
+			fos = null;
+			fis.close();
+			fis = null;
+		} catch (IOException ex) {
+			// not in finally because I don't want to lose exception from fos.close()
+			closeQuietly(fis, from);
+			closeQuietly(fos, to);
+			String m = String.format("Failed to copy %s to %s", from.getName(), to.getName());
+			throw new HgIOException(m, ex, from);
+		}
+		/* Copy of cpython's 00changelog.d, 20Mb+
+		 * Linux&Windows: 300-400 ms,
+		 * Windows uncached run: 1.6 seconds
+		 */
+	}
+
+	public void closeQuietly(Closeable stream) {
+		closeQuietly(stream, null);
+	}
+
+	public void closeQuietly(Closeable stream, File f) {
+		if (stream != null) {
+			try {
+				stream.close();
+			} catch (IOException ex) {
+				// ignore
+				final String msg;
+				if (f == null) {
+					msg = "Exception while closing stream quietly";
+				} else {
+					msg = String.format("Failed to close %s", f);
+				}
+				log.dump(troublemaker == null ? getClass() : troublemaker, Severity.Warn, ex, msg);
+			}
+		}
+	}
+}
--- a/src/org/tmatesoft/hg/internal/FilterDataAccess.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/FilterDataAccess.java	Wed Jul 10 11:48:55 2013 +0200
@@ -45,6 +45,7 @@
 
 	@Override
 	public FilterDataAccess reset() throws IOException {
+		dataAccess.reset();
 		count = length;
 		return this;
 	}
--- a/src/org/tmatesoft/hg/internal/InflaterDataAccess.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/InflaterDataAccess.java	Wed Jul 10 11:48:55 2013 +0200
@@ -39,22 +39,22 @@
 	private int decompressedLength;
 
 	public InflaterDataAccess(DataAccess dataAccess, long offset, int compressedLength) {
-		this(dataAccess, offset, compressedLength, -1, new Inflater(), new byte[512]);
+		this(dataAccess, offset, compressedLength, -1, new Inflater(), new byte[512], null);
 	}
 
 	public InflaterDataAccess(DataAccess dataAccess, long offset, int compressedLength, int actualLength) {
-		this(dataAccess, offset, compressedLength, actualLength, new Inflater(), new byte[512]);
+		this(dataAccess, offset, compressedLength, actualLength, new Inflater(), new byte[512], null);
 	}
 
-	public InflaterDataAccess(DataAccess dataAccess, long offset, int compressedLength, int actualLength, Inflater inflater, byte[] buf) {
+	public InflaterDataAccess(DataAccess dataAccess, long offset, int compressedLength, int actualLength, Inflater inflater, byte[] inBuf, ByteBuffer outBuf) {
 		super(dataAccess, offset, compressedLength);
-		if (inflater == null || buf == null) {
+		if (inflater == null || inBuf == null) {
 			throw new IllegalArgumentException();
 		}
 		this.inflater = inflater;
 		this.decompressedLength = actualLength;
-		inBuffer = buf;
-		outBuffer = ByteBuffer.allocate(inBuffer.length * 2);
+		inBuffer = inBuf;
+		outBuffer = outBuf == null ? ByteBuffer.allocate(inBuffer.length * 2) : outBuf;
 		outBuffer.limit(0); // there's nothing to read in the buffer 
 	}
 	
--- a/src/org/tmatesoft/hg/internal/IntMap.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/IntMap.java	Wed Jul 10 11:48:55 2013 +0200
@@ -17,6 +17,7 @@
 package org.tmatesoft.hg.internal;
 
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -136,7 +137,6 @@
 	/**
 	 * Forget first N entries (in natural order) in the map.
 	 */
-	@Experimental
 	public void removeFromStart(int count) {
 		if (count > 0 && count <= size) {
 			if (count < size) {
@@ -217,6 +217,13 @@
 		}
 		return map;
 	}
+	
+	public Collection<V> values() {
+		@SuppressWarnings("unchecked")
+		V[] rv = (V[]) new Object[size];
+		System.arraycopy(values, 0, rv, 0, size);
+		return Arrays.<V>asList(rv);
+	}
 
 	// copy of Arrays.binarySearch, with upper search limit as argument
 	private static int binarySearch(int[] a, int high, int key) {
--- a/src/org/tmatesoft/hg/internal/IntVector.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/IntVector.java	Wed Jul 10 11:48:55 2013 +0200
@@ -130,7 +130,6 @@
 	/**
 	 * Use only when this instance won't be used any longer
 	 */
-	@Experimental
 	int[] toArray(boolean internalIfSizeMatchCapacity) {
 		if (count == data.length) {
 			return data;
--- a/src/org/tmatesoft/hg/internal/Internals.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/Internals.java	Wed Jul 10 11:48:55 2013 +0200
@@ -19,7 +19,6 @@
 import static org.tmatesoft.hg.util.LogFacility.Severity.Error;
 
 import java.io.File;
-import java.io.IOException;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -29,6 +28,7 @@
 import java.util.List;
 import java.util.StringTokenizer;
 
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.SessionContext;
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgInternals;
@@ -37,6 +37,8 @@
 import org.tmatesoft.hg.repo.HgRepositoryFiles;
 import org.tmatesoft.hg.repo.HgRepositoryLock;
 import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.LogFacility;
+import org.tmatesoft.hg.util.Path;
 import org.tmatesoft.hg.util.PathRewrite;
 
 /**
@@ -115,25 +117,32 @@
 	private final HgRepository repo;
 	private final File repoDir;
 	private final boolean isCaseSensitiveFileSystem;
-	private final boolean shallCacheRevlogsInRepo;
 	private final DataAccessProvider dataAccess;
+	private final ImplAccess implAccess;
 	
 	private final int requiresFlags;
 
 	private final PathRewrite dataPathHelper; // access to file storage area (usually under .hg/store/data/), with filenames mangled  
 	private final PathRewrite repoPathHelper; // access to system files (under .hg/store if requires has 'store' flag)
 
-	public Internals(HgRepository hgRepo, File hgDir) throws HgRuntimeException {
+	private final boolean shallMergePatches;
+	private final RevlogStreamFactory streamProvider;
+
+	public Internals(HgRepository hgRepo, File hgDir, ImplAccess implementationAccess) throws HgRuntimeException {
 		repo = hgRepo;
 		repoDir = hgDir;
+		implAccess = implementationAccess;
 		isCaseSensitiveFileSystem = !runningOnWindows();
 		SessionContext ctx = repo.getSessionContext();
-		shallCacheRevlogsInRepo = new PropertyMarshal(ctx).getBoolean(CFG_PROPERTY_REVLOG_STREAM_CACHE, true);
 		dataAccess = new DataAccessProvider(ctx);
 		RepoInitializer repoInit = new RepoInitializer().initRequiresFromFile(repoDir);
 		requiresFlags = repoInit.getRequires();
 		dataPathHelper = repoInit.buildDataFilesHelper(getSessionContext());
 		repoPathHelper = repoInit.buildStoreFilesHelper();
+		final PropertyMarshal pm = new PropertyMarshal(ctx);
+		boolean shallCacheRevlogsInRepo = pm.getBoolean(CFG_PROPERTY_REVLOG_STREAM_CACHE, true);
+		streamProvider = new RevlogStreamFactory(this, shallCacheRevlogsInRepo); 
+		shallMergePatches = pm.getBoolean(Internals.CFG_PROPERTY_PATCH_MERGE, true);
 	}
 	
 	public boolean isInvalid() {
@@ -141,12 +150,16 @@
 	}
 	
 	public File getRepositoryFile(HgRepositoryFiles f) {
-		return f.residesUnderRepositoryRoot() ? getFileFromRepoDir(f.getName()) : getFileFromDataDir(f.getName());
+		switch (f.getHome()) {
+			case Store : return getFileFromStoreDir(f.getName());
+			case Repo : return getFileFromRepoDir(f.getName());
+			default : return new File(repo.getWorkingDir(), f.getName());
+		}
 	}
 
 	/**
 	 * Access files under ".hg/".
-	 * File not necessarily exists, this method is merely a factory for Files at specific, configuration-dependent location. 
+	 * File not necessarily exists, this method is merely a factory for {@link File files} at specific, configuration-dependent location. 
 	 * 
 	 * @param name shall be normalized path
 	 */
@@ -180,6 +193,10 @@
 		return repo.getSessionContext();
 	}
 	
+	public LogFacility getLog() {
+		return getSessionContext().getLog();
+	}
+	
 	public HgRepository getRepo() {
 		return repo;
 	}
@@ -260,6 +277,16 @@
 		return requiresFlags;
 	}
 	
+	boolean shallMergePatches() {
+		return shallMergePatches;
+	}
+
+	RevlogChangeMonitor getRevlogTracker(File f) {
+		// TODO decide whether to use one monitor per multiple files or 
+		// an instance per file; and let SessionContext pass alternative implementation)
+		return new RevlogChangeMonitor(f);
+	}
+	
 	public static boolean runningOnWindows() {
 		return System.getProperty("os.name").indexOf("Windows") != -1;
 	}
@@ -314,10 +341,9 @@
 	
 	/**
 	 * User-specific configuration, from system-wide and user home locations, without any repository-specific data.
-	 * 
 	 * @see http://www.selenic.com/mercurial/hgrc.5.html
 	 */
-	public static ConfigFile readConfiguration(SessionContext sessionCtx) throws IOException {
+	public static ConfigFile readConfiguration(SessionContext sessionCtx) throws HgIOException {
 		ConfigFile configFile = new ConfigFile(sessionCtx);
 		File hgInstallRoot = findHgInstallRoot(sessionCtx); // may be null
 		//
@@ -363,7 +389,7 @@
 	 * Repository-specific configuration
 	 * @see http://www.selenic.com/mercurial/hgrc.5.html
 	 */
-	public ConfigFile readConfiguration() throws IOException {
+	public ConfigFile readConfiguration() throws HgIOException {
 		ConfigFile configFile = readConfiguration(repo.getSessionContext());
 		// last one, overrides anything else
 		// <repo>/.hg/hgrc
@@ -371,6 +397,9 @@
 		return configFile;
 	}
 
+	/*package-local*/ImplAccess getImplAccess() {
+		return implAccess;
+	}
 	
 	private static List<File> getWindowsConfigFilesPerInstall(File hgInstallDir) {
 		File f = new File(hgInstallDir, "Mercurial.ini");
@@ -381,7 +410,7 @@
 		if (f.canRead() && f.isDirectory()) {
 			return listConfigFiles(f);
 		}
-		// TODO post-1.0 query registry, e.g. with
+		// TODO [post-1.1] query registry, e.g. with
 		// Runtime.exec("reg query HKLM\Software\Mercurial")
 		//
 		f = new File("C:\\Mercurial\\Mercurial.ini");
@@ -454,11 +483,21 @@
 		// fallback to default, let calling code fail with Exception if can't write
 		return new File(System.getProperty("user.home"), ".hgrc");
 	}
+	
+	public RevlogStream createManifestStream() {
+		File manifestFile = getFileFromStoreDir("00manifest.i");
+		return streamProvider.create(manifestFile);
+	}
 
-	public boolean shallCacheRevlogs() {
-		return shallCacheRevlogsInRepo;
+	public RevlogStream createChangelogStream() {
+		File chlogFile = getFileFromStoreDir("00changelog.i");
+		return streamProvider.create(chlogFile);
 	}
-	
+
+	public RevlogStream resolveStoreFile(Path path) {
+		return streamProvider.getStoreFile(path, false);
+	}
+
 	// marker method
 	public static IllegalStateException notImplemented() {
 		return new IllegalStateException("Not implemented");
@@ -496,4 +535,11 @@
 		assert ((long) i) == l : "Loss of data!";
 		return i;
 	}
+
+	// access implementation details (fields, methods) of oth.repo package
+	public interface ImplAccess {
+		public RevlogStream getStream(HgDataFile df);
+		public RevlogStream getManifestStream();
+		public RevlogStream getChangelogStream();
+	}
 }
--- a/src/org/tmatesoft/hg/internal/LineReader.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/LineReader.java	Wed Jul 10 11:48:55 2013 +0200
@@ -16,8 +16,6 @@
  */
 package org.tmatesoft.hg.internal;
 
-import static org.tmatesoft.hg.util.LogFacility.Severity.Warn;
-
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
@@ -28,8 +26,7 @@
 import java.nio.charset.Charset;
 import java.util.Collection;
 
-import org.tmatesoft.hg.repo.HgInvalidFileException;
-import org.tmatesoft.hg.repo.ext.MqManager;
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.util.LogFacility;
 
 /**
@@ -98,7 +95,14 @@
 			return this;
 		}
 
-		public <T> void read(LineConsumer<T> consumer, T paramObj) throws HgInvalidFileException {
+		/**
+		 * 
+		 * @param consumer where to pipe read lines to
+		 * @param paramObj parameterizes consumer
+		 * @return paramObj value for convenience
+		 * @throws HgIOException if there's {@link IOException} while reading file
+		 */
+		public <T> T read(LineConsumer<T> consumer, T paramObj) throws HgIOException {
 			BufferedReader statusFileReader = null;
 			try {
 //				consumer.begin(file, paramObj);
@@ -122,20 +126,15 @@
 						ok = consumer.consume(line, paramObj);
 					}
 				}
+				return paramObj;
 			} catch (IOException ex) {
-				throw new HgInvalidFileException(ex.getMessage(), ex, file);
+				throw new HgIOException(ex.getMessage(), ex, file);
 			} finally {
-				if (statusFileReader != null) {
-					try {
-						statusFileReader.close();
-					} catch (IOException ex) {
-						log.dump(MqManager.class, Warn, ex, null);
-					}
-				}
+				new FileUtils(log, this).closeQuietly(statusFileReader);
 //				try {
 //					consumer.end(file, paramObj);
 //				} catch (IOException ex) {
-//					log.warn(MqManager.class, ex, null);
+//					log.warn(getClass(), ex, null);
 //				}
 			}
 		}
--- a/src/org/tmatesoft/hg/internal/ManifestEntryBuilder.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/ManifestEntryBuilder.java	Wed Jul 10 11:48:55 2013 +0200
@@ -18,7 +18,9 @@
 
 import java.io.ByteArrayOutputStream;
 
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.DataSerializer.DataSource;
 
 /**
  * Create binary manifest entry ready to write down into 00manifest.i
@@ -36,16 +38,20 @@
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-public class ManifestEntryBuilder {
-	private ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+public class ManifestEntryBuilder implements DataSource {
+	private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+	private final EncodingHelper encHelper;
 
+	public ManifestEntryBuilder(EncodingHelper encodingHelper) {
+		encHelper = encodingHelper;
+	}
 	
 	public ManifestEntryBuilder reset() {
 		buffer.reset();
 		return this;
 	}
 	public ManifestEntryBuilder add(String fname, Nodeid revision) {
-		byte[] b = fname.getBytes();
+		byte[] b = encHelper.toManifest(fname);
 		buffer.write(b, 0, b.length);
 		buffer.write('\0');
 		b = revision.toString().getBytes();
@@ -58,4 +64,13 @@
 		return buffer.toByteArray();
 	}
 
+	public void serialize(DataSerializer out) throws HgIOException {
+		byte[] r = build();
+		out.write(r, 0 , r.length);
+	}
+
+	public int serializeLength() {
+		return buffer.size();
+	}
+
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/Metadata.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.util.LogFacility.Severity.Error;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+import org.tmatesoft.hg.core.SessionContext;
+import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgInvalidStateException;
+import org.tmatesoft.hg.util.LogFacility;
+
+/**
+ * Container for metadata recorded as part of file revisions
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class Metadata {
+	private static class Record {
+		public final int offset;
+		public final MetadataEntry[] entries;
+		
+		public Record(int off, MetadataEntry[] entr) {
+			offset = off;
+			entries = entr;
+		}
+	}
+	// XXX sparse array needed
+	private final IntMap<Metadata.Record> entries = new IntMap<Metadata.Record>(5);
+	
+	private final Metadata.Record NONE = new Record(-1, null); // don't want statics
+
+	private final LogFacility log;
+
+	public Metadata(SessionContext.Source sessionCtx) {
+		log = sessionCtx.getSessionContext().getLog();
+	}
+	
+	// true when there's metadata for given revision
+	public boolean known(int revision) {
+		Metadata.Record i = entries.get(revision);
+		return i != null && NONE != i;
+	}
+
+	// true when revision has been checked for metadata presence.
+	public boolean checked(int revision) {
+		return entries.containsKey(revision);
+	}
+
+	// true when revision has been checked and found not having any metadata
+	public boolean none(int revision) {
+		Metadata.Record i = entries.get(revision);
+		return i == NONE;
+	}
+
+	// mark revision as having no metadata.
+	void recordNone(int revision) {
+		Metadata.Record i = entries.get(revision);
+		if (i == NONE) {
+			return; // already there
+		} 
+		if (i != null) {
+			throw new HgInvalidStateException(String.format("Trying to override Metadata state for revision %d (known offset: %d)", revision, i));
+		}
+		entries.put(revision, NONE);
+	}
+
+	// since this is internal class, callers are supposed to ensure arg correctness (i.e. ask known() before)
+	public int dataOffset(int revision) {
+		return entries.get(revision).offset;
+	}
+	void add(int revision, int dataOffset, Collection<MetadataEntry> e) {
+		assert !entries.containsKey(revision);
+		entries.put(revision, new Record(dataOffset, e.toArray(new MetadataEntry[e.size()])));
+	}
+	
+	/**
+	 * @return <code>true</code> if metadata has been found
+	 */
+	public boolean tryRead(int revisionNumber, DataAccess data) throws IOException, HgInvalidControlFileException {
+		final int daLength = data.length();
+		if (daLength < 4 || data.readByte() != 1 || data.readByte() != 10) {
+			recordNone(revisionNumber);
+			return false;
+		} else {
+			ArrayList<MetadataEntry> _metadata = new ArrayList<MetadataEntry>();
+			int offset = parseMetadata(data, daLength, _metadata);
+			add(revisionNumber, offset, _metadata);
+			return true;
+		}
+	}
+
+	public String find(int revision, String key) {
+		for (MetadataEntry me : entries.get(revision).entries) {
+			if (me.matchKey(key)) {
+				return me.value();
+			}
+		}
+		return null;
+	}
+
+	private int parseMetadata(DataAccess data, final int daLength, ArrayList<MetadataEntry> _metadata) throws IOException, HgInvalidControlFileException {
+		int lastEntryStart = 2;
+		int lastColon = -1;
+		// XXX in fact, need smth like ByteArrayBuilder, similar to StringBuilder,
+		// which can't be used here because we can't convert bytes to chars as we read them
+		// (there might be multi-byte encoding), and we need to collect all bytes before converting to string 
+		ByteArrayOutputStream bos = new ByteArrayOutputStream();
+		String key = null, value = null;
+		boolean byteOne = false;
+		boolean metadataIsComplete = false;
+		for (int i = 2; i < daLength; i++) {
+			byte b = data.readByte();
+			if (b == '\n') {
+				if (byteOne) { // i.e. \n follows 1
+					lastEntryStart = i+1;
+					metadataIsComplete = true;
+					// XXX is it possible to have here incomplete key/value (i.e. if last pair didn't end with \n)
+					// if yes, need to set metadataIsComplete to true in that case as well
+					break;
+				}
+				if (key == null || lastColon == -1 || i <= lastColon) {
+					log.dump(getClass(), Error, "Missing key in file revision metadata at index %d", i);
+				}
+				value = new String(bos.toByteArray()).trim();
+				bos.reset();
+				_metadata.add(new MetadataEntry(key, value));
+				key = value = null;
+				lastColon = -1;
+				lastEntryStart = i+1;
+				continue;
+			} 
+			// byteOne has to be consumed up to this line, if not yet, consume it
+			if (byteOne) {
+				// insert 1 we've read on previous step into the byte builder
+				bos.write(1);
+				byteOne = false;
+				// fall-through to consume current byte
+			}
+			if (b == (int) ':') {
+				assert value == null;
+				key = new String(bos.toByteArray());
+				bos.reset();
+				lastColon = i;
+			} else if (b == 1) {
+				byteOne = true;
+			} else {
+				bos.write(b);
+			}
+		}
+		// data.isEmpty is not reliable, renamed files of size==0 keep only metadata
+		if (!metadataIsComplete) {
+			// XXX perhaps, worth a testcase (empty file, renamed; read or ask ifCopy)
+			throw new HgInvalidControlFileException("Metadata is not closed properly", null, null);
+		}
+		return lastEntryStart;
+	}
+
+	/**
+	 * There may be several entries of metadata per single revision, this class captures single entry
+	 */
+	private static class MetadataEntry {
+		private final String entry;
+		private final int valueStart;
+
+		// key may be null
+		/* package-local */MetadataEntry(String key, String value) {
+			if (key == null) {
+				entry = value;
+				valueStart = -1; // not 0 to tell between key == null and key == ""
+			} else {
+				entry = key + value;
+				valueStart = key.length();
+			}
+		}
+
+		/* package-local */boolean matchKey(String key) {
+			return key == null ? valueStart == -1 : key.length() == valueStart && entry.startsWith(key);
+		}
+
+//			uncomment once/if needed
+//			public String key() {
+//				return entry.substring(0, valueStart);
+//			}
+
+		public String value() {
+			return valueStart == -1 ? entry : entry.substring(valueStart);
+		}
+	}
+}
\ No newline at end of file
--- a/src/org/tmatesoft/hg/internal/NewlineFilter.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/NewlineFilter.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -28,7 +28,7 @@
 import java.util.ArrayList;
 import java.util.Map;
 
-import org.tmatesoft.hg.repo.HgInvalidFileException;
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
 import org.tmatesoft.hg.repo.HgRepository;
 import org.tmatesoft.hg.util.Adaptable;
@@ -314,7 +314,7 @@
 			ConfigFile hgeol = new ConfigFile(hgRepo.getSessionContext());
 			try {
 				hgeol.addLocation(cfgFile);
-			} catch (HgInvalidFileException ex) {
+			} catch (HgIOException ex) {
 				hgRepo.getSessionContext().getLog().dump(getClass(), Warn, ex, null);
 			}
 			nativeRepoFormat = hgeol.getSection("repository").get("native");
--- a/src/org/tmatesoft/hg/internal/Patch.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/Patch.java	Wed Jul 10 11:48:55 2013 +0200
@@ -20,6 +20,8 @@
 import java.util.ArrayList;
 import java.util.Formatter;
 
+import org.tmatesoft.hg.core.HgIOException;
+
 /**
  * @see http://mercurial.selenic.com/wiki/BundleFormat
  * in Changelog group description
@@ -177,7 +179,7 @@
 		return prefix + totalDataLen;
 	}
 	
-	/*package-local*/ void serialize(DataSerializer out) throws IOException {
+	/*package-local*/ void serialize(DataSerializer out) throws HgIOException {
 		for (int i = 0, x = data.size(); i < x; i++) {
 			final int start = starts.get(i);
 			final int end = ends.get(i);
@@ -462,7 +464,7 @@
 
 	public class PatchDataSource implements DataSerializer.DataSource {
 
-		public void serialize(DataSerializer out) throws IOException {
+		public void serialize(DataSerializer out) throws HgIOException {
 			Patch.this.serialize(out);
 		}
 
--- a/src/org/tmatesoft/hg/internal/PhasesHelper.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/PhasesHelper.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 TMate Software Ltd
+ * Copyright (c) 2012-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -18,25 +18,29 @@
 
 import static org.tmatesoft.hg.repo.HgPhase.Draft;
 import static org.tmatesoft.hg.repo.HgPhase.Secret;
-import static org.tmatesoft.hg.util.LogFacility.Severity.Info;
+import static org.tmatesoft.hg.repo.HgRepositoryFiles.Phaseroots;
 import static org.tmatesoft.hg.util.LogFacility.Severity.Warn;
 
-import java.io.BufferedReader;
 import java.io.File;
-import java.io.FileReader;
+import java.io.FileWriter;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 
 import org.tmatesoft.hg.core.HgChangeset;
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgInvalidStateException;
 import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.repo.HgPhase;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 
 /**
  * Support to deal with Mercurial phases feature (as of Mercurial version 2.1)
@@ -69,21 +73,37 @@
 		return repo.getRepo();
 	}
 
-	public boolean isCapableOfPhases() throws HgInvalidControlFileException {
+	public boolean isCapableOfPhases() throws HgRuntimeException {
 		if (null == repoSupporsPhases) {
 			repoSupporsPhases = readRoots();
 		}
 		return repoSupporsPhases.booleanValue();
 	}
+	
+	public boolean withSecretRoots() {
+		return !secretPhaseRoots.isEmpty();
+	}
 
-
-	public HgPhase getPhase(HgChangeset cset) throws HgInvalidControlFileException {
+	/**
+	 * @param cset revision to query
+	 * @return phase of the changeset, never <code>null</code>
+	 * @throws HgInvalidControlFileException if failed to access revlog index/data entry. <em>Runtime exception</em>
+	 * @throws HgRuntimeException subclass thereof to indicate other issues with the library. <em>Runtime exception</em>
+	 */
+	public HgPhase getPhase(HgChangeset cset) throws HgRuntimeException {
 		final Nodeid csetRev = cset.getNodeid();
 		final int csetRevIndex = cset.getRevisionIndex();
 		return getPhase(csetRevIndex, csetRev);
 	}
 
-	public HgPhase getPhase(final int csetRevIndex, Nodeid csetRev) throws HgInvalidControlFileException {
+	/**
+	 * @param csetRevIndex revision index to query
+	 * @param csetRev revision nodeid, optional 
+	 * @return phase of the changeset, never <code>null</code>
+	 * @throws HgInvalidControlFileException if failed to access revlog index/data entry. <em>Runtime exception</em>
+	 * @throws HgRuntimeException subclass thereof to indicate other issues with the library. <em>Runtime exception</em>
+	 */
+	public HgPhase getPhase(final int csetRevIndex, Nodeid csetRev) throws HgRuntimeException {
 		if (!isCapableOfPhases()) {
 			return HgPhase.Undefined;
 		}
@@ -116,21 +136,104 @@
 			}
 		}
 		return HgPhase.Public;
-
 	}
 
-	private Boolean readRoots() throws HgInvalidControlFileException {
-		File phaseroots = repo.getFileFromStoreDir("phaseroots");
-		BufferedReader br = null;
+
+	/**
+	 * @return all revisions with secret phase
+	 */
+	public RevisionSet allSecret() {
+		return allOf(HgPhase.Secret);
+	}
+	
+	/**
+	 * @return all revisions with draft phase
+	 */
+	public RevisionSet allDraft() {
+		return allOf(HgPhase.Draft).subtract(allOf(HgPhase.Secret));
+	}
+	
+	public void updateRoots(Collection<Nodeid> draftRoots, Collection<Nodeid> secretRoots) throws HgInvalidControlFileException {
+		draftPhaseRoots = draftRoots.isEmpty() ? Collections.<Nodeid>emptyList() : new ArrayList<Nodeid>(draftRoots);
+		secretPhaseRoots = secretRoots.isEmpty() ? Collections.<Nodeid>emptyList() : new ArrayList<Nodeid>(secretRoots);
+		String fmt = "%d %s\n";
+		File phaseroots = repo.getRepositoryFile(Phaseroots);
+		FileWriter fw = null;
+		try {
+			fw = new FileWriter(phaseroots);
+			for (Nodeid n : secretPhaseRoots) {
+				fw.write(String.format(fmt, HgPhase.Secret.mercurialOrdinal(), n.toString()));
+			}
+			for (Nodeid n : draftPhaseRoots) {
+				fw.write(String.format(fmt, HgPhase.Draft.mercurialOrdinal(), n.toString()));
+			}
+			fw.flush();
+		} catch (IOException ex) {
+			throw new HgInvalidControlFileException(ex.getMessage(), ex, phaseroots);
+		} finally {
+			new FileUtils(repo.getLog(), this).closeQuietly(fw);
+		}
+	}
+
+	public void newCommitNode(Nodeid newChangeset, HgPhase newCommitPhase) throws HgRuntimeException {
+		final int riCset = repo.getRepo().getChangelog().getRevisionIndex(newChangeset);
+		HgPhase ph = getPhase(riCset, newChangeset);
+		if (ph.compareTo(newCommitPhase) >= 0) {
+			// present phase is more secret than the desired one
+			return;
+		}
+		// newCommitPhase can't be public here, condition above would be satisfied
+		assert newCommitPhase != HgPhase.Public;
+		// ph is e.g public when newCommitPhase is draft
+		// or is draft when desired phase is secret
+		final RevisionSet rs = allOf(newCommitPhase).union(new RevisionSet(Collections.singleton(newChangeset)));
+		final RevisionSet newRoots;
+		if (parentHelper != null) {
+			newRoots = rs.roots(parentHelper);
+		} else {
+			newRoots = rs.roots(repo.getRepo());
+		}
+		if (newCommitPhase == HgPhase.Draft) {
+			updateRoots(newRoots.asList(), secretPhaseRoots);
+		} else if (newCommitPhase == HgPhase.Secret) {
+			updateRoots(draftPhaseRoots, newRoots.asList());
+		} else {
+			throw new HgInvalidStateException(String.format("Unexpected phase %s for new commits", newCommitPhase));
+		}
+	}
+
+	/**
+	 * For a given phase, collect all revisions with phase that is the same or more private (i.e. for Draft, returns Draft+Secret)
+	 * The reason is not a nice API intention (which is awful, indeed), but ease of implementation.
+	 */
+	private RevisionSet allOf(HgPhase phase) {
+		assert phase != HgPhase.Public;
+		if (!isCapableOfPhases()) {
+			return new RevisionSet(Collections.<Nodeid>emptyList());
+		}
+		final List<Nodeid> roots = getPhaseRoots(phase);
+		if (parentHelper != null) {
+			return new RevisionSet(roots).union(new RevisionSet(parentHelper.childrenOf(roots)));
+		} else {
+			RevisionSet rv = new RevisionSet(Collections.<Nodeid>emptyList());
+			for (RevisionDescendants rd : getPhaseDescendants(phase)) {
+				rv = rv.union(rd.asRevisionSet());
+			}
+			return rv;
+		}
+	}
+
+	private Boolean readRoots() throws HgRuntimeException {
+		File phaseroots = repo.getRepositoryFile(Phaseroots);
 		try {
 			if (!phaseroots.exists()) {
 				return Boolean.FALSE;
 			}
+			LineReader lr = new LineReader(phaseroots, repo.getLog());
+			final Collection<String> lines = lr.read(new LineReader.SimpleLineCollector(), new LinkedList<String>());
 			HashMap<HgPhase, List<Nodeid>> phase2roots = new HashMap<HgPhase, List<Nodeid>>();
-			br = new BufferedReader(new FileReader(phaseroots));
-			String line;
-			while ((line = br.readLine()) != null) {
-				String[] lc = line.trim().split("\\s+");
+			for (String line : lines) {
+				String[] lc = line.split("\\s+");
 				if (lc.length == 0) {
 					continue;
 				}
@@ -153,17 +256,8 @@
 			}
 			draftPhaseRoots = phase2roots.containsKey(Draft) ? phase2roots.get(Draft) : Collections.<Nodeid>emptyList();
 			secretPhaseRoots = phase2roots.containsKey(Secret) ? phase2roots.get(Secret) : Collections.<Nodeid>emptyList();
-		} catch (IOException ex) {
-			throw new HgInvalidControlFileException(ex.toString(), ex, phaseroots);
-		} finally {
-			if (br != null) {
-				try {
-					br.close();
-				} catch (IOException ex) {
-					repo.getSessionContext().getLog().dump(getClass(), Info, ex, null);
-					// ignore the exception otherwise 
-				}
-			}
+		} catch (HgIOException ex) {
+			throw new HgInvalidControlFileException(ex, true);
 		}
 		return Boolean.TRUE;
 	}
@@ -177,7 +271,7 @@
 	}
 
 
-	private RevisionDescendants[] getPhaseDescendants(HgPhase phase) throws HgInvalidControlFileException {
+	private RevisionDescendants[] getPhaseDescendants(HgPhase phase) throws HgRuntimeException {
 		int ordinal = phase.ordinal();
 		if (phaseDescendants[ordinal] == null) {
 			phaseDescendants[ordinal] = buildPhaseDescendants(phase);
@@ -185,7 +279,7 @@
 		return phaseDescendants[ordinal];
 	}
 
-	private RevisionDescendants[] buildPhaseDescendants(HgPhase phase) throws HgInvalidControlFileException {
+	private RevisionDescendants[] buildPhaseDescendants(HgPhase phase) throws HgRuntimeException {
 		int[] roots = toIndexes(getPhaseRoots(phase));
 		RevisionDescendants[] rv = new RevisionDescendants[roots.length];
 		for (int i = 0; i < roots.length; i++) {
@@ -195,7 +289,7 @@
 		return rv;
 	}
 	
-	private int[] toIndexes(List<Nodeid> roots) throws HgInvalidControlFileException {
+	private int[] toIndexes(List<Nodeid> roots) throws HgRuntimeException {
 		int[] rv = new int[roots.size()];
 		for (int i = 0; i < rv.length; i++) {
 			rv[i] = getRepo().getChangelog().getRevisionIndex(roots.get(i));
--- a/src/org/tmatesoft/hg/internal/RepoInitializer.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RepoInitializer.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 TMate Software Ltd
+ * Copyright (c) 2012-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import java.nio.charset.Charset;
 
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.SessionContext;
 import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.util.PathRewrite;
@@ -31,6 +32,7 @@
  * Responsible of `requires` processing both on repo read and repo write
  * XXX needs better name, perhaps
  * 
+ * @see http://mercurial.selenic.com/wiki/RequiresFile
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
@@ -59,23 +61,32 @@
 		return requiresFlags;
 	}
 
-	public void initEmptyRepository(File repoDir) throws IOException {
+	public void initEmptyRepository(File repoDir) throws HgIOException {
 		repoDir.mkdirs();
-		FileOutputStream requiresFile = new FileOutputStream(new File(repoDir, "requires"));
-		StringBuilder sb = new StringBuilder(40);
-		sb.append("revlogv1\n");
-		if ((requiresFlags & STORE) != 0) {
-			sb.append("store\n");
+		final File requiresFile = new File(repoDir, "requires");
+		try {
+			FileOutputStream requiresStream = new FileOutputStream(requiresFile);
+			StringBuilder sb = new StringBuilder(40);
+			if ((requiresFlags & REVLOGV1) != 0) {
+				sb.append("revlogv1\n");
+			}
+			if ((requiresFlags & STORE) != 0) {
+				sb.append("store\n");
+			}
+			if ((requiresFlags & FNCACHE) != 0) {
+				sb.append("fncache\n");
+			}
+			if ((requiresFlags & DOTENCODE) != 0) {
+				sb.append("dotencode\n");
+			}
+			requiresStream.write(sb.toString().getBytes());
+			requiresStream.close();
+		} catch (IOException ex) {
+			throw new HgIOException("Failed to initialize empty repo", ex, requiresFile);
 		}
-		if ((requiresFlags & FNCACHE) != 0) {
-			sb.append("fncache\n");
+		if ((requiresFlags & STORE) != 0) {
+			new File(repoDir, "store").mkdir(); // with that, hg verify says ok.
 		}
-		if ((requiresFlags & DOTENCODE) != 0) {
-			sb.append("dotencode\n");
-		}
-		requiresFile.write(sb.toString().getBytes());
-		requiresFile.close();
-		new File(repoDir, "store").mkdir(); // with that, hg verify says ok.
 	}
 
 	public PathRewrite buildDataFilesHelper(SessionContext ctx) {
--- a/src/org/tmatesoft/hg/internal/RepositoryComparator.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RepositoryComparator.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -33,12 +33,11 @@
 import org.tmatesoft.hg.core.HgRemoteConnectionException;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgChangelog;
-import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
+import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.repo.HgRemoteRepository;
 import org.tmatesoft.hg.repo.HgRemoteRepository.Range;
 import org.tmatesoft.hg.repo.HgRemoteRepository.RemoteBranch;
-import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.util.CancelSupport;
 import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.ProgressSupport;
@@ -54,6 +53,7 @@
 	private final HgParentChildMap<HgChangelog> localRepo;
 	private final HgRemoteRepository remoteRepo;
 	private List<Nodeid> common;
+	private List<Nodeid> remoteHeads;
 
 	public RepositoryComparator(HgParentChildMap<HgChangelog> pwLocal, HgRemoteRepository hgRemote) {
 		localRepo = pwLocal;
@@ -81,54 +81,43 @@
 		return common;
 	}
 	
+	public List<Nodeid> getRemoteHeads() {
+		assert remoteHeads != null;
+		return remoteHeads;
+	}
+	
 	/**
 	 * @return revisions that are children of common entries, i.e. revisions that are present on the local server and not on remote.
 	 */
 	public List<Nodeid> getLocalOnlyRevisions() {
-		return localRepo.childrenOf(getCommon());
+		final List<Nodeid> c = getCommon();
+		if (c.isEmpty()) {
+			return localRepo.all();
+		} else {
+			final RevisionSet rsCommon = new RevisionSet(c);
+			final RevisionSet localHeads = new RevisionSet(localRepo.heads());
+			final List<Nodeid> commonChildren = localRepo.childrenOf(c);
+			final RevisionSet rsCommonChildren = new RevisionSet(commonChildren);
+			// check if there's any revision in the repository that doesn't trace to common
+			// e.g. branches from one of common ancestors
+			RevisionSet headsNotFromCommon = localHeads.subtract(rsCommonChildren).subtract(rsCommon);
+			if (headsNotFromCommon.isEmpty()) {
+				return commonChildren;
+			}
+			RevisionSet all = new RevisionSet(localRepo.all());
+			// need outgoing := ancestors(missing) - ancestors(common):
+			RevisionSet rsAncestors = all.ancestors(headsNotFromCommon, localRepo);
+			// #ancestors gives only parents, we need terminating children as well
+			rsAncestors = rsAncestors.union(headsNotFromCommon);
+			final RevisionSet rsAncestorsCommon = all.ancestors(rsCommon, localRepo);
+			RevisionSet outgoing = rsAncestors.subtract(rsAncestorsCommon).subtract(rsCommon);
+			// outgoing keeps children that spined off prior to common revisions
+			return outgoing.union(rsCommonChildren).asList();
+		}
 	}
 	
-	/**
-	 * Similar to @link {@link #getLocalOnlyRevisions()}, use this one if you need access to changelog entry content, not 
-	 * only its revision number. 
-	 * @param inspector delegate to analyze changesets, shall not be <code>null</code>
-	 */
-	public void visitLocalOnlyRevisions(HgChangelog.Inspector inspector) throws HgInvalidControlFileException {
-		if (inspector == null) {
-			throw new IllegalArgumentException();
-		}
-		// one can use localRepo.childrenOf(getCommon()) and then iterate over nodeids, but there seems to be
-		// another approach to get all changes after common:
-		// find index of earliest revision, and report all that were later
-		final HgChangelog changelog = localRepo.getRepo().getChangelog();
-		int earliestRevision = Integer.MAX_VALUE;
-		List<Nodeid> commonKnown = getCommon();
-		for (Nodeid n : commonKnown) {
-			if (!localRepo.hasChildren(n)) {
-				// there might be (old) nodes, known both locally and remotely, with no children
-				// hence, we don't need to consider their local revision number
-				continue;
-			}
-			int lr = changelog.getRevisionIndex(n);
-			if (lr < earliestRevision) {
-				earliestRevision = lr;
-			}
-		}
-		if (earliestRevision == Integer.MAX_VALUE) {
-			// either there are no common nodes (known locally and at remote)
-			// or no local children found (local is up to date). In former case, perhaps I shall bit return silently,
-			// but check for possible wrong repo comparison (hs says 'repository is unrelated' if I try to 
-			// check in/out for a repo that has no common nodes.
-			return;
-		}
-		if (earliestRevision < 0 || earliestRevision >= changelog.getLastRevision()) {
-			throw new HgInvalidStateException(String.format("Invalid index of common known revision: %d in total of %d", earliestRevision, 1+changelog.getLastRevision()));
-		}
-		changelog.range(earliestRevision+1, changelog.getLastRevision(), inspector);
-	}
-
 	private List<Nodeid> findCommonWithRemote() throws HgRemoteConnectionException {
-		List<Nodeid> remoteHeads = remoteRepo.heads();
+		remoteHeads = remoteRepo.heads();
 		LinkedList<Nodeid> resultCommon = new LinkedList<Nodeid>(); // these remotes are known in local
 		LinkedList<Nodeid> toQuery = new LinkedList<Nodeid>(); // these need further queries to find common
 		for (Nodeid rh : remoteHeads) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/ReverseIterator.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+
+/**
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class ReverseIterator<E> implements Iterator<E> {
+	private final ListIterator<E> listIterator;
+	
+	public ReverseIterator(List<E> list) {
+		listIterator = list.listIterator(list.size());
+	}
+
+	public boolean hasNext() {
+		return listIterator.hasPrevious();
+	}
+	public E next() {
+		return listIterator.previous();
+	}
+	public void remove() {
+		listIterator.remove();
+	}
+
+	public static <T> Iterable<T> reversed(final List<T> list) {
+		return new Iterable<T>() {
+
+			public Iterator<T> iterator() {
+				return new ReverseIterator<T>(list);
+			}
+		};
+	}
+}
\ No newline at end of file
--- a/src/org/tmatesoft/hg/internal/RevisionDescendants.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RevisionDescendants.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 TMate Software Ltd
+ * Copyright (c) 2012-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,13 +16,14 @@
  */
 package org.tmatesoft.hg.internal;
 
+import java.util.ArrayList;
 import java.util.BitSet;
 
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgChangelog;
-import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 
 /**
  * Represent indicators which revisions are descendants of the supplied root revision
@@ -37,9 +38,10 @@
 	private final int rootRevIndex;
 	private final int tipRevIndex; // this is the last revision we cache to
 	private final BitSet descendants;
+	private RevisionSet revset;
 
 	// in fact, may be refactored to deal not only with changelog, but any revlog (not sure what would be the usecase, though)
-	public RevisionDescendants(HgRepository hgRepo, int revisionIndex) {
+	public RevisionDescendants(HgRepository hgRepo, int revisionIndex) throws HgRuntimeException {
 		repo = hgRepo;
 		rootRevIndex = revisionIndex;
 		// even if tip moves, we still answer correctly for those isCandidate()
@@ -51,7 +53,7 @@
 		descendants = new BitSet(tipRevIndex - rootRevIndex + 1);
 	}
 	
-	public void build() throws HgInvalidControlFileException {
+	public void build() throws HgRuntimeException {
 		final BitSet result = descendants;
 		result.set(0);
 		if (rootRevIndex == tipRevIndex) {
@@ -108,4 +110,21 @@
 		assert ix < descendants.size();
 		return descendants.get(ix);
 	}
+
+	public RevisionSet asRevisionSet() {
+		if (revset == null) {
+			final ArrayList<Nodeid> revisions = new ArrayList<Nodeid>(descendants.cardinality());
+			repo.getChangelog().indexWalk(rootRevIndex, tipRevIndex, new HgChangelog.RevisionInspector() {
+
+				public void next(int revisionIndex, Nodeid revision, int linkedRevisionIndex) throws HgRuntimeException {
+					if (isDescendant(revisionIndex)) {
+						revisions.add(revision);
+					}
+				}
+			});
+			assert revisions.size() == descendants.cardinality();
+			revset = new RevisionSet(revisions);
+		}
+		return revset;
+	}
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/RevisionLookup.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.repo.HgRepository.BAD_REVISION;
+
+import java.util.Arrays;
+
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgInvalidRevisionException;
+import org.tmatesoft.hg.repo.HgRevisionMap;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+
+/**
+ * Lite alternative to {@link HgRevisionMap}, to speed up nodeid to index conversion without consuming too much memory.
+ * E.g. for a 100k revisions, {@link HgRevisionMap} consumes 3 * (N * sizeof(int)) for indexes plus 48 bytes per 
+ * Nodeid instance, total (12+48)*N = 6 mb of memory. {@link RevisionLookup} instead keeps only Nodeid hashes, (N * sizeof(int) = 400 kb),
+ * but is slower in lookup, O(N/2) to find a potential match plus a disk read operation (or a few, in the unlikely case of hash collisions).
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class RevisionLookup implements RevlogStream.Inspector {
+	
+	private final RevlogStream content;
+	private int[] nodeidHashes;
+
+	public RevisionLookup(RevlogStream stream) {
+		assert stream != null;
+		content = stream;
+	}
+	
+	public static RevisionLookup createFor(RevlogStream stream) throws HgRuntimeException {
+		RevisionLookup rv = new RevisionLookup(stream);
+		int revCount = stream.revisionCount();
+		rv.prepare(revCount);
+		if (revCount > 0) {
+			stream.iterate(0, revCount - 1, false, rv);
+		}
+		return rv;
+	}
+
+	public void prepare(int count) {
+		nodeidHashes = new int[count];
+		Arrays.fill(nodeidHashes, BAD_REVISION);
+	}
+	public void next(int index, byte[] nodeid) {
+		nodeidHashes[index] = Nodeid.hashCode(nodeid);
+	}
+	public void next(int index, Nodeid nodeid) {
+		nodeidHashes[index] = nodeid.hashCode();
+	}
+	public int findIndex(Nodeid nodeid) throws HgInvalidControlFileException, HgInvalidRevisionException {
+		final int hash = nodeid.hashCode();
+		for (int i = 0; i < nodeidHashes.length; i++) {
+			if (nodeidHashes[i] == hash) {
+				byte[] nodeidAtI = content.nodeid(i);
+				if (nodeid.equalsTo(nodeidAtI)) {
+					return i;
+				}
+			}
+			// else: false match (only 4 head bytes matched), continue loop
+		}
+		return BAD_REVISION;
+	}
+
+	public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
+		next(revisionIndex, nodeid);
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/RevisionSet.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgParentChildMap;
+import org.tmatesoft.hg.repo.HgRepository;
+
+/**
+ * Unmodifiable collection of revisions with handy set operations
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class RevisionSet implements Iterable<Nodeid> {
+	
+	private final Set<Nodeid> elements;
+	
+	public RevisionSet(Nodeid... revisions) {
+		this(revisions == null ? null : Arrays.asList(revisions));
+	}
+	
+	public RevisionSet(Collection<Nodeid> revisions) {
+		this(revisions == null ? new HashSet<Nodeid>() : new HashSet<Nodeid>(revisions));
+	}
+	
+	private RevisionSet(HashSet<Nodeid> revisions) {
+		if (revisions.isEmpty()) {
+			elements = Collections.<Nodeid>emptySet();
+		} else {
+			elements = revisions;
+		}
+	}
+
+	/**
+	 * elements of the set with no parents or parents not from the same set 
+	 */
+	public RevisionSet roots(HgParentChildMap<HgChangelog> ph) {
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		for (Nodeid n : elements) {
+			assert ph.knownNode(n);
+			Nodeid p1 = ph.firstParent(n);
+			if (p1 != null && elements.contains(p1)) {
+				copy.remove(n);
+				continue;
+			}
+			Nodeid p2 = ph.secondParent(n);
+			if (p2 != null && elements.contains(p2)) {
+				copy.remove(n);
+				continue;
+			}
+		}
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+	
+	/**
+	 * Same as {@link #roots(HgParentChildMap)}, but doesn't require a parent-child map
+	 */
+	public RevisionSet roots(HgRepository repo) {
+		// TODO introduce parent access interface, use it here, provide implementations 
+		// that delegate to HgParentChildMap or HgRepository
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		final HgChangelog clog = repo.getChangelog();
+		byte[] parent1 = new byte[Nodeid.SIZE], parent2 = new byte[Nodeid.SIZE];
+		int[] parentRevs = new int[2];
+		for (Nodeid n : elements) {
+			assert clog.isKnown(n);
+			clog.parents(clog.getRevisionIndex(n), parentRevs, parent1, parent2);
+			if (parentRevs[0] != NO_REVISION && elements.contains(new Nodeid(parent1, false))) {
+				copy.remove(n);
+				continue;
+			}
+			if (parentRevs[1] != NO_REVISION && elements.contains(new Nodeid(parent2, false))) {
+				copy.remove(n);
+				continue;
+			}
+		}
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+	
+	/**
+	 * elements of the set that has no children in this set 
+	 */
+	public RevisionSet heads(HgParentChildMap<HgChangelog> ph) {
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		// can't do copy.removeAll(ph.childrenOf(asList())); as actual heads are indeed children of some other node
+		for (Nodeid n : elements) {
+			assert ph.knownNode(n);
+			Nodeid p1 = ph.firstParent(n);
+			Nodeid p2 = ph.secondParent(n);
+			if (p1 != null && elements.contains(p1)) {
+				copy.remove(p1);
+			}
+			if (p2 != null && elements.contains(p2)) {
+				copy.remove(p2);
+			}
+		}
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+
+	/**
+	 * Any ancestor of an element from the supplied child set found in this one. 
+	 * Elements of the supplied child set are not part of return value.  
+	 */
+	public RevisionSet ancestors(RevisionSet children, HgParentChildMap<HgChangelog> parentHelper) {
+		if (isEmpty()) {
+			return this;
+		}
+		if (children.isEmpty()) {
+			return children;
+		}
+		RevisionSet chRoots = children.roots(parentHelper);
+		HashSet<Nodeid> ancestors = new HashSet<Nodeid>();
+		Set<Nodeid> childrenToCheck = chRoots.elements;
+		while (!childrenToCheck.isEmpty()) {
+			HashSet<Nodeid> nextRound = new HashSet<Nodeid>();
+			for (Nodeid n : childrenToCheck) {
+				Nodeid p1 = parentHelper.firstParent(n);
+				Nodeid p2 = parentHelper.secondParent(n);
+				if (p1 != null && elements.contains(p1)) {
+					nextRound.add(p1);
+				}
+				if (p2 != null && elements.contains(p2)) {
+					nextRound.add(p2);
+				}
+			}
+			ancestors.addAll(nextRound);
+			childrenToCheck = nextRound;
+		} 
+		return new RevisionSet(ancestors);
+	}
+	
+	/**
+	 * Revisions that are both direct and indirect children of elements of this revision set
+	 * as known in supplied parent-child map
+	 */
+	public RevisionSet children(HgParentChildMap<HgChangelog> parentHelper) {
+		if (isEmpty()) {
+			return this;
+		}
+		List<Nodeid> children = parentHelper.childrenOf(elements);
+		return new RevisionSet(new HashSet<Nodeid>(children));
+	}
+
+	public RevisionSet intersect(RevisionSet other) {
+		if (isEmpty()) {
+			return this;
+		}
+		if (other.isEmpty()) {
+			return other;
+		}
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		copy.retainAll(other.elements);
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+	
+	public RevisionSet subtract(RevisionSet other) {
+		if (isEmpty() || other.isEmpty()) {
+			return this;
+		}
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		copy.removeAll(other.elements);
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+
+	public RevisionSet union(RevisionSet other) {
+		if (isEmpty()) {
+			return other;
+		}
+		if (other.isEmpty()) {
+			return this;
+		}
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		copy.addAll(other.elements);
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+
+	/**
+	 * A ^ B := (A\B).union(B\A)
+	 * A ^ B := A.union(B) \ A.intersect(B)
+	 */
+	public RevisionSet symmetricDifference(RevisionSet other) {
+		if (isEmpty()) {
+			return this;
+		}
+		if (other.isEmpty()) {
+			return other;
+		}
+		HashSet<Nodeid> copyA = new HashSet<Nodeid>(elements);
+		HashSet<Nodeid> copyB = new HashSet<Nodeid>(other.elements);
+		copyA.removeAll(other.elements);
+		copyB.removeAll(elements);
+		copyA.addAll(copyB);
+		return new RevisionSet(copyA);
+	}
+
+	public boolean isEmpty() {
+		return elements.isEmpty();
+	}
+
+	public int size() {
+		return elements.size();
+	}
+
+	public List<Nodeid> asList() {
+		return new ArrayList<Nodeid>(elements);
+	}
+	
+	public Iterator<Nodeid> iterator() {
+		return elements.iterator();
+	}
+	
+	@Override
+	public String toString() {
+		StringBuilder sb = new StringBuilder();
+		sb.append('<');
+		if (!isEmpty()) {
+			sb.append(elements.size());
+			sb.append(':');
+		}
+		for (Nodeid n : elements) {
+			sb.append(n.shortNotation());
+			sb.append(',');
+		}
+		if (sb.length() > 1) {
+			sb.setCharAt(sb.length() - 1, '>');
+		} else {
+			sb.append('>');
+		}
+		return sb.toString();
+	}
+	
+	@Override
+	public boolean equals(Object obj) {
+		if (false == obj instanceof RevisionSet) {
+			return false;
+		}
+		return elements.equals(((RevisionSet) obj).elements);
+	}
+	
+	@Override
+	public int hashCode() {
+		return elements.hashCode();
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/RevlogChangeMonitor.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.File;
+import java.util.WeakHashMap;
+
+/**
+ * Detect changes to revlog files. Not a general file change monitor, as we utilize the fact that revlogs are append-only (and even in the case
+ * of stripped-off tail revisions, e.g. with mq, the detection approach is still valid).
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class RevlogChangeMonitor {
+	
+	private final WeakHashMap<File, Long> lastKnownSize;
+	private final WeakHashMap<File, Long> lastKnownTime;
+	private final File soleFile;
+	private long soleFileSize = -1;
+	private long soleFileTime = -1;
+	
+	// use a single instance for multiple files. TODO [1.2] repository/session context shall provide
+	// alternative (configurable) implementations, so that Java7 users may supply better one
+	public RevlogChangeMonitor() {
+		lastKnownSize = new WeakHashMap<File, Long>();
+		lastKnownTime= new WeakHashMap<File, Long>();
+		soleFile = null;
+	}
+	
+	public RevlogChangeMonitor(File f) {
+		assert f != null;
+		lastKnownSize = lastKnownTime = null;
+		soleFile = f;
+	}
+	
+	public void touch(File f) {
+		assert f != null;
+		if (lastKnownSize == null) {
+			assert f == soleFile;
+			soleFileSize = f.length();
+			soleFileTime = f.lastModified();
+		} else {
+			lastKnownSize.put(f, f.length());
+			lastKnownTime.put(f, f.lastModified());
+		}
+	}
+	
+	public boolean hasChanged(File f) {
+		assert f != null;
+		if (lastKnownSize == null) {
+			assert f == soleFile;
+			return soleFileSize != f.length() || soleFileTime != f.lastModified();
+		} else {
+			Long lastSize = lastKnownSize.get(f);
+			Long lastTime = lastKnownTime.get(f);
+			if (lastSize == null || lastTime == null) {
+				return true;
+			}
+			return f.length() != lastSize || f.lastModified() != lastTime;
+		}
+	}
+}
--- a/src/org/tmatesoft/hg/internal/RevlogCompressor.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RevlogCompressor.java	Wed Jul 10 11:48:55 2013 +0200
@@ -16,10 +16,11 @@
  */
 package org.tmatesoft.hg.internal;
 
-import java.io.IOException;
 import java.util.zip.Deflater;
 
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.SessionContext;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.LogFacility.Severity;
 
 /**
@@ -44,7 +45,7 @@
 	}
 	
 	// out stream is not closed!
-	public int writeCompressedData(DataSerializer out) throws IOException {
+	public int writeCompressedData(DataSerializer out) throws HgIOException, HgRuntimeException {
 		zip.reset();
 		DeflaterDataSerializer dds = new DeflaterDataSerializer(out, zip, sourceData.serializeLength());
 		sourceData.serialize(dds);
@@ -52,7 +53,7 @@
 		return zip.getTotalOut();
 	}
 
-	public int getCompressedLength() {
+	public int getCompressedLength() throws HgRuntimeException {
 		if (compressedLen != -1) {
 			return compressedLen;
 		}
@@ -61,7 +62,7 @@
 			compressedLen = writeCompressedData(counter);
 			assert counter.totalWritten == compressedLen;
 	        return compressedLen;
-		} catch (IOException ex) {
+		} catch (HgIOException ex) {
 			// can't happen provided we write to our stream that does nothing but byte counting
 			ctx.getLog().dump(getClass(), Severity.Error, ex, "Failed estimating compressed length of revlog data");
 			return counter.totalWritten; // use best known value so far
@@ -71,15 +72,15 @@
 	private static class Counter extends DataSerializer {
 		public int totalWritten = 0;
 
-		public void writeByte(byte... values) throws IOException {
+		public void writeByte(byte... values) throws HgIOException {
 			totalWritten += values.length;
 		}
 
-		public void writeInt(int... values) throws IOException {
+		public void writeInt(int... values) throws HgIOException {
 			totalWritten += 4 * values.length;
 		}
 
-		public void write(byte[] data, int offset, int length) throws IOException {
+		public void write(byte[] data, int offset, int length) throws HgIOException {
 			totalWritten += length;
 		}
 	}
--- a/src/org/tmatesoft/hg/internal/RevlogStream.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RevlogStream.java	Wed Jul 10 11:48:55 2013 +0200
@@ -17,19 +17,28 @@
 package org.tmatesoft.hg.internal;
 
 import static org.tmatesoft.hg.repo.HgRepository.BAD_REVISION;
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
 import static org.tmatesoft.hg.repo.HgRepository.TIP;
 import static org.tmatesoft.hg.internal.Internals.REVLOGV1_RECORD_SIZE;
 
 import java.io.File;
 import java.io.IOException;
+import java.lang.ref.Reference;
+import java.lang.ref.ReferenceQueue;
+import java.lang.ref.SoftReference;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.zip.Inflater;
 
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgInternals;
 import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.repo.HgInvalidRevisionException;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Adaptable;
 
 
@@ -45,6 +54,8 @@
  */
 public class RevlogStream {
 
+	static final int INLINEDATA = 1 << 16;
+
 	/*
 	 * makes sense for index with inline data only - actual offset of the record in the .i file (record entry + revision * record size))
 	 * 
@@ -58,30 +69,54 @@
 	private boolean inline = false;
 	private final File indexFile;
 	private File dataFile;
-	private final DataAccessProvider dataAccess;
+	private final Internals repo;
+	// keeps last complete revision we've read. Note, this cached revision doesn't help
+	// for subsequent #iterate() calls with the same revision (Inspector needs more data than 
+	// we currently cache here, perhaps, we shall cache everything it wants to cover same 
+	// revision case as well). Now this helps when second #iterate() call is for a revision greater
+	// than one from the first call, and both revisions got same base rev. It's often the case when
+	// parents/children are analyzed.
+	private SoftReference<CachedRevision> lastRevisionRead;
+	private final ReferenceQueue<CachedRevision> lastRevisionQueue = new ReferenceQueue<CachedRevision>();
+	//
+	private final RevlogChangeMonitor changeTracker;
+	private List<Observer> observers;
+	private boolean shallDropDerivedCaches = false;
 
-	// if we need anything else from HgRepo, might replace DAP parameter with HgRepo and query it for DAP.
-	public RevlogStream(DataAccessProvider dap, File indexFile) {
-		this.dataAccess = dap;
+	public RevlogStream(Internals hgRepo, File indexFile) {
+		repo = hgRepo;
 		this.indexFile = indexFile;
+		changeTracker = repo.getRevlogTracker(indexFile);
+	}
+	
+	public boolean exists() {
+		return indexFile.exists();
 	}
 
-	/*package*/ DataAccess getIndexStream() {
-		// FIXME [1.1] must supply a hint that I'll need really few bytes of data (perhaps, at some offset) 
-		// to avoid mmap files when only few bytes are to be read (i.e. #dataLength())
-		return dataAccess.createReader(indexFile);
+	/**
+	 * @param shortRead pass <code>true</code> to indicate intention to read few revisions only (as opposed to reading most of/complete revlog)
+	 * @return never <code>null</code>, empty {@link DataAccess} if no stream is available
+	 */
+	/*package*/ DataAccess getIndexStream(boolean shortRead) {
+		// shortRead hint helps to avoid mmap files when only 
+		// few bytes are to be read (i.e. #dataLength())
+		DataAccessProvider dataAccess = repo.getDataAccess();
+		return dataAccess.createReader(indexFile, shortRead);
 	}
 
 	/*package*/ DataAccess getDataStream() {
-		return dataAccess.createReader(getDataFile());
+		DataAccessProvider dataAccess = repo.getDataAccess();
+		return dataAccess.createReader(getDataFile(), false);
 	}
 	
-	/*package*/ DataSerializer getIndexStreamWriter() {
-		return dataAccess.createWriter(indexFile, true);
+	/*package*/ DataSerializer getIndexStreamWriter(Transaction tr) throws HgIOException {
+		DataAccessProvider dataAccess = repo.getDataAccess();
+		return dataAccess.createWriter(tr, indexFile, true);
 	}
 	
-	/*package*/ DataSerializer getDataStreamWriter() {
-		return dataAccess.createWriter(getDataFile(), true);
+	/*package*/ DataSerializer getDataStreamWriter(Transaction tr) throws HgIOException {
+		DataAccessProvider dataAccess = repo.getDataAccess();
+		return dataAccess.createWriter(tr, getDataFile(), true);
 	}
 	
 	/**
@@ -118,12 +153,12 @@
 		return inline ? indexFile.getPath() : getDataFile().getPath();
 	}
 
-	public boolean isInlineData() {
+	public boolean isInlineData() throws HgInvalidControlFileException {
 		initOutline();
 		return inline;
 	}
 	
-	public int revisionCount() {
+	public int revisionCount() throws HgInvalidControlFileException {
 		initOutline();
 		return baseRevisions.length;
 	}
@@ -136,7 +171,7 @@
 		// XXX in fact, use of iterate() instead of this implementation may be quite reasonable.
 		//
 		revisionIndex = checkRevisionIndex(revisionIndex);
-		DataAccess daIndex = getIndexStream();
+		DataAccess daIndex = getIndexStream(true);
 		try {
 			int recordOffset = getIndexOffsetInt(revisionIndex);
 			daIndex.seek(recordOffset + 12); // 6+2+4
@@ -157,7 +192,7 @@
 	 */
 	public byte[] nodeid(int revisionIndex) throws HgInvalidControlFileException, HgInvalidRevisionException {
 		revisionIndex = checkRevisionIndex(revisionIndex);
-		DataAccess daIndex = getIndexStream();
+		DataAccess daIndex = getIndexStream(true);
 		try {
 			int recordOffset = getIndexOffsetInt(revisionIndex);
 			daIndex.seek(recordOffset + 32);
@@ -179,7 +214,7 @@
 	 */
 	public int linkRevision(int revisionIndex) throws HgInvalidControlFileException, HgInvalidRevisionException {
 		revisionIndex = checkRevisionIndex(revisionIndex);
-		DataAccess daIndex = getIndexStream();
+		DataAccess daIndex = getIndexStream(true);
 		try {
 			int recordOffset = getIndexOffsetInt(revisionIndex);
 			daIndex.seek(recordOffset + 20);
@@ -199,11 +234,38 @@
 	 * @throws HgInvalidRevisionException if revisionIndex argument doesn't represent a valid record in the revlog
 	 */
 	public int baseRevision(int revisionIndex) throws HgInvalidControlFileException, HgInvalidRevisionException {
-		initOutline();
 		revisionIndex = checkRevisionIndex(revisionIndex);
 		return getBaseRevision(revisionIndex);
 	}
 	
+	/**
+	 * Read indexes of parent revisions
+	 * @param revisionIndex index of child revision
+	 * @param parents array to hold return value, length >= 2
+	 * @return value of <code>parents</code> parameter for convenience
+	 * @throws HgInvalidControlFileException if attempt to read index file failed
+	 * @throws HgInvalidRevisionException if revisionIndex argument doesn't represent a valid record in the revlog
+	 */
+	public int[] parents(int revisionIndex, int[] parents) throws HgInvalidControlFileException, HgInvalidRevisionException {
+		assert parents.length > 1;
+		revisionIndex = checkRevisionIndex(revisionIndex);
+		DataAccess daIndex = getIndexStream(true);
+		try {
+			int recordOffset = getIndexOffsetInt(revisionIndex);
+			daIndex.seek(recordOffset + 24);
+			int p1 = daIndex.readInt();
+			int p2 = daIndex.readInt();
+			// although NO_REVISION == -1, it doesn't hurt to ensure this
+			parents[0] = p1 == -1 ? NO_REVISION : p1;
+			parents[1] = p2 == -1 ? NO_REVISION : p2;
+			return parents;
+		} catch (IOException ex) {
+			throw new HgInvalidControlFileException("Parents lookup failed", ex, indexFile).setRevisionIndex(revisionIndex);
+		} finally {
+			daIndex.done();
+		}
+	}
+	
 	// Perhaps, RevlogStream should be limited to use of plain int revisions for access,
 	// while Nodeids should be kept on the level up, in Revlog. Guess, Revlog better keep
 	// map of nodeids, and once this comes true, we may get rid of this method.
@@ -215,7 +277,7 @@
 	public int findRevisionIndex(Nodeid nodeid) throws HgInvalidControlFileException {
 		// XXX this one may be implemented with iterate() once there's mechanism to stop iterations
 		final int indexSize = revisionCount();
-		DataAccess daIndex = getIndexStream();
+		DataAccess daIndex = getIndexStream(false);
 		try {
 			byte[] nodeidBuf = new byte[20];
 			for (int i = 0; i < indexSize; i++) {
@@ -240,11 +302,11 @@
 	 * @return value suitable for the corresponding field in the new revision's header, not physical offset in the file 
 	 * (which is different in case of inline revlogs)
 	 */
-	public long newEntryOffset() {
+	public long newEntryOffset() throws HgInvalidControlFileException {
 		if (revisionCount() == 0) {
 			return 0;
 		}
-		DataAccess daIndex = getIndexStream();
+		DataAccess daIndex = getIndexStream(true);
 		int lastRev = revisionCount() - 1;
 		try {
 			int recordOffset = getIndexOffsetInt(lastRev);
@@ -260,11 +322,12 @@
 		}
 	}
 
-
-
-	// should be possible to use TIP, ALL, or -1, -2, -n notation of Hg
-	// ? boolean needsNodeid
-	public void iterate(int start, int end, boolean needData, Inspector inspector) throws HgInvalidRevisionException, HgInvalidControlFileException {
+	/**
+	 * should be possible to use TIP, ALL, or -1, -2, -n notation of Hg
+	 * ? boolean needsNodeid
+	 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
+	 */
+	public void iterate(int start, int end, boolean needData, Inspector inspector) throws HgRuntimeException {
 		initOutline();
 		final int indexSize = revisionCount();
 		if (indexSize == 0) {
@@ -279,16 +342,15 @@
 		HgInternals.checkRevlogRange(start, end, indexSize-1);
 		// XXX may cache [start .. end] from index with a single read (pre-read)
 		
-		ReaderN1 r = new ReaderN1(needData, inspector, dataAccess.shallMergePatches());
+		ReaderN1 r = new ReaderN1(needData, inspector, repo.shallMergePatches());
 		try {
-			r.start(end - start + 1);
+			r.start(end - start + 1, getLastRevisionRead());
 			r.range(start, end);
 		} catch (IOException ex) {
 			throw new HgInvalidControlFileException(String.format("Failed reading [%d..%d]", start, end), ex, indexFile);
-		} catch (HgInvalidControlFileException ex) {
-			throw ex;
 		} finally {
-			r.finish();
+			CachedRevision cr = r.finish();
+			setLastRevisionRead(cr);
 		}
 	}
 	
@@ -298,8 +360,9 @@
 	 * @param sortedRevisions revisions to walk, in ascending order.
 	 * @param needData whether inspector needs access to header only
 	 * @param inspector callback to process entries
+	 * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
 	 */
-	public void iterate(int[] sortedRevisions, boolean needData, Inspector inspector) throws HgInvalidRevisionException, HgInvalidControlFileException /*REVISIT - too general exception*/ {
+	public void iterate(int[] sortedRevisions, boolean needData, Inspector inspector) throws HgRuntimeException {
 		final int indexSize = revisionCount();
 		if (indexSize == 0 || sortedRevisions.length == 0) {
 			return;
@@ -311,9 +374,9 @@
 			throw new HgInvalidRevisionException(String.format("Can't iterate [%d, %d] in range [0..%d]", sortedRevisions[0], sortedRevisions[sortedRevisions.length - 1], indexSize), null, sortedRevisions[sortedRevisions.length - 1]);
 		}
 
-		ReaderN1 r = new ReaderN1(needData, inspector, dataAccess.shallMergePatches());
+		ReaderN1 r = new ReaderN1(needData, inspector, repo.shallMergePatches());
 		try {
-			r.start(sortedRevisions.length);
+			r.start(sortedRevisions.length, getLastRevisionRead());
 			for (int i = 0; i < sortedRevisions.length; ) {
 				int x = i;
 				i++;
@@ -331,16 +394,47 @@
 			}
 		} catch (IOException ex) {
 			final int c = sortedRevisions.length;
-			throw new HgInvalidControlFileException(String.format("Failed reading %d revisions in [%d; %d]",c, sortedRevisions[0], sortedRevisions[c-1]), ex, indexFile);
-		} catch (HgInvalidControlFileException ex) {
-			// TODO post-1.0 fill HgRuntimeException with appropriate file (either index or data, depending on error source)
-			throw ex;
+			throw new HgInvalidControlFileException(String.format("Failed reading %d revisions in [%d; %d]", c, sortedRevisions[0], sortedRevisions[c-1]), ex, indexFile);
 		} finally {
-			r.finish();
+			CachedRevision cr = r.finish();
+			setLastRevisionRead(cr);
 		}
 	}
+	
+	public void attach(Observer listener) {
+		assert listener != null;
+		if (observers == null) {
+			observers = new ArrayList<Observer>(3);
+		}
+		observers.add(listener);
+	}
+	
+	public void detach(Observer listener) {
+		assert listener != null;
+		if (observers != null) {
+			observers.remove(listener);
+		}
+	}
+	
+	/*
+	 * Note, this method IS NOT a replacement for Observer. It has to be invoked when the validity of any
+	 * cache built using revision information is in doubt, but it provides reasonable value only till the
+	 * first initOutline() to be invoked, i.e. in [change..revlog read operation] time frame. If your code
+	 * accesses cached information without any prior explicit read operation, you shall consult this method
+	 * if next read operation would in fact bring changed content.
+	 * Observer is needed in addition to this method because any revlog read operation (e.g. Revlog#getLastRevision)
+	 * would clear shallDropDerivedCaches(), and if code relies only on this method to clear its derived caches,
+	 * it would miss the update.
+	 */
+	public boolean shallDropDerivedCaches() {
+		if (shallDropDerivedCaches) {
+			return shallDropDerivedCaches;
+		}
+		return shallDropDerivedCaches = changeTracker.hasChanged(indexFile);
+	}
 
 	void revisionAdded(int revisionIndex, Nodeid revision, int baseRevisionIndex, long revisionOffset) throws HgInvalidControlFileException {
+		shallDropDerivedCaches = true;
 		if (!outlineCached()) {
 			return;
 		}
@@ -353,6 +447,10 @@
 		}
 		assert revision != null;
 		assert !revision.isNull();
+		// next effort doesn't seem to be of any value at least in case of regular commit
+		// as the next call to #initOutline would recognize the file change and reload complete revlog anyway
+		// OTOH, there might be transaction strategy that doesn't update the file until its completion,
+		// while it's handy to know new revisions meanwhile.
 		int[] baseRevisionsCopy = new int[baseRevisions.length + 1];
 		System.arraycopy(baseRevisions, 0, baseRevisionsCopy, 0, baseRevisions.length);
 		baseRevisionsCopy[baseRevisions.length] = baseRevisionIndex;
@@ -379,7 +477,7 @@
 		return inline ? indexRecordOffset[revisionIndex] : revisionIndex * REVLOGV1_RECORD_SIZE;
 	}
 	
-	private int checkRevisionIndex(int revisionIndex) throws HgInvalidRevisionException {
+	private int checkRevisionIndex(int revisionIndex) throws HgInvalidControlFileException, HgInvalidRevisionException {
 		final int last = revisionCount() - 1;
 		if (revisionIndex == TIP) {
 			revisionIndex = last;
@@ -406,11 +504,21 @@
 		return o + REVLOGV1_RECORD_SIZE * recordIndex;
 	}
 
+	// every access to the index revlog happens only after this method has been invoked.
 	private void initOutline() throws HgInvalidControlFileException {
+		// true to send out 'drop-your-caches' event after outline has been built
+		final boolean notifyReload;
 		if (outlineCached()) {
-			return;
+			if (!changeTracker.hasChanged(indexFile)) {
+				return;
+			}
+			notifyReload = true;
+		} else {
+			// no cached outline - initial read, do not send any reload/invalidate notifications
+			notifyReload = false;
 		}
-		DataAccess da = getIndexStream();
+		changeTracker.touch(indexFile);
+		DataAccess da = getIndexStream(false);
 		try {
 			if (da.isEmpty()) {
 				// do not fail with exception if stream is empty, it's likely intentional
@@ -421,7 +529,6 @@
 			}
 			int versionField = da.readInt();
 			da.readInt(); // just to skip next 4 bytes of offset + flags
-			final int INLINEDATA = 1 << 16;
 			inline = (versionField & INLINEDATA) != 0;
 			IntVector resBases, resOffsets = null;
 			int entryCountGuess = Internals.ltoi(da.longLength() / REVLOGV1_RECORD_SIZE);
@@ -468,9 +575,46 @@
 			throw new HgInvalidControlFileException("Failed to analyze revlog index", ex, indexFile);
 		} finally {
 			da.done();
+			if (notifyReload && observers != null) {
+				for (Observer l : observers) {
+					l.reloaded(this);
+				}
+				shallDropDerivedCaches = false;
+			}
 		}
 	}
 	
+	private CachedRevision getLastRevisionRead() {
+		return lastRevisionRead == null ? null : lastRevisionRead.get();
+	}
+	
+	private void setLastRevisionRead(CachedRevision cr) {
+		// done() for lastRevisionRead.userData has been called by ReaderN1 once
+		// it noticed unsuitable DataAccess.
+		// Now, done() for any CachedRevision cleared by GC:
+		for (Reference<? extends CachedRevision> r; (r = lastRevisionQueue.poll()) != null;) {
+			CachedRevision toClean = r.get();
+			if (toClean != null && toClean.userData != null) {
+				toClean.userData.done();
+			}
+		}
+		if (cr != null) {
+			lastRevisionRead = new SoftReference<CachedRevision>(cr, lastRevisionQueue);
+		} else {
+			lastRevisionRead = null;
+		}
+	}
+	
+	final static class CachedRevision {
+		final int revision;
+		final DataAccess userData;
+		
+		public CachedRevision(int lastRevisionRead, DataAccess lastUserData) {
+			revision = lastRevisionRead;
+			userData = lastUserData;
+		}
+	}
+
 	/**
 	 * operation with single file open/close and multiple diverse reads.
 	 * XXX initOutline might need similar extraction to keep N1 format knowledge  
@@ -488,7 +632,8 @@
 		// next are transient values, for range() use only
 		private final Inflater inflater = new Inflater();
 		// can share buffer between instances of InflaterDataAccess as I never read any two of them in parallel
-		private final byte[] inflaterBuffer = new byte[10 * 1024]; // TODO consider using DAP.DEFAULT_FILE_BUFFER
+		private final byte[] inflaterBuffer = new byte[10 * 1024]; // TODO [post-1.1] consider using DAP.DEFAULT_FILE_BUFFER
+		private final ByteBuffer inflaterOutBuffer = ByteBuffer.allocate(inflaterBuffer.length * 2);
 		private final byte[] nodeidBuf = new byte[20];
 		// revlog record fields
 		private long offset;
@@ -500,8 +645,6 @@
 		private int linkRevision;
 		private int parent1Revision;
 		private int parent2Revision;
-		// next are to track two major bottlenecks - patch application and actual time spent in inspector 
-//		private long applyTime, inspectorTime; // TIMING
 		
 		public ReaderN1(boolean dataRequested, Inspector insp, boolean usePatchMerge) {
 			assert insp != null;
@@ -510,8 +653,8 @@
 			mergePatches = usePatchMerge;
 		}
 		
-		public void start(int totalWork) {
-			daIndex = getIndexStream();
+		public void start(int totalWork, CachedRevision cachedRevision) {
+			daIndex = getIndexStream(totalWork <= 10);
 			if (needData && !inline) {
 				daData = getDataStream();
 			}
@@ -520,13 +663,27 @@
 				cb = new Lifecycle.BasicCallback();
 				lifecycleListener.start(totalWork, cb, cb);
 			}
-//			applyTime = inspectorTime = 0; // TIMING
+			if (needData && cachedRevision != null) {
+				lastUserData = cachedRevision.userData;
+				lastRevisionRead = cachedRevision.revision;
+				assert lastUserData != null;
+			}
 		}
 
 		// invoked only once per instance
-		public void finish() {
+		public CachedRevision finish() {
+			CachedRevision rv = null;
 			if (lastUserData != null) {
-				lastUserData.done();
+				if (lastUserData instanceof ByteArrayDataAccess) {
+					// it's safe to cache only in-memory revision texts,
+					// if lastUserData is merely a filter over file stream,
+					// we'd need to keep file open, and this is bad.
+					// XXX perhaps, wrap any DataAccess.byteArray into
+					// ByteArrayDataAccess?
+					rv = new CachedRevision(lastRevisionRead, lastUserData);
+				} else {
+					lastUserData.done();
+				}
 				lastUserData = null;
 			}
 			if (lifecycleListener != null) {
@@ -540,7 +697,7 @@
 				daData.done();
 				daData = null;
 			}
-//			System.out.printf("applyTime:%d ms, inspectorTime: %d ms\n", applyTime, inspectorTime); // TIMING
+			return rv;
 		}
 		
 		private void readHeaderRecord(int i) throws IOException {
@@ -586,7 +743,7 @@
 				final byte firstByte = streamDataAccess.readByte();
 				if (firstByte == 0x78 /* 'x' */) {
 					inflater.reset();
-					userDataAccess = new InflaterDataAccess(streamDataAccess, streamOffset, compressedLen, isPatch(i) ? -1 : actualLen, inflater, inflaterBuffer);
+					userDataAccess = new InflaterDataAccess(streamDataAccess, streamOffset, compressedLen, isPatch(i) ? -1 : actualLen, inflater, inflaterBuffer, inflaterOutBuffer);
 				} else if (firstByte == 0x75 /* 'u' */) {
 					userDataAccess = new FilterDataAccess(streamDataAccess, streamOffset+1, compressedLen-1);
 				} else {
@@ -601,7 +758,7 @@
 		}
 
 		// may be invoked few times per instance life
-		public boolean range(int start, int end) throws IOException {
+		public boolean range(int start, int end) throws IOException, HgRuntimeException {
 			int i;
 			// it (i.e. replace with i >= start)
 			if (needData && (i = getBaseRevision(start)) < start) {
@@ -713,10 +870,27 @@
 
 	
 	public interface Inspector {
-		// XXX boolean retVal to indicate whether to continue?
-		// TODO specify nodeid and data length, and reuse policy (i.e. if revlog stream doesn't reuse nodeid[] for each call)
-		// implementers shall not invoke DataAccess.done(), it's accomplished by #iterate at appropraite moment
-		void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[/*20*/] nodeid, DataAccess data);
+		/**
+		 * XXX boolean retVal to indicate whether to continue?
+		 * 
+		 * Implementers shall not invoke DataAccess.done(), it's accomplished by #iterate at appropriate moment
+		 * 
+		 * @param revisionIndex absolute index of revision in revlog being iterated
+		 * @param actualLen length of the user data at this revision
+		 * @param baseRevision last revision known to hold complete revision (others hold patches). 
+		 *        if baseRevision != revisionIndex, data for this revision is a result of a sequence of patches
+		 * @param linkRevision index of corresponding changeset revision
+		 * @param parent1Revision index of first parent revision in this revlog, or {@link HgRepository#NO_REVISION}
+		 * @param parent2Revision index of second parent revision in this revlog, or {@link HgRepository#NO_REVISION}
+		 * @param nodeid 20-byte buffer, shared between invocations 
+		 * @param data access to revision content of actualLen size, or <code>null</code> if no data has been requested with 
+		 *        {@link RevlogStream#iterate(int[], boolean, Inspector)}
+		 */
+		void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[/*20*/] nodeid, DataAccess data) throws HgRuntimeException;
 	}
 
+	public interface Observer {
+		// notify observer of invalidate/reload event in the stream
+		public void reloaded(RevlogStream src);
+	}
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/RevlogStreamFactory.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.File;
+import java.lang.ref.SoftReference;
+import java.util.HashMap;
+
+import org.tmatesoft.hg.util.Path;
+
+/**
+ * Factory to create {@link RevlogStream RevlogStreams}, cache-capable.
+ *   
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class RevlogStreamFactory {
+	
+	private final Internals repo;
+	private final HashMap<Path, SoftReference<RevlogStream>> streamsCache;
+
+
+	public RevlogStreamFactory(Internals hgRepo, boolean shallCacheRevlogs) {
+		repo = hgRepo;
+		if (shallCacheRevlogs) {
+			streamsCache = new HashMap<Path, SoftReference<RevlogStream>>();
+		} else {
+			streamsCache = null;
+		}
+	}
+	
+	/**
+	 * Creates a stream for the specified file; the stream is not cached.
+	 */
+	/*package-local*/ RevlogStream create(File f) {
+		return new RevlogStream(repo, f);
+	}
+
+	/**
+	 * Perhaps, should be separate interface, like ContentLookup
+	 * @param path - normalized file name
+	 * @return <code>null</code> if path doesn't resolve to an existing file
+	 */
+	/*package-local*/ RevlogStream getStoreFile(Path path, boolean onlyIfExists) {
+		final SoftReference<RevlogStream> ref = shallCacheRevlogs() ? streamsCache.get(path) : null;
+		RevlogStream cached = ref == null ? null : ref.get();
+		if (cached != null) {
+			return cached;
+		}
+		File f = repo.getFileFromDataDir(path);
+		if (!onlyIfExists || f.exists()) {
+			RevlogStream s = create(f);
+			if (shallCacheRevlogs()) {
+				streamsCache.put(path, new SoftReference<RevlogStream>(s));
+			}
+			return s;
+		}
+		return null;
+	}
+	
+	private boolean shallCacheRevlogs() {
+		return streamsCache != null;
+	}
+}
--- a/src/org/tmatesoft/hg/internal/RevlogStreamWriter.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RevlogStreamWriter.java	Wed Jul 10 11:48:55 2013 +0200
@@ -22,14 +22,23 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.core.SessionContext;
+import org.tmatesoft.hg.internal.DataSerializer.ByteArrayDataSource;
+import org.tmatesoft.hg.internal.DataSerializer.ByteArraySerializer;
+import org.tmatesoft.hg.internal.DataSerializer.DataSource;
+import org.tmatesoft.hg.repo.HgBundle.GroupElement;
 import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgInvalidRevisionException;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.Pair;
 
 /**
  * 
- * TODO separate operation to check if index is too big and split into index+data
+ * TODO [post-1.1] separate operation to check if index is too big and split into index+data
  * 
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
@@ -38,34 +47,117 @@
 
 	private final DigestHelper dh = new DigestHelper();
 	private final RevlogCompressor revlogDataZip;
-	private int lastEntryBase, lastEntryIndex;
-	private byte[] lastEntryContent;
+	private final Transaction transaction;
+	private int lastEntryBase, lastEntryIndex, lastEntryActualLen;
+	// record revision and its full content
+	// the name might be misleading, it does not necessarily match lastEntryIndex
+	private Pair<Integer, byte[]> lastFullContent;
 	private Nodeid lastEntryRevision;
 	private IntMap<Nodeid> revisionCache = new IntMap<Nodeid>(32);
 	private RevlogStream revlogStream;
 	
-	public RevlogStreamWriter(SessionContext ctx, RevlogStream stream) {
-		assert ctx != null;
+	public RevlogStreamWriter(SessionContext.Source ctxSource, RevlogStream stream, Transaction tr) {
+		assert ctxSource != null;
 		assert stream != null;
+		assert tr != null;
 				
-		revlogDataZip = new RevlogCompressor(ctx);
+		revlogDataZip = new RevlogCompressor(ctxSource.getSessionContext());
 		revlogStream = stream;
+		transaction = tr;
+	}
+	
+	public Pair<Integer,Nodeid> addPatchRevision(GroupElement ge, RevisionToIndexMap clogRevs, RevisionToIndexMap revlogRevs) throws HgIOException, HgRuntimeException {
+		populateLastEntryIndex();
+		//
+		final Nodeid nodeRev = ge.node();
+		final Nodeid csetRev = ge.cset();
+		int linkRev;
+		if (nodeRev.equals(csetRev)) {
+			linkRev = lastEntryIndex+1;
+		} else {
+			linkRev = clogRevs.revisionIndex(csetRev);
+		}
+		assert linkRev >= 0;
+		final Nodeid p1Rev = ge.firstParent();
+		int p1 = p1Rev.isNull() ? NO_REVISION : revlogRevs.revisionIndex(p1Rev);
+		final Nodeid p2Rev = ge.secondParent();
+		int p2 = p2Rev.isNull() ? NO_REVISION : revlogRevs.revisionIndex(p2Rev);
+		Patch p = new Patch();
+		final byte[] patchBytes;
+		try {
+			// XXX there's ge.rawData(), to avoid extra array wrap
+			patchBytes = ge.rawDataByteArray();
+			p.read(new ByteArrayDataAccess(patchBytes));
+		} catch (IOException ex) {
+			throw new HgIOException("Failed to read patch information", ex, null);
+		}
+		//
+		final Nodeid patchBase = ge.patchBase();
+		int patchBaseRev = patchBase.isNull() ? NO_REVISION : revlogRevs.revisionIndex(patchBase);
+		int baseRev = lastEntryIndex == NO_REVISION ? 0 : revlogStream.baseRevision(patchBaseRev);
+		int revLen;
+		DataSource ds;
+		byte[] complete = null;
+		if (patchBaseRev == lastEntryIndex && lastEntryIndex != NO_REVISION) {
+			// we may write patch from GroupElement as is
+			int patchBaseLen = dataLength(patchBaseRev);
+			revLen = patchBaseLen + p.patchSizeDelta();
+			ds = new ByteArrayDataSource(patchBytes);
+		} else {
+			// read baseRev, unless it's the pull to empty repository
+			try {
+				if (lastEntryIndex == NO_REVISION) {
+					complete = p.apply(new ByteArrayDataAccess(new byte[0]), -1);
+					baseRev = 0; // it's done above, but doesn't hurt
+				} else {
+					ReadContentInspector insp = new ReadContentInspector().read(revlogStream, baseRev);
+					complete = p.apply(new ByteArrayDataAccess(insp.content), -1);
+					baseRev = lastEntryIndex + 1;
+				}
+				ds = new ByteArrayDataSource(complete);
+				revLen = complete.length;
+			} catch (IOException ex) {
+				// unlikely to happen, as ByteArrayDataSource doesn't throw IOException
+				throw new HgIOException("Failed to reconstruct revision", ex, null);
+			}
+		}
+		doAdd(nodeRev, p1, p2, linkRev, baseRev, revLen, ds);
+		if (complete != null) {
+			lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, complete);
+		}
+		return new Pair<Integer, Nodeid>(lastEntryIndex, lastEntryRevision);
 	}
 	
 	/**
 	 * @return nodeid of added revision
+	 * @throws HgRuntimeException 
 	 */
-	public Nodeid addRevision(byte[] content, int linkRevision, int p1, int p2) {
-		lastEntryRevision = Nodeid.NULL;
-		int revCount = revlogStream.revisionCount();
-		lastEntryIndex = revCount == 0 ? NO_REVISION : revCount - 1;
-		populateLastEntry();
+	public Pair<Integer,Nodeid> addRevision(DataSource content, int linkRevision, int p1, int p2) throws HgIOException, HgRuntimeException {
+		populateLastEntryIndex();
+		populateLastEntryContent();
 		//
-		Patch patch = GeneratePatchInspector.delta(lastEntryContent, content);
+		byte[] contentByteArray = toByteArray(content);
+		Patch patch = GeneratePatchInspector.delta(lastFullContent.second(), contentByteArray);
 		int patchSerializedLength = patch.serializedLength();
 		
-		final boolean writeComplete = preferCompleteOverPatch(patchSerializedLength, content.length);
-		DataSerializer.DataSource dataSource = writeComplete ? new DataSerializer.ByteArrayDataSource(content) : patch.new PatchDataSource();
+		final boolean writeComplete = preferCompleteOverPatch(patchSerializedLength, contentByteArray.length);
+		DataSerializer.DataSource dataSource = writeComplete ? new ByteArrayDataSource(contentByteArray) : patch.new PatchDataSource();
+		//
+		Nodeid p1Rev = revision(p1);
+		Nodeid p2Rev = revision(p2);
+		Nodeid newRev = Nodeid.fromBinary(dh.sha1(p1Rev, p2Rev, contentByteArray).asBinary(), 0);
+		doAdd(newRev, p1, p2, linkRevision, writeComplete ? lastEntryIndex+1 : lastEntryBase, contentByteArray.length, dataSource);
+		lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, contentByteArray);
+		return new Pair<Integer, Nodeid>(lastEntryIndex, lastEntryRevision);
+	}
+
+	private Nodeid doAdd(Nodeid rev, int p1, int p2, int linkRevision, int baseRevision, int revLen, DataSerializer.DataSource dataSource) throws HgIOException, HgRuntimeException  {
+		assert linkRevision >= 0;
+		assert baseRevision >= 0;
+		assert p1 == NO_REVISION || p1 >= 0;
+		assert p2 == NO_REVISION || p2 >= 0;
+		assert !rev.isNull();
+		assert revLen >= 0;
 		revlogDataZip.reset(dataSource);
 		final int compressedLen;
 		final boolean useCompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), dataSource.serializeLength());
@@ -76,23 +168,18 @@
 			compressedLen = dataSource.serializeLength() + 1 /*1 byte for 'u' - uncompressed prefix byte*/;
 		}
 		//
-		Nodeid p1Rev = revision(p1);
-		Nodeid p2Rev = revision(p2);
-		byte[] revisionNodeidBytes = dh.sha1(p1Rev, p2Rev, content).asBinary();
-		//
-
-		DataSerializer indexFile, dataFile, activeFile;
-		indexFile = dataFile = activeFile = null;
+		DataSerializer indexFile, dataFile;
+		indexFile = dataFile = null;
 		try {
 			//
-			activeFile = indexFile = revlogStream.getIndexStreamWriter();
+			indexFile = revlogStream.getIndexStreamWriter(transaction);
 			final boolean isInlineData = revlogStream.isInlineData();
 			HeaderWriter revlogHeader = new HeaderWriter(isInlineData);
-			revlogHeader.length(content.length, compressedLen);
-			revlogHeader.nodeid(revisionNodeidBytes);
+			revlogHeader.length(revLen, compressedLen);
+			revlogHeader.nodeid(rev.toByteArray());
 			revlogHeader.linkRevision(linkRevision);
 			revlogHeader.parents(p1, p2);
-			revlogHeader.baseRevision(writeComplete ? lastEntryIndex+1 : lastEntryBase);
+			revlogHeader.baseRevision(baseRevision);
 			long lastEntryOffset = revlogStream.newEntryOffset();
 			revlogHeader.offset(lastEntryOffset);
 			//
@@ -101,9 +188,8 @@
 			if (isInlineData) {
 				dataFile = indexFile;
 			} else {
-				dataFile = revlogStream.getDataStreamWriter();
+				dataFile = revlogStream.getDataStreamWriter(transaction);
 			}
-			activeFile = dataFile;
 			if (useCompressedData) {
 				int actualCompressedLenWritten = revlogDataZip.writeCompressedData(dataFile);
 				if (actualCompressedLenWritten != compressedLen) {
@@ -114,22 +200,13 @@
 				dataSource.serialize(dataFile);
 			}
 			
-			
-			lastEntryContent = content;
 			lastEntryBase = revlogHeader.baseRevision();
 			lastEntryIndex++;
-			lastEntryRevision = Nodeid.fromBinary(revisionNodeidBytes, 0);
+			lastEntryActualLen = revLen;
+			lastEntryRevision = rev;
 			revisionCache.put(lastEntryIndex, lastEntryRevision);
 
 			revlogStream.revisionAdded(lastEntryIndex, lastEntryRevision, lastEntryBase, lastEntryOffset);
-		} catch (IOException ex) {
-			String m = String.format("Failed to write revision %d", lastEntryIndex+1, null);
-			HgInvalidControlFileException t = new HgInvalidControlFileException(m, ex, null);
-			if (activeFile == dataFile) {
-				throw revlogStream.initWithDataFile(t);
-			} else {
-				throw revlogStream.initWithIndexFile(t);
-			}
 		} finally {
 			indexFile.done();
 			if (dataFile != null && dataFile != indexFile) {
@@ -139,7 +216,13 @@
 		return lastEntryRevision;
 	}
 	
-	private Nodeid revision(int revisionIndex) {
+	private byte[] toByteArray(DataSource content) throws HgIOException, HgRuntimeException {
+		ByteArraySerializer ba = new ByteArraySerializer();
+		content.serialize(ba);
+		return ba.toByteArray();
+	}
+
+	private Nodeid revision(int revisionIndex) throws HgInvalidControlFileException, HgInvalidRevisionException {
 		if (revisionIndex == NO_REVISION) {
 			return Nodeid.NULL;
 		}
@@ -151,32 +234,38 @@
 		return n;
 	}
 	
-	private void populateLastEntry() throws HgInvalidControlFileException {
-		if (lastEntryContent != null) {
+	private int dataLength(int revisionIndex) throws HgInvalidControlFileException, HgInvalidRevisionException {
+		assert revisionIndex >= 0;
+		if (revisionIndex == lastEntryIndex) {
+			return lastEntryActualLen;
+		}
+		if (lastFullContent != null && lastFullContent.first() == revisionIndex) {
+			return lastFullContent.second().length;
+		}
+		return revlogStream.dataLength(revisionIndex);
+	}
+	
+	private void populateLastEntryIndex() throws HgRuntimeException {
+		int revCount = revlogStream.revisionCount();
+		lastEntryIndex = revCount == 0 ? NO_REVISION : revCount - 1;
+	}
+	
+	private void populateLastEntryContent() throws HgRuntimeException {
+		if (lastFullContent != null && lastFullContent.first() == lastEntryIndex) {
+			// we have last entry cached
 			return;
 		}
+		lastEntryRevision = Nodeid.NULL;
 		if (lastEntryIndex != NO_REVISION) {
-			assert lastEntryIndex >= 0;
-			final IOException[] failure = new IOException[1];
-			revlogStream.iterate(lastEntryIndex, lastEntryIndex, true, new RevlogStream.Inspector() {
-				
-				public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
-					try {
-						lastEntryBase = baseRevision;
-						lastEntryRevision = Nodeid.fromBinary(nodeid, 0);
-						lastEntryContent = data.byteArray();
-					} catch (IOException ex) {
-						failure[0] = ex;
-					}
-				}
-			});
-			if (failure[0] != null) {
-				String m = String.format("Failed to get content of most recent revision %d", lastEntryIndex);
-				throw revlogStream.initWithDataFile(new HgInvalidControlFileException(m, failure[0], null));
-			}
+			ReadContentInspector insp = new ReadContentInspector().read(revlogStream, lastEntryIndex);
+			lastEntryBase = insp.baseRev;
+			lastEntryRevision = insp.rev;
+			lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, insp.content);
 		} else {
-			lastEntryContent = new byte[0];
+			lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, new byte[0]);
 		}
+		assert lastFullContent.first() == lastEntryIndex;
+		assert lastFullContent.second() != null;
 	}
 	
 	public static boolean preferCompleteOverPatch(int patchLength, int fullContentLength) {
@@ -250,13 +339,12 @@
 			return this;
 		}
 		
-		public void serialize(DataSerializer out) throws IOException {
+		public void serialize(DataSerializer out) throws HgIOException {
 			header.clear();
 			if (offset == 0) {
 				int version = 1 /* RevlogNG */;
 				if (isInline) {
-					final int INLINEDATA = 1 << 16; // FIXME extract constant
-					version |= INLINEDATA;
+					version |= RevlogStream.INLINEDATA;
 				}
 				header.putInt(version);
 				header.putInt(0);
@@ -283,4 +371,40 @@
 			return header.capacity();
 		}
 	}
-}
+	
+	// XXX part of HgRevisionMap contract, need public counterparts (along with IndexToRevisionMap)
+	public interface RevisionToIndexMap {
+		
+		/**
+		 * @return {@link HgRepository#NO_REVISION} if unknown revision
+		 */
+		int revisionIndex(Nodeid revision);
+	}
+
+	private static class ReadContentInspector implements RevlogStream.Inspector {
+		public int baseRev;
+		public Nodeid rev;
+		public byte[] content;
+		private IOException failure;
+		
+		public ReadContentInspector read(RevlogStream rs, int revIndex) throws HgInvalidControlFileException {
+			assert revIndex >= 0;
+			rs.iterate(revIndex, revIndex, true, this);
+			if (failure != null) {
+				String m = String.format("Failed to get content of revision %d", revIndex);
+				throw rs.initWithDataFile(new HgInvalidControlFileException(m, failure, null));
+			}
+			return this;
+		}
+		
+		public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
+			try {
+				baseRev = baseRevision;
+				rev = Nodeid.fromBinary(nodeid, 0);
+				content = data.byteArray();
+			} catch (IOException ex) {
+				failure = ex;
+			}
+		}
+	}
+}
\ No newline at end of file
--- a/src/org/tmatesoft/hg/internal/StoragePathHelper.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/StoragePathHelper.java	Wed Jul 10 11:48:55 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -22,8 +22,6 @@
 import java.nio.charset.CharsetEncoder;
 import java.util.Arrays;
 import java.util.TreeSet;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 
 import org.tmatesoft.hg.util.PathRewrite;
 
@@ -36,11 +34,15 @@
  * @author TMate Software Ltd.
  */
 class StoragePathHelper implements PathRewrite {
-	
+
+	static final String STR_STORE = "store/";
+	static final String STR_DATA = "data/";
+	static final String STR_DH = "dh/";
+
 	private final boolean store;
 	private final boolean fncache;
 	private final boolean dotencode;
-	private final Pattern suffix2replace;
+	private final EncodeDirPathHelper dirPathRewrite;
 	private final CharsetEncoder csEncoder;
 	private final char[] hexEncodedByte = new char[] {'~', '0', '0'};
 	private final ByteBuffer byteEncodingBuf;
@@ -55,7 +57,7 @@
 		store = isStore;
 		fncache = isFncache;
 		dotencode = isDotencode;
-		suffix2replace = Pattern.compile("\\.([id]|hg)/");
+		dirPathRewrite = new EncodeDirPathHelper();
 		csEncoder = fsEncoding.newEncoder();
 		byteEncodingBuf = ByteBuffer.allocate(Math.round(csEncoder.maxBytesPerChar()) + 1/*in fact, need ceil, hence +1*/);
 		charEncodingBuf = CharBuffer.allocate(1);
@@ -66,25 +68,9 @@
 	 * It has to be normalized (slashes) and shall not include extension .i or .d.
 	 */
 	public CharSequence rewrite(CharSequence p) {
-		final String STR_STORE = "store/";
-		final String STR_DATA = "data/";
-		final String STR_DH = "dh/";
 		final String reservedChars = "\\:*?\"<>|";
 		
-		Matcher suffixMatcher = suffix2replace.matcher(p);
-		CharSequence path;
-		// Matcher.replaceAll, but without extra toString
-		boolean found = suffixMatcher.find();
-		if (found) {
-			StringBuffer sb = new StringBuffer(p.length()  + 20);
-			do {
-				suffixMatcher.appendReplacement(sb, ".$1.hg/");
-			} while (found = suffixMatcher.find());
-			suffixMatcher.appendTail(sb);
-			path = sb;
-		} else {
-			path = p;
-		}
+		CharSequence path = dirPathRewrite.rewrite(p);
 		
 		StringBuilder sb = new StringBuilder(path.length() << 1);
 		if (store || fncache) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/Transaction.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.tmatesoft.hg.core.HgIOException;
+import org.tmatesoft.hg.core.SessionContext;
+import org.tmatesoft.hg.repo.HgInvalidStateException;
+
+/**
+ * Implementation strategies possible:<ul>
+ * <li> Get a copy, write changes to origin, keep copy as backup till #commit
+ *   <p>(-) doesn't break hard links 
+ * <li> Get a copy, write changes to a copy, on commit rename copy to origin. 
+ *   <p>(-) What if we read newly written data (won't find it);
+ *   <p>(-) complex #commit
+ *   <p>(+) simple rollback
+ * <li> Get a copy, rename origin to backup (breaks hard links), rename copy to origin, write changes 
+ *   <p>(+) Modified file is in place right away;
+ *   <p>(+) easy #commit
+ * <li> Do not copy, just record file size, truncate to that size on rollback
+ * <li> ...?
+ * </ul> 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public abstract class Transaction {
+	/**
+	 * Record the file is going to be modified during this transaction, obtain actual
+	 * destination to write to.
+	 * The file to be modified does not necessarily exist; it might be just the name of an added file.  
+	 */
+	public abstract File prepare(File f) throws HgIOException;
+	/**
+	 * overwrites backup if exists, backup is kept after successful {@link #commit()}
+	 */
+	public abstract File prepare(File origin, File backup) throws HgIOException;
+	/**
+	 * Tell that file was successfully processed
+	 */
+	public abstract void done(File f) throws HgIOException;
+	/**
+	 * optional?
+	 */
+	public abstract void failure(File f, IOException ex);
+	/**
+	 * Complete the transaction
+	 */
+	public abstract void commit() throws HgIOException;
+	/**
+	 * Undo all the changes
+	 */
+	public abstract void rollback() throws HgIOException;
+
+	public interface Factory {
+		public Transaction create(SessionContext.Source ctxSource);
+	}
+
+	public static class NoRollback extends Transaction {
+
+		@Override
+		public File prepare(File f) throws HgIOException {
+			return f;
+		}
+
+		@Override
+		public File prepare(File origin, File backup) throws HgIOException {
+			return origin;
+		}
+
+		@Override
+		public void done(File f) throws HgIOException {
+			// no-op
+		}
+
+		@Override
+		public void failure(File f, IOException ex) {
+			// no-op
+		}
+
+		@Override
+		public void commit() throws HgIOException {
+			// no-op
+		}
+
+		@Override
+		public void rollback() throws HgIOException {
+			throw new HgInvalidStateException("This transaction doesn't support rollback");
+		}
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/WorkingCopyContent.java	Wed Jul 10 11:48:55 2013 +0200
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.tmatesoft.hg.core.HgCommitCommand;
+import org.tmatesoft.hg.core.HgIOException;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgInvalidFileException;
+import org.tmatesoft.hg.repo.HgInvalidStateException;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.ByteChannel;
+import org.tmatesoft.hg.util.CancelledException;
+
+/**
+ * Access content of the working copy. The difference with {@link FileContentSupplier} is that this one doesn't need {@link File}
+ * in the working directory. However, provided this class is used from {@link HgCommitCommand} when "modified" file was detected,
+ * it's odd to expect no file in the working dir.
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class WorkingCopyContent implements DataSerializer.DataSource {
+	private final HgDataFile file;
+
+	public WorkingCopyContent(HgDataFile dataFile) {
+		file = dataFile;
+		if (!dataFile.exists()) {
+			throw new IllegalArgumentException();
+		}
+	}
+
+	public void serialize(final DataSerializer out) throws HgIOException, HgRuntimeException {
+		final HgIOException failure[] = new HgIOException[1];
+		try {
+			// TODO #workingCopy API is very limiting, CancelledException is inconvenient, 
+			// and absence of HgIOException is very uncomfortable
+			file.workingCopy(new ByteChannel() {
+				
+				public int write(ByteBuffer buffer) throws IOException {
+					try {
+						if (buffer.hasArray()) {
+							out.write(buffer.array(), buffer.position(), buffer.remaining());
+						}
+						int rv = buffer.remaining();
+						buffer.position(buffer.limit()); // pretend we've consumed the data
+						return rv;
+					} catch (HgIOException ex) {
+						failure[0] = ex;
+						IOException e = new IOException();
+						e.initCause(ex); // XXX Java 1.5: no IOException(Throwable) ctor
+						throw e;
+					}
+				}
+			});
+		} catch (HgInvalidFileException ex) {
+			if (failure[0] != null) {
+				throw failure[0];
+			}
+			throw new HgIOException("Write failure", ex, new File(file.getRepo().getWorkingDir(), file.getPath().toString()));
+		} catch (CancelledException ex) {
+			throw new HgInvalidStateException("Our channel doesn't cancel here");
+		}
+	}
+
+	public int serializeLength() throws HgRuntimeException {
+		return file.getLength(HgRepository.WORKING_COPY);
+	}
+}
\ No newline at end of file
--- a/src/org/tmatesoft/hg/internal/WorkingDirFileWriter.java	Thu Jun 06 14:21:11 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/WorkingDirFileWriter.java	Wed Jul 10 11:48:55 2013 +0200
@@ -26,6 +26,7 @@
 
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgManifest;
+import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.ByteChannel;
 import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.LogFacility.Severity;
@@ -62,8 +63,9 @@
 	/**
 	 * Writes content of specified file revision into local filesystem, or create a symlink according to flags. 
 	 * Executable bit is set if specified and filesystem supports it. 
+	 * @throws HgRuntimeException 
 	 */
-	public void processFile(HgDataFile df, int fileRevIndex, HgManifest.Flags flags) throws IOException {
+	public void processFile(HgDataFile df, int fileRevIndex, HgManifest.Flags flags) throws IOException, HgRuntimeException {
 		try {
 			prepare(df.getPath());
 			if (flags != HgManifest.Flags.Link) {
--- a/src/org/tmatesoft/hg/repo/CommitFacility.java	Thu Jun 06 14:21:11 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,250 +0,0 @@
-/*
- * Copyright (c) 2013 TMate Software Ltd
- *  
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * For information on how to redistribute this software under
- * the terms of a license other than GNU General Public License
- * contact TMate Software at support@hg4j.com
- */
-package org.tmatesoft.hg.repo;
-
-import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.tmatesoft.hg.core.HgCommitCommand;
-import org.tmatesoft.hg.core.HgIOException;
-import org.tmatesoft.hg.core.HgRepositoryLockException;
-import org.tmatesoft.hg.core.Nodeid;
-import org.tmatesoft.hg.internal.ByteArrayChannel;
-import org.tmatesoft.hg.internal.ChangelogEntryBuilder;
-import org.tmatesoft.hg.internal.DirstateBuilder;
-import org.tmatesoft.hg.internal.DirstateReader;
-import org.tmatesoft.hg.internal.Experimental;
-import org.tmatesoft.hg.internal.FNCacheFile;
-import org.tmatesoft.hg.internal.Internals;
-import org.tmatesoft.hg.internal.ManifestEntryBuilder;
-import org.tmatesoft.hg.internal.ManifestRevision;
-import org.tmatesoft.hg.internal.RevlogStream;
-import org.tmatesoft.hg.internal.RevlogStreamWriter;
-import org.tmatesoft.hg.util.Pair;
-import org.tmatesoft.hg.util.Path;
-import org.tmatesoft.hg.util.LogFacility.Severity;
-
-/**
- * WORK IN PROGRESS
- * Name: CommitObject, FutureCommit or PendingCommit
- * Only public API now: {@link HgCommitCommand}. TEMPORARILY lives in the oth.repo public packages, until code interdependencies are resolved
- * 
- * @author Artem Tikhomirov
- * @author TMate Software Ltd.
- */
-@Experimental(reason="Work in progress")
-public final class CommitFacility {
-	private final HgRepository repo;
-	private final int p1Commit, p2Commit;
-	private Map<Path, Pair<HgDataFile, ByteDataSupplier>> files = new LinkedHashMap<Path, Pair<HgDataFile, ByteDataSupplier>>();
-	private Set<Path> removals = new TreeSet<Path>();
-	private String branch, user;
-
-	public CommitFacility(HgRepository hgRepo, int parentCommit) {
-		this(hgRepo, parentCommit, NO_REVISION);
-	}
-	
-	public CommitFacility(HgRepository hgRepo, int parent1Commit, int parent2Commit) {
-		repo = hgRepo;
-		p1Commit = parent1Commit;
-		p2Commit = parent2Commit;
-		if (parent1Commit != NO_REVISION && parent1Commit == parent2Commit) {
-			throw new IllegalArgumentException("Merging same revision is dubious");
-		}
-	}
-
-	public boolean isMerge() {
-		return p1Commit != NO_REVISION && p2Commit != NO_REVISION;
-	}
-
-	public void add(HgDataFile dataFile, ByteDataSupplier content) {
-		if (content == null) {
-			throw new IllegalArgumentException();
-		}
-		removals.remove(dataFile.getPath());
-		files.put(dataFile.getPath(), new Pair<HgDataFile, ByteDataSupplier>(dataFile, content));
-	}
-
-	public void forget(HgDataFile dataFile) {
-		files.remove(dataFile.getPath());
-		removals.add(dataFile.getPath());
-	}
-	
-	public void branch(String branchName) {
-		branch = branchName;
-	}
-	
-	public void user(String userName) {
-		user = userName;
-	}
-	
-	public Nodeid commit(String message) throws HgIOException, HgRepositoryLockException {
-		
-		final HgChangelog clog = repo.getChangelog();
-		final int clogRevisionIndex = clog.getRevisionCount();
-		ManifestRevision c1Manifest = new ManifestRevision(null, null);
-		ManifestRevision c2Manifest = new ManifestRevision(null, null);
-		if (p1Commit != NO_REVISION) {
-			repo.getManifest().walk(p1Commit, p1Commit, c1Manifest);
-		}
-		if (p2Commit != NO_REVISION) {
-			repo.getManifest().walk(p2Commit, p2Commit, c2Manifest);
-		}
-//		Pair<Integer, Integer> manifestParents = getManifestParents();
-		Pair<Integer, Integer> manifestParents = new Pair<Integer, Integer>(c1Manifest.revisionIndex(), c2Manifest.revisionIndex());
-		TreeMap<Path, Nodeid> newManifestRevision = new TreeMap<Path, Nodeid>();
-		HashMap<Path, Pair<Integer, Integer>> fileParents = new HashMap<Path, Pair<Integer,Integer>>();
-		for (Path f : c1Manifest.files()) {
-			HgDataFile df = repo.getFileNode(f);
-			Nodeid fileKnownRev1 = c1Manifest.nodeid(f), fileKnownRev2;
-			final int fileRevIndex1 = df.getRevisionIndex(fileKnownRev1);
-			final int fileRevIndex2;
-			if ((fileKnownRev2 = c2Manifest.nodeid(f)) != null) {
-				// merged files
-				fileRevIndex2 = df.getRevisionIndex(fileKnownRev2);
-			} else {
-				fileRevIndex2 = NO_REVISION;
-			}
-				
-			fileParents.put(f, new Pair<Integer, Integer>(fileRevIndex1, fileRevIndex2));
-			newManifestRevision.put(f, fileKnownRev1);
-		}
-		//
-		// Forget removed
-		for (Path p : removals) {
-			newManifestRevision.remove(p);
-		}
-		//
-		// Register new/changed
-		ArrayList<Path> newlyAddedFiles = new ArrayList<Path>();
-		ArrayList<Path> touchInDirstate = new ArrayList<Path>();
-		for (Pair<HgDataFile, ByteDataSupplier> e : files.values()) {
-			HgDataFile df = e.first();
-			Pair<Integer, Integer> fp = fileParents.get(df.getPath());
-			if (fp == null) {
-				// NEW FILE
-				fp = new Pair<Integer, Integer>(NO_REVISION, NO_REVISION);
-			}
-			ByteDataSupplier bds = e.second();
-			// FIXME quickfix, instead, pass ByteDataSupplier directly to RevlogStreamWriter
-			ByteBuffer bb = ByteBuffer.allocate(2048);
-			ByteArrayChannel bac = new ByteArrayChannel();
-			while (bds.read(bb) != -1) {
-				bb.flip();
-				bac.write(bb);
-				bb.clear();
-			}
-			RevlogStream contentStream;
-			if (df.exists()) {
-				contentStream = df.content;
-			} else {
-				contentStream = repo.createStoreFile(df.getPath());
-				newlyAddedFiles.add(df.getPath());
-				// FIXME df doesn't get df.content updated, and clients
-				// that would attempt to access newly added file after commit would fail
-				// (despite the fact the file is in there)
-			}
-			RevlogStreamWriter fileWriter = new RevlogStreamWriter(repo.getSessionContext(), contentStream);
-			Nodeid fileRev = fileWriter.addRevision(bac.toArray(), clogRevisionIndex, fp.first(), fp.second());
-			newManifestRevision.put(df.getPath(), fileRev);
-			touchInDirstate.add(df.getPath());
-		}
-		//
-		// Manifest
-		final ManifestEntryBuilder manifestBuilder = new ManifestEntryBuilder();
-		for (Map.Entry<Path, Nodeid> me : newManifestRevision.entrySet()) {
-			manifestBuilder.add(me.getKey().toString(), me.getValue());
-		}
-		RevlogStreamWriter manifestWriter = new RevlogStreamWriter(repo.getSessionContext(), repo.getManifest().content);
-		Nodeid manifestRev = manifestWriter.addRevision(manifestBuilder.build(), clogRevisionIndex, manifestParents.first(), manifestParents.second());
-		//
-		// Changelog
-		final ChangelogEntryBuilder changelogBuilder = new ChangelogEntryBuilder();
-		changelogBuilder.setModified(files.keySet());
-		changelogBuilder.branch(branch == null ? HgRepository.DEFAULT_BRANCH_NAME : branch);
-		changelogBuilder.user(String.valueOf(user));
-		byte[] clogContent = changelogBuilder.build(manifestRev, message);
-		RevlogStreamWriter changelogWriter = new RevlogStreamWriter(repo.getSessionContext(), clog.content);
-		Nodeid changesetRev = changelogWriter.addRevision(clogContent, clogRevisionIndex, p1Commit, p2Commit);
-		// FIXME move fncache update to an external facility, along with dirstate update
-		if (!newlyAddedFiles.isEmpty() && repo.getImplHelper().fncacheInUse()) {
-			FNCacheFile fncache = new FNCacheFile(repo.getImplHelper());
-			for (Path p : newlyAddedFiles) {
-				fncache.add(p);
-			}
-			try {
-				fncache.write();
-			} catch (IOException ex) {
-				// see comment above for fnchache.read()
-				repo.getSessionContext().getLog().dump(getClass(), Severity.Error, ex, "Failed to write fncache, error ignored");
-			}
-		}
-		// bring dirstate up to commit state
-		Internals implRepo = Internals.getInstance(repo);
-		final DirstateBuilder dirstateBuilder = new DirstateBuilder(implRepo);
-		dirstateBuilder.fillFrom(new DirstateReader(implRepo, new Path.SimpleSource()));
-		for (Path p : removals) {
-			dirstateBuilder.recordRemoved(p);
-		}
-		for (Path p : touchInDirstate) {
-			dirstateBuilder.recordUncertain(p);
-		}
-		dirstateBuilder.parents(changesetRev, Nodeid.NULL);
-		dirstateBuilder.serialize();
-		return changesetRev;
-	}
-/*
-	private Pair<Integer, Integer> getManifestParents() {
-		return new Pair<Integer, Integer>(extractManifestRevisionIndex(p1Commit), extractManifestRevisionIndex(p2Commit));
-	}
-
-	private int extractManifestRevisionIndex(int clogRevIndex) {
-		if (clogRevIndex == NO_REVISION) {
-			return NO_REVISION;
-		}
-		RawChangeset commitObject = repo.getChangelog().range(clogRevIndex, clogRevIndex).get(0);
-		Nodeid manifestRev = commitObject.manifest();
-		if (manifestRev.isNull()) {
-			return NO_REVISION;
-		}
-		return repo.getManifest().getRevisionIndex(manifestRev);
-	}
-*/
-
-	// unlike DataAccess (which provides structured access), this one 
-	// deals with a sequence of bytes, when there's no need in structure of the data
-	// FIXME java.nio.ReadableByteChannel or ByteStream/ByteSequence(read, length, reset)
-	// SHALL be inline with util.ByteChannel, reading bytes from HgDataFile, preferably DataAccess#readBytes(BB) to match API,
-	// and a wrap for ByteVector
-	public interface ByteDataSupplier { // TODO look if can resolve DataAccess in HgCloneCommand visibility issue
-		// FIXME needs lifecycle, e.g. for supplier that reads from WC
-		int read(ByteBuffer buf);
-	}
-	
-	public interface ByteDataConsumer {
-		void write(ByteBuffer buf);
-	}
-}
--- a/src/org/tmatesoft/hg/repo/HgBlameFacility.java	Thu Jun 06 14:21:11 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,487 +0,0 @@
-/*
- * Copyright (c) 2013 TMate Software Ltd
- *  
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * For information on how to redistribute this software under
- * the terms of a license other than GNU General Public License
- * contact TMate Software at support@hg4j.com
- */
-package org.tmatesoft.hg.repo;
-
-import static org.tmatesoft.hg.core.HgIterateDirection.NewToOld;
-import static org.tmatesoft.hg.core.HgIterateDirection.OldToNew;
-import static org.tmatesoft.hg.repo.HgInternals.wrongRevisionIndex;
-import static org.tmatesoft.hg.repo.HgRepository.*;
-
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.LinkedList;
-
-import org.tmatesoft.hg.core.HgCallbackTargetException;
-import org.tmatesoft.hg.core.HgIterateDirection;
-import org.tmatesoft.hg.core.Nodeid;
-import org.tmatesoft.hg.internal.BlameHelper;
-import org.tmatesoft.hg.internal.Callback;
-import org.tmatesoft.hg.internal.Experimental;
-import org.tmatesoft.hg.internal.IntVector;
-import org.tmatesoft.hg.util.Adaptable;
-
-/**
- * Facility with diff/annotate functionality.
- * 
- * @author Artem Tikhomirov
- * @author TMate Software Ltd.
- */
-@Experimental(reason="Unstable API")
-public final class HgBlameFacility {
-	private final HgDataFile df;
-	
-	public HgBlameFacility(HgDataFile file) {
-		if (file == null) {
-			throw new IllegalArgumentException();
-		}
-		df = file;
-	}
-	
-	/**
-	 * mimic 'hg diff -r clogRevIndex1 -r clogRevIndex2'
-	 */
-	public void diff(int clogRevIndex1, int clogRevIndex2, Inspector insp) throws HgCallbackTargetException {
-		// FIXME clogRevIndex1 and clogRevIndex2 may point to different files, need to decide whether to throw an exception
-		// or to attempt to look up correct file node (tricky)
-		int fileRevIndex1 = fileRevIndex(df, clogRevIndex1);
-		int fileRevIndex2 = fileRevIndex(df, clogRevIndex2);
-		BlameHelper bh = new BlameHelper(insp, 5);
-		bh.useFileUpTo(df, clogRevIndex2);
-		bh.diff(fileRevIndex1, clogRevIndex1, fileRevIndex2, clogRevIndex2);
-	}
-	
-	/**
-	 * Walk file history up/down to revision at given changeset and report changes for each revision
-	 */
-	public void annotate(int changelogRevisionIndex, Inspector insp, HgIterateDirection iterateOrder) throws HgCallbackTargetException {
-		annotate(0, changelogRevisionIndex, insp, iterateOrder);
-	}
-
-	/**
-	 * Walk file history range and report changes for each revision
-	 */
-	public void annotate(int changelogRevIndexStart, int changelogRevIndexEnd, Inspector insp, HgIterateDirection iterateOrder) throws HgCallbackTargetException {
-		if (wrongRevisionIndex(changelogRevIndexStart) || wrongRevisionIndex(changelogRevIndexEnd)) {
-			throw new IllegalArgumentException();
-		}
-		// Note, changelogRevIndexEnd may be TIP, while the code below doesn't tolerate constants
-		//
-		int lastRevision = df.getRepo().getChangelog().getLastRevision();
-		if (changelogRevIndexEnd == TIP) {
-			changelogRevIndexEnd = lastRevision;
-		}
-		HgInternals.checkRevlogRange(changelogRevIndexStart, changelogRevIndexEnd, lastRevision);
-		if (!df.exists()) {
-			return;
-		}
-		BlameHelper bh = new BlameHelper(insp, 10);
-		HgDataFile currentFile = df;
-		int fileLastClogRevIndex = changelogRevIndexEnd;
-		FileRevisionHistoryChunk nextChunk = null;
-		LinkedList<FileRevisionHistoryChunk> fileCompleteHistory = new LinkedList<FileRevisionHistoryChunk>();
-		do {
-			FileRevisionHistoryChunk fileHistory = new FileRevisionHistoryChunk(currentFile);
-			fileHistory.init(fileLastClogRevIndex);
-			fileHistory.linkTo(nextChunk);
-			fileCompleteHistory.addFirst(fileHistory); // to get the list in old-to-new order
-			nextChunk = fileHistory;
-			bh.useFileUpTo(currentFile, fileLastClogRevIndex);
-			if (fileHistory.changeset(0) > changelogRevIndexStart && currentFile.isCopy()) {
-				// fileHistory.changeset(0) is the earliest revision we know about so far,
-				// once we get to revisions earlier than the requested start, stop digging.
-				// The reason there's NO == (i.e. not >=) because:
-				// (easy): once it's equal, we've reached our intended start
-				// (hard): if changelogRevIndexStart happens to be exact start of one of renames in the 
-				// chain of renames (test-annotate2 repository, file1->file1a->file1b, i.e. points 
-				// to the very start of file1a or file1 history), presence of == would get us to the next 
-				// chunk and hence changed parents of present chunk's first element. Our annotate alg 
-				// relies on parents only (i.e. knows nothing about 'last iteration element') to find out 
-				// what to compare, and hence won't report all lines of 'last iteration element' (which is the
-				// first revision of the renamed file) as "added in this revision", leaving gaps in annotate
-				HgRepository repo = currentFile.getRepo();
-				Nodeid originLastRev = currentFile.getCopySourceRevision();
-				currentFile = repo.getFileNode(currentFile.getCopySourceName());
-				fileLastClogRevIndex = currentFile.getChangesetRevisionIndex(currentFile.getRevisionIndex(originLastRev));
-				// XXX perhaps, shall fail with meaningful exception if new file doesn't exist (.i/.d not found for whatever reason)
-				// or source revision is missing?
-			} else {
-				fileHistory.chopAtChangeset(changelogRevIndexStart);
-				currentFile = null; // stop iterating
-			}
-		} while (currentFile != null && fileLastClogRevIndex > changelogRevIndexStart);
-		// fileCompleteHistory is in (origin, intermediate target, ultimate target) order
-
-		int[] fileClogParentRevs = new int[2];
-		int[] fileParentRevs = new int[2];
-		if (iterateOrder == NewToOld) {
-			Collections.reverse(fileCompleteHistory);
-		}
-		boolean shallFilterStart = changelogRevIndexStart != 0; // no reason if complete history is walked
-		for (FileRevisionHistoryChunk fileHistory : fileCompleteHistory) {
-			for (int fri : fileHistory.fileRevisions(iterateOrder)) {
-				int clogRevIndex = fileHistory.changeset(fri);
-				if (shallFilterStart) {
-					if (iterateOrder == NewToOld) {
-						// clogRevIndex decreases
-						if (clogRevIndex < changelogRevIndexStart) {
-							break;
-						}
-						// fall-through, clogRevIndex is in the [start..end] range
-					} else { // old to new
-						// the way we built fileHistory ensures we won't walk past changelogRevIndexEnd