changeset 662:af5223b86dd3

Merge branch smartgit-4.6
author Artem Tikhomirov <tikhomirov.artem@gmail.com>
date Wed, 10 Jul 2013 11:53:19 +0200
parents 5d8798772cca (diff) a5cf64f2e7e4 (current diff)
children 46b56864b483
files src/org/tmatesoft/hg/repo/HgBranches.java src/org/tmatesoft/hg/repo/HgParentChildMap.java
diffstat 52 files changed, 3208 insertions(+), 437 deletions(-)
--- a/.hgtags	Fri Jul 05 20:42:45 2013 +0200
+++ b/.hgtags	Wed Jul 10 11:53:19 2013 +0200
@@ -9,3 +9,4 @@
 f41dd9a3b8af1a5f74b533cd9f00b7d77423cc04 v1.1m4
 5afc7eedb3dd109f75e5f5a02dd88c9c4e7b7f3b v1.1rc1
 54e16ab771ec03d69cb05e38622ebdf9c3302c8c v1.1rc2
+2f33f102a8fa59274a27ebbe1c2903cecac6c5d5 v1.1.0
--- a/build.gradle	Fri Jul 05 20:42:45 2013 +0200
+++ b/build.gradle	Wed Jul 10 11:53:19 2013 +0200
@@ -16,7 +16,7 @@
  */
 def isRelease = false
 
-  version = '1.1.0-SNAPSHOT'
+  version = '1.2.0-SNAPSHOT'
   description = 'Pure Java API and Toolkit for Mercurial DVCS'
   group = 'org.tmatesoft.hg4j'
   
--- a/build.xml	Fri Jul 05 20:42:45 2013 +0200
+++ b/build.xml	Wed Jul 10 11:53:19 2013 +0200
@@ -27,7 +27,7 @@
 
 	<property name="junit.jar" value="lib/junit-4.8.2.jar" />
 	<property name="ver.qualifier" value="" />
-	<property name="version.lib" value="1.1.0" />
+	<property name="version.lib" value="1.2" />
 	<property name="version.jar" value="${version.lib}${ver.qualifier}" />
 	<property name="compile-with-debug" value="yes"/>
 
@@ -110,6 +110,9 @@
 			<test name="org.tmatesoft.hg.test.TestBlame" />
 			<test name="org.tmatesoft.hg.test.TestDiffHelper" />
 			<test name="org.tmatesoft.hg.test.TestRepositoryLock" />
+			<test name="org.tmatesoft.hg.test.TestRevisionSet" />
+			<test name="org.tmatesoft.hg.test.TestRevisionMaps" />
+			<test name="org.tmatesoft.hg.test.TestPush" />
 			<test name="org.tmatesoft.hg.test.ComplexTest" />
 		</junit>
 	</target>
--- a/cmdline/org/tmatesoft/hg/console/Main.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/cmdline/org/tmatesoft/hg/console/Main.java	Wed Jul 10 11:53:19 2013 +0200
@@ -103,7 +103,7 @@
 
 	public static void main(String[] args) throws Exception {
 		Main m = new Main(args);
-		m.checkFileSneakerPerformance();
+//		m.checkFileSneakerPerformance();
 //		m.testRevert();
 //		m.testCheckout();
 //		m.tryExtensions();
@@ -119,7 +119,7 @@
 //		m.testEffectiveFileLog();
 //		m.testMergeState();
 //		m.testFileStatus();
-//		m.dumpBranches();
+		m.dumpBranches();
 //		m.inflaterLengthException();
 //		m.dumpIgnored();
 //		m.dumpDirstate();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cmdline/org/tmatesoft/hg/console/Push.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.console;
+
+import java.util.Collections;
+
+import org.tmatesoft.hg.core.HgPushCommand;
+import org.tmatesoft.hg.core.HgRepoFacade;
+import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgRemoteRepository;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class Push {
+
+	public static void main(String[] args) throws Exception {
+		Options cmdLineOpts = Options.parse(args, Collections.<String>emptySet());
+		HgRepoFacade hgRepo = new HgRepoFacade();
+		if (!hgRepo.init(cmdLineOpts.findRepository())) {
+			System.err.printf("Can't find repository in: %s\n", hgRepo.getRepository().getLocation());
+			return;
+		}
+		// XXX perhaps, HgRepoFacade shall get a detectRemote() analog (to detect the remote server with respect to the facade's repo)
+		HgRemoteRepository hgRemote = new HgLookup().detectRemote(cmdLineOpts.getSingle(""), hgRepo.getRepository());
+		if (hgRemote.isInvalid()) {
+			System.err.printf("Remote repository %s is not valid", hgRemote.getLocation());
+			return;
+		}
+		HgPushCommand cmd = hgRepo.createPushCommand();
+		cmd.destination(hgRemote);
+		cmd.execute();
+		System.out.printf("Added %d changesets\n", cmd.getPushedRevisions().size());
+	}
+}
--- a/src/org/tmatesoft/hg/core/HgOutgoingCommand.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgOutgoingCommand.java	Wed Jul 10 11:53:19 2013 +0200
@@ -21,7 +21,9 @@
 import java.util.TreeSet;
 
 import org.tmatesoft.hg.internal.Internals;
+import org.tmatesoft.hg.internal.PhasesHelper;
 import org.tmatesoft.hg.internal.RepositoryComparator;
+import org.tmatesoft.hg.internal.RevisionSet;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.repo.HgRemoteRepository;
@@ -103,8 +105,7 @@
 	public List<Nodeid> executeLite() throws HgRemoteConnectionException, HgException, CancelledException {
 		final ProgressSupport ps = getProgressSupport(null);
 		try {
-			ps.start(10);
-			return getComparator(new ProgressSupport.Sub(ps, 5), getCancelSupport(null, true)).getLocalOnlyRevisions();
+			return getOutgoingRevisions(ps, getCancelSupport(null, true));
 		} catch (HgRuntimeException ex) {
 			throw new HgLibraryFailureException(ex);
 		} finally {
@@ -128,10 +129,16 @@
 		final ProgressSupport ps = getProgressSupport(handler);
 		final CancelSupport cs = getCancelSupport(handler, true);
 		try {
-			ps.start(-1);
-			ChangesetTransformer inspector = new ChangesetTransformer(localRepo, handler, getParentHelper(), ps, cs);
+			ps.start(200);
+			ChangesetTransformer inspector = new ChangesetTransformer(localRepo, handler, getParentHelper(), new ProgressSupport.Sub(ps, 100), cs);
 			inspector.limitBranches(branches);
-			getComparator(new ProgressSupport.Sub(ps, 1), cs).visitLocalOnlyRevisions(inspector);
+			List<Nodeid> out = getOutgoingRevisions(new ProgressSupport.Sub(ps, 100), cs);
+			int[] outRevIndex = new int[out.size()];
+			int i = 0;
+			for (Nodeid o : out) {
+				outRevIndex[i++] = localRepo.getChangelog().getRevisionIndex(o);
+			}
+			localRepo.getChangelog().range(inspector, outRevIndex);
 			inspector.checkFailure();
 		} catch (HgRuntimeException ex) {
 			throw new HgLibraryFailureException(ex);
@@ -159,4 +166,17 @@
 		return parentHelper;
 	}
 
+	
+	private List<Nodeid> getOutgoingRevisions(ProgressSupport ps, CancelSupport cs) throws HgRemoteConnectionException, HgException, CancelledException {
+		ps.start(10);
+		final RepositoryComparator c = getComparator(new ProgressSupport.Sub(ps, 5), cs);
+		List<Nodeid> local = c.getLocalOnlyRevisions();
+		ps.worked(3);
+		PhasesHelper phaseHelper = new PhasesHelper(Internals.getInstance(localRepo));
+		if (phaseHelper.isCapableOfPhases() && phaseHelper.withSecretRoots()) {
+			local = new RevisionSet(local).subtract(phaseHelper.allSecret()).asList();
+		}
+		ps.worked(2);
+		return local;
+	}
 }
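
The new getOutgoingRevisions() keeps secret changesets at home via set subtraction. A minimal sketch of that algebra, using only RevisionSet operations that appear in this changeset (the Nodeid values a..d are hypothetical, with c and d marking secret changesets):

	// imports assumed: java.util.Arrays, org.tmatesoft.hg.core.Nodeid, org.tmatesoft.hg.internal.RevisionSet
	RevisionSet local = new RevisionSet(Arrays.asList(a, b, c, d)); // everything the remote lacks
	RevisionSet secret = new RevisionSet(Arrays.asList(c, d));      // phaseHelper.allSecret()
	List<Nodeid> outgoing = local.subtract(secret).asList();        // [a, b]; secret changesets never leave
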
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/core/HgPullCommand.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.core;
+
+import java.util.List;
+
+import org.tmatesoft.hg.internal.AddRevInspector;
+import org.tmatesoft.hg.internal.COWTransaction;
+import org.tmatesoft.hg.internal.Internals;
+import org.tmatesoft.hg.internal.PhasesHelper;
+import org.tmatesoft.hg.internal.RepositoryComparator;
+import org.tmatesoft.hg.internal.RevisionSet;
+import org.tmatesoft.hg.internal.Transaction;
+import org.tmatesoft.hg.repo.HgBundle;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgInternals;
+import org.tmatesoft.hg.repo.HgParentChildMap;
+import org.tmatesoft.hg.repo.HgRemoteRepository;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.CancelledException;
+import org.tmatesoft.hg.util.ProgressSupport;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class HgPullCommand extends HgAbstractCommand<HgPullCommand> {
+
+	private final HgRepository repo;
+	private HgRemoteRepository remote;
+
+	public HgPullCommand(HgRepository hgRepo) {
+		repo = hgRepo;
+	}
+
+	public HgPullCommand source(HgRemoteRepository hgRemote) {
+		remote = hgRemote;
+		return this;
+	}
+
+	public void execute() throws HgRemoteConnectionException, HgIOException, HgLibraryFailureException, CancelledException {
+		final ProgressSupport progress = getProgressSupport(null);
+		try {
+			progress.start(100);
+			// TODO refactor same code in HgIncomingCommand #getComparator and #getParentHelper
+			final HgChangelog clog = repo.getChangelog();
+			final HgParentChildMap<HgChangelog> parentHelper = new HgParentChildMap<HgChangelog>(clog);
+			parentHelper.init();
+			final RepositoryComparator comparator = new RepositoryComparator(parentHelper, remote);
+			// get incoming revisions
+			comparator.compare(new ProgressSupport.Sub(progress, 50), getCancelSupport(null, true));
+			final List<Nodeid> common = comparator.getCommon();
+			// get bundle with changes from remote
+			HgBundle incoming = remote.getChanges(common);
+			//
+			// add revisions to changelog, manifest, files
+			final Internals implRepo = HgInternals.getImplementationRepo(repo);
+			final AddRevInspector insp;
+			Transaction.Factory trFactory = new COWTransaction.Factory();
+			Transaction tr = trFactory.create(repo);
+			try {
+				incoming.inspectAll(insp = new AddRevInspector(implRepo, tr));
+				tr.commit();
+			} catch (HgRuntimeException ex) {
+				tr.rollback();
+				throw ex;
+			} catch (RuntimeException ex) {
+				tr.rollback();
+				throw ex;
+			}
+			progress.worked(45);
+			RevisionSet added = insp.addedChangesets();
+			
+			// get remote phases, update local phases to match that of remote
+			final PhasesHelper phaseHelper = new PhasesHelper(implRepo, parentHelper);
+			if (phaseHelper.isCapableOfPhases()) {
+				RevisionSet rsCommon = new RevisionSet(common);
+				HgRemoteRepository.Phases remotePhases = remote.getPhases();
+				if (remotePhases.isPublishingServer()) {
+					final RevisionSet knownPublic = rsCommon.union(added);
+					RevisionSet newDraft = phaseHelper.allDraft().subtract(knownPublic);
+					RevisionSet newSecret = phaseHelper.allSecret().subtract(knownPublic);
+					phaseHelper.updateRoots(newDraft.asList(), newSecret.asList());
+				} else {
+					// FIXME refactor reuse from HgPushCommand
+				}
+			}
+			progress.worked(5);
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
+		} finally {
+			progress.done();
+		}
+	}
+}
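
Together with the createPullCommand() hook added to HgRepoFacade further below, a client-side pull is just a few lines. A sketch, not part of the changeset (the remote URL is hypothetical; exception handling omitted):

	HgRepoFacade facade = new HgRepoFacade();
	if (!facade.init(new HgLookup().detectFromWorkingDir())) {
		throw new IllegalStateException("no repository in working directory");
	}
	HgRemoteRepository remote = new HgLookup().detectRemote("http://hg.example.org/repo", facade.getRepository());
	facade.createPullCommand().source(remote).execute();
	// execute() compares the repositories, fetches a bundle of missing changesets,
	// applies it under a COW transaction, then aligns local phases with the remote
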
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/core/HgPushCommand.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.core;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+import org.tmatesoft.hg.internal.BundleGenerator;
+import org.tmatesoft.hg.internal.Internals;
+import org.tmatesoft.hg.internal.PhasesHelper;
+import org.tmatesoft.hg.internal.RepositoryComparator;
+import org.tmatesoft.hg.internal.RevisionSet;
+import org.tmatesoft.hg.repo.HgBookmarks;
+import org.tmatesoft.hg.repo.HgBundle;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgInternals;
+import org.tmatesoft.hg.repo.HgInvalidStateException;
+import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgParentChildMap;
+import org.tmatesoft.hg.repo.HgPhase;
+import org.tmatesoft.hg.repo.HgRemoteRepository;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.CancelledException;
+import org.tmatesoft.hg.util.LogFacility.Severity;
+import org.tmatesoft.hg.util.Outcome;
+import org.tmatesoft.hg.util.Pair;
+import org.tmatesoft.hg.util.ProgressSupport;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class HgPushCommand extends HgAbstractCommand<HgPushCommand> {
+	
+	private final HgRepository repo;
+	private HgRemoteRepository remoteRepo;
+	private RevisionSet outgoing;
+
+	public HgPushCommand(HgRepository hgRepo) {
+		repo = hgRepo;
+	}
+	
+	public HgPushCommand destination(HgRemoteRepository hgRemote) {
+		remoteRepo = hgRemote;
+		return this;
+	}
+
+	public void execute() throws HgRemoteConnectionException, HgIOException, CancelledException, HgLibraryFailureException {
+		final ProgressSupport progress = getProgressSupport(null);
+		try {
+			progress.start(100);
+			//
+			// find out missing
+			// TODO refactor same code in HgOutgoingCommand #getComparator and #getParentHelper
+			final HgChangelog clog = repo.getChangelog();
+			final HgParentChildMap<HgChangelog> parentHelper = new HgParentChildMap<HgChangelog>(clog);
+			parentHelper.init();
+			final Internals implRepo = HgInternals.getImplementationRepo(repo);
+			final PhasesHelper phaseHelper = new PhasesHelper(implRepo, parentHelper);
+			final RepositoryComparator comparator = new RepositoryComparator(parentHelper, remoteRepo);
+			comparator.compare(new ProgressSupport.Sub(progress, 50), getCancelSupport(null, true));
+			List<Nodeid> l = comparator.getLocalOnlyRevisions();
+			if (phaseHelper.isCapableOfPhases() && phaseHelper.withSecretRoots()) {
+				RevisionSet secret = phaseHelper.allSecret();
+				outgoing = new RevisionSet(l).subtract(secret);
+			} else {
+				outgoing = new RevisionSet(l);
+			}
+			//
+			// prepare bundle
+			BundleGenerator bg = new BundleGenerator(implRepo);
+			File bundleFile = bg.create(outgoing.asList());
+			progress.worked(20);
+			HgBundle b = new HgLookup(repo.getSessionContext()).loadBundle(bundleFile);
+			//
+			// send changes
+			remoteRepo.unbundle(b, comparator.getRemoteHeads());
+			progress.worked(20);
+			//
+			// update phase information
+			if (phaseHelper.isCapableOfPhases()) {
+				RevisionSet presentSecret = phaseHelper.allSecret();
+				RevisionSet presentDraft = phaseHelper.allDraft();
+				RevisionSet secretLeft, draftLeft;
+				HgRemoteRepository.Phases remotePhases = remoteRepo.getPhases();
+				RevisionSet remoteDrafts = knownRemoteDrafts(remotePhases, parentHelper, outgoing, presentSecret);
+				if (remotePhases.isPublishingServer()) {
+					// although it's unlikely outgoing would affect secret changesets,
+					// it doesn't hurt to check secret roots along with draft ones
+					secretLeft = presentSecret.subtract(outgoing);
+					draftLeft = presentDraft.subtract(outgoing);
+				} else {
+					// shall merge local and remote phase states
+					// revisions that cease to be secret (going to become Public), e.g. someone else pushed them
+					RevisionSet secretGone = presentSecret.intersect(remoteDrafts);
+					// parents of those remote drafts are public, mark them as public locally, too
+					RevisionSet remotePublic = presentSecret.ancestors(secretGone, parentHelper);
+					secretLeft = presentSecret.subtract(secretGone).subtract(remotePublic);
+					/*
+					 * Revisions grow from left to right (parents to the left, children to the right)
+					 * 
+					 * I: Set of local is subset of remote
+					 * 
+					 *               local draft 
+					 * --o---r---o---l---o--
+					 *       remote draft
+					 * 
+					 * Remote draft roots shall be updated
+					 *
+					 *
+					 * II: Set of local is superset of remote
+					 * 
+					 *       local draft 
+					 * --o---l---o---r---o--
+					 *               remote draft 
+					 *               
+					 * Local draft roots shall be updated
+					 */
+					RevisionSet sharedDraft = presentDraft.intersect(remoteDrafts); // (I: ~presentDraft; II: ~remoteDraft)
+					// XXX do I really need sharedDrafts here? why not ancestors(remoteDrafts)?
+					RevisionSet localDraftRemotePublic = presentDraft.ancestors(sharedDraft, parentHelper); // I: 0; II: those treated public on remote
+					// remoteDrafts are local revisions known as draft@remote
+					// remoteDraftsLocalPublic - revisions that would cease to be listed as draft on remote
+					RevisionSet remoteDraftsLocalPublic = remoteDrafts.ancestors(sharedDraft, parentHelper);
+					RevisionSet remoteDraftsLeft = remoteDrafts.subtract(remoteDraftsLocalPublic);
+					// forget those deemed public by remote (drafts shared by both remote and local are ok to stay)
+					RevisionSet combinedDraft = presentDraft.union(remoteDraftsLeft);
+					draftLeft = combinedDraft.subtract(localDraftRemotePublic);
+				}
+				final RevisionSet newDraftRoots = draftLeft.roots(parentHelper);
+				final RevisionSet newSecretRoots = secretLeft.roots(parentHelper);
+				phaseHelper.updateRoots(newDraftRoots.asList(), newSecretRoots.asList());
+				//
+				// if there's a remote draft root that points to revision we know is public
+				RevisionSet remoteDraftsLocalPublic = remoteDrafts.subtract(draftLeft).subtract(secretLeft);
+				if (!remoteDraftsLocalPublic.isEmpty()) {
+					// for each revision in remoteDraftsLocalPublic.heads(), push Draft->Public
+					for (Nodeid n : remoteDraftsLocalPublic.heads(parentHelper)) {
+						try {
+							Outcome upo = remoteRepo.updatePhase(HgPhase.Draft, HgPhase.Public, n);
+							if (!upo.isOk()) {
+								implRepo.getLog().dump(getClass(), Severity.Info, "Failed to update remote phase, reason: %s", upo.getMessage());
+							}
+						} catch (HgRemoteConnectionException ex) {
+							implRepo.getLog().dump(getClass(), Severity.Error, ex, String.format("Failed to update phase of %s", n.shortNotation()));
+						}
+					}
+				}
+			}
+			progress.worked(5);
+			//
+			// update bookmark information
+			HgBookmarks localBookmarks = repo.getBookmarks();
+			if (!localBookmarks.getAllBookmarks().isEmpty()) {
+				for (Pair<String,Nodeid> bm : remoteRepo.getBookmarks()) {
+					Nodeid localRevision = localBookmarks.getRevision(bm.first());
+					if (localRevision == null || !parentHelper.knownNode(bm.second())) {
+						continue;
+					}
+					// we know both localRevision and revision of remote bookmark,
+					// need to make sure we don't push an older revision than the one at the server
+					if (parentHelper.isChild(bm.second(), localRevision)) {
+						remoteRepo.updateBookmark(bm.first(), bm.second(), localRevision);
+					}
+				}
+			}
+			// XXX WTF is obsolete in namespaces key??
+			progress.worked(5);
+		} catch (IOException ex) {
+			throw new HgIOException(ex.getMessage(), null); // XXX not a nice idea to throw IOException from BundleGenerator#create
+		} catch (HgRepositoryNotFoundException ex) {
+			final HgInvalidStateException e = new HgInvalidStateException("Failed to load a just-created bundle");
+			e.initCause(ex);
+			throw new HgLibraryFailureException(e);
+		} catch (HgRuntimeException ex) {
+			throw new HgLibraryFailureException(ex);
+		} finally {
+			progress.done();
+		}
+	}
+	
+	public Collection<Nodeid> getPushedRevisions() {
+		return outgoing == null ? Collections.<Nodeid>emptyList() : outgoing.asList();
+	}
+	
+	private RevisionSet knownRemoteDrafts(HgRemoteRepository.Phases remotePhases, HgParentChildMap<HgChangelog> parentHelper, RevisionSet outgoing, RevisionSet localSecret) {
+		ArrayList<Nodeid> knownRemoteDraftRoots = new ArrayList<Nodeid>();
+		for (Nodeid rdr : remotePhases.draftRoots()) {
+			if (parentHelper.knownNode(rdr)) {
+				knownRemoteDraftRoots.add(rdr);
+			}
+		}
+		// knownRemoteDraftRoots + childrenOf(knownRemoteDraftRoots) is everything remote may treat as Draft
+		RevisionSet remoteDrafts = new RevisionSet(knownRemoteDraftRoots);
+		RevisionSet localChildren = remoteDrafts.children(parentHelper);
+		// we didn't send any local secret revision
+		localChildren = localChildren.subtract(localSecret);
+		// draft roots are among remote drafts
+		remoteDrafts = remoteDrafts.union(localChildren);
+		// 1) outgoing.children gives all local revisions accessible from outgoing.
+		// 2) outgoing.roots.children is equivalent, with a smaller intermediate set, but the way
+		// we build childrenOf doesn't really benefit from that.
+		RevisionSet localChildrenNotSent = outgoing.children(parentHelper).subtract(outgoing);
+		// remote shall know only what we've sent, subtract revisions we didn't actually send
+		remoteDrafts = remoteDrafts.subtract(localChildrenNotSent);
+		return remoteDrafts;
+	}
+}
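
Against a publishing server the phase update above reduces to plain subtraction; a worked toy example (revisions r2..r5 are hypothetical):

	// before push: presentDraft = {r2, r3, r4}, presentSecret = {r5}, outgoing = {r2, r3}
	// draftLeft  = presentDraft.subtract(outgoing)  == {r4}
	// secretLeft = presentSecret.subtract(outgoing) == {r5}
	// phaseHelper.updateRoots(draftLeft.roots(parentHelper).asList(),
	//                         secretLeft.roots(parentHelper).asList());
	// r2 and r3 are now public on both ends; r4 becomes the sole local draft root
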
--- a/src/org/tmatesoft/hg/core/HgRepoFacade.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgRepoFacade.java	Wed Jul 10 11:53:19 2013 +0200
@@ -165,4 +165,12 @@
 	public HgDiffCommand createDiffCommand() {
 		return new HgDiffCommand(repo);
 	}
+
+	public HgPushCommand createPushCommand() {
+		return new HgPushCommand(repo);
+	}
+	
+	public HgPullCommand createPullCommand() {
+		return new HgPullCommand(repo);
+	}
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/AddRevInspector.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.util.HashMap;
+import java.util.Set;
+
+import org.tmatesoft.hg.core.HgIOException;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgBundle;
+import org.tmatesoft.hg.repo.HgBundle.GroupElement;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.Pair;
+
+/**
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class AddRevInspector implements HgBundle.Inspector {
+	private final Internals repo;
+	private final Transaction tr;
+	private Set<Nodeid> added;
+	private RevlogStreamWriter revlog;
+	private RevMap clogRevs;
+	private RevMap revlogRevs;
+
+	public AddRevInspector(Internals implRepo, Transaction transaction) {
+		repo = implRepo;
+		tr = transaction;
+	}
+
+	public void changelogStart() throws HgRuntimeException {
+		// set up a writer over the changelog stream for the incoming group
+		RevlogStream rs = repo.getImplAccess().getChangelogStream();
+		revlog = new RevlogStreamWriter(repo, rs, tr);
+		revlogRevs = clogRevs = new RevMap(rs);
+	}
+
+	public void changelogEnd() throws HgRuntimeException {
+		revlog = null;
+		revlogRevs = null;
+		added = clogRevs.added();
+	}
+
+	public void manifestStart() throws HgRuntimeException {
+		RevlogStream rs = repo.getImplAccess().getManifestStream();
+		revlog = new RevlogStreamWriter(repo, rs, tr);
+		revlogRevs = new RevMap(rs);
+	}
+
+	public void manifestEnd() throws HgRuntimeException {
+		revlog = null;
+		revlogRevs = null;
+	}
+
+	public void fileStart(String name) throws HgRuntimeException {
+		HgDataFile df = repo.getRepo().getFileNode(name);
+		RevlogStream rs = repo.getImplAccess().getStream(df);
+		revlog = new RevlogStreamWriter(repo, rs, tr);
+		revlogRevs = new RevMap(rs);
+		// FIXME collect new files and update fncache
+	}
+
+	public void fileEnd(String name) throws HgRuntimeException {
+		revlog = null;
+		revlogRevs = null;
+	}
+
+	public boolean element(GroupElement ge) throws HgRuntimeException {
+		assert clogRevs != null;
+		assert revlogRevs != null;
+		try {
+			Pair<Integer, Nodeid> newRev = revlog.addPatchRevision(ge, clogRevs, revlogRevs);
+			revlogRevs.update(newRev.first(), newRev.second());
+			return true;
+		} catch (HgIOException ex) {
+			throw new HgInvalidControlFileException(ex, true);
+		}
+	}
+
+	public RevisionSet addedChangesets() {
+		return new RevisionSet(added);
+	}
+
+	private static class RevMap implements RevlogStreamWriter.RevisionToIndexMap {
+		
+		private final RevlogStream revlog;
+		private HashMap<Nodeid, Integer> added = new HashMap<Nodeid, Integer>();
+
+		public RevMap(RevlogStream revlogStream) {
+			revlog = revlogStream;
+		}
+
+		public int revisionIndex(Nodeid revision) {
+			Integer a = added.get(revision);
+			if (a != null) {
+				return a;
+			}
+			int f = revlog.findRevisionIndex(revision);
+			return f == HgRepository.BAD_REVISION ? HgRepository.NO_REVISION : f;
+		}
+		
+		public void update(Integer revIndex, Nodeid rev) {
+			added.put(rev, revIndex);
+		}
+		
+		Set<Nodeid> added() {
+			return added.keySet();
+		}
+	}
+}
\ No newline at end of file
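
HgBundle.inspectAll() drives this inspector in bundle order: the changelog group, then the manifest group, then one group per file, with element() invoked per revision. A usage sketch mirroring HgPullCommand above (exception handling trimmed):

	Transaction tr = new COWTransaction.Factory().create(repo);
	AddRevInspector insp = new AddRevInspector(implRepo, tr);
	bundle.inspectAll(insp);  // changelogStart/End, manifestStart/End, fileStart/End per file
	tr.commit();
	RevisionSet added = insp.addedChangesets(); // snapshot captured at changelogEnd()
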
--- a/src/org/tmatesoft/hg/internal/ArrayHelper.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/ArrayHelper.java	Wed Jul 10 11:53:19 2013 +0200
@@ -16,40 +16,106 @@
  */
 package org.tmatesoft.hg.internal;
 
+import java.util.Arrays;
+
 /**
  * Internal alternative to Arrays.sort to build reversed index along with sorting
+ * and to perform lookup (binary search) without sorted array, using reversed index.
  * 
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-public class ArrayHelper {
-	private int[] reverse;
+public final class ArrayHelper<T extends Comparable<T>> {
+	private int[] reverse; // aka sorted2natural
+	private final T[] data;
+	private T[] sorted;
+	
+	public ArrayHelper(T[] _data) {
+		assert _data != null;
+		data = _data;
+	}
 
-	@SuppressWarnings("unchecked")
-	public void sort(Comparable<?>[] a) {
-//		Object[] aux = (Object[]) a.clone();
-		reverse = new int[a.length];
-		sort1((Comparable<Object>[])a, 0, a.length);
+	/**
+	 * Sort data this helper wraps, possibly using supplied array (optional)
+	 * to keep sorted elements
+	 * @param sortDest array to keep sorted values at, or <code>null</code>
+	 * @param sortDestIsEmpty <code>false</code> when sortDest already contains a copy of the data to be sorted
+	 * @param keepSorted <code>true</code> to save the sorted array for future use (e.g. in {@link #binarySearchSorted(Comparable)})
+	 */
+	public void sort(T[] sortDest, boolean sortDestIsEmpty, boolean keepSorted) {
+		if (sortDest != null) {
+			assert sortDest.length >= data.length;
+			if (sortDestIsEmpty) {
+				System.arraycopy(data, 0, sortDest, 0, data.length);
+			}
+			sorted = sortDest;
+		} else {
+			sorted = data.clone();
+		}
+		reverse = new int[data.length];
 		for (int i = 0; i < reverse.length; i++) {
-			// element that was not moved don't have an index in reverse.
-			// perhaps, can do it inside sort alg?
-			// Alternatively, may start with filling reverse[] array with initial indexes and
-			// avoid != 0 comparisons in #swap altogether?
-			if (reverse[i] == 0) {
-				reverse[i] = i+1;
-			}
+			// initial reverse indexes, so that elements that do
+			// not move during sort get correct indexes
+			reverse[i] = i;
 		}
+		sort1(0, data.length);
+		if (!keepSorted) {
+			sorted = null;
+		}
+	}
+
+	/**
+	 * @return all reverse indexes
+	 */
+	public int[] getReverseIndexes() {
+		return reverse;
+	}
+	
+	public int getReverseIndex(int sortedIndex) {
+		return reverse[sortedIndex];
+	}
+	
+	public T get(int index) {
+		return data[index];
+	}
+	
+	public T[] getData() {
+		return data;
+	}
+
+	/**
+	 * Look up sorted index of the value, using sort information 
+	 * @return same value as {@link Arrays#binarySearch(Object[], Object)} does
+	 */
+	public int binarySearchSorted(T value) {
+		if (sorted != null) {
+			return Arrays.binarySearch(sorted, 0, data.length, value);
+		}
+		return binarySearchWithReverse(0, data.length, value);
+	}
+
+	/**
+	 * Look up index of the value in the original array.
+	 * @return index in original data, or <code>defaultValue</code> if value not found
+	 */
+	public int binarySearch(T value, int defaultValue) {
+		int x = binarySearchSorted(value);
+		if (x < 0) {
+			return defaultValue;
+		}
+		return reverse[x];
 	}
 
 	/**
 	 * Slightly modified version of Arrays.sort1(int[], int, int) quicksort alg (just to deal with Object[])
 	 */
-    private void sort1(Comparable<Object> x[], int off, int len) {
+    private void sort1(int off, int len) {
+		Comparable<Object>[] x = comparableSorted();
     	// Insertion sort on smallest arrays
     	if (len < 7) {
     	    for (int i=off; i<len+off; i++)
     			for (int j=i; j>off && x[j-1].compareTo(x[j]) > 0; j--)
-    			    swap(x, j, j-1);
+    			    swap(j, j-1);
     	    return;
     	}
 
@@ -60,11 +126,11 @@
     	    int n = off + len - 1;
     	    if (len > 40) {        // Big arrays, pseudomedian of 9
     			int s = len/8;
-	    		l = med3(x, l,     l+s, l+2*s);
-	    		m = med3(x, m-s,   m,   m+s);
-	    		n = med3(x, n-2*s, n-s, n);
+	    		l = med3(l,     l+s, l+2*s);
+	    		m = med3(m-s,   m,   m+s);
+	    		n = med3(n-2*s, n-s, n);
     	    }
-    	    m = med3(x, l, m, n); // Mid-size, med of 3
+    	    m = med3(l, m, n); // Mid-size, med of 3
     	}
     	Comparable<Object> v = x[m];
 
@@ -73,67 +139,94 @@
     	while(true) {
     	    while (b <= c && x[b].compareTo(v) <= 0) {
     			if (x[b] == v)
-    			    swap(x, a++, b);
+    			    swap(a++, b);
     			b++;
     	    }
     	    while (c >= b && x[c].compareTo(v) >= 0) {
     			if (x[c] == v)
-    			    swap(x, c, d--);
+    			    swap(c, d--);
     			c--;
     	    }
     	    if (b > c)
     			break;
-    	    swap(x, b++, c--);
+    	    swap(b++, c--);
     	}
 
     	// Swap partition elements back to middle
     	int s, n = off + len;
-    	s = Math.min(a-off, b-a  );  vecswap(x, off, b-s, s);
-    	s = Math.min(d-c,   n-d-1);  vecswap(x, b,   n-s, s);
+    	s = Math.min(a-off, b-a  );  vecswap(off, b-s, s);
+    	s = Math.min(d-c,   n-d-1);  vecswap(b,   n-s, s);
 
     	// Recursively sort non-partition-elements
     	if ((s = b-a) > 1)
-    	    sort1(x, off, s);
+    	    sort1(off, s);
     	if ((s = d-c) > 1)
-    	    sort1(x, n-s, s);
+    	    sort1(n-s, s);
     }
 
     /**
      * Swaps x[a .. (a+n-1)] with x[b .. (b+n-1)].
      */
-    private void vecswap(Object[] x, int a, int b, int n) {
+    private void vecswap(int a, int b, int n) {
 		for (int i=0; i<n; i++, a++, b++) {
-		    swap(x, a, b);
+		    swap(a, b);
 		}
     }
 
     /**
      * Returns the index of the median of the three indexed integers.
      */
-    private static int med3(Comparable<Object>[] x, int a, int b, int c) {
-	return (x[a].compareTo(x[b]) < 0 ?
-		(x[b].compareTo(x[c]) < 0 ? b : x[a].compareTo(x[c]) < 0 ? c : a) :
-		(x[b].compareTo(x[c]) > 0 ? b : x[a].compareTo(x[c]) > 0 ? c : a));
+    private int med3(int a, int b, int c) {
+		Comparable<Object>[] x = comparableSorted();
+		return (x[a].compareTo(x[b]) < 0 ?
+			(x[b].compareTo(x[c]) < 0 ? b : x[a].compareTo(x[c]) < 0 ? c : a) :
+			(x[b].compareTo(x[c]) > 0 ? b : x[a].compareTo(x[c]) > 0 ? c : a));
+    }
+    
+    private Comparable<Object>[] comparableSorted() {
+    	// Comparable<Object>[] x = (Comparable<Object>[]) sorted
+		// eclipse compiler is ok with the line above, while javac doesn't understand it:
+		// inconvertible types found : T[] required: java.lang.Comparable<java.lang.Object>[]
+    	// so need to add another step
+    	Comparable<?>[] oo = sorted;
+		@SuppressWarnings("unchecked")
+		Comparable<Object>[] x = (Comparable<Object>[]) oo;
+		return x;
     }
 
-
-	/**
-	 * @return the reverse
-	 */
-	public int[] getReverse() {
-		return reverse;
-	}
-
-	/**
+    /**
 	 * Swaps x[a] with x[b].
 	 */
-	private void swap(Object[] x, int a, int b) {
+	private void swap(int a, int b) {
+		Object[] x = sorted;
 		Object t = x[a];
 		x[a] = x[b];
 		x[b] = t;
-		int z1 = reverse[a] != 0 ? reverse[a] : a+1;
-		int z2 = reverse[b] != 0 ? reverse[b] : b+1;
+		int z1 = reverse[a];
+		int z2 = reverse[b];
 		reverse[b] = z1;
 		reverse[a] = z2;
 	}
+
+	// copied from Arrays.binarySearch0, update to be instance method and to use reverse indexes
+	private int binarySearchWithReverse(int fromIndex, int toIndex, T key) {
+		int low = fromIndex;
+		int high = toIndex - 1;
+
+		while (low <= high) {
+			int mid = (low + high) >>> 1;
+			// data[reverse[x]] gives sorted value at index x
+			T midVal = data[reverse[mid]];
+			int cmp = midVal.compareTo(key);
+
+			if (cmp < 0)
+				low = mid + 1;
+			else if (cmp > 0)
+				high = mid - 1;
+			else
+				return mid; // key found
+		}
+		return -(low + 1);  // key not found.
+	}
+
 }
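
The reworked helper sorts a private copy and answers lookups through the reverse index, leaving the caller's array in natural order. A small usage sketch (values arbitrary):

	String[] names = { "delta", "alpha", "charlie" };
	ArrayHelper<String> ah = new ArrayHelper<String>(names);
	ah.sort(null, false, true);       // no destination array; keep the sorted copy for lookups
	ah.binarySearchSorted("charlie"); // 1, index within the sorted view {alpha, charlie, delta}
	ah.binarySearch("charlie", -1);   // 2, index within the original, unsorted array
	// names itself is untouched: { "delta", "alpha", "charlie" }
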
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/BundleGenerator.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
+import static org.tmatesoft.hg.repo.HgRepository.TIP;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.tmatesoft.hg.core.HgIOException;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.DataSerializer.OutputStreamSerializer;
+import org.tmatesoft.hg.internal.Patch.PatchDataSource;
+import org.tmatesoft.hg.repo.HgBundle;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgChangelog.RawChangeset;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgInternals;
+import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgManifest;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRuntimeException;
+
+/**
+ * @see http://mercurial.selenic.com/wiki/BundleFormat
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class BundleGenerator {
+
+	private final Internals repo;
+
+	public BundleGenerator(Internals hgRepo) {
+		repo = hgRepo;
+	}
+	
+	public File create(List<Nodeid> changesets) throws HgIOException, IOException {
+		final HgChangelog clog = repo.getRepo().getChangelog();
+		final HgManifest manifest = repo.getRepo().getManifest();
+		IntVector clogRevsVector = new IntVector(changesets.size(), 0);
+		for (Nodeid n : changesets) {
+			clogRevsVector.add(clog.getRevisionIndex(n));
+		}
+		clogRevsVector.sort(true);
+		final int[] clogRevs = clogRevsVector.toArray();
+		final IntMap<Nodeid> clogMap = new IntMap<Nodeid>(changesets.size());
+		final IntVector manifestRevs = new IntVector(changesets.size(), 0);
+		final List<HgDataFile> files = new ArrayList<HgDataFile>();
+		clog.range(new HgChangelog.Inspector() {
+			private Set<String> seenFiles = new HashSet<String>();
+			public void next(int revisionIndex, Nodeid nodeid, RawChangeset cset) throws HgRuntimeException {
+				clogMap.put(revisionIndex, nodeid);
+				manifestRevs.add(manifest.getRevisionIndex(cset.manifest()));
+				for (String f : cset.files()) {
+					if (seenFiles.contains(f)) {
+						continue;
+					}
+					seenFiles.add(f);
+					HgDataFile df = repo.getRepo().getFileNode(f);
+					files.add(df);
+				}
+			}
+		}, clogRevs);
+		manifestRevs.sort(true);
+		//
+		final File bundleFile = File.createTempFile("hg4j-", ".bundle");
+		final FileOutputStream osBundle = new FileOutputStream(bundleFile);
+		final OutputStreamSerializer outRaw = new OutputStreamSerializer(osBundle);
+		outRaw.write("HG10UN".getBytes(), 0, 6);
+		//
+		RevlogStream clogStream = repo.getImplAccess().getChangelogStream();
+		new ChunkGenerator(outRaw, clogMap).iterate(clogStream, clogRevs);
+		outRaw.writeInt(0); // null chunk for changelog group
+		//
+		RevlogStream manifestStream = repo.getImplAccess().getManifestStream();
+		new ChunkGenerator(outRaw, clogMap).iterate(manifestStream, manifestRevs.toArray(true));
+		outRaw.writeInt(0); // null chunk for manifest group
+		//
+		for (HgDataFile df : sortedByName(files)) {
+			RevlogStream s = repo.getImplAccess().getStream(df);
+			final IntVector fileRevs = new IntVector();
+			s.iterate(0, TIP, false, new RevlogStream.Inspector() {
+				
+				public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) throws HgRuntimeException {
+					if (Arrays.binarySearch(clogRevs, linkRevision) >= 0) {
+						fileRevs.add(revisionIndex);
+					}
+				}
+			});
+			fileRevs.sort(true);
+			if (!fileRevs.isEmpty()) {
+				// although BundleFormat page says "filename length, filename" for a file,
+				// in fact there's a sort of 'filename chunk', i.e. filename length field includes
+				// not only length of filename, but also length of the field itself, i.e. filename.length+sizeof(int)
+				byte[] fnameBytes = df.getPath().toString().getBytes(); // FIXME check encoding in native hg (and fix accordingly in HgBundle)
+				outRaw.writeInt(fnameBytes.length + 4);
+				outRaw.writeByte(fnameBytes);
+				new ChunkGenerator(outRaw, clogMap).iterate(s, fileRevs.toArray(true));
+				outRaw.writeInt(0); // null chunk for file group
+			}
+		}
+		outRaw.writeInt(0); // null chunk to indicate no more files (although BundleFormat page doesn't mention this)
+		outRaw.done();
+		osBundle.flush();
+		osBundle.close();
+		//return new HgBundle(repo.getSessionContext(), repo.getDataAccess(), bundleFile);
+		return bundleFile;
+	}
+	
+	private static Collection<HgDataFile> sortedByName(List<HgDataFile> files) {
+		Collections.sort(files, new Comparator<HgDataFile>() {
+
+			public int compare(HgDataFile o1, HgDataFile o2) {
+				return o1.getPath().compareTo(o2.getPath());
+			}
+		});
+		return files;
+	}
+	
+	
+	public static void main(String[] args) throws Exception {
+		final HgLookup hgLookup = new HgLookup();
+		HgRepository hgRepo = hgLookup.detectFromWorkingDir();
+		BundleGenerator bg = new BundleGenerator(HgInternals.getImplementationRepo(hgRepo));
+		ArrayList<Nodeid> l = new ArrayList<Nodeid>();
+		l.add(Nodeid.fromAscii("9ef1fab9f5e3d51d70941121dc27410e28069c2d")); // 640
+		l.add(Nodeid.fromAscii("2f33f102a8fa59274a27ebbe1c2903cecac6c5d5")); // 639
+		l.add(Nodeid.fromAscii("d074971287478f69ab0a64176ce2284d8c1e91c3")); // 638
+		File bundleFile = bg.create(l);
+		HgBundle b = hgLookup.loadBundle(bundleFile);
+//		Bundle.dump(b); // FIXME dependency from dependent code
+	}
+
+	private static class ChunkGenerator implements RevlogStream.Inspector {
+		
+		private final DataSerializer ds;
+		private final IntMap<Nodeid> parentMap;
+		private final IntMap<Nodeid> clogMap;
+		private byte[] prevContent;
+		private int startParent;
+
+		public ChunkGenerator(DataSerializer dataSerializer, IntMap<Nodeid> clogNodeidMap) {
+			ds = dataSerializer;
+			parentMap = new IntMap<Nodeid>(clogNodeidMap.size());
+			clogMap = clogNodeidMap;
+		}
+		
+		public void iterate(RevlogStream s, int[] revisions) throws HgRuntimeException {
+			int[] p = s.parents(revisions[0], new int[2]);
+			startParent = p[0];
+			int[] revs2read;
+			if (startParent == NO_REVISION) {
+				revs2read = revisions;
+				prevContent = new byte[0];
+			} else {
+				revs2read = new int[revisions.length + 1];
+				revs2read[0] = startParent;
+				System.arraycopy(revisions, 0, revs2read, 1, revisions.length);
+			}
+			// FIXME this is a hack to fill parentMap with
+			// parents of elements that we are not going to meet with regular
+			// iteration, e.g. changes from a different branch (with some older parent),
+			// scenario: two revisions added to two different branches
+			// revisions[10, 11], parents(10) == 9, parents(11) == 7
+			// revs2read == [9,10,11], and parentMap lacks an entry for parent rev7.
+			fillMissingParentsMap(s, revisions);
+			s.iterate(revs2read, true, this);
+		}
+		
+		private void fillMissingParentsMap(RevlogStream s, int[] revisions) throws HgRuntimeException {
+			int[] p = new int[2];
+			for (int i = 1; i < revisions.length; i++) {
+				s.parents(revisions[i], p);
+				if (p[0] != NO_REVISION && Arrays.binarySearch(revisions, p[0]) < 0) {
+					parentMap.put(p[0], Nodeid.fromBinary(s.nodeid(p[0]), 0));
+				}
+				if (p[1] != NO_REVISION && Arrays.binarySearch(revisions, p[1]) < 0) {
+					parentMap.put(p[1], Nodeid.fromBinary(s.nodeid(p[1]), 0));
+				}
+			}
+		}
+		
+		public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) throws HgRuntimeException {
+			try {
+				parentMap.put(revisionIndex, Nodeid.fromBinary(nodeid, 0));
+				byte[] nextContent = data.byteArray();
+				data.done();
+				if (revisionIndex == startParent) {
+					prevContent = nextContent;
+					return;
+				}
+				Patch p = GeneratePatchInspector.delta(prevContent, nextContent);
+				prevContent = nextContent;
+				nextContent = null;
+				PatchDataSource pds = p.new PatchDataSource();
+				int len = pds.serializeLength() + 84;
+				ds.writeInt(len);
+				ds.write(nodeid, 0, Nodeid.SIZE);
+				// TODO assert parents match those in previous group elements
+				if (parent1Revision != NO_REVISION) {
+					ds.writeByte(parentMap.get(parent1Revision).toByteArray());
+				} else {
+					ds.writeByte(Nodeid.NULL.toByteArray());
+				}
+				if (parent2Revision != NO_REVISION) {
+					ds.writeByte(parentMap.get(parent2Revision).toByteArray());
+				} else {
+					ds.writeByte(Nodeid.NULL.toByteArray());
+				}
+				ds.writeByte(clogMap.get(linkRevision).toByteArray());
+				pds.serialize(ds);
+			} catch (IOException ex) {
+				// XXX odd to have an object that throws IOException used where no checked exception is allowed
+				throw new HgInvalidControlFileException(ex.getMessage(), ex, null); 
+			} catch (HgIOException ex) {
+				throw new HgInvalidControlFileException(ex, true); // XXX any way to refactor ChunkGenerator not to get checked exception here?
+			}
+		}
+	}
+}
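
The magic 84 in ChunkGenerator.next() is the fixed per-chunk header of the bundle format written here:

	// chunk length : 4 bytes  (the length field counts itself)
	// revision     : 20 bytes (Nodeid.SIZE)
	// parent 1     : 20 bytes
	// parent 2     : 20 bytes
	// cset link    : 20 bytes
	// => 4 + 4*20 = 84, hence len = pds.serializeLength() + 84, with the delta payload following
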
--- a/src/org/tmatesoft/hg/internal/COWTransaction.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/COWTransaction.java	Wed Jul 10 11:53:19 2013 +0200
@@ -41,7 +41,7 @@
 	private final List<RollbackEntry> entries = new LinkedList<RollbackEntry>();
 	
 	public COWTransaction(SessionContext.Source ctxSource) {
-		fileHelper = new FileUtils(ctxSource.getSessionContext().getLog());
+		fileHelper = new FileUtils(ctxSource.getSessionContext().getLog(), this);
 	}
 
 	@Override
--- a/src/org/tmatesoft/hg/internal/CommitFacility.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/CommitFacility.java	Wed Jul 10 11:53:19 2013 +0200
@@ -42,6 +42,7 @@
 import org.tmatesoft.hg.internal.DataSerializer.DataSource;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgPhase;
 import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Pair;
 import org.tmatesoft.hg.util.Path;
@@ -156,7 +157,7 @@
 				newlyAddedFiles.put(df.getPath(), contentStream);
 			}
 			RevlogStreamWriter fileWriter = new RevlogStreamWriter(repo, contentStream, transaction);
-			Nodeid fileRev = fileWriter.addRevision(bds, clogRevisionIndex, fp.first(), fp.second());
+			Nodeid fileRev = fileWriter.addRevision(bds, clogRevisionIndex, fp.first(), fp.second()).second();
 			newManifestRevision.put(df.getPath(), fileRev);
 			touchInDirstate.add(df.getPath());
 		}
@@ -167,7 +168,7 @@
 			manifestBuilder.add(me.getKey().toString(), me.getValue());
 		}
 		RevlogStreamWriter manifestWriter = new RevlogStreamWriter(repo, repo.getImplAccess().getManifestStream(), transaction);
-		Nodeid manifestRev = manifestWriter.addRevision(manifestBuilder, clogRevisionIndex, manifestParents.first(), manifestParents.second());
+		Nodeid manifestRev = manifestWriter.addRevision(manifestBuilder, clogRevisionIndex, manifestParents.first(), manifestParents.second()).second();
 		//
 		// Changelog
 		final ChangelogEntryBuilder changelogBuilder = new ChangelogEntryBuilder();
@@ -176,7 +177,7 @@
 		changelogBuilder.user(String.valueOf(user));
 		changelogBuilder.manifest(manifestRev).comment(message);
 		RevlogStreamWriter changelogWriter = new RevlogStreamWriter(repo, repo.getImplAccess().getChangelogStream(), transaction);
-		Nodeid changesetRev = changelogWriter.addRevision(changelogBuilder, clogRevisionIndex, p1Commit, p2Commit);
+		Nodeid changesetRev = changelogWriter.addRevision(changelogBuilder, clogRevisionIndex, p1Commit, p2Commit).second();
 		// TODO move fncache update to an external facility, along with dirstate and bookmark update
 		if (!newlyAddedFiles.isEmpty() && repo.fncacheInUse()) {
 			FNCacheFile fncache = new FNCacheFile(repo);
@@ -234,6 +235,9 @@
 		if (p1Commit != NO_REVISION || p2Commit != NO_REVISION) {
 			repo.getRepo().getBookmarks().updateActive(p1Cset, p2Cset, changesetRev);
 		}
+		PhasesHelper phaseHelper = new PhasesHelper(repo);
+		HgPhase newCommitPhase = HgPhase.parse(repo.getRepo().getConfiguration().getStringValue("phases", "new-commit", HgPhase.Draft.mercurialString()));
+		phaseHelper.newCommitNode(changesetRev, newCommitPhase);
 		// TODO Revisit: might be reasonable to send out a "Repo changed" notification, to clear
 		// e.g. cached branch, tags and so on, not to rely on file change detection methods?
 		// The same notification might come useful once Pull is implemented
@@ -254,7 +258,7 @@
 		} catch (IOException ex) {
 			throw new HgIOException("Failed to save last commit message", ex, lastMessage);
 		} finally {
-			new FileUtils(repo.getLog()).closeQuietly(w, lastMessage);
+			new FileUtils(repo.getLog(), this).closeQuietly(w, lastMessage);
 		}
 	}
 /*
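
CommitFacility now honours Mercurial's phases.new-commit setting, defaulting to draft. For instance, a repository whose hgrc carries the following would produce secret commits:

	[phases]
	new-commit = secret
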
--- a/src/org/tmatesoft/hg/internal/DataAccessProvider.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/DataAccessProvider.java	Wed Jul 10 11:53:19 2013 +0200
@@ -244,7 +244,7 @@
 		public void done() {
 			buffer = null;
 			if (fileStream != null) {
-				new FileUtils(logFacility).closeQuietly(fileStream);
+				new FileUtils(logFacility, this).closeQuietly(fileStream);
 				fileStream = null;
 				fileChannel = null; // channel is closed together with stream
 			}
@@ -375,7 +375,7 @@
 		public void done() {
 			buffer = null;
 			if (fileStream != null) {
-				new FileUtils(logFacility).closeQuietly(fileStream);
+				new FileUtils(logFacility, this).closeQuietly(fileStream);
 				fileStream = null;
 				fileChannel = null;
 			}
--- a/src/org/tmatesoft/hg/internal/DataSerializer.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/DataSerializer.java	Wed Jul 10 11:53:19 2013 +0200
@@ -17,6 +17,8 @@
 package org.tmatesoft.hg.internal;
 
 import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
 
 import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.repo.HgRuntimeException;
@@ -74,7 +76,7 @@
 	 * Denotes an entity that wants to/could be serialized
 	 */
 	@Experimental(reason="Work in progress")
-	interface DataSource {
+	public interface DataSource {
 		/**
 		 * Invoked once for a single write operation, 
 		 * although the source itself may get serialized several times
@@ -107,7 +109,10 @@
 		}
 	}
 	
-	public static class ByteArrayDataSerializer extends DataSerializer {
+	/**
+	 * Serialize data to byte array
+	 */
+	public static class ByteArraySerializer extends DataSerializer {
 		private final ByteArrayOutputStream out = new ByteArrayOutputStream();
 
 		@Override
@@ -119,4 +124,26 @@
 			return out.toByteArray();
 		}
 	}
+
+	/**
+	 * Bridge to the world of {@link java.io.OutputStream}.
+	 * Caller instantiates the stream and is responsible to close it as appropriate, 
+	 * {@link #done() DataSerializer.done()} doesn't close the stream. 
+	 */
+	public static class OutputStreamSerializer extends DataSerializer {
+		private final OutputStream out;
+
+		public OutputStreamSerializer(OutputStream outputStream) {
+			out = outputStream;
+		}
+
+		@Override
+		public void write(byte[] data, int offset, int length) throws HgIOException {
+			try {
+				out.write(data, offset, length);
+			} catch (IOException ex) {
+				throw new HgIOException(ex.getMessage(), ex, null);
+			}
+		}
+	}
 }
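
OutputStreamSerializer is the bridge BundleGenerator writes through. A minimal sketch (path hypothetical; exception handling omitted; the caller owns the stream):

	FileOutputStream fos = new FileOutputStream(new File("/tmp/sample.bundle"));
	OutputStreamSerializer out = new OutputStreamSerializer(fos);
	out.write("HG10UN".getBytes(), 0, 6); // uncompressed bundle magic, as in BundleGenerator
	out.writeInt(0);                      // an empty (null) changelog group
	out.done();                           // done() deliberately leaves fos open
	fos.close();
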
--- a/src/org/tmatesoft/hg/internal/FNCacheFile.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/FNCacheFile.java	Wed Jul 10 11:53:19 2013 +0200
@@ -16,6 +16,8 @@
  */
 package org.tmatesoft.hg.internal;
 
+import static org.tmatesoft.hg.repo.HgRepositoryFiles.FNCache;
+
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -78,7 +80,7 @@
 		if (addedDotI.isEmpty() && addedDotD.isEmpty()) {
 			return;
 		}
-		File f = fncacheFile();
+		File f = repo.getRepositoryFile(FNCache);
 		f.getParentFile().mkdirs();
 		final Charset filenameEncoding = repo.getFilenameEncoding();
 		ArrayList<CharBuffer> added = new ArrayList<CharBuffer>();
@@ -112,8 +114,4 @@
 	public void addData(Path p) {
 		addedDotD.add(p);
 	}
-
-	private File fncacheFile() {
-		return repo.getFileFromStoreDir("fncache");
-	}
 }
--- a/src/org/tmatesoft/hg/internal/FileContentSupplier.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/FileContentSupplier.java	Wed Jul 10 11:53:19 2013 +0200
@@ -62,7 +62,7 @@
 		} catch (IOException ex) {
 			throw new HgIOException("Failed to get content of the file", ex, file);
 		} finally {
-			new FileUtils(ctx.getLog()).closeQuietly(fis);
+			new FileUtils(ctx.getLog(), this).closeQuietly(fis);
 		}
 	}
 	
--- a/src/org/tmatesoft/hg/internal/FileUtils.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/FileUtils.java	Wed Jul 10 11:53:19 2013 +0200
@@ -37,13 +37,19 @@
 public final class FileUtils {
 	
 	private final LogFacility log;
+	private final Class<?> troublemaker;
 	
 	public static void copyFile(File from, File to) throws HgIOException {
-		new FileUtils(new StreamLogFacility(Debug, true, System.err)).copy(from, to);
+		new FileUtils(new StreamLogFacility(Debug, true, System.err), FileUtils.class).copy(from, to);
 	}
 
-	public FileUtils(LogFacility logFacility) {
+	public FileUtils(LogFacility logFacility, Object troubleSource) {
 		log = logFacility;
+		if (troubleSource == null) {
+			troublemaker = null;
+		} else {
+			troublemaker = troubleSource instanceof Class ? (Class<?>) troubleSource : troubleSource.getClass();
+		}
 	}
 
 	public void copy(File from, File to) throws HgIOException {
@@ -104,7 +110,7 @@
 				} else {
 					msg = String.format("Failed to close %s", f);
 				}
-				log.dump(getClass(), Severity.Warn, ex, msg);
+				log.dump(troublemaker == null ? getClass() : troublemaker, Severity.Warn, ex, msg);
 			}
 		}
 	}
--- a/src/org/tmatesoft/hg/internal/InflaterDataAccess.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/InflaterDataAccess.java	Wed Jul 10 11:53:19 2013 +0200
@@ -39,22 +39,22 @@
 	private int decompressedLength;
 
 	public InflaterDataAccess(DataAccess dataAccess, long offset, int compressedLength) {
-		this(dataAccess, offset, compressedLength, -1, new Inflater(), new byte[512]);
+		this(dataAccess, offset, compressedLength, -1, new Inflater(), new byte[512], null);
 	}
 
 	public InflaterDataAccess(DataAccess dataAccess, long offset, int compressedLength, int actualLength) {
-		this(dataAccess, offset, compressedLength, actualLength, new Inflater(), new byte[512]);
+		this(dataAccess, offset, compressedLength, actualLength, new Inflater(), new byte[512], null);
 	}
 
-	public InflaterDataAccess(DataAccess dataAccess, long offset, int compressedLength, int actualLength, Inflater inflater, byte[] buf) {
+	public InflaterDataAccess(DataAccess dataAccess, long offset, int compressedLength, int actualLength, Inflater inflater, byte[] inBuf, ByteBuffer outBuf) {
 		super(dataAccess, offset, compressedLength);
-		if (inflater == null || buf == null) {
+		if (inflater == null || inBuf == null) {
 			throw new IllegalArgumentException();
 		}
 		this.inflater = inflater;
 		this.decompressedLength = actualLength;
-		inBuffer = buf;
-		outBuffer = ByteBuffer.allocate(inBuffer.length * 2);
+		inBuffer = inBuf;
+		outBuffer = outBuf == null ? ByteBuffer.allocate(inBuffer.length * 2) : outBuf;
 		outBuffer.limit(0); // there's nothing to read in the buffer 
 	}
 	
--- a/src/org/tmatesoft/hg/internal/IntMap.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/IntMap.java	Wed Jul 10 11:53:19 2013 +0200
@@ -17,6 +17,7 @@
 package org.tmatesoft.hg.internal;
 
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -216,6 +217,13 @@
 		}
 		return map;
 	}
+	
+	public Collection<V> values() {
+		@SuppressWarnings("unchecked")
+		V[] rv = (V[]) new Object[size];
+		System.arraycopy(values, 0, rv, 0, size);
+		return Arrays.<V>asList(rv);
+	}
 
 	// copy of Arrays.binarySearch, with upper search limit as argument
 	private static int binarySearch(int[] a, int high, int key) {
--- a/src/org/tmatesoft/hg/internal/Internals.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/Internals.java	Wed Jul 10 11:53:19 2013 +0200
@@ -142,7 +142,7 @@
 		final PropertyMarshal pm = new PropertyMarshal(ctx);
 		boolean shallCacheRevlogsInRepo = pm.getBoolean(CFG_PROPERTY_REVLOG_STREAM_CACHE, true);
 		streamProvider = new RevlogStreamFactory(this, shallCacheRevlogsInRepo); 
-		shallMergePatches = pm.getBoolean(Internals.CFG_PROPERTY_PATCH_MERGE, false);
+		shallMergePatches = pm.getBoolean(Internals.CFG_PROPERTY_PATCH_MERGE, true);
 	}
 	
 	public boolean isInvalid() {
@@ -150,12 +150,16 @@
 	}
 	
 	public File getRepositoryFile(HgRepositoryFiles f) {
-		return f.residesUnderRepositoryRoot() ? getFileFromRepoDir(f.getName()) : new File(repo.getWorkingDir(), f.getName());
+		switch (f.getHome()) {
+			case Store : return getFileFromStoreDir(f.getName());
+			case Repo : return getFileFromRepoDir(f.getName());
+			default : return new File(repo.getWorkingDir(), f.getName());
+		}
 	}
 
 	/**
 	 * Access files under ".hg/".
-	 * File not necessarily exists, this method is merely a factory for Files at specific, configuration-dependent location. 
+	 * The file does not necessarily exist; this method is merely a factory for {@link File files} at a specific, configuration-dependent location.
 	 * 
 	 * @param name shall be normalized path
 	 */
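
Note: with the switch above, well-known files are resolved by their declared home rather than a two-way test. A sketch, assuming 'internals' refers to this Internals instance (Phaseroots is Store-homed, which the PhasesHelper change below relies on):

    File phaseroots = internals.getRepositoryFile(HgRepositoryFiles.Phaseroots); // resolves under .hg/store/
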
--- a/src/org/tmatesoft/hg/internal/LineReader.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/LineReader.java	Wed Jul 10 11:53:19 2013 +0200
@@ -95,7 +95,14 @@
 			return this;
 		}
 
-		public <T> void read(LineConsumer<T> consumer, T paramObj) throws HgIOException {
+		/**
+		 * 
+		 * @param consumer where to pipe read lines to
+		 * @param paramObj parameterizes consumer
+		 * @return paramObj value for convenience
+	 * @throws HgIOException if there's an {@link IOException} while reading the file
+		 */
+		public <T> T read(LineConsumer<T> consumer, T paramObj) throws HgIOException {
 			BufferedReader statusFileReader = null;
 			try {
 //				consumer.begin(file, paramObj);
@@ -119,10 +126,11 @@
 						ok = consumer.consume(line, paramObj);
 					}
 				}
+				return paramObj;
 			} catch (IOException ex) {
 				throw new HgIOException(ex.getMessage(), ex, file);
 			} finally {
-				new FileUtils(log).closeQuietly(statusFileReader);
+				new FileUtils(log, this).closeQuietly(statusFileReader);
 //				try {
 //					consumer.end(file, paramObj);
 //				} catch (IOException ex) {
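
Note: returning paramObj makes it possible to construct, fill and hand back a collector in a single expression; the PhasesHelper change below uses exactly this shape ('file' and 'log' stand in for the actual arguments):

    Collection<String> lines = new LineReader(file, log)
            .read(new LineReader.SimpleLineCollector(), new LinkedList<String>());
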
--- a/src/org/tmatesoft/hg/internal/PhasesHelper.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/PhasesHelper.java	Wed Jul 10 11:53:19 2013 +0200
@@ -18,22 +18,25 @@
 
 import static org.tmatesoft.hg.repo.HgPhase.Draft;
 import static org.tmatesoft.hg.repo.HgPhase.Secret;
-import static org.tmatesoft.hg.util.LogFacility.Severity.Info;
+import static org.tmatesoft.hg.repo.HgRepositoryFiles.Phaseroots;
 import static org.tmatesoft.hg.util.LogFacility.Severity.Warn;
 
-import java.io.BufferedReader;
 import java.io.File;
-import java.io.FileReader;
+import java.io.FileWriter;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 
 import org.tmatesoft.hg.core.HgChangeset;
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgInvalidStateException;
 import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.repo.HgPhase;
 import org.tmatesoft.hg.repo.HgRepository;
@@ -76,7 +79,10 @@
 		}
 		return repoSupporsPhases.booleanValue();
 	}
-
+	
+	public boolean withSecretRoots() {
+		return !secretPhaseRoots.isEmpty();
+	}
 
 	/**
 	 * @param cset revision to query
@@ -130,21 +136,104 @@
 			}
 		}
 		return HgPhase.Public;
+	}
 
+
+	/**
+	 * @return all revisions with secret phase
+	 */
+	public RevisionSet allSecret() {
+		return allOf(HgPhase.Secret);
+	}
+	
+	/**
+	 * @return all revisions with draft phase
+	 */
+	public RevisionSet allDraft() {
+		return allOf(HgPhase.Draft).subtract(allOf(HgPhase.Secret));
+	}
+	
+	public void updateRoots(Collection<Nodeid> draftRoots, Collection<Nodeid> secretRoots) throws HgInvalidControlFileException {
+		draftPhaseRoots = draftRoots.isEmpty() ? Collections.<Nodeid>emptyList() : new ArrayList<Nodeid>(draftRoots);
+		secretPhaseRoots = secretRoots.isEmpty() ? Collections.<Nodeid>emptyList() : new ArrayList<Nodeid>(secretRoots);
+		String fmt = "%d %s\n";
+		File phaseroots = repo.getRepositoryFile(Phaseroots);
+		FileWriter fw = null;
+		try {
+			fw = new FileWriter(phaseroots);
+			for (Nodeid n : secretPhaseRoots) {
+				fw.write(String.format(fmt, HgPhase.Secret.mercurialOrdinal(), n.toString()));
+			}
+			for (Nodeid n : draftPhaseRoots) {
+				fw.write(String.format(fmt, HgPhase.Draft.mercurialOrdinal(), n.toString()));
+			}
+			fw.flush();
+		} catch (IOException ex) {
+			throw new HgInvalidControlFileException(ex.getMessage(), ex, phaseroots);
+		} finally {
+			new FileUtils(repo.getLog(), this).closeQuietly(fw);
+		}
+	}
+
+	public void newCommitNode(Nodeid newChangeset, HgPhase newCommitPhase) throws HgRuntimeException {
+		final int riCset = repo.getRepo().getChangelog().getRevisionIndex(newChangeset);
+		HgPhase ph = getPhase(riCset, newChangeset);
+		if (ph.compareTo(newCommitPhase) >= 0) {
+			// present phase is already the same or more secret than the desired one
+			return;
+		}
+		// newCommitPhase can't be public here, the condition above would have been satisfied otherwise
+		assert newCommitPhase != HgPhase.Public;
+		// ph is e.g. public when newCommitPhase is draft,
+		// or draft when the desired phase is secret
+		final RevisionSet rs = allOf(newCommitPhase).union(new RevisionSet(Collections.singleton(newChangeset)));
+		final RevisionSet newRoots;
+		if (parentHelper != null) {
+			newRoots = rs.roots(parentHelper);
+		} else {
+			newRoots = rs.roots(repo.getRepo());
+		}
+		if (newCommitPhase == HgPhase.Draft) {
+			updateRoots(newRoots.asList(), secretPhaseRoots);
+		} else if (newCommitPhase == HgPhase.Secret) {
+			updateRoots(draftPhaseRoots, newRoots.asList());
+		} else {
+			throw new HgInvalidStateException(String.format("Unexpected phase %s for new commits", newCommitPhase));
+		}
+	}
+
+	/**
+	 * For a given phase, collect all revisions with a phase that is the same or more private (i.e. for Draft, returns Draft+Secret).
+	 * The reason is not a nice API intention (which is awful, indeed), but ease of implementation.
+	 */
+	private RevisionSet allOf(HgPhase phase) {
+		assert phase != HgPhase.Public;
+		if (!isCapableOfPhases()) {
+			return new RevisionSet(Collections.<Nodeid>emptyList());
+		}
+		final List<Nodeid> roots = getPhaseRoots(phase);
+		if (parentHelper != null) {
+			return new RevisionSet(roots).union(new RevisionSet(parentHelper.childrenOf(roots)));
+		} else {
+			RevisionSet rv = new RevisionSet(Collections.<Nodeid>emptyList());
+			for (RevisionDescendants rd : getPhaseDescendants(phase)) {
+				rv = rv.union(rd.asRevisionSet());
+			}
+			return rv;
+		}
 	}
 
 	private Boolean readRoots() throws HgRuntimeException {
-		File phaseroots = repo.getFileFromStoreDir("phaseroots"); // TODO into HgRepositoryFiles
-		BufferedReader br = null;
+		File phaseroots = repo.getRepositoryFile(Phaseroots);
 		try {
 			if (!phaseroots.exists()) {
 				return Boolean.FALSE;
 			}
+			LineReader lr = new LineReader(phaseroots, repo.getLog());
+			final Collection<String> lines = lr.read(new LineReader.SimpleLineCollector(), new LinkedList<String>());
 			HashMap<HgPhase, List<Nodeid>> phase2roots = new HashMap<HgPhase, List<Nodeid>>();
-			br = new BufferedReader(new FileReader(phaseroots));
-			String line;
-			while ((line = br.readLine()) != null) {
-				String[] lc = line.trim().split("\\s+");
+			for (String line : lines) {
+				String[] lc = line.split("\\s+");
 				if (lc.length == 0) {
 					continue;
 				}
@@ -167,17 +256,8 @@
 			}
 			draftPhaseRoots = phase2roots.containsKey(Draft) ? phase2roots.get(Draft) : Collections.<Nodeid>emptyList();
 			secretPhaseRoots = phase2roots.containsKey(Secret) ? phase2roots.get(Secret) : Collections.<Nodeid>emptyList();
-		} catch (IOException ex) {
-			throw new HgInvalidControlFileException(ex.toString(), ex, phaseroots);
-		} finally {
-			if (br != null) {
-				try {
-					br.close();
-				} catch (IOException ex) {
-					repo.getSessionContext().getLog().dump(getClass(), Info, ex, null);
-					// ignore the exception otherwise 
-				}
-			}
+		} catch (HgIOException ex) {
+			throw new HgInvalidControlFileException(ex, true);
 		}
 		return Boolean.TRUE;
 	}
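
Note: a short sketch of the new phase queries ('phases' is assumed to be an initialized PhasesHelper). Per the allOf() contract above, allDraft() subtracts the secret subset, so the two results never intersect:

    if (phases.isCapableOfPhases()) {
        RevisionSet secret = phases.allSecret();  // allOf(Secret)
        RevisionSet draft = phases.allDraft();    // allOf(Draft) minus allOf(Secret)
        assert draft.intersect(secret).isEmpty();
    }
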
--- a/src/org/tmatesoft/hg/internal/RepositoryComparator.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RepositoryComparator.java	Wed Jul 10 11:53:19 2013 +0200
@@ -38,7 +38,6 @@
 import org.tmatesoft.hg.repo.HgRemoteRepository;
 import org.tmatesoft.hg.repo.HgRemoteRepository.Range;
 import org.tmatesoft.hg.repo.HgRemoteRepository.RemoteBranch;
-import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.CancelSupport;
 import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.ProgressSupport;
@@ -54,6 +53,7 @@
 	private final HgParentChildMap<HgChangelog> localRepo;
 	private final HgRemoteRepository remoteRepo;
 	private List<Nodeid> common;
+	private List<Nodeid> remoteHeads;
 
 	public RepositoryComparator(HgParentChildMap<HgChangelog> pwLocal, HgRemoteRepository hgRemote) {
 		localRepo = pwLocal;
@@ -81,54 +81,43 @@
 		return common;
 	}
 	
+	public List<Nodeid> getRemoteHeads() {
+		assert remoteHeads != null;
+		return remoteHeads;
+	}
+	
 	/**
 	 * @return revisions that are children of common entries, i.e. revisions that are present on the local server and not on remote.
 	 */
 	public List<Nodeid> getLocalOnlyRevisions() {
-		return localRepo.childrenOf(getCommon());
+		final List<Nodeid> c = getCommon();
+		if (c.isEmpty()) {
+			return localRepo.all();
+		} else {
+			final RevisionSet rsCommon = new RevisionSet(c);
+			final RevisionSet localHeads = new RevisionSet(localRepo.heads());
+			final List<Nodeid> commonChildren = localRepo.childrenOf(c);
+			final RevisionSet rsCommonChildren = new RevisionSet(commonChildren);
+			// check if there's any revision in the repository that doesn't trace to common
+			// e.g. branches from one of common ancestors
+			RevisionSet headsNotFromCommon = localHeads.subtract(rsCommonChildren).subtract(rsCommon);
+			if (headsNotFromCommon.isEmpty()) {
+				return commonChildren;
+			}
+			RevisionSet all = new RevisionSet(localRepo.all());
+			// need outgoing := ancestors(missing) - ancestors(common):
+			RevisionSet rsAncestors = all.ancestors(headsNotFromCommon, localRepo);
+			// #ancestors gives only parents, we need terminating children as well
+			rsAncestors = rsAncestors.union(headsNotFromCommon);
+			final RevisionSet rsAncestorsCommon = all.ancestors(rsCommon, localRepo);
+			RevisionSet outgoing = rsAncestors.subtract(rsAncestorsCommon).subtract(rsCommon);
+			// outgoing keeps children that spun off prior to common revisions
+			return outgoing.union(rsCommonChildren).asList();
+		}
 	}
 	
-	/**
-	 * Similar to @link {@link #getLocalOnlyRevisions()}, use this one if you need access to changelog entry content, not 
-	 * only its revision number. 
-	 * @param inspector delegate to analyze changesets, shall not be <code>null</code>
-	 */
-	public void visitLocalOnlyRevisions(HgChangelog.Inspector inspector) throws HgRuntimeException {
-		if (inspector == null) {
-			throw new IllegalArgumentException();
-		}
-		// one can use localRepo.childrenOf(getCommon()) and then iterate over nodeids, but there seems to be
-		// another approach to get all changes after common:
-		// find index of earliest revision, and report all that were later
-		final HgChangelog changelog = localRepo.getRepo().getChangelog();
-		int earliestRevision = Integer.MAX_VALUE;
-		List<Nodeid> commonKnown = getCommon();
-		for (Nodeid n : commonKnown) {
-			if (!localRepo.hasChildren(n)) {
-				// there might be (old) nodes, known both locally and remotely, with no children
-				// hence, we don't need to consider their local revision number
-				continue;
-			}
-			int lr = changelog.getRevisionIndex(n);
-			if (lr < earliestRevision) {
-				earliestRevision = lr;
-			}
-		}
-		if (earliestRevision == Integer.MAX_VALUE) {
-			// either there are no common nodes (known locally and at remote)
-			// or no local children found (local is up to date). In former case, perhaps I shall bit return silently,
-			// but check for possible wrong repo comparison (hs says 'repository is unrelated' if I try to 
-			// check in/out for a repo that has no common nodes.
-			return;
-		}
-		if (earliestRevision < 0 || earliestRevision >= changelog.getLastRevision()) {
-			throw new HgInvalidStateException(String.format("Invalid index of common known revision: %d in total of %d", earliestRevision, 1+changelog.getLastRevision()));
-		}
-		changelog.range(earliestRevision+1, changelog.getLastRevision(), inspector);
-	}
-
 	private List<Nodeid> findCommonWithRemote() throws HgRemoteConnectionException {
-		List<Nodeid> remoteHeads = remoteRepo.heads();
+		remoteHeads = remoteRepo.heads();
 		LinkedList<Nodeid> resultCommon = new LinkedList<Nodeid>(); // these remotes are known in local
 		LinkedList<Nodeid> toQuery = new LinkedList<Nodeid>(); // these need further queries to find common
 		for (Nodeid rh : remoteHeads) {
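
Note: a worked example of the new getLocalOnlyRevisions() logic on a hypothetical graph: local history A-B-C plus a branch A-D, where the remote knows A and B, so common = {B}. Then commonChildren = {C}, localHeads = {C, D}, and headsNotFromCommon = {C, D} minus {C} minus {B} = {D}. Since that is non-empty, the ancestor algebra runs: ancestors({D}) = {A}, united with {D} itself gives {A, D}; ancestors({B}) = {A}; outgoing = {A, D} minus {A} minus {B} = {D}. The final result, {D} union {C} = {C, D}, is exactly what the remote lacks, including the branch line that a plain childrenOf(common) would have missed.
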
--- a/src/org/tmatesoft/hg/internal/RevisionDescendants.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RevisionDescendants.java	Wed Jul 10 11:53:19 2013 +0200
@@ -16,6 +16,7 @@
  */
 package org.tmatesoft.hg.internal;
 
+import java.util.ArrayList;
 import java.util.BitSet;
 
 import org.tmatesoft.hg.core.Nodeid;
@@ -37,6 +38,7 @@
 	private final int rootRevIndex;
 	private final int tipRevIndex; // this is the last revision we cache to
 	private final BitSet descendants;
+	private RevisionSet revset;
 
 	// in fact, may be refactored to deal not only with changelog, but any revlog (not sure what would be the usecase, though)
 	public RevisionDescendants(HgRepository hgRepo, int revisionIndex) throws HgRuntimeException {
@@ -108,4 +110,21 @@
 		assert ix < descendants.size();
 		return descendants.get(ix);
 	}
+
+	public RevisionSet asRevisionSet() {
+		if (revset == null) {
+			final ArrayList<Nodeid> revisions = new ArrayList<Nodeid>(descendants.cardinality());
+			repo.getChangelog().indexWalk(rootRevIndex, tipRevIndex, new HgChangelog.RevisionInspector() {
+
+				public void next(int revisionIndex, Nodeid revision, int linkedRevisionIndex) throws HgRuntimeException {
+					if (isDescendant(revisionIndex)) {
+						revisions.add(revision);
+					}
+				}
+			});
+			assert revisions.size() == descendants.cardinality();
+			revset = new RevisionSet(revisions);
+		}
+		return revset;
+	}
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/RevisionSet.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgParentChildMap;
+import org.tmatesoft.hg.repo.HgRepository;
+
+/**
+ * Unmodifiable collection of revisions with handy set operations
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class RevisionSet implements Iterable<Nodeid> {
+	
+	private final Set<Nodeid> elements;
+	
+	public RevisionSet(Nodeid... revisions) {
+		this(revisions == null ? null : Arrays.asList(revisions));
+	}
+	
+	public RevisionSet(Collection<Nodeid> revisions) {
+		this(revisions == null ? new HashSet<Nodeid>() : new HashSet<Nodeid>(revisions));
+	}
+	
+	private RevisionSet(HashSet<Nodeid> revisions) {
+		if (revisions.isEmpty()) {
+			elements = Collections.<Nodeid>emptySet();
+		} else {
+			elements = revisions;
+		}
+	}
+
+	/**
+	 * Elements of the set that have no parents, or whose parents are not from the same set
+	 */
+	public RevisionSet roots(HgParentChildMap<HgChangelog> ph) {
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		for (Nodeid n : elements) {
+			assert ph.knownNode(n);
+			Nodeid p1 = ph.firstParent(n);
+			if (p1 != null && elements.contains(p1)) {
+				copy.remove(n);
+				continue;
+			}
+			Nodeid p2 = ph.secondParent(n);
+			if (p2 != null && elements.contains(p2)) {
+				copy.remove(n);
+				continue;
+			}
+		}
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+	
+	/**
+	 * Same as {@link #roots(HgParentChildMap)}, but doesn't require a parent-child map
+	 */
+	public RevisionSet roots(HgRepository repo) {
+		// TODO introduce parent access interface, use it here, provide implementations 
+		// that delegate to HgParentChildMap or HgRepository
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		final HgChangelog clog = repo.getChangelog();
+		byte[] parent1 = new byte[Nodeid.SIZE], parent2 = new byte[Nodeid.SIZE];
+		int[] parentRevs = new int[2];
+		for (Nodeid n : elements) {
+			assert clog.isKnown(n);
+			clog.parents(clog.getRevisionIndex(n), parentRevs, parent1, parent2);
+			if (parentRevs[0] != NO_REVISION && elements.contains(new Nodeid(parent1, false))) {
+				copy.remove(n);
+				continue;
+			}
+			if (parentRevs[1] != NO_REVISION && elements.contains(new Nodeid(parent2, false))) {
+				copy.remove(n);
+				continue;
+			}
+		}
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+	
+	/**
+	 * Elements of the set that have no children in this set
+	 */
+	public RevisionSet heads(HgParentChildMap<HgChangelog> ph) {
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		// can't do copy.removeAll(ph.childrenOf(asList())); as actual heads are indeed children of some other node
+		for (Nodeid n : elements) {
+			assert ph.knownNode(n);
+			Nodeid p1 = ph.firstParent(n);
+			Nodeid p2 = ph.secondParent(n);
+			if (p1 != null && elements.contains(p1)) {
+				copy.remove(p1);
+			}
+			if (p2 != null && elements.contains(p2)) {
+				copy.remove(p2);
+			}
+		}
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+
+	/**
+	 * Any ancestor of an element from the supplied child set found in this one. 
+	 * Elements of the supplied child set are not part of the return value.
+	 */
+	public RevisionSet ancestors(RevisionSet children, HgParentChildMap<HgChangelog> parentHelper) {
+		if (isEmpty()) {
+			return this;
+		}
+		if (children.isEmpty()) {
+			return children;
+		}
+		RevisionSet chRoots = children.roots(parentHelper);
+		HashSet<Nodeid> ancestors = new HashSet<Nodeid>();
+		Set<Nodeid> childrenToCheck = chRoots.elements;
+		while (!childrenToCheck.isEmpty()) {
+			HashSet<Nodeid> nextRound = new HashSet<Nodeid>();
+			for (Nodeid n : childrenToCheck) {
+				Nodeid p1 = parentHelper.firstParent(n);
+				Nodeid p2 = parentHelper.secondParent(n);
+				if (p1 != null && elements.contains(p1)) {
+					nextRound.add(p1);
+				}
+				if (p2 != null && elements.contains(p2)) {
+					nextRound.add(p2);
+				}
+			}
+			ancestors.addAll(nextRound);
+			childrenToCheck = nextRound;
+		} 
+		return new RevisionSet(ancestors);
+	}
+	
+	/**
+	 * Revisions that are both direct and indirect children of elements of this revision set
+	 * as known in supplied parent-child map
+	 */
+	public RevisionSet children(HgParentChildMap<HgChangelog> parentHelper) {
+		if (isEmpty()) {
+			return this;
+		}
+		List<Nodeid> children = parentHelper.childrenOf(elements);
+		return new RevisionSet(new HashSet<Nodeid>(children));
+	}
+
+	public RevisionSet intersect(RevisionSet other) {
+		if (isEmpty()) {
+			return this;
+		}
+		if (other.isEmpty()) {
+			return other;
+		}
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		copy.retainAll(other.elements);
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+	
+	public RevisionSet subtract(RevisionSet other) {
+		if (isEmpty() || other.isEmpty()) {
+			return this;
+		}
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		copy.removeAll(other.elements);
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+
+	public RevisionSet union(RevisionSet other) {
+		if (isEmpty()) {
+			return other;
+		}
+		if (other.isEmpty()) {
+			return this;
+		}
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		copy.addAll(other.elements);
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+
+	/**
+	 * A ^ B := (A\B).union(B\A)
+	 * A ^ B := A.union(B) \ A.intersect(B)
+	 */
+	public RevisionSet symmetricDifference(RevisionSet other) {
+		if (isEmpty()) {
+			return this;
+		}
+		if (other.isEmpty()) {
+			return other;
+		}
+		HashSet<Nodeid> copyA = new HashSet<Nodeid>(elements);
+		HashSet<Nodeid> copyB = new HashSet<Nodeid>(other.elements);
+		copyA.removeAll(other.elements);
+		copyB.removeAll(elements);
+		copyA.addAll(copyB);
+		return new RevisionSet(copyA);
+	}
+
+	public boolean isEmpty() {
+		return elements.isEmpty();
+	}
+
+	public int size() {
+		return elements.size();
+	}
+
+	public List<Nodeid> asList() {
+		return new ArrayList<Nodeid>(elements);
+	}
+	
+	public Iterator<Nodeid> iterator() {
+		return elements.iterator();
+	}
+	
+	@Override
+	public String toString() {
+		StringBuilder sb = new StringBuilder();
+		sb.append('<');
+		if (!isEmpty()) {
+			sb.append(elements.size());
+			sb.append(':');
+		}
+		for (Nodeid n : elements) {
+			sb.append(n.shortNotation());
+			sb.append(',');
+		}
+		if (sb.length() > 1) {
+			sb.setCharAt(sb.length() - 1, '>');
+		} else {
+			sb.append('>');
+		}
+		return sb.toString();
+	}
+	
+	@Override
+	public boolean equals(Object obj) {
+		if (false == obj instanceof RevisionSet) {
+			return false;
+		}
+		return elements.equals(((RevisionSet) obj).elements);
+	}
+	
+	@Override
+	public int hashCode() {
+		return elements.hashCode();
+	}
+}
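
Note: a quick sketch of the set algebra this class provides (n1..n4 are assumed to be distinct Nodeid instances):

    RevisionSet a = new RevisionSet(n1, n2, n3);
    RevisionSet b = new RevisionSet(n2, n3, n4);
    a.union(b);                // {n1, n2, n3, n4}
    a.intersect(b);            // {n2, n3}
    a.subtract(b);             // {n1}
    a.symmetricDifference(b);  // {n1, n4}, i.e. union minus intersection
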
--- a/src/org/tmatesoft/hg/internal/RevlogStream.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RevlogStream.java	Wed Jul 10 11:53:19 2013 +0200
@@ -17,6 +17,7 @@
 package org.tmatesoft.hg.internal;
 
 import static org.tmatesoft.hg.repo.HgRepository.BAD_REVISION;
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
 import static org.tmatesoft.hg.repo.HgRepository.TIP;
 import static org.tmatesoft.hg.internal.Internals.REVLOGV1_RECORD_SIZE;
 
@@ -25,6 +26,7 @@
 import java.lang.ref.Reference;
 import java.lang.ref.ReferenceQueue;
 import java.lang.ref.SoftReference;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.zip.Inflater;
@@ -236,6 +238,34 @@
 		return getBaseRevision(revisionIndex);
 	}
 	
+	/**
+	 * Read indexes of parent revisions
+	 * @param revisionIndex index of child revision
+	 * @param parents array to hold return value, length >= 2
+	 * @return value of <code>parents</code> parameter for convenience
+	 * @throws HgInvalidControlFileException if attempt to read index file failed
+	 * @throws HgInvalidRevisionException if revisionIndex argument doesn't represent a valid record in the revlog
+	 */
+	public int[] parents(int revisionIndex, int[] parents) throws HgInvalidControlFileException, HgInvalidRevisionException {
+		assert parents.length > 1;
+		revisionIndex = checkRevisionIndex(revisionIndex);
+		DataAccess daIndex = getIndexStream(true);
+		try {
+			int recordOffset = getIndexOffsetInt(revisionIndex);
+			daIndex.seek(recordOffset + 24);
+			int p1 = daIndex.readInt();
+			int p2 = daIndex.readInt();
+			// although NO_REVISION == -1, it doesn't hurt to ensure this
+			parents[0] = p1 == -1 ? NO_REVISION : p1;
+			parents[1] = p2 == -1 ? NO_REVISION : p2;
+			return parents;
+		} catch (IOException ex) {
+			throw new HgInvalidControlFileException("Parents lookup failed", ex, indexFile).setRevisionIndex(revisionIndex);
+		} finally {
+			daIndex.done();
+		}
+	}
+	
 	// Perhaps, RevlogStream should be limited to use of plain int revisions for access,
 	// while Nodeids should be kept on the level up, in Revlog. Guess, Revlog better keep
 	// map of nodeids, and once this comes true, we may get rid of this method.
@@ -603,6 +633,7 @@
 		private final Inflater inflater = new Inflater();
 		// can share buffer between instances of InflaterDataAccess as I never read any two of them in parallel
 		private final byte[] inflaterBuffer = new byte[10 * 1024]; // TODO [post-1.1] consider using DAP.DEFAULT_FILE_BUFFER
+		private final ByteBuffer inflaterOutBuffer = ByteBuffer.allocate(inflaterBuffer.length * 2);
 		private final byte[] nodeidBuf = new byte[20];
 		// revlog record fields
 		private long offset;
@@ -712,7 +743,7 @@
 				final byte firstByte = streamDataAccess.readByte();
 				if (firstByte == 0x78 /* 'x' */) {
 					inflater.reset();
-					userDataAccess = new InflaterDataAccess(streamDataAccess, streamOffset, compressedLen, isPatch(i) ? -1 : actualLen, inflater, inflaterBuffer);
+					userDataAccess = new InflaterDataAccess(streamDataAccess, streamOffset, compressedLen, isPatch(i) ? -1 : actualLen, inflater, inflaterBuffer, inflaterOutBuffer);
 				} else if (firstByte == 0x75 /* 'u' */) {
 					userDataAccess = new FilterDataAccess(streamDataAccess, streamOffset+1, compressedLen-1);
 				} else {
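
Note: two sides of this change in brief. The shared inflaterOutBuffer, handed to every InflaterDataAccess together with the reused Inflater and input buffer, drops one ByteBuffer allocation per record (the inflater.reset() before each construction is what keeps the reuse safe). And the new parents() accessor fills a caller-supplied array; a sketch:

    int[] p = revlogStream.parents(revIndex, new int[2]);
    boolean isRoot = p[0] == NO_REVISION && p[1] == NO_REVISION; // both slots use NO_REVISION for an absent parent
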
--- a/src/org/tmatesoft/hg/internal/RevlogStreamWriter.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RevlogStreamWriter.java	Wed Jul 10 11:53:19 2013 +0200
@@ -25,13 +25,16 @@
 import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.core.SessionContext;
-import org.tmatesoft.hg.internal.DataSerializer.ByteArrayDataSerializer;
 import org.tmatesoft.hg.internal.DataSerializer.ByteArrayDataSource;
+import org.tmatesoft.hg.internal.DataSerializer.ByteArraySerializer;
 import org.tmatesoft.hg.internal.DataSerializer.DataSource;
+import org.tmatesoft.hg.repo.HgBundle.GroupElement;
 import org.tmatesoft.hg.repo.HgInvalidControlFileException;
 import org.tmatesoft.hg.repo.HgInvalidRevisionException;
 import org.tmatesoft.hg.repo.HgInvalidStateException;
+import org.tmatesoft.hg.repo.HgRepository;
 import org.tmatesoft.hg.repo.HgRuntimeException;
+import org.tmatesoft.hg.util.Pair;
 
 /**
  * 
@@ -45,8 +48,10 @@
 	private final DigestHelper dh = new DigestHelper();
 	private final RevlogCompressor revlogDataZip;
 	private final Transaction transaction;
-	private int lastEntryBase, lastEntryIndex;
-	private byte[] lastEntryContent;
+	private int lastEntryBase, lastEntryIndex, lastEntryActualLen;
+	// record revision and its full content
+	// the name might be misleading, it does not necessarily match lastEntryIndex
+	private Pair<Integer, byte[]> lastFullContent;
 	private Nodeid lastEntryRevision;
 	private IntMap<Nodeid> revisionCache = new IntMap<Nodeid>(32);
 	private RevlogStream revlogStream;
@@ -61,22 +66,98 @@
 		transaction = tr;
 	}
 	
+	public Pair<Integer,Nodeid> addPatchRevision(GroupElement ge, RevisionToIndexMap clogRevs, RevisionToIndexMap revlogRevs) throws HgIOException, HgRuntimeException {
+		populateLastEntryIndex();
+		//
+		final Nodeid nodeRev = ge.node();
+		final Nodeid csetRev = ge.cset();
+		int linkRev;
+		if (nodeRev.equals(csetRev)) {
+			linkRev = lastEntryIndex+1;
+		} else {
+			linkRev = clogRevs.revisionIndex(csetRev);
+		}
+		assert linkRev >= 0;
+		final Nodeid p1Rev = ge.firstParent();
+		int p1 = p1Rev.isNull() ? NO_REVISION : revlogRevs.revisionIndex(p1Rev);
+		final Nodeid p2Rev = ge.secondParent();
+		int p2 = p2Rev.isNull() ? NO_REVISION : revlogRevs.revisionIndex(p2Rev);
+		Patch p = new Patch();
+		final byte[] patchBytes;
+		try {
+			// XXX there's ge.rawData(), to avoid extra array wrap
+			patchBytes = ge.rawDataByteArray();
+			p.read(new ByteArrayDataAccess(patchBytes));
+		} catch (IOException ex) {
+			throw new HgIOException("Failed to read patch information", ex, null);
+		}
+		//
+		final Nodeid patchBase = ge.patchBase();
+		int patchBaseRev = patchBase.isNull() ? NO_REVISION : revlogRevs.revisionIndex(patchBase);
+		int baseRev = lastEntryIndex == NO_REVISION ? 0 : revlogStream.baseRevision(patchBaseRev);
+		int revLen;
+		DataSource ds;
+		byte[] complete = null;
+		if (patchBaseRev == lastEntryIndex && lastEntryIndex != NO_REVISION) {
+			// we may write patch from GroupElement as is
+			int patchBaseLen = dataLength(patchBaseRev);
+			revLen = patchBaseLen + p.patchSizeDelta();
+			ds = new ByteArrayDataSource(patchBytes);
+		} else {
+			// read baseRev, unless it's a pull into an empty repository
+			try {
+				if (lastEntryIndex == NO_REVISION) {
+					complete = p.apply(new ByteArrayDataAccess(new byte[0]), -1);
+					baseRev = 0; // it's done above, but doesn't hurt
+				} else {
+					ReadContentInspector insp = new ReadContentInspector().read(revlogStream, baseRev);
+					complete = p.apply(new ByteArrayDataAccess(insp.content), -1);
+					baseRev = lastEntryIndex + 1;
+				}
+				ds = new ByteArrayDataSource(complete);
+				revLen = complete.length;
+			} catch (IOException ex) {
+				// unlikely to happen, as ByteArrayDataAccess doesn't throw IOException
+				throw new HgIOException("Failed to reconstruct revision", ex, null);
+			}
+		}
+		doAdd(nodeRev, p1, p2, linkRev, baseRev, revLen, ds);
+		if (complete != null) {
+			lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, complete);
+		}
+		return new Pair<Integer, Nodeid>(lastEntryIndex, lastEntryRevision);
+	}
+	
 	/**
 	 * @return nodeid of added revision
 	 * @throws HgRuntimeException 
 	 */
-	public Nodeid addRevision(DataSource content, int linkRevision, int p1, int p2) throws HgIOException, HgRuntimeException {
-		lastEntryRevision = Nodeid.NULL;
-		int revCount = revlogStream.revisionCount();
-		lastEntryIndex = revCount == 0 ? NO_REVISION : revCount - 1;
-		populateLastEntry();
+	public Pair<Integer,Nodeid> addRevision(DataSource content, int linkRevision, int p1, int p2) throws HgIOException, HgRuntimeException {
+		populateLastEntryIndex();
+		populateLastEntryContent();
 		//
 		byte[] contentByteArray = toByteArray(content);
-		Patch patch = GeneratePatchInspector.delta(lastEntryContent, contentByteArray);
+		Patch patch = GeneratePatchInspector.delta(lastFullContent.second(), contentByteArray);
 		int patchSerializedLength = patch.serializedLength();
 		
 		final boolean writeComplete = preferCompleteOverPatch(patchSerializedLength, contentByteArray.length);
 		DataSerializer.DataSource dataSource = writeComplete ? new ByteArrayDataSource(contentByteArray) : patch.new PatchDataSource();
+		//
+		Nodeid p1Rev = revision(p1);
+		Nodeid p2Rev = revision(p2);
+		Nodeid newRev = Nodeid.fromBinary(dh.sha1(p1Rev, p2Rev, contentByteArray).asBinary(), 0);
+		doAdd(newRev, p1, p2, linkRevision, writeComplete ? lastEntryIndex+1 : lastEntryBase, contentByteArray.length, dataSource);
+		lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, contentByteArray);
+		return new Pair<Integer, Nodeid>(lastEntryIndex, lastEntryRevision);
+	}
+
+	private Nodeid doAdd(Nodeid rev, int p1, int p2, int linkRevision, int baseRevision, int revLen, DataSerializer.DataSource dataSource) throws HgIOException, HgRuntimeException  {
+		assert linkRevision >= 0;
+		assert baseRevision >= 0;
+		assert p1 == NO_REVISION || p1 >= 0;
+		assert p2 == NO_REVISION || p2 >= 0;
+		assert !rev.isNull();
+		assert revLen >= 0;
 		revlogDataZip.reset(dataSource);
 		final int compressedLen;
 		final boolean useCompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), dataSource.serializeLength());
@@ -87,11 +168,6 @@
 			compressedLen = dataSource.serializeLength() + 1 /*1 byte for 'u' - uncompressed prefix byte*/;
 		}
 		//
-		Nodeid p1Rev = revision(p1);
-		Nodeid p2Rev = revision(p2);
-		byte[] revisionNodeidBytes = dh.sha1(p1Rev, p2Rev, contentByteArray).asBinary();
-		//
-
 		DataSerializer indexFile, dataFile;
 		indexFile = dataFile = null;
 		try {
@@ -99,11 +175,11 @@
 			indexFile = revlogStream.getIndexStreamWriter(transaction);
 			final boolean isInlineData = revlogStream.isInlineData();
 			HeaderWriter revlogHeader = new HeaderWriter(isInlineData);
-			revlogHeader.length(contentByteArray.length, compressedLen);
-			revlogHeader.nodeid(revisionNodeidBytes);
+			revlogHeader.length(revLen, compressedLen);
+			revlogHeader.nodeid(rev.toByteArray());
 			revlogHeader.linkRevision(linkRevision);
 			revlogHeader.parents(p1, p2);
-			revlogHeader.baseRevision(writeComplete ? lastEntryIndex+1 : lastEntryBase);
+			revlogHeader.baseRevision(baseRevision);
 			long lastEntryOffset = revlogStream.newEntryOffset();
 			revlogHeader.offset(lastEntryOffset);
 			//
@@ -124,11 +200,10 @@
 				dataSource.serialize(dataFile);
 			}
 			
-			
-			lastEntryContent = contentByteArray;
 			lastEntryBase = revlogHeader.baseRevision();
 			lastEntryIndex++;
-			lastEntryRevision = Nodeid.fromBinary(revisionNodeidBytes, 0);
+			lastEntryActualLen = revLen;
+			lastEntryRevision = rev;
 			revisionCache.put(lastEntryIndex, lastEntryRevision);
 
 			revlogStream.revisionAdded(lastEntryIndex, lastEntryRevision, lastEntryBase, lastEntryOffset);
@@ -142,7 +217,7 @@
 	}
 	
 	private byte[] toByteArray(DataSource content) throws HgIOException, HgRuntimeException {
-		ByteArrayDataSerializer ba = new ByteArrayDataSerializer();
+		ByteArraySerializer ba = new ByteArraySerializer();
 		content.serialize(ba);
 		return ba.toByteArray();
 	}
@@ -159,32 +234,38 @@
 		return n;
 	}
 	
-	private void populateLastEntry() throws HgRuntimeException {
-		if (lastEntryContent != null) {
+	private int dataLength(int revisionIndex) throws HgInvalidControlFileException, HgInvalidRevisionException {
+		assert revisionIndex >= 0;
+		if (revisionIndex == lastEntryIndex) {
+			return lastEntryActualLen;
+		}
+		if (lastFullContent != null && lastFullContent.first() == revisionIndex) {
+			return lastFullContent.second().length;
+		}
+		return revlogStream.dataLength(revisionIndex);
+	}
+	
+	private void populateLastEntryIndex() throws HgRuntimeException {
+		int revCount = revlogStream.revisionCount();
+		lastEntryIndex = revCount == 0 ? NO_REVISION : revCount - 1;
+	}
+	
+	private void populateLastEntryContent() throws HgRuntimeException {
+		if (lastFullContent != null && lastFullContent.first() == lastEntryIndex) {
+			// we have last entry cached
 			return;
 		}
+		lastEntryRevision = Nodeid.NULL;
 		if (lastEntryIndex != NO_REVISION) {
-			assert lastEntryIndex >= 0;
-			final IOException[] failure = new IOException[1];
-			revlogStream.iterate(lastEntryIndex, lastEntryIndex, true, new RevlogStream.Inspector() {
-				
-				public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
-					try {
-						lastEntryBase = baseRevision;
-						lastEntryRevision = Nodeid.fromBinary(nodeid, 0);
-						lastEntryContent = data.byteArray();
-					} catch (IOException ex) {
-						failure[0] = ex;
-					}
-				}
-			});
-			if (failure[0] != null) {
-				String m = String.format("Failed to get content of most recent revision %d", lastEntryIndex);
-				throw revlogStream.initWithDataFile(new HgInvalidControlFileException(m, failure[0], null));
-			}
+			ReadContentInspector insp = new ReadContentInspector().read(revlogStream, lastEntryIndex);
+			lastEntryBase = insp.baseRev;
+			lastEntryRevision = insp.rev;
+			lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, insp.content);
 		} else {
-			lastEntryContent = new byte[0];
+			lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, new byte[0]);
 		}
+		assert lastFullContent.first() == lastEntryIndex;
+		assert lastFullContent.second() != null;
 	}
 	
 	public static boolean preferCompleteOverPatch(int patchLength, int fullContentLength) {
@@ -290,4 +371,40 @@
 			return header.capacity();
 		}
 	}
-}
+	
+	// XXX part of HgRevisionMap contract, need public counterparts (along with IndexToRevisionMap)
+	public interface RevisionToIndexMap {
+		
+		/**
+		 * @return {@link HgRepository#NO_REVISION} if unknown revision
+		 */
+		int revisionIndex(Nodeid revision);
+	}
+
+	private static class ReadContentInspector implements RevlogStream.Inspector {
+		public int baseRev;
+		public Nodeid rev;
+		public byte[] content;
+		private IOException failure;
+		
+		public ReadContentInspector read(RevlogStream rs, int revIndex) throws HgInvalidControlFileException {
+			assert revIndex >= 0;
+			rs.iterate(revIndex, revIndex, true, this);
+			if (failure != null) {
+				String m = String.format("Failed to get content of revision %d", revIndex);
+				throw rs.initWithDataFile(new HgInvalidControlFileException(m, failure, null));
+			}
+			return this;
+		}
+		
+		public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
+			try {
+				baseRev = baseRevision;
+				rev = Nodeid.fromBinary(nodeid, 0);
+				content = data.byteArray();
+			} catch (IOException ex) {
+				failure = ex;
+			}
+		}
+	}
+}
\ No newline at end of file
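
Note: both add methods now report what was actually written. A sketch, with 'writer' an initialized RevlogStreamWriter and the arguments as per the signatures above:

    Pair<Integer, Nodeid> added = writer.addRevision(content, linkRev, p1, p2);
    int newIndex = added.first();         // index of the new entry in this revlog
    Nodeid newRevision = added.second();  // nodeid computed from parents and content
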
--- a/src/org/tmatesoft/hg/repo/HgBranches.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgBranches.java	Wed Jul 10 11:53:19 2013 +0200
@@ -118,15 +118,16 @@
 			repo.getSessionContext().getLog().dump(getClass(), Error, ex, null);
 			// FALL THROUGH
 		} finally {
-			new FileUtils(repo.getSessionContext().getLog()).closeQuietly(br);
+			new FileUtils(repo.getSessionContext().getLog(), this).closeQuietly(br);
 		}
 		return -1; // deliberately not lastInCache, to avoid anything but -1 when 1st line was read and there's error is in lines 2..end
 	}
-
+	
 	void collect(final ProgressSupport ps) throws HgRuntimeException {
 		branches.clear();
 		final HgRepository repo = internalRepo.getRepo();
 		final HgChangelog clog = repo.getChangelog();
+		final HgRevisionMap<HgChangelog> rmap;
 		ps.start(1 + clog.getRevisionCount() * 2);
 		//
 		int lastCached = readCache();
@@ -195,8 +196,10 @@
 				}
 				branches.put(bn, bi);
 			}
+			rmap = pw.getRevisionMap();
+		} else { // !cacheActual
+			rmap = new HgRevisionMap<HgChangelog>(clog).init(); 
 		}
-		final HgRevisionMap<HgChangelog> rmap = new HgRevisionMap<HgChangelog>(clog).init();
 		for (BranchInfo bi : branches.values()) {
 			bi.validate(clog, rmap);
 		}
--- a/src/org/tmatesoft/hg/repo/HgBundle.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgBundle.java	Wed Jul 10 11:53:19 2013 +0200
@@ -17,9 +17,11 @@
 package org.tmatesoft.hg.repo;
 
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.util.ConcurrentModificationException;
 
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.core.SessionContext;
 import org.tmatesoft.hg.internal.ByteArrayChannel;
@@ -27,8 +29,10 @@
 import org.tmatesoft.hg.internal.Callback;
 import org.tmatesoft.hg.internal.DataAccess;
 import org.tmatesoft.hg.internal.DataAccessProvider;
+import org.tmatesoft.hg.internal.DataSerializer;
 import org.tmatesoft.hg.internal.DigestHelper;
 import org.tmatesoft.hg.internal.Experimental;
+import org.tmatesoft.hg.internal.FileUtils;
 import org.tmatesoft.hg.internal.InflaterDataAccess;
 import org.tmatesoft.hg.internal.Internals;
 import org.tmatesoft.hg.internal.Lifecycle;
@@ -50,11 +54,11 @@
 
 	private final File bundleFile;
 	private final DataAccessProvider accessProvider;
-//	private final SessionContext sessionContext;
+	private final SessionContext ctx;
 	private Lifecycle.BasicCallback flowControl;
 
-	HgBundle(SessionContext ctx, DataAccessProvider dap, File bundle) {
-//		sessionContext = ctx;
+	HgBundle(SessionContext sessionContext, DataAccessProvider dap, File bundle) {
+		ctx = sessionContext;
 		accessProvider = dap;
 		bundleFile = bundle;
 	}
@@ -533,4 +537,29 @@
 			return String.format("%s %s %s %s; patches:%d\n", node().shortNotation(), firstParent().shortNotation(), secondParent().shortNotation(), cset().shortNotation(), patchCount);
 		}
 	}
+
+	@Experimental(reason="Work in progress, not an API")
+	public class BundleSerializer implements DataSerializer.DataSource {
+
+		public void serialize(DataSerializer out) throws HgIOException, HgRuntimeException {
+			FileInputStream fis = null;
+			try {
+				fis = new FileInputStream(HgBundle.this.bundleFile);
+				byte[] buffer = new byte[8*1024];
+				int r;
+				while ((r = fis.read(buffer, 0, buffer.length)) > 0) {
+					out.write(buffer, 0, r);
+				}
+				
+			} catch (IOException ex) {
+				throw new HgIOException("Failed to serialize bundle", HgBundle.this.bundleFile);
+			} finally {
+				new FileUtils(HgBundle.this.ctx.getLog(), this).closeQuietly(fis, HgBundle.this.bundleFile);
+			}
+		}
+
+		public int serializeLength() throws HgRuntimeException {
+			return Internals.ltoi(HgBundle.this.bundleFile.length());
+		}
+	}
 }
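
Note: BundleSerializer streams the bundle file verbatim into a DataSerializer; a usage sketch (the class is marked experimental, and 'bundle'/'out' are assumed to exist):

    DataSerializer.DataSource src = bundle.new BundleSerializer();
    int length = src.serializeLength(); // bundle file size, narrowed to int
    src.serialize(out);                 // copies the file into 'out' in 8K chunks
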
--- a/src/org/tmatesoft/hg/repo/HgChangelog.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgChangelog.java	Wed Jul 10 11:53:19 2013 +0200
@@ -305,23 +305,7 @@
 			// unixTime is local time, and timezone records difference of the local time to UTC.
 			Date _time = new Date(unixTime * 1000);
 			String _extras = space2 < _timeString.length() ? _timeString.substring(space2 + 1) : null;
-			Map<String, String> _extrasMap;
-			final String extras_branch_key = "branch";
-			if (_extras == null || _extras.trim().length() == 0) {
-				_extrasMap = Collections.singletonMap(extras_branch_key, HgRepository.DEFAULT_BRANCH_NAME);
-			} else {
-				_extrasMap = new HashMap<String, String>();
-				for (String pair : _extras.split("\00")) {
-					pair = decode(pair);
-					int eq = pair.indexOf(':');
-					_extrasMap.put(pair.substring(0, eq), pair.substring(eq + 1));
-				}
-				if (!_extrasMap.containsKey(extras_branch_key)) {
-					_extrasMap.put(extras_branch_key, HgRepository.DEFAULT_BRANCH_NAME);
-				}
-				_extrasMap = Collections.unmodifiableMap(_extrasMap);
-			}
-
+			Map<String, String> _extrasMap = parseExtras(_extras);
 			//
 			int lastStart = breakIndex3 + 1;
 			int breakIndex4 = indexOf(data, lineBreak, lastStart, bufferEndIndex);
@@ -329,6 +313,8 @@
 			if (breakIndex4 > lastStart) {
 				// if breakIndex4 == lastStart, we already found \n\n and hence there are no files (e.g. merge revision)
 				_files = new ArrayList<String>(5);
+				// TODO pool file names
+				// TODO encoding of filenames?
 				while (breakIndex4 != -1 && breakIndex4 + 1 < bufferEndIndex) {
 					_files.add(new String(data, lastStart, breakIndex4 - lastStart));
 					lastStart = breakIndex4 + 1;
@@ -364,6 +350,34 @@
 			this.extras = _extrasMap;
 		}
 
+		private Map<String, String> parseExtras(String _extras) {
+			final String extras_branch_key = "branch";
+			_extras = _extras == null ? null : _extras.trim();
+			if (_extras == null || _extras.length() == 0) {
+				return Collections.singletonMap(extras_branch_key, HgRepository.DEFAULT_BRANCH_NAME);
+			}
+			Map<String, String> _extrasMap = new HashMap<String, String>();
+			int lastIndex = 0;
+			do {
+				String pair;
+				int sp = _extras.indexOf('\0', lastIndex);
+				if (sp == -1) {
+					sp = _extras.length();
+				}
+				if (sp > lastIndex) {
+					pair = _extras.substring(lastIndex, sp);
+					pair = decode(pair);
+					int eq = pair.indexOf(':');
+					_extrasMap.put(pair.substring(0, eq), pair.substring(eq + 1));
+				}
+				lastIndex = sp + 1; // advance unconditionally, otherwise an empty pair would stall the loop
+			} while (lastIndex < _extras.length());
+			if (!_extrasMap.containsKey(extras_branch_key)) {
+				_extrasMap.put(extras_branch_key, HgRepository.DEFAULT_BRANCH_NAME);
+			}
+			return Collections.unmodifiableMap(_extrasMap);
+		}
+
 		private static int indexOf(byte[] src, byte what, int startOffset, int endIndex) {
 			for (int i = startOffset; i < endIndex; i++) {
 				if (src[i] == what) {
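
Note: for illustration, the extras field parseExtras() handles is a NUL-separated list of key:value pairs, each pair decode()d individually:

    "branch:stable\0close:1"  parses to  {branch=stable, close=1}
    null or an empty string   parses to  {branch=default}  (the DEFAULT_BRANCH_NAME fallback)
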
--- a/src/org/tmatesoft/hg/repo/HgDataFile.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgDataFile.java	Wed Jul 10 11:53:19 2013 +0200
@@ -172,7 +172,7 @@
 			} finally {
 				progress.done();
 				if (fis != null) {
-					new FileUtils(getRepo().getSessionContext().getLog()).closeQuietly(fis);
+					new FileUtils(getRepo().getSessionContext().getLog(), this).closeQuietly(fis);
 				}
 			}
 		} else {
--- a/src/org/tmatesoft/hg/repo/HgParentChildMap.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgParentChildMap.java	Wed Jul 10 11:53:19 2013 +0200
@@ -18,13 +18,18 @@
 
 import static org.tmatesoft.hg.repo.HgRepository.TIP;
 
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.BitSet;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.ArrayHelper;
+import org.tmatesoft.hg.internal.IntMap;
 import org.tmatesoft.hg.repo.Revlog.ParentInspector;
 
 /**
@@ -56,15 +61,18 @@
  */
 public final class HgParentChildMap<T extends Revlog> implements ParentInspector {
 
-	
+	// IMPORTANT: Nodeid instances shall be shared between all arrays
+
+	private final T revlog;
 	private Nodeid[] sequential; // natural repository order, childrenOf rely on ordering
-	private Nodeid[] sorted; // for binary search
-	private int[] sorted2natural;
-	private Nodeid[] firstParent;
+	private Nodeid[] sorted; // for binary search, just an origin of the actual value in use, the one inside seqWrapper
+	private Nodeid[] firstParent; // parents by natural order (i.e. firstParent[A] is parent of revision with index A)
 	private Nodeid[] secondParent;
-	private final T revlog;
+	private IntMap<Nodeid> heads;
+	private BitSet headsBitSet; // a set bit indicates the revision has children; != null only during init
+	private HgRevisionMap<T> revisionIndexMap;
+	private ArrayHelper<Nodeid> seqWrapper; 
 
-	// Nodeid instances shall be shared between all arrays
 
 	public HgParentChildMap(T owner) {
 		revlog = owner;
@@ -82,9 +90,11 @@
 		sequential[ix] = sorted[ix] = revision;
 		if (parent1Revision != -1) {
 			firstParent[ix] = sequential[parent1Revision];
+			headsBitSet.set(parent1Revision);
 		}
 		if (parent2Revision != -1) { // revlog of DataAccess.java has p2 set when p1 is -1
 			secondParent[ix] = sequential[parent2Revision];
+			headsBitSet.set(parent2Revision);
 		}
 	}
 	
@@ -96,22 +106,33 @@
 	public void init() throws HgRuntimeException {
 		final int revisionCount = revlog.getRevisionCount();
 		firstParent = new Nodeid[revisionCount];
-		// TODO [post 1.0] Branches/merges are less frequent, and most of secondParent would be -1/null, hence 
+		// TODO [post 1.1] Branches/merges are less frequent, and most of secondParent would be -1/null, hence 
 		// IntMap might be better alternative here, but need to carefully analyze (test) whether this brings
-		// real improvement (IntMap has 2n capacity, and element lookup is log(n) instead of array's constant)
+		// real improvement (IntMap has 2n capacity, and element lookup is log(n) instead of array's constant).
+		// FWIW: in cpython's repo, with 70k+ revisions, there are 2618 values in secondParent 
 		secondParent = new Nodeid[revisionCount];
 		//
 		sequential = new Nodeid[revisionCount];
-		sorted = new Nodeid[revisionCount];
+		sorted = new Nodeid[revisionCount]; 
+		headsBitSet = new BitSet(revisionCount);
 		revlog.indexWalk(0, TIP, this);
-		Arrays.sort(sorted);
-		sorted2natural = new int[revisionCount];
-		for (int i = 0; i < revisionCount; i++) {
-			Nodeid n = sequential[i];
-			int x = Arrays.binarySearch(sorted, n);
-			assertSortedIndex(x);
-			sorted2natural[x] = i;
-		}
+		seqWrapper = new ArrayHelper<Nodeid>(sequential);
+		// HgRevisionMap doesn't keep a sorted copy, try an alternative here.
+		// referencing this.sorted (not only from ArrayHelper) helps to track ownership in hprof/mem dumps
+		seqWrapper.sort(sorted, false, true);
+		// no reason to keep BitSet, number of heads is usually small
+		IntMap<Nodeid> _heads = new IntMap<Nodeid>(headsBitSet.size() - headsBitSet.cardinality());
+		int index = 0;
+		while (index < sequential.length) {
+			index = headsBitSet.nextClearBit(index);
+			// nextClearBit(length-1) gives length when the bit is set,
+			// however, the last revision can't be a parent of any other, so
+			// the last bit is always 0 and there's no AIOOBE
+			_heads.put(index, sequential[index]);
+			index++;
+		} 
+		headsBitSet = null;
+		heads = _heads;
 	}
 	
 	private void assertSortedIndex(int x) {
@@ -127,16 +148,16 @@
 	 * @return <code>true</code> if revision matches any revision in this revlog
 	 */
 	public boolean knownNode(Nodeid nid) {
-		return Arrays.binarySearch(sorted, nid) >= 0;
+		return seqWrapper.binarySearchSorted(nid) >= 0;
 	}
 
 	/**
 	 * null if none. only known nodes (as per #knownNode) are accepted as arguments
 	 */
 	public Nodeid firstParent(Nodeid nid) {
-		int x = Arrays.binarySearch(sorted, nid);
+		int x = seqWrapper.binarySearchSorted(nid);
 		assertSortedIndex(x);
-		int i = sorted2natural[x];
+		int i = seqWrapper.getReverseIndex(x);
 		return firstParent[i];
 	}
 
@@ -147,9 +168,9 @@
 	}
 	
 	public Nodeid secondParent(Nodeid nid) {
-		int x = Arrays.binarySearch(sorted, nid);
+		int x = seqWrapper.binarySearchSorted(nid);
 		assertSortedIndex(x);
-		int i = sorted2natural[x];
+		int i = seqWrapper.getReverseIndex(x);
 		return secondParent[i];
 	}
 
@@ -159,9 +180,9 @@
 	}
 
 	public boolean appendParentsOf(Nodeid nid, Collection<Nodeid> c) {
-		int x = Arrays.binarySearch(sorted, nid);
+		int x = seqWrapper.binarySearchSorted(nid);
 		assertSortedIndex(x);
-		int i = sorted2natural[x];
+		int i = seqWrapper.getReverseIndex(x);
 		Nodeid p1 = firstParent[i];
 		boolean modified = false;
 		if (p1 != null) {
@@ -179,7 +200,10 @@
 	
 	// @return ordered collection of all children rooted at supplied nodes. Nodes shall not be descendants of each other!
 	// Nodeids shall belong to this revlog
-	public List<Nodeid> childrenOf(List<Nodeid> roots) {
+	public List<Nodeid> childrenOf(Collection<Nodeid> roots) {
+		if (roots.isEmpty()) {
+			return Collections.emptyList();
+		}
 		HashSet<Nodeid> parents = new HashSet<Nodeid>();
 		LinkedList<Nodeid> result = new LinkedList<Nodeid>();
 		int earliestRevision = Integer.MAX_VALUE;
@@ -187,9 +211,9 @@
 		// first, find earliest index of roots in question, as there's  no sense 
 		// to check children among nodes prior to branch's root node
 		for (Nodeid r : roots) {
-			int x = Arrays.binarySearch(sorted, r);
+			int x = seqWrapper.binarySearchSorted(r);
 			assertSortedIndex(x);
-			int i = sorted2natural[x];
+			int i = seqWrapper.getReverseIndex(x);
 			if (i < earliestRevision) {
 				earliestRevision = i;
 			}
@@ -208,11 +232,14 @@
 	 * @return revisions that have supplied revision as their immediate parent
 	 */
 	public List<Nodeid> directChildren(Nodeid nid) {
-		LinkedList<Nodeid> result = new LinkedList<Nodeid>();
-		int x = Arrays.binarySearch(sorted, nid);
+		int x = seqWrapper.binarySearchSorted(nid);
 		assertSortedIndex(x);
-		nid = sorted[x]; // canonical instance
-		int start = sorted2natural[x];
+		int start = seqWrapper.getReverseIndex(x);
+		nid = sequential[start]; // canonical instance
+		if (!hasChildren(start)) {
+			return Collections.emptyList();
+		}
+		ArrayList<Nodeid> result = new ArrayList<Nodeid>(5);
 		for (int i = start + 1; i < sequential.length; i++) {
 			if (nid == firstParent[i] || nid == secondParent[i]) {
 				result.add(sequential[i]);
@@ -226,53 +253,72 @@
 	 * @return <code>true</code> if there's any node in this revlog that has specified node as one of its parents. 
 	 */
 	public boolean hasChildren(Nodeid nid) {
-		int x = Arrays.binarySearch(sorted, nid);
+		int x = seqWrapper.binarySearchSorted(nid);
 		assertSortedIndex(x);
-		int i = sorted2natural[x];
-		assert firstParent.length == secondParent.length; // just in case later I implement sparse array for secondParent
-		assert firstParent.length == sequential.length;
-		// to use == instead of equals, take the same Nodeid instance we used to fill all the arrays.
-		final Nodeid canonicalNode = sequential[i];
-		i++; // no need to check node itself. child nodes may appear in sequential only after revision in question
-		for (; i < sequential.length; i++) {
-			// TODO [post 1.0] likely, not very effective. 
-			// May want to optimize it with another (Tree|Hash)Set, created on demand on first use, 
-			// however, need to be careful with memory usage
-			if (firstParent[i] == canonicalNode || secondParent[i] == canonicalNode) {
-				return true;
-			}
-		}
-		return false;
+		int i = seqWrapper.getReverseIndex(x);
+		return hasChildren(i);
 	}
 
 	/**
-     * Find out whether a given node is among descendants of another.
-     *
-     * @param root revision to check for being (grand-)*parent of a child
-     * @param wannaBeChild candidate descendant revision
-     * @return <code>true</code> if <code>wannaBeChild</code> is among children of <code>root</code>
-     */
-    public boolean isChild(Nodeid root, Nodeid wannaBeChild) {
-            int x = Arrays.binarySearch(sorted, root);
-            assertSortedIndex(x);
-            root = sorted[x]; // canonical instance
-            final int start = sorted2natural[x];
-            int y = Arrays.binarySearch(sorted, wannaBeChild);
-            if (y < 0) {
-                    return false; // not found
-            }
-            wannaBeChild = sorted[y]; // canonicalize
-            final int end = sorted2natural[y];
-            if (end <= start) {
-                    return false; // potential child was in repository earlier than root
-            }
-            HashSet<Nodeid> parents = new HashSet<Nodeid>();
-            parents.add(root);
-            for (int i = start + 1; i < end; i++) {
-                    if (parents.contains(firstParent[i]) || parents.contains(secondParent[i])) {
-                            parents.add(sequential[i]); // collect ancestors line
-                    }
-            }
-            return parents.contains(firstParent[end]) || parents.contains(secondParent[end]);
-    }
+	 * @return all revisions this map knows about
+	 */
+	public List<Nodeid> all() {
+		return Arrays.asList(sequential);
+	}
+
+	/**
+	 * Find out whether a given node is among descendants of another.
+	 * 
+	 * @param root revision to check for being (grand-)*parent of a child
+	 * @param wannaBeChild candidate descendant revision
+	 * @return <code>true</code> if <code>wannaBeChild</code> is among children of <code>root</code>
+	 */
+	public boolean isChild(Nodeid root, Nodeid wannaBeChild) {
+		int x = seqWrapper.binarySearchSorted(root);
+		assertSortedIndex(x);
+		final int start = seqWrapper.getReverseIndex(x);
+		root = sequential[start]; // canonical instance
+		if (!hasChildren(start)) {
+			return false; // root got no children at all
+		}
+		int y = seqWrapper.binarySearchSorted(wannaBeChild);
+		if (y < 0) {
+			return false; // not found
+		}
+		final int end = seqWrapper.getReverseIndex(y);
+		wannaBeChild = sequential[end]; // canonicalize
+		if (end <= start) {
+			return false; // potential child was in repository earlier than root
+		}
+		HashSet<Nodeid> parents = new HashSet<Nodeid>();
+		parents.add(root);
+		for (int i = start + 1; i < end; i++) {
+			if (parents.contains(firstParent[i]) || parents.contains(secondParent[i])) {
+				parents.add(sequential[i]); // collect ancestors line
+			}
+		}
+		return parents.contains(firstParent[end]) || parents.contains(secondParent[end]);
+	}
+	
+	/**
+	 * @return elements of this map that do not have a child recorded therein.
+	 */
+	public Collection<Nodeid> heads() {
+		return heads.values();
+	}
+	
+	/**
+	 * @return map of revision to indexes
+	 */
+	public HgRevisionMap<T> getRevisionMap() {
+		if (revisionIndexMap == null) {
+			revisionIndexMap = new HgRevisionMap<T>(revlog);
+			revisionIndexMap.init(seqWrapper);
+		}
+		return revisionIndexMap;
+	}
+
+	private boolean hasChildren(int sequentialIndex) {
+		return !heads.containsKey(sequentialIndex);
+	}
 }
\ No newline at end of file
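
Note: a sketch of the reworked map: init() now records, in the same changelog walk, which revisions have children, so head queries no longer rescan the parent arrays:

    HgParentChildMap<HgChangelog> pw = new HgParentChildMap<HgChangelog>(repo.getChangelog());
    pw.init();                                   // single walk; also builds the heads IntMap
    Collection<Nodeid> heads = pw.heads();       // revisions with no recorded children
    List<Nodeid> everything = pw.all();
    HgRevisionMap<HgChangelog> rmap = pw.getRevisionMap(); // reuses the already-sorted helper
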
--- a/src/org/tmatesoft/hg/repo/HgPhase.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgPhase.java	Wed Jul 10 11:53:19 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 TMate Software Ltd
+ * Copyright (c) 2012-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -58,4 +58,18 @@
 		}
 		throw new IllegalArgumentException(String.format("Bad phase name: %d", value));
 	}
+	
+	/**
+	 * @return integer value Mercurial uses to identify the phase
+	 */
+	public int mercurialOrdinal() {
+		if (this == Undefined) {
+			throw new IllegalStateException("Undefined phase is an artifical value, which doesn't possess a valid native mercurial ordinal");
+		}
+		return ordinal(); // Mercurial's numeric values happen to match this enum's declaration order
+	}
+	
+	public String mercurialString() {
+		return hgString;
+	}
 }
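
mercurialOrdinal() deliberately reuses the Java enum ordinal, which stays valid only while the constants are declared in Mercurial's native order (public=0, draft=1, secret=2). A hedged sketch of the intended use of the two accessors (the pushkey/phaseroots wire format carries the numeric value, configuration the string one):

	int wire = HgPhase.Draft.mercurialOrdinal();   // 1, as written to phaseroots and pushkey
	String cfg = HgPhase.Draft.mercurialString();  // e.g. default for phases.new-commit
	// HgPhase.Undefined.mercurialOrdinal() throws IllegalStateException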
--- a/src/org/tmatesoft/hg/repo/HgRemoteRepository.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgRemoteRepository.java	Wed Jul 10 11:53:19 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -17,8 +17,11 @@
 package org.tmatesoft.hg.repo;
 
 import static org.tmatesoft.hg.util.LogFacility.Severity.Info;
+import static org.tmatesoft.hg.util.Outcome.Kind.Failure;
+import static org.tmatesoft.hg.util.Outcome.Kind.Success;
 
 import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -26,6 +29,8 @@
 import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.StreamTokenizer;
+import java.net.ContentHandler;
+import java.net.ContentHandlerFactory;
 import java.net.HttpURLConnection;
 import java.net.MalformedURLException;
 import java.net.URL;
@@ -53,16 +58,25 @@
 import javax.net.ssl.X509TrustManager;
 
 import org.tmatesoft.hg.core.HgBadArgumentException;
+import org.tmatesoft.hg.core.HgIOException;
 import org.tmatesoft.hg.core.HgRemoteConnectionException;
 import org.tmatesoft.hg.core.HgRepositoryNotFoundException;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.core.SessionContext;
+import org.tmatesoft.hg.internal.DataSerializer;
+import org.tmatesoft.hg.internal.DataSerializer.OutputStreamSerializer;
+import org.tmatesoft.hg.internal.EncodingHelper;
+import org.tmatesoft.hg.internal.Internals;
 import org.tmatesoft.hg.internal.PropertyMarshal;
+import org.tmatesoft.hg.util.LogFacility.Severity;
+import org.tmatesoft.hg.util.Outcome;
+import org.tmatesoft.hg.util.Pair;
 
 /**
  * WORK IN PROGRESS, DO NOT USE
  * 
  * @see http://mercurial.selenic.com/wiki/WireProtocol
+ * @see http://mercurial.selenic.com/wiki/HttpCommandProtocol
  * 
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
@@ -77,6 +91,33 @@
 	private final SessionContext sessionContext;
 	private Set<String> remoteCapabilities;
 	
+	static {
+		URLConnection.setContentHandlerFactory(new ContentHandlerFactory() {
+			
+			public ContentHandler createContentHandler(String mimetype) {
+				if ("application/mercurial-0.1".equals(mimetype)) {
+					return new ContentHandler() {
+						
+						@Override
+						public Object getContent(URLConnection urlc) throws IOException {
+							if (urlc.getContentLength() > 0) {
+								ByteArrayOutputStream bos = new ByteArrayOutputStream();
+								InputStream is = urlc.getInputStream();
+								int r;
+								while ((r = is.read()) != -1) {
+									bos.write(r);
+								}
+								return new String(bos.toByteArray());
+							}
+							return "<empty>";
+						}
+					};
+				}
+				return null;
+			}
+		});
+	}
+	
 	HgRemoteRepository(SessionContext ctx, URL url) throws HgBadArgumentException {
 		if (url == null || ctx == null) {
 			throw new IllegalArgumentException();
@@ -128,48 +169,7 @@
 	}
 	
 	public boolean isInvalid() throws HgRemoteConnectionException {
-		if (remoteCapabilities == null) {
-			remoteCapabilities = new HashSet<String>();
-			// say hello to server, check response
-			try {
-				URL u = new URL(url, url.getPath() + "?cmd=hello");
-				HttpURLConnection c = setupConnection(u.openConnection());
-				c.connect();
-				if (debug) {
-					dumpResponseHeader(u, c);
-				}
-				BufferedReader r = new BufferedReader(new InputStreamReader(c.getInputStream(), "US-ASCII"));
-				String line = r.readLine();
-				c.disconnect();
-				final String capsPrefix = "capabilities:";
-				if (line == null || !line.startsWith(capsPrefix)) {
-					// for whatever reason, some servers do not respond to hello command (e.g. svnkit)
-					// but respond to 'capabilities' instead. Try it.
-					// TODO [post-1.0] tests needed
-					u = new URL(url, url.getPath() + "?cmd=capabilities");
-					c = setupConnection(u.openConnection());
-					c.connect();
-					if (debug) {
-						dumpResponseHeader(u, c);
-					}
-					r = new BufferedReader(new InputStreamReader(c.getInputStream(), "US-ASCII"));
-					line = r.readLine();
-					c.disconnect();
-					if (line == null || line.trim().length() == 0) {
-						return true;
-					}
-				} else {
-					line = line.substring(capsPrefix.length()).trim();
-				}
-				String[] caps = line.split("\\s");
-				remoteCapabilities.addAll(Arrays.asList(caps));
-				c.disconnect();
-			} catch (MalformedURLException ex) {
-				throw new HgRemoteConnectionException("Bad URL", ex).setRemoteCommand("hello").setServerInfo(getLocation());
-			} catch (IOException ex) {
-				throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("hello").setServerInfo(getLocation());
-			}
-		}
+		initCapabilities();
 		return remoteCapabilities.isEmpty();
 	}
 
@@ -192,9 +192,10 @@
 	}
 
 	public List<Nodeid> heads() throws HgRemoteConnectionException {
+		HttpURLConnection c = null;
 		try {
 			URL u = new URL(url, url.getPath() + "?cmd=heads");
-			HttpURLConnection c = setupConnection(u.openConnection());
+			c = setupConnection(u.openConnection());
 			c.connect();
 			if (debug) {
 				dumpResponseHeader(u, c);
@@ -213,6 +214,10 @@
 			throw new HgRemoteConnectionException("Bad URL", ex).setRemoteCommand("heads").setServerInfo(getLocation());
 		} catch (IOException ex) {
 			throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("heads").setServerInfo(getLocation());
+		} finally {
+			if (c != null) {
+				c.disconnect();
+			}
 		}
 	}
 	
@@ -245,10 +250,11 @@
 			// strip last space 
 			sb.setLength(sb.length() - 1);
 		}
+		HttpURLConnection c = null;
 		try {
 			boolean usePOST = ranges.size() > 3;
 			URL u = new URL(url, url.getPath() + "?cmd=between" + (usePOST ? "" : '&' + sb.toString()));
-			HttpURLConnection c = setupConnection(u.openConnection());
+			c = setupConnection(u.openConnection());
 			if (usePOST) {
 				c.setRequestMethod("POST");
 				c.setRequestProperty("Content-Length", String.valueOf(sb.length()/*nodeids are ASCII, bytes == characters */));
@@ -314,23 +320,19 @@
 			throw new HgRemoteConnectionException("Bad URL", ex).setRemoteCommand("between").setServerInfo(getLocation());
 		} catch (IOException ex) {
 			throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("between").setServerInfo(getLocation());
+		} finally {
+			if (c != null) {
+				c.disconnect();
+			}
 		}
 	}
 
 	public List<RemoteBranch> branches(List<Nodeid> nodes) throws HgRemoteConnectionException {
-		StringBuilder sb = new StringBuilder(20 + nodes.size() * 41);
-		sb.append("nodes=");
-		for (Nodeid n : nodes) {
-			sb.append(n.toString());
-			sb.append('+');
-		}
-		if (sb.charAt(sb.length() - 1) == '+') {
-			// strip last space 
-			sb.setLength(sb.length() - 1);
-		}
+		StringBuilder sb = appendNodeidListArgument("nodes", nodes, null);
+		HttpURLConnection c = null;
 		try {
 			URL u = new URL(url, url.getPath() + "?cmd=branches&" + sb.toString());
-			HttpURLConnection c = setupConnection(u.openConnection());
+			c = setupConnection(u.openConnection());
 			c.connect();
 			if (debug) {
 				dumpResponseHeader(u, c);
@@ -357,6 +359,10 @@
 			throw new HgRemoteConnectionException("Bad URL", ex).setRemoteCommand("branches").setServerInfo(getLocation());
 		} catch (IOException ex) {
 			throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("branches").setServerInfo(getLocation());
+		} finally {
+			if (c != null) {
+				c.disconnect();
+			}
 		}
 	}
 
@@ -378,19 +384,11 @@
 	 */
 	public HgBundle getChanges(List<Nodeid> roots) throws HgRemoteConnectionException, HgRuntimeException {
 		List<Nodeid> _roots = roots.isEmpty() ? Collections.singletonList(Nodeid.NULL) : roots;
-		StringBuilder sb = new StringBuilder(20 + _roots.size() * 41);
-		sb.append("roots=");
-		for (Nodeid n : _roots) {
-			sb.append(n.toString());
-			sb.append('+');
-		}
-		if (sb.charAt(sb.length() - 1) == '+') {
-			// strip last space 
-			sb.setLength(sb.length() - 1);
-		}
+		StringBuilder sb = appendNodeidListArgument("roots", _roots, null);
+		HttpURLConnection c = null;
 		try {
 			URL u = new URL(url, url.getPath() + "?cmd=changegroup&" + sb.toString());
-			HttpURLConnection c = setupConnection(u.openConnection());
+			c = setupConnection(u.openConnection());
 			c.connect();
 			if (debug) {
 				dumpResponseHeader(u, c);
@@ -407,13 +405,168 @@
 			throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("changegroup").setServerInfo(getLocation());
 		} catch (HgRepositoryNotFoundException ex) {
 			throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("changegroup").setServerInfo(getLocation());
+		} finally {
+			if (c != null) {
+				c.disconnect();
+			}
 		}
 	}
+	
+	public void unbundle(HgBundle bundle, List<Nodeid> remoteHeads) throws HgRemoteConnectionException, HgRuntimeException {
+		if (remoteHeads == null) {
+			// TODO collect heads from bundle:
+			// bundle.inspectChangelog(new HeadCollector(for each c : if collected has c.p1 or c.p2, remove them. Add c))
+			// or fetch them from the remote server?
+			throw Internals.notImplemented();
+		}
+		StringBuilder sb = appendNodeidListArgument("heads", remoteHeads, null);
+		
+		HttpURLConnection c = null;
+		DataSerializer.DataSource bundleData = bundle.new BundleSerializer();
+		try {
+			URL u = new URL(url, url.getPath() + "?cmd=unbundle&" + sb.toString());
+			c = setupConnection(u.openConnection());
+			c.setRequestMethod("POST");
+			c.setRequestProperty("Content-Length", String.valueOf(bundleData.serializeLength()));
+			c.setRequestProperty("Content-Type", "application/mercurial-0.1");
+			c.setDoOutput(true);
+			c.connect();
+			OutputStream os = c.getOutputStream();
+			bundleData.serialize(new OutputStreamSerializer(os));
+			os.flush();
+			os.close();
+			if (debug) {
+				dumpResponseHeader(u, c);
+				dumpResponse(c);
+			}
+			checkResponseOk(c, "Push", "unbundle");
+		} catch (MalformedURLException ex) {
+			throw new HgRemoteConnectionException("Bad URL", ex).setRemoteCommand("unbundle").setServerInfo(getLocation());
+		} catch (IOException ex) {
+			throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("unbundle").setServerInfo(getLocation());
+		} catch (HgIOException ex) {
+			throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("unbundle").setServerInfo(getLocation());
+		} finally {
+			if (c != null) {
+				c.disconnect();
+			}
+		}
+	}
+
+	public Bookmarks getBookmarks() throws HgRemoteConnectionException, HgRuntimeException {
+		final String actionName = "Get remote bookmarks";
+		final List<Pair<String, String>> values = listkeys("bookmarks", actionName);
+		ArrayList<Pair<String, Nodeid>> rv = new ArrayList<Pair<String, Nodeid>>();
+		for (Pair<String, String> l : values) {
+			if (l.second().length() != Nodeid.SIZE_ASCII) {
+				sessionContext.getLog().dump(getClass(), Severity.Warn, "%s: bad nodeid '%s', ignored", actionName, l.second());
+				continue;
+			}
+			Nodeid n = Nodeid.fromAscii(l.second());
+			String bm = new String(l.first());
+			rv.add(new Pair<String, Nodeid>(bm, n));
+		}
+		return new Bookmarks(rv);
+	}
+
+	public Outcome updateBookmark(String name, Nodeid oldRev, Nodeid newRev) throws HgRemoteConnectionException, HgRuntimeException {
+		initCapabilities();
+		if (!remoteCapabilities.contains("pushkey")) {
+			return new Outcome(Failure, "Server doesn't support pushkey protocol");
+		}
+		if (pushkey("Update remote bookmark", "bookmarks", name, oldRev.toString(), newRev.toString())) {
+			return new Outcome(Success, String.format("Bookmark %s updated to %s", name, newRev.shortNotation()));
+		}
+		return new Outcome(Failure, String.format("Bookmark update (%s: %s -> %s) failed", name, oldRev.shortNotation(), newRev.shortNotation()));
+	}
+	
+	public Phases getPhases() throws HgRemoteConnectionException, HgRuntimeException {
+		initCapabilities();
+		if (!remoteCapabilities.contains("pushkey")) {
+			// old server defaults to publishing
+			return new Phases(true, Collections.<Nodeid>emptyList());
+		}
+		final List<Pair<String, String>> values = listkeys("phases", "Get remote phases");
+		boolean publishing = false;
+		ArrayList<Nodeid> draftRoots = new ArrayList<Nodeid>();
+		for (Pair<String, String> l : values) {
+			if ("publishing".equalsIgnoreCase(l.first())) {
+				publishing = Boolean.parseBoolean(l.second());
+				continue;
+			}
+			Nodeid root = Nodeid.fromAscii(l.first());
+			int ph = Integer.parseInt(l.second());
+			if (ph == HgPhase.Draft.mercurialOrdinal()) {
+				draftRoots.add(root);
+			} else {
+				assert false;
+				sessionContext.getLog().dump(getClass(), Severity.Error, "Unexpected phase value %d for revision %s", ph, root);
+			}
+		}
+		return new Phases(publishing, draftRoots);
+	}
+	
+	public Outcome updatePhase(HgPhase from, HgPhase to, Nodeid n) throws HgRemoteConnectionException, HgRuntimeException {
+		initCapabilities();
+		if (!remoteCapabilities.contains("pushkey")) {
+			return new Outcome(Failure, "Server doesn't support pushkey protocol");
+		}
+		if (pushkey("Update remote phases", "phases", n.toString(), String.valueOf(from.mercurialOrdinal()), String.valueOf(to.mercurialOrdinal()))) {
+			return new Outcome(Success, String.format("Phase of %s updated to %s", n.shortNotation(), to.name()));
+		}
+		return new Outcome(Failure, String.format("Phase update (%s: %s -> %s) failed", n.shortNotation(), from.name(), to.name()));
+	}
 
 	@Override
 	public String toString() {
 		return getClass().getSimpleName() + '[' + getLocation() + ']';
 	}
+	
+	
+	private void initCapabilities() throws HgRemoteConnectionException {
+		if (remoteCapabilities == null) {
+			remoteCapabilities = new HashSet<String>();
+			// say hello to server, check response
+			try {
+				URL u = new URL(url, url.getPath() + "?cmd=hello");
+				HttpURLConnection c = setupConnection(u.openConnection());
+				c.connect();
+				if (debug) {
+					dumpResponseHeader(u, c);
+				}
+				BufferedReader r = new BufferedReader(new InputStreamReader(c.getInputStream(), "US-ASCII"));
+				String line = r.readLine();
+				c.disconnect();
+				final String capsPrefix = "capabilities:";
+				if (line == null || !line.startsWith(capsPrefix)) {
+					// for whatever reason, some servers do not respond to hello command (e.g. svnkit)
+					// but respond to 'capabilities' instead. Try it.
+					// TODO [post-1.0] tests needed
+					u = new URL(url, url.getPath() + "?cmd=capabilities");
+					c = setupConnection(u.openConnection());
+					c.connect();
+					if (debug) {
+						dumpResponseHeader(u, c);
+					}
+					r = new BufferedReader(new InputStreamReader(c.getInputStream(), "US-ASCII"));
+					line = r.readLine();
+					c.disconnect();
+					if (line == null || line.trim().length() == 0) {
+						return;
+					}
+				} else {
+					line = line.substring(capsPrefix.length()).trim();
+				}
+				String[] caps = line.split("\\s");
+				remoteCapabilities.addAll(Arrays.asList(caps));
+				c.disconnect();
+			} catch (MalformedURLException ex) {
+				throw new HgRemoteConnectionException("Bad URL", ex).setRemoteCommand("hello").setServerInfo(getLocation());
+			} catch (IOException ex) {
+				throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("hello").setServerInfo(getLocation());
+			}
+		}
+	}
 
 	private HgLookup getLookupHelper() {
 		if (lookupHelper == null) {
@@ -421,9 +574,78 @@
 		}
 		return lookupHelper;
 	}
+
+	private List<Pair<String,String>> listkeys(String namespace, String actionName) throws HgRemoteConnectionException, HgRuntimeException {
+		HttpURLConnection c = null;
+		try {
+			URL u = new URL(url, url.getPath() + "?cmd=listkeys&namespace=" + namespace);
+			c = setupConnection(u.openConnection());
+			c.connect();
+			if (debug) {
+				dumpResponseHeader(u, c);
+			}
+			checkResponseOk(c, actionName, "listkeys");
+			ArrayList<Pair<String, String>> rv = new ArrayList<Pair<String, String>>();
+			BufferedReader r = new BufferedReader(new InputStreamReader(c.getInputStream(), EncodingHelper.getUTF8()));
+			String l;
+			while ((l = r.readLine()) != null) {
+				int sep = l.indexOf('\t');
+				if (sep == -1) {
+					sessionContext.getLog().dump(getClass(), Severity.Warn, "%s: bad line '%s', ignored", actionName, l);
+					continue;
+				}
+				rv.add(new Pair<String,String>(l.substring(0, sep), l.substring(sep+1)));
+			}
+			r.close();
+			return rv;
+		} catch (MalformedURLException ex) {
+			throw new HgRemoteConnectionException("Bad URL", ex).setRemoteCommand("listkeys").setServerInfo(getLocation());
+		} catch (IOException ex) {
+			throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("listkeys").setServerInfo(getLocation());
+		} finally {
+			if (c != null) {
+				c.disconnect();
+			}
+		}
+	}
 	
+	private boolean pushkey(String opName, String namespace, String key, String oldValue, String newValue) throws HgRemoteConnectionException, HgRuntimeException {
+		HttpURLConnection c = null;
+		try {
+			final String p = String.format("%s?cmd=pushkey&namespace=%s&key=%s&old=%s&new=%s", url.getPath(), namespace, key, oldValue, newValue);
+			URL u = new URL(url, p);
+			c = setupConnection(u.openConnection());
+			c.setRequestMethod("POST");
+			c.connect();
+			if (debug) {
+				dumpResponseHeader(u, c);
+			}
+			checkResponseOk(c, opName, "pushkey");
+			final InputStream is = c.getInputStream();
+			int rv = is.read();
+			is.close();
+			return rv == '1';
+		} catch (MalformedURLException ex) {
+			throw new HgRemoteConnectionException("Bad URL", ex).setRemoteCommand("pushkey").setServerInfo(getLocation());
+		} catch (IOException ex) {
+			throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("pushkey").setServerInfo(getLocation());
+		} finally {
+			if (c != null) {
+				c.disconnect();
+			}
+		}
+	}
+	
+	private void checkResponseOk(HttpURLConnection c, String opName, String remoteCmd) throws HgRemoteConnectionException, IOException {
+		if (c.getResponseCode() != 200) {
+			String m = c.getResponseMessage() == null ? "unknown reason" : c.getResponseMessage();
+			String em = String.format("%s failed: %s (HTTP error: %d)", opName, m, c.getResponseCode());
+			throw new HgRemoteConnectionException(em).setRemoteCommand(remoteCmd).setServerInfo(getLocation());
+		}
+	}
+
 	private HttpURLConnection setupConnection(URLConnection urlConnection) {
-		urlConnection.setRequestProperty("User-Agent", "hg4j/0.5.0");
+		urlConnection.setRequestProperty("User-Agent", "hg4j/1.0.0");
 		urlConnection.addRequestProperty("Accept", "application/mercurial-0.1");
 		if (authInfo != null) {
 			urlConnection.addRequestProperty("Authorization", "Basic " + authInfo);
@@ -433,6 +655,23 @@
 		}
 		return (HttpURLConnection) urlConnection;
 	}
+	
+	private StringBuilder appendNodeidListArgument(String key, List<Nodeid> values, StringBuilder sb) {
+		if (sb == null) {
+			sb = new StringBuilder(20 + values.size() * 41);
+		}
+		sb.append(key);
+		sb.append('=');
+		for (Nodeid n : values) {
+			sb.append(n.toString());
+			sb.append('+');
+		}
+		if (sb.charAt(sb.length() - 1) == '+') {
+			// strip the trailing '+'
+			sb.setLength(sb.length() - 1);
+		}
+		return sb;
+	}
 
 	private void dumpResponseHeader(URL u, HttpURLConnection c) {
 		System.out.printf("Query (%d bytes):%s\n", u.getQuery().length(), u.getQuery());
@@ -443,9 +682,16 @@
 		}
 	}
 	
+	private void dumpResponse(HttpURLConnection c) throws IOException {
+		if (c.getContentLength() > 0) {
+			final Object content = c.getContent();
+			System.out.println(content);
+		}
+	}
+	
 	private static File writeBundle(InputStream is, boolean decompress, String header) throws IOException {
 		InputStream zipStream = decompress ? new InflaterInputStream(is) : is;
-		File tf = File.createTempFile("hg-bundle-", null);
+		File tf = File.createTempFile("hg4j-bundle-", null);
 		FileOutputStream fos = new FileOutputStream(tf);
 		fos.write(header.getBytes());
 		int r;
@@ -502,4 +748,44 @@
 			return head.equals(o.head) && root.equals(o.root) && (p1 == null && o.p1 == null || p1.equals(o.p1)) && (p2 == null && o.p2 == null || p2.equals(o.p2));
 		}
 	}
+
+	public static final class Bookmarks implements Iterable<Pair<String, Nodeid>> {
+		private final List<Pair<String, Nodeid>> bm;
+
+		private Bookmarks(List<Pair<String, Nodeid>> bookmarks) {
+			bm = bookmarks;
+		}
+
+		public Iterator<Pair<String, Nodeid>> iterator() {
+			return bm.iterator();
+		}
+	}
+	
+	public static final class Phases {
+		private final boolean pub;
+		private final List<Nodeid> droots;
+		
+		private Phases(boolean publishing, List<Nodeid> draftRoots) {
+			pub = publishing;
+			droots = draftRoots;
+		}
+		
+		/**
+		 * Non-publishing servers may (shall?) respond with a list of draft roots.
+		 * This method makes no sense when {@link #isPublishingServer()} is <code>true</code>.
+		 * 
+		 * @return list of draft roots on remote server
+		 */
+		public List<Nodeid> draftRoots() {
+			return droots;
+		}
+
+		/**
+		 * @return <code>true</code> if revisions on remote server shall be deemed published (either 
+		 * old server w/o explicit setting, or a new one with <code>phases.publish == true</code>)
+		 */
+		public boolean isPublishingServer() {
+			return pub;
+		}
+	}
 }
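
For reference: listkeys answers one key<TAB>value pair per line, and pushkey answers a single character, '1' for success, which is exactly what the two private helpers above parse. A sketch of driving the new public calls end to end (the server URL, oldRev and newRev are illustrative; checked exceptions omitted):

	HgRemoteRepository remote = new HgLookup().detect(new URL("http://localhost:8000/"));
	for (Pair<String, Nodeid> bm : remote.getBookmarks()) {
		System.out.printf("%s -> %s%n", bm.first(), bm.second().shortNotation());
	}
	HgRemoteRepository.Phases phases = remote.getPhases();
	if (!phases.isPublishingServer()) {
		List<Nodeid> draftRoots = phases.draftRoots(); // non-public roots on the server
	}
	Outcome o = remote.updateBookmark("stable", oldRev, newRev);
	if (!o.isOk()) {
		// server rejected the update, e.g. oldRev didn't match
	}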
--- a/src/org/tmatesoft/hg/repo/HgRepository.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgRepository.java	Wed Jul 10 11:53:19 2013 +0200
@@ -16,7 +16,7 @@
  */
 package org.tmatesoft.hg.repo;
 
-import static org.tmatesoft.hg.repo.HgRepositoryFiles.LastMessage;
+import static org.tmatesoft.hg.repo.HgRepositoryFiles.*;
 import static org.tmatesoft.hg.util.LogFacility.Severity.Warn;
 
 import java.io.File;
@@ -373,7 +373,7 @@
 		} catch (IOException ex) {
 			throw new HgInvalidControlFileException("Can't retrieve message of last commit attempt", ex, lastMessage);
 		} finally {
-			new FileUtils(getSessionContext().getLog()).closeQuietly(fr, lastMessage);
+			new FileUtils(getSessionContext().getLog(), this).closeQuietly(fr, lastMessage);
 		}
 	}
 
@@ -389,7 +389,7 @@
 	public HgRepositoryLock getWorkingDirLock() {
 		if (wdLock == null) {
 			int timeout = getLockTimeout();
-			File lf = impl.getFileFromRepoDir("wlock");
+			File lf = impl.getRepositoryFile(WorkingCopyLock);
 			synchronized (this) {
 				if (wdLock == null) {
 					wdLock = new HgRepositoryLock(lf, timeout);
@@ -407,7 +407,7 @@
 	public HgRepositoryLock getStoreLock() {
 		if (storeLock == null) {
 			int timeout = getLockTimeout();
-			File fl = impl.getFileFromStoreDir("lock");
+			File fl = impl.getRepositoryFile(StoreLock);
 			synchronized (this) {
 				if (storeLock == null) {
 					storeLock = new HgRepositoryLock(fl, timeout);
--- a/src/org/tmatesoft/hg/repo/HgRepositoryFiles.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgRepositoryFiles.java	Wed Jul 10 11:53:19 2013 +0200
@@ -25,35 +25,46 @@
  */
 public enum HgRepositoryFiles {
 
-	HgIgnore(".hgignore"), HgTags(".hgtags"), HgEol(".hgeol"), 
-	Dirstate(false, "dirstate"), HgLocalTags(false, "localtags"),
-	HgSub(".hgsub"), HgSubstate(".hgsubstate"),
-	LastMessage(false, "last-message.txt"),
-	Bookmarks(false, "bookmarks"), BookmarksCurrent(false, "bookmarks.current"),
-	Branch(false, "branch"), 
-	UndoBranch(false, "undo.branch"), UndoDirstate(false, "undo.dirstate");
+	HgIgnore(Home.Root, ".hgignore"), HgTags(Home.Root, ".hgtags"), HgEol(Home.Root, ".hgeol"), 
+	Dirstate(Home.Repo, "dirstate"), HgLocalTags(Home.Repo, "localtags"),
+	HgSub(Home.Root, ".hgsub"), HgSubstate(Home.Root, ".hgsubstate"),
+	LastMessage(Home.Repo, "last-message.txt"),
+	Bookmarks(Home.Repo, "bookmarks"), BookmarksCurrent(Home.Repo, "bookmarks.current"),
+	Branch(Home.Repo, "branch"), 
+	UndoBranch(Home.Repo, "undo.branch"), UndoDirstate(Home.Repo, "undo.dirstate"),
+	Phaseroots(Home.Store, "phaseroots"), FNCache(Home.Store, "fncache"),
+	WorkingCopyLock(Home.Repo, "wlock"), StoreLock(Home.Store, "lock");
+
+	/**
+	 * Possible file locations 
+	 */
+	public enum Home {
+		Root, Repo, Store
+	}
 
 	private final String fname;
-	private final boolean livesInWC; 
+	private final Home residesIn; 
 	
-	private HgRepositoryFiles(String filename) {
-		this(true, filename);
-	}
-
-	private HgRepositoryFiles(boolean wcNotRepoRoot, String filename) {
+	private HgRepositoryFiles(Home home, String filename) {
 		fname = filename;
-		livesInWC = wcNotRepoRoot;
+		residesIn = home;
 	}
 
 	/**
-	 * Path to the file, relative to the parent it lives in.
+	 * Path to the file, relative to the repository root.
 	 * 
 	 * For repository files that reside in working directory, return their location relative to the working dir.
-	 * For files that reside under repository root, path returned would include '.hg/' prefix.
+	 * For files that reside under repository root, path returned includes '.hg/' prefix.
+	 * For files from the {@link Home#Store} storage area, the path starts with '.hg/store/', although actual use of the
+	 * 'store' folder is controlled by repository requirements. The returned value is thus the 'most likely' path in a typical setup.
 	 * @return file location, never <code>null</code>
 	 */
 	public String getPath() {
-		return livesInWC ? getName() : ".hg/" + getName();
+		switch (residesIn) {
+			case Store : return ".hg/store/" + getName();
+			case Repo : return ".hg/" + getName();
+			default : return getName();
+		}
 	}
 
 	/**
@@ -73,13 +84,20 @@
 	 * @return <code>true</code> if file lives in working tree
 	 */
 	public boolean residesUnderWorkingDir() {
-		return livesInWC;
+		return residesIn == Home.Root;
 	}
 
 	/**
 	 * @return <code>true</code> if file lives under '.hg/' 
 	 */
 	public boolean residesUnderRepositoryRoot() {
-		return !livesInWC;
+		return residesIn == Home.Repo;
+	}
+	
+	/**
+	 * Identify the root the file lives under.
+	 */
+	public Home getHome() {
+		return residesIn;
 	}
 }
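
A quick illustration of the three storage areas as encoded above (Store paths are the 'most likely' layout, per the caveat in the javadoc):

	HgRepositoryFiles f = HgRepositoryFiles.Phaseroots;
	assert f.getHome() == HgRepositoryFiles.Home.Store;
	assert ".hg/store/phaseroots".equals(f.getPath());
	assert !f.residesUnderWorkingDir() && !f.residesUnderRepositoryRoot();
	assert ".hgignore".equals(HgRepositoryFiles.HgIgnore.getPath()); // Home.Root, lives in the working tree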
--- a/src/org/tmatesoft/hg/repo/HgRevisionMap.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgRevisionMap.java	Wed Jul 10 11:53:19 2013 +0200
@@ -19,8 +19,6 @@
 import static org.tmatesoft.hg.repo.HgRepository.BAD_REVISION;
 import static org.tmatesoft.hg.repo.HgRepository.TIP;
 
-import java.util.Arrays;
-
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.internal.ArrayHelper;
 import org.tmatesoft.hg.repo.Revlog.RevisionInspector;
@@ -60,15 +58,14 @@
 	 * for complete changelog iteration. 
 	 */
 	
+	private final T revlog;
 	/*
 	 * XXX 3 * (x * 4) bytes. Can I do better?
 	 * It seems, yes. Don't need to keep sorted, always can emulate it with indirect access to sequential through sorted2natural.
 	 * i.e. instead sorted[mid].compareTo(toFind), do sequential[sorted2natural[mid]].compareTo(toFind) 
 	 */
-	private Nodeid[] sequential; // natural repository order, childrenOf rely on ordering
-	private Nodeid[] sorted; // for binary search
-	private int[] sorted2natural;
-	private final T revlog;
+	private Nodeid[] sequential; // natural repository order
+	private ArrayHelper<Nodeid> seqWrapper;
 
 	public HgRevisionMap(T owner) {
 		revlog = owner;
@@ -79,7 +76,7 @@
 	}
 	
 	public void next(int revisionIndex, Nodeid revision, int linkedRevision) {
-		sequential[revisionIndex] = sorted[revisionIndex] = revision;
+		sequential[revisionIndex] = revision;
 	}
 
 	/**
@@ -89,28 +86,29 @@
 		// XXX HgRepository.register((RepoChangeListener) this); // listen to changes in repo, re-init if needed?
 		final int revisionCount = revlog.getRevisionCount();
 		sequential = new Nodeid[revisionCount];
-		sorted = new Nodeid[revisionCount];
 		revlog.indexWalk(0, TIP, this);
 		// next is alternative to Arrays.sort(sorted), and build sorted2natural looking up each element of sequential in sorted.
 		// the way sorted2natural was build is O(n*log n).  
-		final ArrayHelper ah = new ArrayHelper();
-		ah.sort(sorted);
-		// note, values in ArrayHelper#getReversed are 1-based indexes, not 0-based 
-		sorted2natural = ah.getReverse();
+		seqWrapper = new ArrayHelper<Nodeid>(sequential);
+		seqWrapper.sort(null, true, false);
 		return this;
 	}
+
+	/* friendly initializer to use from HgParentChildMap */
+	/*package*/ void init(ArrayHelper<Nodeid> _seqWrapper) {
+		assert _seqWrapper.getData().length == revlog.getRevisionCount();
+		sequential = _seqWrapper.getData();
+		seqWrapper = _seqWrapper;
+	}
 	
 	public Nodeid revision(int revisionIndex) {
 		return sequential[revisionIndex];
 	}
+
 	public int revisionIndex(Nodeid revision) {
 		if (revision == null || revision.isNull()) {
 			return BAD_REVISION;
 		}
-		int x = Arrays.binarySearch(sorted, revision);
-		if (x < 0) {
-			return BAD_REVISION;
-		}
-		return sorted2natural[x]-1;
+		return seqWrapper.binarySearch(revision, BAD_REVISION);
 	}
 }
\ No newline at end of file
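
After this refactoring a single ArrayHelper owns both the sorted view and the reverse (sorted-to-natural) indexes, which is what lets HgParentChildMap.getRevisionMap() share one sort through the package-private init(ArrayHelper). A lookup sketch (repo is an assumed HgRepository):

	HgRevisionMap<HgChangelog> rmap = new HgRevisionMap<HgChangelog>(repo.getChangelog()).init();
	int idx = rmap.revisionIndex(nodeid); // binary search over the sorted view; BAD_REVISION when unknown
	Nodeid n = rmap.revision(idx);        // natural-order access stays a plain array read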
--- a/src/org/tmatesoft/hg/repo/HgWorkingCopyStatusCollector.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgWorkingCopyStatusCollector.java	Wed Jul 10 11:53:19 2013 +0200
@@ -589,7 +589,7 @@
 		} catch (IOException ex) {
 			throw new HgInvalidFileException("File comparison failed", ex).setFileName(p);
 		} finally {
-			new FileUtils(repo.getSessionContext().getLog()).closeQuietly(is);
+			new FileUtils(repo.getSessionContext().getLog(), this).closeQuietly(is);
 		}
 	}
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/org/tmatesoft/hg/test/HgServer.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.test;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Wraps hg server
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+class HgServer {
+	private Process serverProcess;
+	private boolean publish = true;
+	
+	public HgServer publishing(boolean pub) {
+		publish = pub;
+		return this;
+	}
+
+	public HgServer start(File dir) throws IOException, InterruptedException {
+		if (serverProcess != null) {
+			stop();
+		}
+		List<String> cmdline = new ArrayList<String>();
+		cmdline.add("hg");
+		cmdline.add("--config");
+		cmdline.add("web.allow_push=*");
+		cmdline.add("--config");
+		cmdline.add("web.push_ssl=False");
+		cmdline.add("--config");
+		cmdline.add("server.validate=True");
+		cmdline.add("--config");
+		cmdline.add(String.format("web.port=%d", port()));
+		if (!publish) {
+			cmdline.add("--config");
+			cmdline.add("phases.publish=False");
+		}
+		cmdline.add("serve");
+		serverProcess = new ProcessBuilder(cmdline).directory(dir).start();
+		Thread.sleep(500);
+		return this;
+	}
+	
+	public URL getURL() throws MalformedURLException {
+		return new URL(String.format("http://localhost:%d/", port()));
+	}
+
+	public int port() {
+		return 9090;
+	}
+	
+	public void stop() {
+		if (serverProcess == null) {
+			return;
+		}
+		// Process#destroy() may not cope well with shell scripts and child processes;
+		// if so, write the server pid to a file and send a 'kill <pid>' here instead
+		serverProcess.destroy();
+		serverProcess = null;
+	}
+}
\ No newline at end of file
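
The tests below all follow the same try/finally pattern around the wrapped process (note the fixed port 9090 and the crude 500 ms readiness wait in start()):

	HgServer server = new HgServer().publishing(false).start(repoDir);
	try {
		HgRemoteRepository remote = new HgLookup().detect(server.getURL());
		// exercise push/pull against 'remote'
	} finally {
		server.stop();
	}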
--- a/test/org/tmatesoft/hg/test/RepoUtils.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/test/org/tmatesoft/hg/test/RepoUtils.java	Wed Jul 10 11:53:19 2013 +0200
@@ -34,6 +34,7 @@
 
 import org.tmatesoft.hg.core.HgException;
 import org.tmatesoft.hg.core.HgInitCommand;
+import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.internal.FileUtils;
 import org.tmatesoft.hg.internal.StreamLogFacility;
 import org.tmatesoft.hg.repo.HgRepository;
@@ -114,7 +115,7 @@
 				throw new UnsupportedOperationException();
 			}
 		};
-		FileUtils fu = new FileUtils(new StreamLogFacility(Debug, true, System.err));
+		FileUtils fu = new FileUtils(new StreamLogFacility(Debug, true, System.err), RepoUtils.class);
 		String srcPrefix = srcDir.getAbsolutePath();
 		while (it.hasNext()) {
 			File next = it.next();
@@ -190,4 +191,18 @@
 		}
 		dest.delete();
 	}
+
+	static Nodeid[] allRevisions(HgRepository repo) {
+		Nodeid[] allRevs = new Nodeid[repo.getChangelog().getRevisionCount()];
+		for (int i = 0; i < allRevs.length; i++) {
+			allRevs[i] = repo.getChangelog().getRevision(i);
+		}
+		return allRevs;
+	}
+
+	static void assertHgVerifyOk(ErrorCollectorExt errorCollector, File repoLoc) throws InterruptedException, IOException {
+		ExecHelper verifyRun = new ExecHelper(new OutputParser.Stub(), repoLoc);
+		verifyRun.run("hg", "verify");
+		errorCollector.assertEquals("hg verify", 0, verifyRun.getExitValue());
+	}
 }
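
Both helpers recur throughout the tests below; a minimal combination (repoLoc and errorCollector as declared in any of the test classes):

	HgRepository repo = new HgLookup().detect(repoLoc);
	Nodeid[] revs = RepoUtils.allRevisions(repo);        // every changeset, natural order
	RepoUtils.assertHgVerifyOk(errorCollector, repoLoc); // shells out to 'hg verify'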
--- a/test/org/tmatesoft/hg/test/TestAuxUtilities.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/test/org/tmatesoft/hg/test/TestAuxUtilities.java	Wed Jul 10 11:53:19 2013 +0200
@@ -23,6 +23,7 @@
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.Arrays;
 
 import org.junit.Assert;
 import org.junit.Rule;
@@ -62,25 +63,31 @@
 	@Test
 	public void testArrayHelper() {
 		String[] initial = {"d", "w", "k", "b", "c", "i", "a", "r", "e", "h" };
-		ArrayHelper ah = new ArrayHelper();
+		ArrayHelper<String> ah = new ArrayHelper<String>(initial);
 		String[] result = initial.clone();
-		ah.sort(result);
-		String[] restored = restore(result, ah.getReverse());
+		ah.sort(result, false, false);
+		String[] restored = restore(result, ah.getReverseIndexes());
 		assertArrayEquals(initial, restored);
 		//
 		// few elements are on the right place from the very start and do not shift during sort.
 		// make sure for them we've got correct reversed indexes as well
 		initial = new String[] {"d", "h", "c", "b", "k", "i", "a", "r", "e", "w" };
-		ah.sort(result = initial.clone());
-		restored = restore(result, ah.getReverse());
+		ah = new ArrayHelper<String>(initial);
+		ah.sort(result = new String[initial.length], true, true);
+		restored = restore(result, ah.getReverseIndexes());
 		assertArrayEquals(initial, restored);
+		for (int i = 0; i < initial.length; i++) {
+			String s = initial[i];
+			errorCollector.assertEquals(i, ah.binarySearch(s, -1));
+			errorCollector.assertEquals(Arrays.binarySearch(result, s), ah.binarySearchSorted(s));
+		}
 	}
 
 	private static String[] restore(String[] sorted, int[] sortReverse) {
 		String[] rebuilt = new String[sorted.length];
 		for (int i = 0; i < sorted.length; i++) {
 			int indexInOriginal = sortReverse[i];
-			rebuilt[indexInOriginal-1] = sorted[i];
+			rebuilt[indexInOriginal] = sorted[i];
 		}
 		return rebuilt;
 	}
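
Note the reverse indexes are now 0-based (the old ArrayHelper handed out 1-based values, hence the dropped -1 in restore()). The asserted invariants, spelled out on a tiny input:

	ArrayHelper<String> ah = new ArrayHelper<String>(new String[] { "c", "a", "b" });
	String[] sorted = new String[3];
	ah.sort(sorted, true, true);            // fills sorted == { "a", "b", "c" }, as in the second test case
	int[] rev = ah.getReverseIndexes();     // { 1, 2, 0 }: original position of each sorted element
	assert ah.binarySearch("b", -1) == 2;   // index in the original array
	assert ah.binarySearchSorted("b") == 1; // index in the sorted view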
--- a/test/org/tmatesoft/hg/test/TestCommit.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/test/org/tmatesoft/hg/test/TestCommit.java	Wed Jul 10 11:53:19 2013 +0200
@@ -20,7 +20,6 @@
 import static org.tmatesoft.hg.repo.HgRepository.*;
 
 import java.io.File;
-import java.io.IOException;
 import java.util.List;
 
 import org.junit.Rule;
@@ -44,7 +43,9 @@
 import org.tmatesoft.hg.internal.Transaction;
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgPhase;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRepositoryFiles;
 import org.tmatesoft.hg.util.Outcome;
 import org.tmatesoft.hg.util.Path;
 
@@ -158,7 +159,7 @@
 		// check if cached value in hgRepo got updated
 		errorCollector.assertEquals("branch1", hgRepo.getWorkingCopyBranchName());
 		//
-		assertHgVerifyOk(repoLoc);
+		RepoUtils.assertHgVerifyOk(errorCollector, repoLoc);
 	}
 
 	/**
@@ -190,7 +191,7 @@
 		new HgCatCommand(hgRepo).file(Path.create("xx")).changeset(commitRev).execute(sink);
 		assertArrayEquals("xyz".getBytes(), sink.toArray());
 		//
-		assertHgVerifyOk(repoLoc);
+		RepoUtils.assertHgVerifyOk(errorCollector, repoLoc);
 	}
 	/**
 	 * perform few commits one by one, into different branches
@@ -238,12 +239,14 @@
 		errorCollector.assertEquals("FIRST", c1.getComment());
 		errorCollector.assertEquals("SECOND", c2.getComment());
 		errorCollector.assertEquals("THIRD", c3.getComment());
-		assertHgVerifyOk(repoLoc);
+		RepoUtils.assertHgVerifyOk(errorCollector, repoLoc);
 	}
 	
 	@Test
 	public void testCommandBasics() throws Exception {
 		File repoLoc = RepoUtils.cloneRepoToTempLocation("log-1", "test-commit-cmd", false);
+		// PhasesHelper relies on the file's existence to tell whether phases are enabled
+		RepoUtils.createFile(new File(repoLoc, HgRepositoryFiles.Phaseroots.getPath()), "");
 		HgRepository hgRepo = new HgLookup().detect(repoLoc);
 		HgDataFile dfB = hgRepo.getFileNode("b");
 		assertTrue("[sanity]", dfB.exists());
@@ -285,7 +288,13 @@
 		errorCollector.assertEquals(csets.get(1).getNodeid(), c2);
 		errorCollector.assertEquals(csets.get(0).getComment(), "FIRST");
 		errorCollector.assertEquals(csets.get(1).getComment(), "SECOND");
-		assertHgVerifyOk(repoLoc);
+		RepoUtils.assertHgVerifyOk(errorCollector, repoLoc);
+		// new commits are drafts by default, check our commit respects this
+		// TODO more tests with children of changesets with draft, secret or public phases (latter - 
+		// new commit is child of public, but there are other commits with draft/secret phases - ensure they are intact)
+		assertEquals(HgPhase.Draft, HgPhase.parse(hgRepo.getConfiguration().getStringValue("phases", "new-commit", HgPhase.Draft.mercurialString())));
+		errorCollector.assertEquals(HgPhase.Draft, csets.get(0).getPhase());
+		errorCollector.assertEquals(HgPhase.Draft, csets.get(1).getPhase());
 	}
 	
 	@Test
@@ -478,13 +487,7 @@
 		errorCollector.assertTrue(status.get(Kind.Modified).contains(dfB.getPath()));
 		errorCollector.assertTrue(status.get(Kind.Removed).contains(dfD.getPath()));
 		
-		assertHgVerifyOk(repoLoc);
-	}
-	
-	private void assertHgVerifyOk(File repoLoc) throws InterruptedException, IOException {
-		ExecHelper verifyRun = new ExecHelper(new OutputParser.Stub(), repoLoc);
-		verifyRun.run("hg", "verify");
-		errorCollector.assertEquals("hg verify", 0, verifyRun.getExitValue());
+		RepoUtils.assertHgVerifyOk(errorCollector, repoLoc);
 	}
 	
 	private Transaction newTransaction(SessionContext.Source ctxSource) {
--- a/test/org/tmatesoft/hg/test/TestIncoming.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/test/org/tmatesoft/hg/test/TestIncoming.java	Wed Jul 10 11:53:19 2013 +0200
@@ -115,8 +115,8 @@
 		HashSet<Nodeid> set = new HashSet<Nodeid>(liteResult);
 		for (Nodeid nid : expected) {
 			boolean removed = set.remove(nid);
-			errorCollector.checkThat(what + " Missing " +  nid.shortNotation() + " in HgIncomingCommand.execLite result", removed, equalTo(true));
+			errorCollector.checkThat(what + " Missing " +  nid.shortNotation() + " in execLite result", removed, equalTo(true));
 		}
-		errorCollector.checkThat(what + " Superfluous cset reported by HgIncomingCommand.execLite", set.isEmpty(), equalTo(true));
+		errorCollector.checkThat(what + " Superfluous cset reported by execLite", set.isEmpty(), equalTo(true));
 	}
 }
--- a/test/org/tmatesoft/hg/test/TestInflaterDataAccess.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/test/org/tmatesoft/hg/test/TestInflaterDataAccess.java	Wed Jul 10 11:53:19 2013 +0200
@@ -51,7 +51,7 @@
 	@Test
 	public void testSeek() throws Exception {
 		DataAccess zip = zip(testContent1);
-		InflaterDataAccess ida = new InflaterDataAccess(zip, 0, zip.length(), -1, new Inflater(), new byte[25]);
+		InflaterDataAccess ida = new InflaterDataAccess(zip, 0, zip.length(), -1, new Inflater(), new byte[25], null);
 		ida.seek(20);
 		final int bufferCapacity = 10;
 		ByteBuffer chunk1 = ByteBuffer.allocate(bufferCapacity);
@@ -66,15 +66,15 @@
 	@Test
 	public void testLength() throws Exception {
 		DataAccess zip = zip(testContent1);
-		InflaterDataAccess ida = new InflaterDataAccess(zip, 0, zip.length(), -1, new Inflater(), new byte[25]);
+		InflaterDataAccess ida = new InflaterDataAccess(zip, 0, zip.length(), -1, new Inflater(), new byte[25], null);
 		errorCollector.assertEquals("Plain #length()", testContent1.length, ida.length());
 		//
-		ida = new InflaterDataAccess(zip, 0, zip.length(), -1, new Inflater(), new byte[25]);
+		ida = new InflaterDataAccess(zip, 0, zip.length(), -1, new Inflater(), new byte[25], null);
 		byte[] dummy = new byte[30];
 		ida.readBytes(dummy, 0, dummy.length);
 		errorCollector.assertEquals("#length() after readBytes()", testContent1.length, ida.length());
 		//
-		ida = new InflaterDataAccess(zip, 0, zip.length(), -1, new Inflater(), new byte[25]);
+		ida = new InflaterDataAccess(zip, 0, zip.length(), -1, new Inflater(), new byte[25], null);
 		// consume most of the stream, so that all original compressed data is already read
 		dummy = new byte[testContent1.length - 1];
 		ida.readBytes(dummy, 0, dummy.length);
@@ -86,7 +86,7 @@
 	@Test
 	public void testReadBytes() throws Exception {
 		DataAccess zip = zip(testContent1);
-		InflaterDataAccess ida = new InflaterDataAccess(zip, 0, zip.length(), -1, new Inflater(), new byte[25]);
+		InflaterDataAccess ida = new InflaterDataAccess(zip, 0, zip.length(), -1, new Inflater(), new byte[25], null);
 		ida.skip(10);
 		byte[] chunk1 = new byte[22];
 		ida.readBytes(chunk1, 0, 20);
--- a/test/org/tmatesoft/hg/test/TestOutgoing.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/test/org/tmatesoft/hg/test/TestOutgoing.java	Wed Jul 10 11:53:19 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,19 +16,23 @@
  */
 package org.tmatesoft.hg.test;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
 import java.util.List;
 
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
+import org.tmatesoft.hg.core.HgCheckoutCommand;
+import org.tmatesoft.hg.core.HgCommitCommand;
 import org.tmatesoft.hg.core.HgLogCommand;
 import org.tmatesoft.hg.core.HgOutgoingCommand;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgLookup;
 import org.tmatesoft.hg.repo.HgRemoteRepository;
+import org.tmatesoft.hg.repo.HgRepository;
 
 /**
  *
@@ -71,10 +75,10 @@
 			TestIncoming.report(collector, outParser, liteResult, errorCollector);
 			//
 			File f = new File(dest, "Test.txt");
-			append(f, "1");
+			RepoUtils.createFile(f, "1");
 			eh0.run("hg", "add");
 			eh0.run("hg", "commit", "-m", "1");
-			append(f, "2");
+			RepoUtils.modifyFileAppend(f, "2");
 			eh0.run("hg", "commit", "-m", "2");
 			//
 			cmd = new HgOutgoingCommand(lookup.detect(dest)).against(hgRemote);
@@ -85,10 +89,41 @@
 			TestIncoming.report(collector, outParser, liteResult, errorCollector);
 		}
 	}
-
-	static void append(File f, String s) throws IOException {
-		FileWriter fw = new FileWriter(f);
-		fw.append(s);
-		fw.close();
+	
+	/**
+	 * Issue 47: Incorrect set of outgoing changes when revision spins off prior to common revision of local and remote repos
+	 */
+	@Test
+	public void testOutgoingPreceedsCommon() throws Exception {
+		File srcRepoLoc = RepoUtils.cloneRepoToTempLocation("test-annotate", "test-outgoing-src", false);
+		File dstRepoLoc = RepoUtils.cloneRepoToTempLocation("test-annotate", "test-outgoing-dst", false);
+		File f1 = new File(srcRepoLoc, "file1");
+		assertTrue("[sanity]", f1.canWrite());
+		HgServer server = new HgServer().start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			new HgCheckoutCommand(srcRepo).changeset(6).clean(true).execute();
+			assertEquals("[sanity]", "with-merge", srcRepo.getWorkingCopyBranchName());
+			RepoUtils.modifyFileAppend(f1, "change1");
+			new HgCommitCommand(srcRepo).message("Commit 1").execute();
+			new HgCheckoutCommand(srcRepo).changeset(5).clean(true).execute();
+			assertEquals("[sanity]", "no-merge", srcRepo.getWorkingCopyBranchName());
+			RepoUtils.modifyFileAppend(f1, "change2");
+			new HgCommitCommand(srcRepo).message("Commit 2").execute();
+			//
+			HgOutgoingCommand cmd = new HgOutgoingCommand(srcRepo).against(dstRemote);
+			LogOutputParser outParser = new LogOutputParser(true);
+			ExecHelper eh = new ExecHelper(outParser, srcRepoLoc);
+			HgLogCommand.CollectHandler collector = new HgLogCommand.CollectHandler();
+			//
+			List<Nodeid> liteResult = cmd.executeLite();
+			cmd.executeFull(collector);
+			eh.run("hg", "outgoing", "--debug", dstRemote.getLocation());
+			TestIncoming.report(collector, outParser, liteResult, errorCollector);
+		} finally {
+			server.stop();
+		}
 	}
 }
--- a/test/org/tmatesoft/hg/test/TestPhases.java	Fri Jul 05 20:42:45 2013 +0200
+++ b/test/org/tmatesoft/hg/test/TestPhases.java	Wed Jul 10 11:53:19 2013 +0200
@@ -18,12 +18,16 @@
 
 import static org.junit.Assert.*;
 
+import java.util.ArrayList;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.junit.Rule;
 import org.junit.Test;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.Internals;
 import org.tmatesoft.hg.internal.PhasesHelper;
+import org.tmatesoft.hg.internal.RevisionSet;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgInternals;
 import org.tmatesoft.hg.repo.HgLookup;
@@ -67,6 +71,41 @@
 		final long end = System.nanoTime();
 		System.out.printf("With ParentWalker(simulates log command for whole repo): %,d μs (pw init: %,d ns)\n", (end - start1)/1000, start2 - start1);
 	}
+	
+	@Test
+	public void testAllSecretAndDraft() throws Exception {
+		HgRepository repo = Configuration.get().find("test-phases");
+		Internals implRepo = HgInternals.getImplementationRepo(repo);
+		HgPhase[] expected = readPhases(repo);
+		ArrayList<Nodeid> secret = new ArrayList<Nodeid>();
+		ArrayList<Nodeid> draft = new ArrayList<Nodeid>();
+		ArrayList<Nodeid> pub = new ArrayList<Nodeid>();
+		for (int i = 0; i < expected.length; i++) {
+			Nodeid n = repo.getChangelog().getRevision(i);
+			switch (expected[i]) {
+			case Secret : secret.add(n); break; 
+			case Draft : draft.add(n); break;
+			case Public : pub.add(n); break;
+			default : throw new IllegalStateException();
+			}
+		}
+		final RevisionSet rsSecret = new RevisionSet(secret);
+		final RevisionSet rsDraft = new RevisionSet(draft);
+		assertFalse("[sanity]", rsSecret.isEmpty());
+		assertFalse("[sanity]", rsDraft.isEmpty());
+		HgParentChildMap<HgChangelog> pw = new HgParentChildMap<HgChangelog>(repo.getChangelog());
+		pw.init();
+		PhasesHelper ph1 = new PhasesHelper(implRepo, null);
+		PhasesHelper ph2 = new PhasesHelper(implRepo, pw);
+		RevisionSet s1 = ph1.allSecret().symmetricDifference(rsSecret);
+		RevisionSet s2 = ph2.allSecret().symmetricDifference(rsSecret);
+		errorCollector.assertTrue("Secret,no ParentChildMap:" + s1.toString(), s1.isEmpty());
+		errorCollector.assertTrue("Secret, with ParentChildMap:" + s2.toString(), s2.isEmpty());
+		RevisionSet s3 = ph1.allDraft().symmetricDifference(rsDraft);
+		RevisionSet s4 = ph2.allDraft().symmetricDifference(rsDraft);
+		errorCollector.assertTrue("Draft,no ParentChildMap:" + s3.toString(), s3.isEmpty());
+		errorCollector.assertTrue("Draft, with ParentChildMap:" + s4.toString(), s4.isEmpty());
+	}
 
 	private HgPhase[] initAndCheck(PhasesHelper ph, HgPhase[] expected) throws HgRuntimeException {
 		HgChangelog clog = ph.getRepo().getChangelog();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/org/tmatesoft/hg/test/TestPull.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.test;
+
+import static org.tmatesoft.hg.repo.HgRepository.TIP;
+
+import java.io.File;
+import java.util.List;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.tmatesoft.hg.core.HgIncomingCommand;
+import org.tmatesoft.hg.core.HgPullCommand;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgRemoteRepository;
+import org.tmatesoft.hg.repo.HgRepository;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class TestPull {
+
+	@Rule
+	public ErrorCollectorExt errorCollector = new ErrorCollectorExt();
+	
+	@Test
+	public void testPullToEmpty() throws Exception {
+		File srcRepoLoc = RepoUtils.cloneRepoToTempLocation("test-annotate", "test-pull2empty-src", false);
+		File dstRepoLoc = RepoUtils.initEmptyTempRepo("test-pull2empty-dst");
+		HgServer server = new HgServer().start(srcRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRemoteRepository srcRemote = hgLookup.detect(server.getURL());
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			HgPullCommand cmd = new HgPullCommand(dstRepo).source(srcRemote);
+			cmd.execute();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			checkRepositoriesAreSame(srcRepo, dstRepo);
+			final List<Nodeid> incoming = new HgIncomingCommand(dstRepo).against(srcRemote).executeLite();
+			errorCollector.assertTrue(incoming.toString(), incoming.isEmpty());
+			RepoUtils.assertHgVerifyOk(errorCollector, dstRepoLoc);
+		} finally {
+			server.stop();
+		}
+	}
+	
+	// test when pull comes with a new file (i.e. whether AddRevInspector/RevlogStreamWriter is ok with a file that doesn't exist)
+
+	private void checkRepositoriesAreSame(HgRepository srcRepo, HgRepository dstRepo) {
+		// XXX copy of TestPush#checkRepositoriesAreSame
+		errorCollector.assertEquals(srcRepo.getChangelog().getRevisionCount(), dstRepo.getChangelog().getRevisionCount());
+		errorCollector.assertEquals(srcRepo.getChangelog().getRevision(0), dstRepo.getChangelog().getRevision(0));
+		errorCollector.assertEquals(srcRepo.getChangelog().getRevision(TIP), dstRepo.getChangelog().getRevision(TIP));
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/org/tmatesoft/hg/test/TestPush.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.test;
+
+import static org.junit.Assert.*;
+import static org.tmatesoft.hg.repo.HgRepository.TIP;
+
+import java.io.File;
+import java.util.List;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.tmatesoft.hg.core.HgCheckoutCommand;
+import org.tmatesoft.hg.core.HgCommitCommand;
+import org.tmatesoft.hg.core.HgOutgoingCommand;
+import org.tmatesoft.hg.core.HgPushCommand;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.PhasesHelper;
+import org.tmatesoft.hg.internal.RevisionSet;
+import org.tmatesoft.hg.repo.HgBookmarks;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgInternals;
+import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgPhase;
+import org.tmatesoft.hg.repo.HgRemoteRepository;
+import org.tmatesoft.hg.repo.HgRepository;
+
+/**
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class TestPush {
+
+	@Rule
+	public ErrorCollectorExt errorCollector = new ErrorCollectorExt();
+
+	@Test
+	public void testPushToEmpty() throws Exception {
+		File srcRepoLoc = RepoUtils.cloneRepoToTempLocation("test-annotate", "test-push2empty-src", false);
+		File dstRepoLoc = RepoUtils.initEmptyTempRepo("test-push2empty-dst");
+		HgServer server = new HgServer().start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			HgPushCommand cmd = new HgPushCommand(srcRepo);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			cmd.destination(dstRemote);
+			cmd.execute();
+			final HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			checkRepositoriesAreSame(srcRepo, dstRepo);
+			final List<Nodeid> outgoing = new HgOutgoingCommand(srcRepo).against(dstRemote).executeLite();
+			errorCollector.assertTrue(outgoing.toString(), outgoing.isEmpty());
+		} finally {
+			server.stop();
+		}
+	}
+
+	@Test
+	public void testPushChanges() throws Exception {
+		File srcRepoLoc = RepoUtils.cloneRepoToTempLocation("test-annotate", "test-push-src", false);
+		File dstRepoLoc = RepoUtils.cloneRepoToTempLocation("test-annotate", "test-push-dst", false);
+		File f1 = new File(srcRepoLoc, "file1");
+		assertTrue("[sanity]", f1.canWrite());
+		HgServer server = new HgServer().start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			RepoUtils.modifyFileAppend(f1, "change1");
+			new HgCommitCommand(srcRepo).message("Commit 1").execute();
+			new HgCheckoutCommand(srcRepo).changeset(7).clean(true).execute();
+			assertEquals("[sanity]", "no-merge", srcRepo.getWorkingCopyBranchName());
+			RepoUtils.modifyFileAppend(f1, "change2");
+			new HgCommitCommand(srcRepo).message("Commit 2").execute();
+			//
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			checkRepositoriesAreSame(srcRepo, hgLookup.detect(dstRepoLoc));
+			final List<Nodeid> outgoing = new HgOutgoingCommand(srcRepo).against(dstRemote).executeLite();
+			errorCollector.assertTrue(outgoing.toString(), outgoing.isEmpty());
+		} finally {
+			server.stop();
+		}
+	}
+	
+	@Test
+	public void testPushToNonPublishingServer() throws Exception {
+		// check drafts are same as on server
+		// copy, not clone as latter updates phase information
+		File srcRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-nopub-src");
+		File dstRepoLoc = RepoUtils.initEmptyTempRepo("test-push-nopub-dst");
+		File f1 = new File(srcRepoLoc, "hello.c");
+		assertTrue("[sanity]", f1.canWrite());
+		HgServer server = new HgServer().publishing(false).start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			PhasesHelper phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			final RevisionSet allDraft = phaseHelper.allDraft();
+			assertFalse("[sanity]", allDraft.isEmpty());
+			final int publicCsetToBranchAt = 4;
+			assertEquals("[sanity]", HgPhase.Public, phaseHelper.getPhase(publicCsetToBranchAt, null));
+			// in addition to existing draft csets, add one more draft, branching at some other public revision
+			new HgCheckoutCommand(srcRepo).changeset(publicCsetToBranchAt).clean(true).execute();
+			RepoUtils.modifyFileAppend(f1, "// aaa");
+			final HgCommitCommand commitCmd = new HgCommitCommand(srcRepo).message("Commit aaa");
+			assertTrue(commitCmd.execute().isOk());
+			Nodeid newCommit = commitCmd.getCommittedRevision();
+			//
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			final HgChangelog srcClog = srcRepo.getChangelog();
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			// refresh PhasesHelper
+			phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			// check the phase didn't change
+			errorCollector.assertEquals(HgPhase.Draft, phaseHelper.getPhase(srcClog.getRevisionIndex(newCommit), newCommit));
+			for (Nodeid n : allDraft) {
+				// check drafts from src were actually pushed to dst 
+				errorCollector.assertTrue(dstClog.isKnown(n));
+				// check drafts didn't change their phase
+				errorCollector.assertEquals(HgPhase.Draft, phaseHelper.getPhase(srcClog.getRevisionIndex(n), n));
+			}
+		} finally {
+			server.stop();
+		}
+	}
+	
+	/**
+	 * If the server reports revisions we know as drafts to be public, update their phase locally
+	 */
+	@Test
+	public void testPushUpdatesPublishedDrafts() throws Exception {
+		/* o		r9, secret
+		 * |  o		r8, draft
+		 * |  |
+		 * |  o		r7, draft
+		 * o  |		r6, secret 
+		 * | /
+		 * o		r5, draft
+		 * |
+		 * o		r4, public
+		 */
+		// remote: r5 -> public, r6 -> draft, r8 -> secret
+		// local: new draft from r4, push
+		File srcRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-phase-update-1-src");
+		File dstRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-phase-update-1-dst");
+		File f1 = new File(srcRepoLoc, "hello.c");
+		assertTrue("[sanity]", f1.canWrite());
+		final HgLookup hgLookup = new HgLookup();
+		final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+		final ExecHelper dstRun = new ExecHelper(new OutputParser.Stub(), dstRepoLoc);
+		final int publicCsetToBranchAt = 4;
+		final int r5 = 5, r6 = 6, r8 = 8;
+		PhasesHelper srcPhase = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+		assertEquals("[sanity]", HgPhase.Draft, srcPhase.getPhase(r5, null));
+		assertEquals("[sanity]", HgPhase.Secret, srcPhase.getPhase(r6, null));
+		assertEquals("[sanity]", HgPhase.Draft, srcPhase.getPhase(r8, null));
+		// change phases in the repository behind the remote server:
+		dstRun.exec("hg", "phase", "--public", String.valueOf(r5));
+		assertEquals(0, dstRun.getExitValue());
+		dstRun.exec("hg", "phase", "--draft", String.valueOf(r6));
+		assertEquals(0, dstRun.getExitValue());
+		dstRun.exec("hg", "phase", "--secret", "--force", String.valueOf(r8));
+		assertEquals(0, dstRun.getExitValue());
+		HgServer server = new HgServer().publishing(false).start(dstRepoLoc);
+		try {
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			// commit new draft head
+			new HgCheckoutCommand(srcRepo).changeset(publicCsetToBranchAt).clean(true).execute();
+			RepoUtils.modifyFileAppend(f1, "// aaa");
+			final HgCommitCommand commitCmd = new HgCommitCommand(srcRepo).message("Commit aaa");
+			assertTrue(commitCmd.execute().isOk());
+			final Nodeid newCommit = commitCmd.getCommittedRevision();
+			//
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			// refresh phase information
+			srcPhase = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			// r5 and r6 are changed to match server phases (more exposed)
+			errorCollector.assertEquals(HgPhase.Public, srcPhase.getPhase(r5, null));
+			errorCollector.assertEquals(HgPhase.Draft, srcPhase.getPhase(r6, null));
+			// r8 is secret on the server, but a phase can't become less exposed locally, so it stays draft
+			errorCollector.assertEquals(HgPhase.Draft, srcPhase.getPhase(r8, null));
+			//
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			assertTrue(dstClog.isKnown(newCommit));
+			PhasesHelper dstPhase = new PhasesHelper(HgInternals.getImplementationRepo(dstRepo));
+			errorCollector.assertEquals(HgPhase.Draft, dstPhase.getPhase(dstClog.getRevisionIndex(newCommit), newCommit));
+			// re-check after inspecting dst: r8, secret on the server, is still draft locally
+			errorCollector.assertEquals(HgPhase.Draft, srcPhase.getPhase(r8, null));
+		} finally {
+			server.stop();
+		}
+	}
+	
+	/**
+	 * Update phases of local revisions externally, then verify a push propagates the new phases
+	 */
+	@Test
+	public void testPushPublishAndUpdates() throws Exception {
+		File srcRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-phase-update-2-src");
+		File dstRepoLoc = RepoUtils.initEmptyTempRepo("test-push-phase-update-2-dst");
+		final int r4 = 4, r5 = 5, r6 = 6, r9 = 9;
+		HgServer server = new HgServer().publishing(false).start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			//
+			// make sure the pushed repository got the same draft root
+			final Nodeid r4PublicHead = srcRepo.getChangelog().getRevision(r4);
+			final Nodeid r5DraftRoot = srcRepo.getChangelog().getRevision(r5);
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			PhasesHelper dstPhase = new PhasesHelper(HgInternals.getImplementationRepo(dstRepo));
+			assertEquals(HgPhase.Public, dstPhase.getPhase(dstClog.getRevisionIndex(r4PublicHead), r4PublicHead));
+			assertEquals(HgPhase.Draft, dstPhase.getPhase(dstClog.getRevisionIndex(r5DraftRoot), r5DraftRoot));
+			//
+			// now, graduate some local revisions: r5: draft->public, r6: secret->public, r9: secret->draft
+			final ExecHelper srcRun = new ExecHelper(new OutputParser.Stub(), srcRepoLoc);
+			srcRun.exec("hg", "phase", "--public", String.valueOf(r5));
+			srcRun.exec("hg", "phase", "--public", String.valueOf(r6));
+			srcRun.exec("hg", "phase", "--draft", String.valueOf(r9));
+			// the push command creates its own PhasesHelper, which picks up these external changes
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			final Nodeid r6Nodeid = srcRepo.getChangelog().getRevision(r6);
+			final Nodeid r9Nodeid = srcRepo.getChangelog().getRevision(r9);
+			// refresh 
+			dstPhase = new PhasesHelper(HgInternals.getImplementationRepo(dstRepo));
+			// plain assert, not errorCollector: subsequent checks would fail anyway if these formerly secret revisions didn't reach dst
+			assertTrue(dstClog.isKnown(r6Nodeid));
+			assertTrue(dstClog.isKnown(r9Nodeid));
+			errorCollector.assertEquals(HgPhase.Public, dstPhase.getPhase(dstClog.getRevisionIndex(r5DraftRoot), r5DraftRoot));
+			errorCollector.assertEquals(HgPhase.Public, dstPhase.getPhase(dstClog.getRevisionIndex(r6Nodeid), r6Nodeid));
+			errorCollector.assertEquals(HgPhase.Draft, dstPhase.getPhase(dstClog.getRevisionIndex(r9Nodeid), r9Nodeid));
+		} finally {
+			server.stop();
+		}
+	}
+
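+	/**
+	 * Push drafts to a publishing server; after the push they shall become
+	 * public in the source repository.
+	 */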
+	@Test
+	public void testPushToPublishingServer() throws Exception {
+		// copy, not clone, as the latter would update phase information
+		File srcRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-pub-src");
+		File dstRepoLoc = RepoUtils.initEmptyTempRepo("test-push-pub-dst");
+		HgServer server = new HgServer().publishing(true).start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			PhasesHelper phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			final RevisionSet allDraft = phaseHelper.allDraft();
+			assertFalse("[sanity]", allDraft.isEmpty());
+			// push all changes
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			final HgChangelog srcClog = srcRepo.getChangelog();
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			// refresh PhasesHelper
+			phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			for (Nodeid n : allDraft) {
+				// check drafts from src were actually pushed to dst 
+				errorCollector.assertTrue(dstClog.isKnown(n));
+				// check drafts became public
+				errorCollector.assertEquals(HgPhase.Public, phaseHelper.getPhase(srcClog.getRevisionIndex(n), n));
+			}
+		} finally {
+			server.stop();
+		}
+	}
+
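+	/**
+	 * Secret changesets shall not be pushed; verify dst got everything
+	 * except the secret revisions.
+	 */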
+	@Test
+	public void testPushSecretChangesets() throws Exception {
+		// copy, not clone, as the latter would update phase information
+		File srcRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-no-secret-src");
+		File dstRepoLoc = RepoUtils.initEmptyTempRepo("test-push-no-secret-dst");
+		HgServer server = new HgServer().start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			PhasesHelper phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			final RevisionSet allSecret = phaseHelper.allSecret();
+			assertFalse("[sanity]", allSecret.isEmpty());
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			final HgChangelog srcClog = srcRepo.getChangelog();
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			errorCollector.assertEquals(srcClog.getRevisionCount() - allSecret.size(), dstClog.getRevisionCount());
+			for (Nodeid n : allSecret) {		
+				errorCollector.assertTrue(n.toString(), !dstClog.isKnown(n));
+			}
+		} finally {
+			server.stop();
+		}
+	}
+
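+	/**
+	 * Exercise the five bookmark scenarios set up below (see comments 1-5)
+	 * and verify which bookmarks move on either side after a push.
+	 */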
+	@Test
+	public void testUpdateBookmarkOnPush() throws Exception {
+		File srcRepoLoc = RepoUtils.cloneRepoToTempLocation("test-annotate", "test-push-src", false);
+		File dstRepoLoc = RepoUtils.cloneRepoToTempLocation("test-annotate", "test-push-dst", false);
+		final ExecHelper srcRun = new ExecHelper(new OutputParser.Stub(), srcRepoLoc);
+		final ExecHelper dstRun = new ExecHelper(new OutputParser.Stub(), dstRepoLoc);
+		File f1 = new File(srcRepoLoc, "file1");
+		assertTrue("[sanity]", f1.canWrite());
+		//
+		final String bm1 = "mark1", bm2 = "mark2", bm3 = "mark3", bm4 = "mark4", bm5 = "mark5";
+		final int bm2Local = 1, bm2Remote = 6, bm3Local = 7, bm3Remote = 2, bm_4_5 = 3;
+		// 1) bm1 - local active bookmark; check that push updates it in the remote
+		srcRun.exec("hg", "bookmark", bm1);
+		dstRun.exec("hg", "bookmark", "-r", "8", bm1);
+		// 2) bm2 - local bookmark points to an ancestor of the revision the remote one points to
+		srcRun.exec("hg", "bookmark", "-r", String.valueOf(bm2Local), bm2);
+		dstRun.exec("hg", "bookmark", "-r", String.valueOf(bm2Remote), bm2);
+		// 3) bm3 - remote bookmark points to an ancestor of the revision the local one points to
+		srcRun.exec("hg", "bookmark", "-r", String.valueOf(bm3Local), bm3);
+		dstRun.exec("hg", "bookmark", "-r", String.valueOf(bm3Remote), bm3);
+		// 4) bm4 - remote bookmark, not known locally
+		dstRun.exec("hg", "bookmark", "-r", String.valueOf(bm_4_5), bm4);
+		// 5) bm5 - local bookmark, not known remotely
+		srcRun.exec("hg", "bookmark", "-r", String.valueOf(bm_4_5), bm5);
+		//
+		HgServer server = new HgServer().start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			RepoUtils.modifyFileAppend(f1, "change1");
+			final HgCommitCommand commitCmd = new HgCommitCommand(srcRepo).message("Commit 1");
+			assertTrue(commitCmd.execute().isOk());
+			assertEquals(bm1, srcRepo.getBookmarks().getActiveBookmarkName());
+			assertEquals(commitCmd.getCommittedRevision(), srcRepo.getBookmarks().getRevision(bm1));
+			//
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			Thread.sleep(300); // let the server perform the update
+			//
+			HgBookmarks srcBookmarks = srcRepo.getBookmarks();
+			final HgChangelog srcClog = srcRepo.getChangelog();
+			// first, check local bookmarks are intact
+			errorCollector.assertEquals(srcClog.getRevision(bm2Local), srcBookmarks.getRevision(bm2));
+			errorCollector.assertEquals(srcClog.getRevision(bm3Local), srcBookmarks.getRevision(bm3));
+			errorCollector.assertEquals(null, srcBookmarks.getRevision(bm4));
+			errorCollector.assertEquals(srcClog.getRevision(bm_4_5), srcBookmarks.getRevision(bm5));
+			// now, check remote bookmarks were touched
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			HgBookmarks dstBookmarks = dstRepo.getBookmarks();
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			// bm1 has changed and now points to the newly pushed commit.
+			// if this check fails (bm1 still points to r8), chances are the server didn't manage
+			// to update bookmarks yet (the Thread.sleep() above gives it a chance).
+			errorCollector.assertEquals(commitCmd.getCommittedRevision(), dstBookmarks.getRevision(bm1));
+			// bm2 didn't change
+			errorCollector.assertEquals(dstClog.getRevision(bm2Remote), dstBookmarks.getRevision(bm2));
+			// bm3 did change and now points to the revision we have in srcRepo
+			errorCollector.assertEquals(srcClog.getRevision(bm3Local), dstBookmarks.getRevision(bm3));
+			// bm4 is not affected
+			errorCollector.assertEquals(dstClog.getRevision(bm_4_5), dstBookmarks.getRevision(bm4));
+			// bm5 is not known remotely
+			errorCollector.assertEquals(null, dstBookmarks.getRevision(bm5));
+		} finally {
+			server.stop();
+		}
+	}
+
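+	// shallow equality check: compares revision count and the first/tip changeset revisions only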
+	private void checkRepositoriesAreSame(HgRepository srcRepo, HgRepository dstRepo) {
+		errorCollector.assertEquals(srcRepo.getChangelog().getRevisionCount(), dstRepo.getChangelog().getRevisionCount());
+		errorCollector.assertEquals(srcRepo.getChangelog().getRevision(0), dstRepo.getChangelog().getRevision(0));
+		errorCollector.assertEquals(srcRepo.getChangelog().getRevision(TIP), dstRepo.getChangelog().getRevision(TIP));
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/org/tmatesoft/hg/test/TestRevisionMaps.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.tmatesoft.hg.core.HgException;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgParentChildMap;
+import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRevisionMap;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class TestRevisionMaps {
+
+	@Rule
+	public ErrorCollectorExt errorCollector = new ErrorCollectorExt();
+
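+	/**
+	 * Exhaustive sanity of HgParentChildMap for the test-annotate repository:
+	 * parents, children, heads and the isChild relation for every revision.
+	 */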
+	@Test
+	public void testParentChildMap() throws HgException {
+		final HgRepository repo = Configuration.get().find("test-annotate");
+		Nodeid[] allRevs = RepoUtils.allRevisions(repo);
+		HgParentChildMap<HgChangelog> parentHelper = new HgParentChildMap<HgChangelog>(repo.getChangelog());
+		parentHelper.init();
+		errorCollector.assertEquals(Arrays.asList(allRevs), parentHelper.all());
+		for (Nodeid n : allRevs) {
+			errorCollector.assertTrue(parentHelper.knownNode(n));
+			// parents
+			final Nodeid p1 = parentHelper.safeFirstParent(n);
+			final Nodeid p2 = parentHelper.safeSecondParent(n);
+			errorCollector.assertFalse(p1 == null);
+			errorCollector.assertFalse(p2 == null);
+			errorCollector.assertEquals(p1.isNull() ? null : p1, parentHelper.firstParent(n));
+			errorCollector.assertEquals(p2.isNull() ? null : p2, parentHelper.secondParent(n));
+			HashSet<Nodeid> parents = new HashSet<Nodeid>();
+			boolean modified = parentHelper.appendParentsOf(n, parents);
+			errorCollector.assertEquals(p1.isNull() && p2.isNull(), !modified);
+			HashSet<Nodeid> cp = new HashSet<Nodeid>();
+			cp.add(parentHelper.firstParent(n));
+			cp.add(parentHelper.secondParent(n));
+			cp.remove(null);
+			errorCollector.assertEquals(cp, parents);
+			modified = parentHelper.appendParentsOf(n, parents);
+			errorCollector.assertFalse(modified);
+			//
+			// isChild, hasChildren, childrenOf, directChildren
+			if (!p1.isNull()) {
+				errorCollector.assertTrue(parentHelper.isChild(p1, n));
+				errorCollector.assertTrue(parentHelper.hasChildren(p1));
+				errorCollector.assertTrue(parentHelper.childrenOf(Collections.singleton(p1)).contains(n));
+				errorCollector.assertTrue(parentHelper.directChildren(p1).contains(n));
+			}
+			if (!p2.isNull()) {
+				errorCollector.assertTrue(parentHelper.isChild(p2, n));
+				errorCollector.assertTrue(parentHelper.hasChildren(p2));
+				errorCollector.assertTrue(parentHelper.childrenOf(Collections.singleton(p2)).contains(n));
+				errorCollector.assertTrue(parentHelper.directChildren(p2).contains(n));
+			}
+			errorCollector.assertFalse(parentHelper.isChild(n, p1));
+			errorCollector.assertFalse(parentHelper.isChild(n, p2));
+		}
+		// heads
+		errorCollector.assertEquals(Arrays.asList(allRevs[7], allRevs[9]), new ArrayList<Nodeid>(parentHelper.heads()));
+		// isChild
+		errorCollector.assertTrue(parentHelper.isChild(allRevs[1], allRevs[9]));
+		errorCollector.assertTrue(parentHelper.isChild(allRevs[0], allRevs[7]));
+		errorCollector.assertFalse(parentHelper.isChild(allRevs[4], allRevs[7]));
+		errorCollector.assertFalse(parentHelper.isChild(allRevs[2], allRevs[6]));
+		// childrenOf
+		errorCollector.assertEquals(Arrays.asList(allRevs[7]), parentHelper.childrenOf(Collections.singleton(allRevs[5])));
+		errorCollector.assertEquals(Arrays.asList(allRevs[8], allRevs[9]), parentHelper.childrenOf(Arrays.asList(allRevs[4], allRevs[6])));
+		errorCollector.assertEquals(Arrays.asList(allRevs[6], allRevs[8], allRevs[9]), parentHelper.childrenOf(Collections.singleton(allRevs[3])));
+		// directChildren
+		errorCollector.assertEquals(Arrays.asList(allRevs[2], allRevs[3]), parentHelper.directChildren(allRevs[1]));
+		errorCollector.assertEquals(Arrays.asList(allRevs[8]), parentHelper.directChildren(allRevs[6]));
+		errorCollector.assertEquals(Collections.emptyList(), parentHelper.directChildren(allRevs[7]));
+	}
+
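+	/**
+	 * HgRevisionMap built directly from the changelog shall map revision
+	 * indexes to nodeids and back.
+	 */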
+	@Test
+	public void testRevisionMap() throws HgException {
+		// XXX this test may benefit from an external huge repository
+		final HgRepository repo = Configuration.get().find("test-annotate");
+		Nodeid[] allRevs = RepoUtils.allRevisions(repo);
+		final HgChangelog clog = repo.getChangelog();
+		final HgRevisionMap<HgChangelog> rmap = new HgRevisionMap<HgChangelog>(clog).init();
+		doTestRevisionMap(allRevs, rmap);
+	}
+
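+	/**
+	 * HgRevisionMap obtained from an initialized HgParentChildMap shall give
+	 * the same index/revision mapping as one built directly from the changelog.
+	 */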
+	@Test
+	public void testRevisionMapFromParentChildMap() throws HgException {
+		final HgRepository repo = Configuration.get().find("test-annotate");
+		Nodeid[] allRevs = RepoUtils.allRevisions(repo);
+		HgParentChildMap<HgChangelog> parentHelper = new HgParentChildMap<HgChangelog>(repo.getChangelog());
+		parentHelper.init();
+		doTestRevisionMap(allRevs, parentHelper.getRevisionMap());
+	}
+
+	private void doTestRevisionMap(Nodeid[] allRevs, HgRevisionMap<HgChangelog> rmap) {
+		for (int i = 0; i < allRevs.length; i++) {
+			errorCollector.assertEquals(i, rmap.revisionIndex(allRevs[i]));
+			errorCollector.assertEquals(allRevs[i], rmap.revision(i));
+		}
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/org/tmatesoft/hg/test/TestRevisionSet.java	Wed Jul 10 11:53:19 2013 +0200
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.test;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.RevisionSet;
+import org.tmatesoft.hg.repo.HgChangelog;
+import org.tmatesoft.hg.repo.HgParentChildMap;
+import org.tmatesoft.hg.repo.HgRepository;
+
+/**
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class TestRevisionSet {
+	
+	@Rule
+	public ErrorCollectorExt errorCollector = new ErrorCollectorExt();
+	
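+	/**
+	 * Basic set algebra over a = {n1, n2, n3} and b = {n3, n4}: union,
+	 * intersection, difference, symmetric difference and equality.
+	 */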
+	@Test
+	public void testRegularSetOperations() {
+		Nodeid n1 = Nodeid.fromAscii("c75297c1786734589175c673db40e8ecaa032b09");
+		Nodeid n2 = Nodeid.fromAscii("3b7d51ed4c65082f9235e3459e282d7ff723aa97");
+		Nodeid n3 = Nodeid.fromAscii("14dac192aa262feb8ff6645a102648498483a188");
+		Nodeid n4 = Nodeid.fromAscii("1deea2f332183c947937f6df988c2c6417efc217");
+		Nodeid[] nodes = { n1, n2, n3 };
+		RevisionSet a = new RevisionSet(nodes);
+		Nodeid[] nodes1 = { n3, n4 };
+		RevisionSet b = new RevisionSet(nodes1);
+		Nodeid[] nodes2 = { n1, n2, n3, n4 };
+		RevisionSet union_ab = new RevisionSet(nodes2);
+		Nodeid[] nodes3 = { n3 };
+		RevisionSet intersect_ab = new RevisionSet(nodes3);
+		Nodeid[] nodes4 = { n1, n2 };
+		RevisionSet subtract_ab = new RevisionSet(nodes4);
+		Nodeid[] nodes5 = { n4 };
+		RevisionSet subtract_ba = new RevisionSet(nodes5);
+		Nodeid[] nodes6 = { n1, n2, n4 };
+		RevisionSet symDiff_ab = new RevisionSet(nodes6);
+		
+		errorCollector.assertEquals(union_ab, a.union(b));
+		errorCollector.assertEquals(union_ab, b.union(a));
+		errorCollector.assertEquals(intersect_ab, a.intersect(b));
+		errorCollector.assertEquals(intersect_ab, b.intersect(a));
+		errorCollector.assertEquals(subtract_ab, a.subtract(b));
+		errorCollector.assertEquals(subtract_ba, b.subtract(a));
+		errorCollector.assertEquals(symDiff_ab, a.symmetricDifference(b));
+		errorCollector.assertEquals(symDiff_ab, b.symmetricDifference(a));
+		Nodeid[] nodes7 = { n1, n2, n4 };
+		Nodeid[] nodes8 = { n4, n1, n2 };
+		errorCollector.assertTrue(new RevisionSet(nodes7).equals(new RevisionSet(nodes8)));
+		Nodeid[] nodes9 = {};
+		Nodeid[] nodes10 = {};
+		errorCollector.assertTrue(new RevisionSet(nodes9).equals(new RevisionSet(nodes10)));
+		Nodeid[] nodes11 = { n1 };
+		Nodeid[] nodes12 = { n2 };
+		errorCollector.assertFalse(new RevisionSet(nodes11).equals(new RevisionSet(nodes12)));
+	}
+	
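+	/**
+	 * roots() and heads() against a parent-child map, both for the complete
+	 * revision set and for subsets with a few revisions taken out.
+	 */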
+	@Test
+	public void testRootsAndHeads() throws Exception {
+		final HgRepository repo = Configuration.get().find("test-annotate");
+		Nodeid[] allRevs = RepoUtils.allRevisions(repo);
+		HgParentChildMap<HgChangelog> parentHelper = new HgParentChildMap<HgChangelog>(repo.getChangelog());
+		parentHelper.init();
+		final RevisionSet complete = new RevisionSet(allRevs);
+		Nodeid[] nodes = { allRevs[0] };
+		// roots
+		errorCollector.assertEquals(new RevisionSet(nodes), complete.roots(parentHelper));
+		Nodeid[] nodes1 = { allRevs[0], allRevs[1] };
+		RevisionSet fromR2 = complete.subtract(new RevisionSet(nodes1));
+		Nodeid[] nodes2 = { allRevs[0], allRevs[1], allRevs[2] };
+		RevisionSet fromR3 = complete.subtract(new RevisionSet(nodes2));
+		Nodeid[] nodes3 = { allRevs[2], allRevs[3] };
+		errorCollector.assertEquals(new RevisionSet(nodes3), fromR2.roots(parentHelper));
+		Nodeid[] nodes4 = { allRevs[3], allRevs[4], allRevs[5] };
+		errorCollector.assertEquals(new RevisionSet(nodes4), fromR3.roots(parentHelper));
+		Nodeid[] nodes5 = { allRevs[9], allRevs[7] };
+		// heads
+		errorCollector.assertEquals(new RevisionSet(nodes5), complete.heads(parentHelper));
+		Nodeid[] nodes6 = { allRevs[9], allRevs[8] };
+		RevisionSet toR7 = complete.subtract(new RevisionSet(nodes6));
+		Nodeid[] nodes7 = { allRevs[7], allRevs[6], allRevs[4] };
+		errorCollector.assertEquals(new RevisionSet(nodes7), toR7.heads(parentHelper));
+		Nodeid[] nodes8 = { allRevs[5], allRevs[7] };
+		RevisionSet withoutNoMergeBranch = toR7.subtract(new RevisionSet(nodes8));
+		Nodeid[] nodes9 = { allRevs[6], allRevs[4] };
+		errorCollector.assertEquals(new RevisionSet(nodes9), withoutNoMergeBranch.heads(parentHelper));
+		errorCollector.assertEquals(complete.heads(parentHelper), complete.heads(parentHelper).heads(parentHelper));
+	}
+	
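+	/**
+	 * children() and ancestors() of revision subsets, evaluated against a
+	 * parent-child map.
+	 */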
+	@Test
+	public void testAncestorsAndChildren() throws Exception {
+		final HgRepository repo = Configuration.get().find("test-annotate");
+		Nodeid[] allRevs = RepoUtils.allRevisions(repo);
+		HgParentChildMap<HgChangelog> parentHelper = new HgParentChildMap<HgChangelog>(repo.getChangelog());
+		parentHelper.init();
+		final RevisionSet complete = new RevisionSet(allRevs);
+		Nodeid[] nodes = {};
+		// children
+		errorCollector.assertTrue(new RevisionSet(nodes).children(parentHelper).isEmpty());
+		Nodeid[] nodes1 = { allRevs[8], allRevs[9] };
+		Nodeid[] nodes2 = { allRevs[4] };
+		errorCollector.assertEquals(new RevisionSet(nodes1), new RevisionSet(nodes2).children(parentHelper));
+		Nodeid[] nodes3 = { allRevs[8], allRevs[9], allRevs[4], allRevs[5], allRevs[7] };
+		// default branch and no-merge branch both descend from r2
+		RevisionSet s1 = new RevisionSet(nodes3);
+		Nodeid[] nodes4 = { allRevs[2] };
+		errorCollector.assertEquals(s1, new RevisionSet(nodes4).children(parentHelper));
+		Nodeid[] nodes5 = { allRevs[0], allRevs[1] };
+		// ancestors
+		RevisionSet fromR2 = complete.subtract(new RevisionSet(nodes5));
+		Nodeid[] nodes6 = { allRevs[9], allRevs[5], allRevs[7], allRevs[8] };
+		// neither the no-merge branch nor r9 (nor r8 itself) is among the ancestors of r8
+		RevisionSet s3 = fromR2.subtract(new RevisionSet(nodes6));
+		Nodeid[] nodes7 = { allRevs[8] };
+		errorCollector.assertEquals(s3, fromR2.ancestors(new RevisionSet(nodes7), parentHelper));
+		Nodeid[] nodes8 = { allRevs[5], allRevs[7] };
+		// ancestors of no-merge branch
+		RevisionSet branchNoMerge = new RevisionSet(nodes8);
+		Nodeid[] nodes9 = { allRevs[0], allRevs[1], allRevs[2] };
+		errorCollector.assertEquals(new RevisionSet(nodes9), complete.ancestors(branchNoMerge, parentHelper));
+		Nodeid[] nodes10 = { allRevs[2] };
+		errorCollector.assertEquals(new RevisionSet(nodes10), fromR2.ancestors(branchNoMerge, parentHelper));
+	}
+}