changeset 652:cd77bf51b562

Push: tests. Commit respects phases.new-commit setting. Fix outgoing when changes are not children of common (Issue 47)
author Artem Tikhomirov <tikhomirov.artem@gmail.com>
date Tue, 02 Jul 2013 23:21:16 +0200
parents 6e98d34eaca8
children 629a7370554c
files src/org/tmatesoft/hg/core/HgPushCommand.java src/org/tmatesoft/hg/internal/CommitFacility.java src/org/tmatesoft/hg/internal/PhasesHelper.java src/org/tmatesoft/hg/internal/RepositoryComparator.java src/org/tmatesoft/hg/internal/RevisionSet.java src/org/tmatesoft/hg/repo/HgParentChildMap.java src/org/tmatesoft/hg/repo/HgPhase.java src/org/tmatesoft/hg/repo/HgRemoteRepository.java test/org/tmatesoft/hg/test/TestCommit.java test/org/tmatesoft/hg/test/TestPush.java
diffstat 10 files changed, 408 insertions(+), 66 deletions(-) [+]
line wrap: on
line diff
--- a/src/org/tmatesoft/hg/core/HgPushCommand.java	Mon Jul 01 21:19:53 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgPushCommand.java	Tue Jul 02 23:21:16 2013 +0200
@@ -102,7 +102,7 @@
 				RevisionSet presentDraft = phaseHelper.allDraft();
 				RevisionSet secretLeft, draftLeft;
 				HgRemoteRepository.Phases remotePhases = remoteRepo.getPhases();
-				RevisionSet remoteDrafts = knownRemoteDrafts(remotePhases, parentHelper, outgoing);
+				RevisionSet remoteDrafts = knownRemoteDrafts(remotePhases, parentHelper, outgoing, presentSecret);
 				if (remotePhases.isPublishingServer()) {
 					// although it's unlikely outgoing would affect secret changesets,
 					// it doesn't hurt to check secret roots along with draft ones
@@ -136,9 +136,15 @@
 					 * Local draft roots shall be updated
 					 */
 					RevisionSet sharedDraft = presentDraft.intersect(remoteDrafts); // (I: ~presentDraft; II: ~remoteDraft
+					// XXX do I really need sharedDrafts here? why not ancestors(remoteDrafts)?
 					RevisionSet localDraftRemotePublic = presentDraft.ancestors(sharedDraft, parentHelper); // I: 0; II: those treated public on remote
+					// remoteDrafts are local revisions known as draft@remote
+					// remoteDraftsLocalPublic - revisions that would cease to be listed as draft on remote
+					RevisionSet remoteDraftsLocalPublic = remoteDrafts.ancestors(sharedDraft, parentHelper);
+					RevisionSet remoteDraftsLeft = remoteDrafts.subtract(remoteDraftsLocalPublic);
 					// forget those deemed public by remote (drafts shared by both remote and local are ok to stay)
-					draftLeft = presentDraft.subtract(localDraftRemotePublic);
+					RevisionSet combinedDraft = presentDraft.union(remoteDraftsLeft);
+					draftLeft = combinedDraft.subtract(localDraftRemotePublic);
 				}
 				final RevisionSet newDraftRoots = draftLeft.roots(parentHelper);
 				final RevisionSet newSecretRoots = secretLeft.roots(parentHelper);
@@ -192,7 +198,7 @@
 		}
 	}
 	
-	private RevisionSet knownRemoteDrafts(HgRemoteRepository.Phases remotePhases, HgParentChildMap<HgChangelog> parentHelper, RevisionSet outgoing) {
+	private RevisionSet knownRemoteDrafts(HgRemoteRepository.Phases remotePhases, HgParentChildMap<HgChangelog> parentHelper, RevisionSet outgoing, RevisionSet localSecret) {
 		ArrayList<Nodeid> knownRemoteDraftRoots = new ArrayList<Nodeid>();
 		for (Nodeid rdr : remotePhases.draftRoots()) {
 			if (parentHelper.knownNode(rdr)) {
@@ -201,7 +207,11 @@
 		}
 		// knownRemoteDraftRoots + childrenOf(knownRemoteDraftRoots) is everything remote may treat as Draft
 		RevisionSet remoteDrafts = new RevisionSet(knownRemoteDraftRoots);
-		remoteDrafts = remoteDrafts.union(remoteDrafts.children(parentHelper));
+		RevisionSet localChildren = remoteDrafts.children(parentHelper);
+		// we didn't send any local secret revision
+		localChildren = localChildren.subtract(localSecret);
+		// draft roots are among remote drafts
+		remoteDrafts = remoteDrafts.union(localChildren);
 		// 1) outgoing.children gives all local revisions accessible from outgoing.
 		// 2) outgoing.roots.children is equivalent, with a smaller intermediate set, but the way we build
 		// childrenOf doesn't really benefit from that.
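For orientation, this is how the command whose phase bookkeeping is adjusted above gets invoked; a minimal sketch mirroring the tests further below (repository path and URL are placeholders, and the URL overload of HgLookup#detect is assumed, as in the tests' hgLookup.detect(server.getURL())):

	HgLookup lookup = new HgLookup();
	HgRepository src = lookup.detect(new java.io.File("/tmp/my-repo"));
	HgRemoteRepository dst = lookup.detect(new java.net.URL("http://localhost:8000/"));
	new HgPushCommand(src).destination(dst).execute();
	// after execute() the local phase roots reflect what the server reported
	// through its "phases" pushkey namespace (everything public for a publishing server)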
--- a/src/org/tmatesoft/hg/internal/CommitFacility.java	Mon Jul 01 21:19:53 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/CommitFacility.java	Tue Jul 02 23:21:16 2013 +0200
@@ -42,6 +42,7 @@
 import org.tmatesoft.hg.internal.DataSerializer.DataSource;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgPhase;
 import org.tmatesoft.hg.repo.HgRuntimeException;
 import org.tmatesoft.hg.util.Pair;
 import org.tmatesoft.hg.util.Path;
@@ -234,6 +235,9 @@
 		if (p1Commit != NO_REVISION || p2Commit != NO_REVISION) {
 			repo.getRepo().getBookmarks().updateActive(p1Cset, p2Cset, changesetRev);
 		}
+		PhasesHelper phaseHelper = new PhasesHelper(repo);
+		HgPhase newCommitPhase = HgPhase.parse(repo.getRepo().getConfiguration().getStringValue("phases", "new-commit", HgPhase.Draft.mercurialString()));
+		phaseHelper.newCommitNode(changesetRev, newCommitPhase);
 		// TODO Revisit: might be reasonable to send out a "Repo changed" notification, to clear
 		// e.g. cached branch, tags and so on, not to rely on file change detection methods?
 		// The same notification might come useful once Pull is implemented
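The phase of a fresh commit is taken from the standard phases.new-commit setting; a hypothetical .hg/hgrc fragment and the corresponding lookup performed by CommitFacility above would be:

	// .hg/hgrc (hypothetical):
	//   [phases]
	//   new-commit = secret
	HgPhase newCommitPhase = HgPhase.parse(repo.getRepo().getConfiguration()
			.getStringValue("phases", "new-commit", HgPhase.Draft.mercurialString()));
	phaseHelper.newCommitNode(changesetRev, newCommitPhase); // records a new secret/draft root if needed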
--- a/src/org/tmatesoft/hg/internal/PhasesHelper.java	Mon Jul 01 21:19:53 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/PhasesHelper.java	Tue Jul 02 23:21:16 2013 +0200
@@ -36,6 +36,7 @@
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgInvalidStateException;
 import org.tmatesoft.hg.repo.HgParentChildMap;
 import org.tmatesoft.hg.repo.HgPhase;
 import org.tmatesoft.hg.repo.HgRepository;
@@ -174,6 +175,33 @@
 		}
 	}
 
+	public void newCommitNode(Nodeid newChangeset, HgPhase newCommitPhase) throws HgRuntimeException {
+		final int riCset = repo.getRepo().getChangelog().getRevisionIndex(newChangeset);
+		HgPhase ph = getPhase(riCset, newChangeset);
+		if (ph.compareTo(newCommitPhase) >= 0) {
+			// the present phase is already the same or more private than the desired one
+			return;
+		}
+		// newCommitPhase can't be public here, otherwise the condition above would have been satisfied
+		assert newCommitPhase != HgPhase.Public;
+		// ph is e.g. public when newCommitPhase is draft,
+		// or draft when the desired phase is secret
+		final RevisionSet rs = allOf(newCommitPhase).union(new RevisionSet(Collections.singleton(newChangeset)));
+		final RevisionSet newRoots;
+		if (parentHelper != null) {
+			newRoots = rs.roots(parentHelper);
+		} else {
+			newRoots = rs.roots(repo.getRepo());
+		}
+		if (newCommitPhase == HgPhase.Draft) {
+			updateRoots(newRoots.asList(), secretPhaseRoots);
+		} else if (newCommitPhase == HgPhase.Secret) {
+			updateRoots(draftPhaseRoots, newRoots.asList());
+		} else {
+			throw new HgInvalidStateException(String.format("Unexpected phase %s for new commits", newCommitPhase));
+		}
+	}
+
 	/**
 	 * For a given phase, collect all revisions with phase that is the same or more private (i.e. for Draft, returns Draft+Secret)
 	 * The reason is not a nice API intention (which is awful, indeed), but an ease of implementation 
--- a/src/org/tmatesoft/hg/internal/RepositoryComparator.java	Mon Jul 01 21:19:53 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RepositoryComparator.java	Tue Jul 02 23:21:16 2013 +0200
@@ -95,7 +95,21 @@
 		if (c.isEmpty()) {
 			return localRepo.all();
 		} else {
-			return localRepo.childrenOf(c);
+			RevisionSet localHeads = new RevisionSet(localRepo.heads());
+			final List<Nodeid> commonChildren = localRepo.childrenOf(c);
+			final RevisionSet rsCommonChildren = new RevisionSet(commonChildren);
+			RevisionSet headsNotFromCommon = localHeads.subtract(rsCommonChildren);
+			if (headsNotFromCommon.isEmpty()) {
+				return commonChildren;
+			}
+			RevisionSet all = new RevisionSet(localRepo.all());
+			final RevisionSet rsCommon = new RevisionSet(c);
+			RevisionSet rsAncestors = all.ancestors(headsNotFromCommon, localRepo);
+			// #ancestors gives ancestors only, we need the heads themselves included as well
+			rsAncestors = rsAncestors.union(headsNotFromCommon);
+			final RevisionSet rsAncestorsCommon = all.ancestors(rsCommon, localRepo);
+			RevisionSet outgoing = rsAncestors.subtract(rsAncestorsCommon).subtract(rsCommon);
+			return outgoing.union(rsCommonChildren).asList();
 		}
 	}
 	
--- a/src/org/tmatesoft/hg/internal/RevisionSet.java	Mon Jul 01 21:19:53 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RevisionSet.java	Tue Jul 02 23:21:16 2013 +0200
@@ -16,6 +16,8 @@
  */
 package org.tmatesoft.hg.internal;
 
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
+
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -27,6 +29,7 @@
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgParentChildMap;
+import org.tmatesoft.hg.repo.HgRepository;
 
 /**
  * Unmodifiable collection of revisions with handy set operations
@@ -72,6 +75,31 @@
 	}
 	
 	/**
+	 * Same as {@link #roots(HgParentChildMap)}, but doesn't require a parent-child map
+	 */
+	public RevisionSet roots(HgRepository repo) {
+		// TODO introduce parent access interface, use it here, provide implementations 
+		// that delegate to HgParentChildMap or HgRepository
+		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
+		final HgChangelog clog = repo.getChangelog();
+		byte[] parent1 = new byte[Nodeid.SIZE], parent2 = new byte[Nodeid.SIZE];
+		int[] parentRevs = new int[2];
+		for (Nodeid n : elements) {
+			assert clog.isKnown(n);
+			clog.parents(clog.getRevisionIndex(n), parentRevs, parent1, parent2);
+			if (parentRevs[0] != NO_REVISION && elements.contains(new Nodeid(parent1, false))) {
+				copy.remove(n);
+				continue;
+			}
+			if (parentRevs[1] != NO_REVISION && elements.contains(new Nodeid(parent2, false))) {
+				copy.remove(n);
+				continue;
+			}
+		}
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
+	}
+	
+	/**
 	 * elements of the set that have no children in this set
 	 */
 	public RevisionSet heads(HgParentChildMap<HgChangelog> ph) {
@@ -165,7 +193,7 @@
 		}
 		HashSet<Nodeid> copy = new HashSet<Nodeid>(elements);
 		copy.addAll(other.elements);
-		return new RevisionSet(copy);
+		return copy.size() == elements.size() ? this : new RevisionSet(copy);
 	}
 
 	/**
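A small usage sketch for the new overload (names are placeholders; the PhasesHelper/Internals handles are obtained as in CommitFacility above): without building an HgParentChildMap first, phase roots can now be derived straight from the changelog:

	PhasesHelper phases = new PhasesHelper(internalsRepo);     // internalsRepo: the Internals handle
	RevisionSet draftRoots = phases.allDraft().roots(hgRepo);  // hgRepo: the corresponding HgRepository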
--- a/src/org/tmatesoft/hg/repo/HgParentChildMap.java	Mon Jul 01 21:19:53 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgParentChildMap.java	Tue Jul 02 23:21:16 2013 +0200
@@ -18,7 +18,9 @@
 
 import static org.tmatesoft.hg.repo.HgRepository.TIP;
 
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.BitSet;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
@@ -62,9 +64,10 @@
 	private Nodeid[] sequential; // natural repository order, childrenOf rely on ordering
 	private Nodeid[] sorted; // for binary search
 	private int[] sorted2natural; // indexes in sorted to indexes in sequential
-	private Nodeid[] firstParent;
+	private Nodeid[] firstParent; // parents by natural order (i.e. firstParent[A] is parent of revision with index A)
 	private Nodeid[] secondParent;
 	private final T revlog;
+	private BitSet heads; // a set bit at index i means the revision with index i has children (i.e. is not a head)
 
 
 	public HgParentChildMap(T owner) {
@@ -83,9 +86,11 @@
 		sequential[ix] = sorted[ix] = revision;
 		if (parent1Revision != -1) {
 			firstParent[ix] = sequential[parent1Revision];
+			heads.set(parent1Revision);
 		}
 		if (parent2Revision != -1) { // revlog of DataAccess.java has p2 set when p1 is -1
 			secondParent[ix] = sequential[parent2Revision];
+			heads.set(parent2Revision);
 		}
 	}
 	
@@ -97,13 +102,14 @@
 	public void init() throws HgRuntimeException {
 		final int revisionCount = revlog.getRevisionCount();
 		firstParent = new Nodeid[revisionCount];
-		// TODO [post 1.0] Branches/merges are less frequent, and most of secondParent would be -1/null, hence 
+		// TODO [post 1.1] Branches/merges are less frequent, and most of secondParent would be -1/null, hence 
 		// IntMap might be better alternative here, but need to carefully analyze (test) whether this brings
 		// real improvement (IntMap has 2n capacity, and element lookup is log(n) instead of array's constant)
 		secondParent = new Nodeid[revisionCount];
 		//
 		sequential = new Nodeid[revisionCount];
 		sorted = new Nodeid[revisionCount];
+		heads = new BitSet(revisionCount);
 		revlog.indexWalk(0, TIP, this);
 		Arrays.sort(sorted);
 		sorted2natural = new int[revisionCount];
@@ -233,20 +239,7 @@
 		int x = Arrays.binarySearch(sorted, nid);
 		assertSortedIndex(x);
 		int i = sorted2natural[x];
-		assert firstParent.length == secondParent.length; // just in case later I implement sparse array for secondParent
-		assert firstParent.length == sequential.length;
-		// to use == instead of equals, take the same Nodeid instance we used to fill all the arrays.
-		final Nodeid canonicalNode = sequential[i];
-		i++; // no need to check node itself. child nodes may appear in sequential only after revision in question
-		for (; i < sequential.length; i++) {
-			// TODO [post 1.0] likely, not very effective. 
-			// May want to optimize it with another (Tree|Hash)Set, created on demand on first use, 
-			// however, need to be careful with memory usage
-			if (firstParent[i] == canonicalNode || secondParent[i] == canonicalNode) {
-				return true;
-			}
-		}
-		return false;
+		return hasChildren(i);
 	}
 
 	/**
@@ -267,14 +260,19 @@
 		int x = Arrays.binarySearch(sorted, root);
 		assertSortedIndex(x);
 		root = sorted[x]; // canonical instance
+		final int start = sorted2natural[x];
+		if (!hasChildren(start)) {
+			return false; // root has no children at all
+		}
 		int y = Arrays.binarySearch(sorted, wannaBeChild);
-		if (y < 0 || y <= x) {
-			// not found or comes earlier than root
-			return false;
+		if (y < 0) {
+			return false; // not found
 		}
 		wannaBeChild = sorted[y]; // canonicalize
-		final int start = sorted2natural[x];
 		final int end = sorted2natural[y];
+		if (end <= start) {
+			return false; // potential child was in repository earlier than root
+		}
 		HashSet<Nodeid> parents = new HashSet<Nodeid>();
 		parents.add(root);
 		for (int i = start + 1; i < end; i++) {
@@ -284,4 +282,22 @@
 		}
 		return parents.contains(firstParent[end]) || parents.contains(secondParent[end]);
 	}
+	
+	/**
+	 * @return elements of this map that do not have a child recorded therein.
+	 */
+	public Collection<Nodeid> heads() {
+		ArrayList<Nodeid> result = new ArrayList<Nodeid>();
+		int index = 0;
+		do {
+			index = heads.nextClearBit(index);
+			result.add(sequential[index]);
+			index++;
+		} while (index < sequential.length);
+		return result;
+	}
+
+	private boolean hasChildren(int sequentialIndex) {
+		return heads.get(sequentialIndex);
+	}
 }
\ No newline at end of file
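The BitSet keeps one bit per revision, set as soon as the revision is seen as somebody's parent; heads() simply walks the clear bits. A sketch of how it might be exercised (hgRepo is a placeholder for an opened HgRepository):

	HgParentChildMap<HgChangelog> pm = new HgParentChildMap<HgChangelog>(hgRepo.getChangelog());
	pm.init();                          // fills the BitSet while walking the changelog
	for (Nodeid head : pm.heads()) {    // revisions with no recorded children
		System.out.println(head.shortNotation());
	}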
--- a/src/org/tmatesoft/hg/repo/HgPhase.java	Mon Jul 01 21:19:53 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgPhase.java	Tue Jul 02 23:21:16 2013 +0200
@@ -68,4 +68,8 @@
 		}
 		return ordinal(); // what a coincidence
 	}
+	
+	public String mercurialString() {
+		return hgString;
+	}
 }
--- a/src/org/tmatesoft/hg/repo/HgRemoteRepository.java	Mon Jul 01 21:19:53 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgRemoteRepository.java	Tue Jul 02 23:21:16 2013 +0200
@@ -469,26 +469,15 @@
 		return new Bookmarks(rv);
 	}
 
-	public void updateBookmark(String name, Nodeid oldRev, Nodeid newRev) throws HgRemoteConnectionException, HgRuntimeException {
-		final String namespace = "bookmarks";
-		HttpURLConnection c = null;
-		try {
-			URL u = new URL(url, String.format("%s?cmd=pushkey&namespace=%s&key=%s&old=%s&new=%s",url.getPath(), namespace, name, oldRev.toString(), newRev.toString()));
-			c = setupConnection(u.openConnection());
-			c.connect();
-			if (debug) {
-				dumpResponseHeader(u, c);
-			}
-			checkResponseOk(c, "Update remote bookmark", "pushkey");
-		} catch (MalformedURLException ex) {
-			throw new HgRemoteConnectionException("Bad URL", ex).setRemoteCommand("pushkey").setServerInfo(getLocation());
-		} catch (IOException ex) {
-			throw new HgRemoteConnectionException("Communication failure", ex).setRemoteCommand("pushkey").setServerInfo(getLocation());
-		} finally {
-			if (c != null) {
-				c.disconnect();
-			}
+	public Outcome updateBookmark(String name, Nodeid oldRev, Nodeid newRev) throws HgRemoteConnectionException, HgRuntimeException {
+		initCapabilities();
+		if (!remoteCapabilities.contains("pushkey")) {
+			return new Outcome(Failure, "Server doesn't support pushkey protocol");
 		}
+		if (pushkey("Update remote bookmark", "bookmarks", name, oldRev.toString(), newRev.toString())) {
+			return new Outcome(Success, String.format("Bookmark %s updated to %s", name, newRev.shortNotation()));
+		}
+		return new Outcome(Failure, String.format("Bookmark update (%s: %s -> %s) failed", name, oldRev.shortNotation(), newRev.shortNotation()));
 	}
 	
 	public Phases getPhases() throws HgRemoteConnectionException, HgRuntimeException {
@@ -522,26 +511,12 @@
 		if (!remoteCapabilities.contains("pushkey")) {
 			return new Outcome(Failure, "Server doesn't support pushkey protocol");
 		}
-		if (pushkey("phases", n.toString(), String.valueOf(from.mercurialOrdinal()), String.valueOf(to.mercurialOrdinal()))) {
+		if (pushkey("Update remote phases", "phases", n.toString(), String.valueOf(from.mercurialOrdinal()), String.valueOf(to.mercurialOrdinal()))) {
 			return new Outcome(Success, String.format("Phase of %s updated to %s", n.shortNotation(), to.name()));
 		}
 		return new Outcome(Failure, String.format("Phase update (%s: %s -> %s) failed", n.shortNotation(), from.name(), to.name()));
 	}
 
-	
-	public static void main(String[] args) throws Exception {
-		final HgRemoteRepository r = new HgLookup().detectRemote("http://selenic.com/hg", null);
-		if (r.isInvalid()) {
-			return;
-		}
-		System.out.println(r.remoteCapabilities);
-		r.getPhases();
-		final Iterable<Pair<String, Nodeid>> bm = r.getBookmarks();
-		for (Pair<String, Nodeid> pair : bm) {
-			System.out.println(pair);
-		}
-	}
-
 	@Override
 	public String toString() {
 		return getClass().getSimpleName() + '[' + getLocation() + ']';
@@ -634,7 +609,7 @@
 		}
 	}
 	
-	private boolean pushkey(String namespace, String key, String oldValue, String newValue) throws HgRemoteConnectionException, HgRuntimeException {
+	private boolean pushkey(String opName, String namespace, String key, String oldValue, String newValue) throws HgRemoteConnectionException, HgRuntimeException {
 		HttpURLConnection c = null;
 		try {
 			final String p = String.format("%s?cmd=pushkey&namespace=%s&key=%s&old=%s&new=%s", url.getPath(), namespace, key, oldValue, newValue);
@@ -645,7 +620,7 @@
 			if (debug) {
 				dumpResponseHeader(u, c);
 			}
-			checkResponseOk(c, key, "pushkey");
+			checkResponseOk(c, opName, "pushkey");
 			final InputStream is = c.getInputStream();
 			int rv = is.read();
 			is.close();
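For reference, the request the refactored helper issues is a single HTTP GET of the form shown in the format string above, e.g. (values illustrative):

	<repo-url>?cmd=pushkey&namespace=bookmarks&key=mark1&old=<40-hex nodeid>&new=<40-hex nodeid>

and the first byte of the response body is what pushkey() treats as the success flag.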
--- a/test/org/tmatesoft/hg/test/TestCommit.java	Mon Jul 01 21:19:53 2013 +0200
+++ b/test/org/tmatesoft/hg/test/TestCommit.java	Tue Jul 02 23:21:16 2013 +0200
@@ -44,7 +44,9 @@
 import org.tmatesoft.hg.internal.Transaction;
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgPhase;
 import org.tmatesoft.hg.repo.HgRepository;
+import org.tmatesoft.hg.repo.HgRepositoryFiles;
 import org.tmatesoft.hg.util.Outcome;
 import org.tmatesoft.hg.util.Path;
 
@@ -244,6 +246,8 @@
 	@Test
 	public void testCommandBasics() throws Exception {
 		File repoLoc = RepoUtils.cloneRepoToTempLocation("log-1", "test-commit-cmd", false);
+		// PhasesHelper relies on the file's existence to tell whether phases are enabled
+		RepoUtils.createFile(new File(repoLoc, HgRepositoryFiles.Phaseroots.getPath()), "");
 		HgRepository hgRepo = new HgLookup().detect(repoLoc);
 		HgDataFile dfB = hgRepo.getFileNode("b");
 		assertTrue("[sanity]", dfB.exists());
@@ -286,6 +290,12 @@
 		errorCollector.assertEquals(csets.get(0).getComment(), "FIRST");
 		errorCollector.assertEquals(csets.get(1).getComment(), "SECOND");
 		assertHgVerifyOk(repoLoc);
+		// new commits are drafts by default, check our commit respects this
+		// TODO more tests with children of changesets in draft, secret or public phases (for the latter:
+		// new commit is a child of a public changeset while other commits carry draft/secret phases - ensure those stay intact)
+		assertEquals(HgPhase.Draft, HgPhase.parse(hgRepo.getConfiguration().getStringValue("phases", "new-commit", HgPhase.Draft.mercurialString())));
+		errorCollector.assertEquals(HgPhase.Draft, csets.get(0).getPhase());
+		errorCollector.assertEquals(HgPhase.Draft, csets.get(1).getPhase());
 	}
 	
 	@Test
--- a/test/org/tmatesoft/hg/test/TestPush.java	Mon Jul 01 21:19:53 2013 +0200
+++ b/test/org/tmatesoft/hg/test/TestPush.java	Tue Jul 02 23:21:16 2013 +0200
@@ -26,7 +26,6 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.tmatesoft.hg.core.HgCheckoutCommand;
@@ -36,9 +35,11 @@
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.internal.PhasesHelper;
 import org.tmatesoft.hg.internal.RevisionSet;
+import org.tmatesoft.hg.repo.HgBookmarks;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgInternals;
 import org.tmatesoft.hg.repo.HgLookup;
+import org.tmatesoft.hg.repo.HgPhase;
 import org.tmatesoft.hg.repo.HgRemoteRepository;
 import org.tmatesoft.hg.repo.HgRepository;
 
@@ -101,12 +102,192 @@
 	
 	@Test
 	public void testPushToNonPublishingServer() throws Exception {
-		Assert.fail();
+		// check drafts stay the same as on the server
+		// copy, don't clone, as the latter updates phase information
+		File srcRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-nopub-src");
+		File dstRepoLoc = RepoUtils.initEmptyTempRepo("test-push-nopub-dst");
+		File f1 = new File(srcRepoLoc, "hello.c");
+		assertTrue("[sanity]", f1.canWrite());
+		HgServer server = new HgServer().publishing(false).start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			PhasesHelper phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			final RevisionSet allDraft = phaseHelper.allDraft();
+			assertFalse("[sanity]", allDraft.isEmpty());
+			final int publicCsetToBranchAt = 4;
+			assertEquals("[sanity]", HgPhase.Public, phaseHelper.getPhase(publicCsetToBranchAt, null));
+			// in addition to existing draft csets, add one more draft, branching at some other public revision
+			new HgCheckoutCommand(srcRepo).changeset(publicCsetToBranchAt).clean(true).execute();
+			RepoUtils.modifyFileAppend(f1, "// aaa");
+			final HgCommitCommand commitCmd = new HgCommitCommand(srcRepo).message("Commit aaa");
+			assertTrue(commitCmd.execute().isOk());
+			Nodeid newCommit = commitCmd.getCommittedRevision();
+			//
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			final HgChangelog srcClog = srcRepo.getChangelog();
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			// refresh PhasesHelper
+			phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			// check if phase didn't change
+			errorCollector.assertEquals(HgPhase.Draft, phaseHelper.getPhase(srcClog.getRevisionIndex(newCommit), newCommit));
+			for (Nodeid n : allDraft) {
+				// check drafts from src were actually pushed to dst 
+				errorCollector.assertTrue(dstClog.isKnown(n));
+				// check drafts didn't change their phase
+				errorCollector.assertEquals(HgPhase.Draft, phaseHelper.getPhase(srcClog.getRevisionIndex(n), n));
+			}
+		} finally {
+			server.stop();
+		}
 	}
 	
+	/**
+	 * If the server reports revisions we know as drafts to be public, update them locally
+	 */
+	@Test
+	public void testPushUpdatesPublishedDrafts() throws Exception {
+		/* o		r9, secret
+		 * |  o		r8, draft
+		 * |  |
+		 * |  o		r7, draft
+		 * o  |		r6, secret 
+		 * | /
+		 * o		r5, draft
+		 * |
+		 * o		r4, public
+		 */
+		// remote: r5 -> public, r6 -> draft, r8 -> secret
+		// local: new draft from r4, push
+		File srcRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-phase-update-1-src");
+		File dstRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-phase-update-1-dst");
+		File f1 = new File(srcRepoLoc, "hello.c");
+		assertTrue("[sanity]", f1.canWrite());
+		final HgLookup hgLookup = new HgLookup();
+		final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+		final ExecHelper dstRun = new ExecHelper(new OutputParser.Stub(), dstRepoLoc);
+		final int publicCsetToBranchAt = 4;
+		final int r5 = 5, r6 = 6, r8 = 8;
+		PhasesHelper srcPhase = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+		assertEquals("[sanity]", HgPhase.Draft, srcPhase.getPhase(r5, null));
+		assertEquals("[sanity]", HgPhase.Secret, srcPhase.getPhase(r6, null));
+		assertEquals("[sanity]", HgPhase.Draft, srcPhase.getPhase(r8, null));
+		// change phases in repository of remote server:
+		dstRun.exec("hg", "phase", "--public", String.valueOf(r5));
+		assertEquals(0, dstRun.getExitValue());
+		dstRun.exec("hg", "phase", "--draft", String.valueOf(r6));
+		assertEquals(0, dstRun.getExitValue());
+		dstRun.exec("hg", "phase", "--secret", "--force", String.valueOf(r8));
+		assertEquals(0, dstRun.getExitValue());
+		HgServer server = new HgServer().publishing(false).start(dstRepoLoc);
+		try {
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			// commit new draft head
+			new HgCheckoutCommand(srcRepo).changeset(publicCsetToBranchAt).clean(true).execute();
+			RepoUtils.modifyFileAppend(f1, "// aaa");
+			final HgCommitCommand commitCmd = new HgCommitCommand(srcRepo).message("Commit aaa");
+			assertTrue(commitCmd.execute().isOk());
+			final Nodeid newCommit = commitCmd.getCommittedRevision();
+			//
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			// refresh phase information
+			srcPhase = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			// r5 and r6 are changed to match server phases (more exposed)
+			errorCollector.assertEquals(HgPhase.Public, srcPhase.getPhase(r5, null));
+			errorCollector.assertEquals(HgPhase.Draft, srcPhase.getPhase(r6, null));
+			// r8 is secret on server, locally can't make it less exposed though
+			errorCollector.assertEquals(HgPhase.Draft, srcPhase.getPhase(r8, null));
+			//
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			assertTrue(dstClog.isKnown(newCommit));
+			PhasesHelper dstPhase = new PhasesHelper(HgInternals.getImplementationRepo(dstRepo));
+			errorCollector.assertEquals(HgPhase.Draft, dstPhase.getPhase(dstClog.getRevisionIndex(newCommit), newCommit));
+			// the one that was secret is draft now
+			errorCollector.assertEquals(HgPhase.Draft, srcPhase.getPhase(r8, null));
+		} finally {
+			server.stop();
+		}
+	}
+	
+	/**
+	 * update phases of local revisions and push changes
+	 */
+	@Test
+	public void testPushPublishAndUpdates() throws Exception {
+		File srcRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-phase-update-2-src");
+		File dstRepoLoc = RepoUtils.initEmptyTempRepo("test-push-phase-update-1-dst");
+		final int r4 = 4, r5 = 5, r6 = 6, r9 = 9;
+		HgServer server = new HgServer().publishing(false).start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			//
+			// make sure the pushed repository got the same draft root
+			final Nodeid r4PublicHead = srcRepo.getChangelog().getRevision(r4);
+			final Nodeid r5DraftRoot = srcRepo.getChangelog().getRevision(r5);
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			PhasesHelper dstPhase = new PhasesHelper(HgInternals.getImplementationRepo(dstRepo));
+			assertEquals(HgPhase.Public, dstPhase.getPhase(dstClog.getRevisionIndex(r4PublicHead), r4PublicHead));
+			assertEquals(HgPhase.Draft, dstPhase.getPhase(dstClog.getRevisionIndex(r5DraftRoot), r5DraftRoot));
+			//
+			// now, graduate some local revisions, r5:draft->public, r6:secret->public, r9: secret->draft
+			final ExecHelper srcRun = new ExecHelper(new OutputParser.Stub(), srcRepoLoc);
+			srcRun.exec("hg", "phase", "--public", String.valueOf(r5));
+			srcRun.exec("hg", "phase", "--public", String.valueOf(r6));
+			srcRun.exec("hg", "phase", "--draft", String.valueOf(r9));
+			// PhasesHelper is created anew by the push command, so it picks up these external changes
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			final Nodeid r6Nodeid = srcRepo.getChangelog().getRevision(r6);
+			final Nodeid r9Nodeid = srcRepo.getChangelog().getRevision(r9);
+			// refresh 
+			dstPhase = new PhasesHelper(HgInternals.getImplementationRepo(dstRepo));
+			// plain assert, not errorCollector, as subsequent checks would fail if these revs didn't make it into dst
+			assertTrue(dstClog.isKnown(r6Nodeid));
+			assertTrue(dstClog.isKnown(r9Nodeid));
+			errorCollector.assertEquals(HgPhase.Public, dstPhase.getPhase(dstClog.getRevisionIndex(r5DraftRoot), r5DraftRoot));
+			errorCollector.assertEquals(HgPhase.Public, dstPhase.getPhase(dstClog.getRevisionIndex(r6Nodeid), r6Nodeid));
+			errorCollector.assertEquals(HgPhase.Draft, dstPhase.getPhase(dstClog.getRevisionIndex(r9Nodeid), r9Nodeid));
+		} finally {
+			server.stop();
+		}
+	}
+
+	
 	@Test
 	public void testPushToPublishingServer() throws Exception {
-		Assert.fail();
+		// copy, don't clone, as the latter updates phase information
+		File srcRepoLoc = RepoUtils.copyRepoToTempLocation("test-phases", "test-push-pub-src");
+		File dstRepoLoc = RepoUtils.initEmptyTempRepo("test-push-pub-dst");
+		HgServer server = new HgServer().publishing(true).start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			PhasesHelper phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			final RevisionSet allDraft = phaseHelper.allDraft();
+			assertFalse("[sanity]", allDraft.isEmpty());
+			// push all changes
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			final HgChangelog srcClog = srcRepo.getChangelog();
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			// refresh PhasesHelper
+			phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
+			for (Nodeid n : allDraft) {
+				// check drafts from src were actually pushed to dst 
+				errorCollector.assertTrue(dstClog.isKnown(n));
+				// check drafts became public
+				errorCollector.assertEquals(HgPhase.Public, phaseHelper.getPhase(srcClog.getRevisionIndex(n), n));
+			}
+		} finally {
+			server.stop();
+		}
 	}
 
 	@Test
@@ -137,7 +318,69 @@
 
 	@Test
 	public void testUpdateBookmarkOnPush() throws Exception {
-		Assert.fail();
+		File srcRepoLoc = RepoUtils.cloneRepoToTempLocation("test-annotate", "test-push-src", false);
+		File dstRepoLoc = RepoUtils.cloneRepoToTempLocation("test-annotate", "test-push-dst", false);
+		final ExecHelper srcRun = new ExecHelper(new OutputParser.Stub(), srcRepoLoc);
+		final ExecHelper dstRun = new ExecHelper(new OutputParser.Stub(), dstRepoLoc);
+		File f1 = new File(srcRepoLoc, "file1");
+		assertTrue("[sanity]", f1.canWrite());
+		//
+		final String bm1 = "mark1", bm2 = "mark2", bm3 = "mark3", bm4 = "mark4", bm5 = "mark5";
+		final int bm2Local = 1, bm2Remote = 6, bm3Local = 7, bm3Remote = 2, bm_4_5 = 3;
+		// 1) bm1 - local active bookmark, check that push updates it on the remote
+		srcRun.exec("hg", "bookmark", bm1);
+		dstRun.exec("hg", "bookmark", "-r", "8", bm1);
+		// 2) bm2 - local points to ancestor of revision remote points to
+		srcRun.exec("hg", "bookmark", "-r", String.valueOf(bm2Local), bm2);
+		dstRun.exec("hg", "bookmark", "-r", String.valueOf(bm2Remote), bm2);
+		// 3) bm3 - remote points to ancestor of revision local one points to   
+		srcRun.exec("hg", "bookmark", "-r", String.valueOf(bm3Local), bm3);
+		dstRun.exec("hg", "bookmark", "-r", String.valueOf(bm3Remote), bm3);
+		// 4) bm4 - remote bookmark, not known locally
+		dstRun.exec("hg", "bookmark", "-r", String.valueOf(bm_4_5), bm4);
+		// 5) bm5 - local bookmark, not known remotely
+		srcRun.exec("hg", "bookmark", "-r", String.valueOf(bm_4_5), bm5);
+		//
+		HgServer server = new HgServer().start(dstRepoLoc);
+		try {
+			final HgLookup hgLookup = new HgLookup();
+			final HgRepository srcRepo = hgLookup.detect(srcRepoLoc);
+			final HgRemoteRepository dstRemote = hgLookup.detect(server.getURL());
+			RepoUtils.modifyFileAppend(f1, "change1");
+			final HgCommitCommand commitCmd = new HgCommitCommand(srcRepo).message("Commit 1");
+			assertTrue(commitCmd.execute().isOk());
+			assertEquals(bm1, srcRepo.getBookmarks().getActiveBookmarkName());
+			assertEquals(commitCmd.getCommittedRevision(), srcRepo.getBookmarks().getRevision(bm1));
+			//
+			new HgPushCommand(srcRepo).destination(dstRemote).execute();
+			Thread.sleep(300); // let the server perform the update
+			//
+			HgBookmarks srcBookmarks = srcRepo.getBookmarks();
+			final HgChangelog srcClog = srcRepo.getChangelog();
+			// first, check local bookmarks are intact
+			errorCollector.assertEquals(srcClog.getRevision(bm2Local), srcBookmarks.getRevision(bm2));
+			errorCollector.assertEquals(srcClog.getRevision(bm3Local), srcBookmarks.getRevision(bm3));
+			errorCollector.assertEquals(null, srcBookmarks.getRevision(bm4));
+			errorCollector.assertEquals(srcClog.getRevision(bm_4_5), srcBookmarks.getRevision(bm5));
+			// now, check remote bookmarks were touched
+			HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
+			HgBookmarks dstBookmarks = dstRepo.getBookmarks();
+			final HgChangelog dstClog = dstRepo.getChangelog();
+			// bm1 changed and points to newly pushed commit.
+			// if the test fails (bm1 points to r8), chances are server didn't manage to update
+			// bookmarks yet (there's Thread.sleep() above to give it a chance).
+			errorCollector.assertEquals(commitCmd.getCommittedRevision(), dstBookmarks.getRevision(bm1));
+			// bm2 didn't change
+			errorCollector.assertEquals(dstClog.getRevision(bm2Remote), dstBookmarks.getRevision(bm2));
+			// bm3 did change, now points to value we've got in srcRepo
+			errorCollector.assertEquals(srcClog.getRevision(bm3Local), dstBookmarks.getRevision(bm3));
+			// bm4 is not affected
+			errorCollector.assertEquals(dstClog.getRevision(bm_4_5), dstBookmarks.getRevision(bm4));
+			// bm5 is not known remotely
+			errorCollector.assertEquals(null, dstBookmarks.getRevision(bm5));
+		} finally {
+			server.stop();
+		}
 	}
 
 
@@ -149,6 +392,12 @@
 
 	static class HgServer {
 		private Process serverProcess;
+		private boolean publish = true;
+		
+		public HgServer publishing(boolean pub) {
+			publish = pub;
+			return this;
+		}
 
 		public HgServer start(File dir) throws IOException, InterruptedException {
 			if (serverProcess != null) {
@@ -164,6 +413,10 @@
 			cmdline.add("server.validate=True");
 			cmdline.add("--config");
 			cmdline.add(String.format("web.port=%d", port()));
+			if (!publish) {
+				cmdline.add("--config");
+				cmdline.add("phases.publish=False");
+			}
 			cmdline.add("serve");
 			serverProcess = new ProcessBuilder(cmdline).directory(dir).start();
 			Thread.sleep(500);
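The non-publishing server the new tests start is, per the command line assembled above, roughly equivalent to running (other --config entries elided):

	hg --config server.validate=True --config web.port=<port> --config phases.publish=False serve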