hg4j: comparison of src/org/tmatesoft/hg/repo/HgParentChildMap.java @ 652:cd77bf51b562
Push: tests. Commit respects phases.new-commit setting. Fix outgoing when changes are not children of common (Issue 47)
author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
date | Tue, 02 Jul 2013 23:21:16 +0200 |
parents | 3b275cc2d2aa |
children | 629a7370554c |
comparison
651:6e98d34eaca8 | 652:cd77bf51b562 |
---|---|
16 */ | 16 */ |
17 package org.tmatesoft.hg.repo; | 17 package org.tmatesoft.hg.repo; |
18 | 18 |
19 import static org.tmatesoft.hg.repo.HgRepository.TIP; | 19 import static org.tmatesoft.hg.repo.HgRepository.TIP; |
20 | 20 |
| 21 import java.util.ArrayList; |
21 import java.util.Arrays; | 22 import java.util.Arrays; |
| 23 import java.util.BitSet; |
22 import java.util.Collection; | 24 import java.util.Collection; |
23 import java.util.Collections; | 25 import java.util.Collections; |
24 import java.util.HashSet; | 26 import java.util.HashSet; |
25 import java.util.LinkedList; | 27 import java.util.LinkedList; |
26 import java.util.List; | 28 import java.util.List; |
60 // IMPORTANT: Nodeid instances shall be shared between all arrays | 62 // IMPORTANT: Nodeid instances shall be shared between all arrays |
61 | 63 |
62 private Nodeid[] sequential; // natural repository order, childrenOf rely on ordering | 64 private Nodeid[] sequential; // natural repository order, childrenOf rely on ordering |
63 private Nodeid[] sorted; // for binary search | 65 private Nodeid[] sorted; // for binary search |
64 private int[] sorted2natural; // indexes in sorted to indexes in sequential | 66 private int[] sorted2natural; // indexes in sorted to indexes in sequential |
65 private Nodeid[] firstParent; | 67 private Nodeid[] firstParent; // parents by natural order (i.e. firstParent[A] is parent of revision with index A) |
66 private Nodeid[] secondParent; | 68 private Nodeid[] secondParent; |
67 private final T revlog; | 69 private final T revlog; |
| 70 private BitSet heads; // 1 indicates revision got children |
68 | 71 |
69 | 72 |
70 public HgParentChildMap(T owner) { | 73 public HgParentChildMap(T owner) { |
71 revlog = owner; | 74 revlog = owner; |
72 } | 75 } |
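
For orientation, a minimal standalone sketch of the lookup scheme the sorted and sorted2natural fields above implement (plain Strings stand in for Nodeid; class and member names are illustrative, not hg4j API): a sorted copy of the natural-order array plus an index map turns a binary-search hit back into the revision's natural index.

```java
import java.util.Arrays;

// Illustrative sketch only: a sorted copy plus an index map gives
// O(log n) lookup from an id back to its natural (revlog) index.
class IndexLookupSketch {
    private final String[] sequential;  // ids in natural repository order
    private final String[] sorted;      // same ids, sorted for binary search
    private final int[] sorted2natural; // position in sorted -> index in sequential

    IndexLookupSketch(String[] idsInNaturalOrder) {
        sequential = idsInNaturalOrder;
        sorted = idsInNaturalOrder.clone();
        Arrays.sort(sorted);
        sorted2natural = new int[sequential.length];
        for (int i = 0; i < sequential.length; i++) {
            // ids are unique, so binarySearch finds exactly one slot in the sorted copy
            sorted2natural[Arrays.binarySearch(sorted, sequential[i])] = i;
        }
    }

    int naturalIndex(String id) {
        int x = Arrays.binarySearch(sorted, id);
        return x < 0 ? -1 : sorted2natural[x]; // -1: id not known to the map
    }
}
```
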
81 } | 84 } |
82 int ix = revisionNumber; | 85 int ix = revisionNumber; |
83 sequential[ix] = sorted[ix] = revision; | 86 sequential[ix] = sorted[ix] = revision; |
84 if (parent1Revision != -1) { | 87 if (parent1Revision != -1) { |
85 firstParent[ix] = sequential[parent1Revision]; | 88 firstParent[ix] = sequential[parent1Revision]; |
| 89 heads.set(parent1Revision); |
86 } | 90 } |
87 if (parent2Revision != -1) { // revlog of DataAccess.java has p2 set when p1 is -1 | 91 if (parent2Revision != -1) { // revlog of DataAccess.java has p2 set when p1 is -1 |
88 secondParent[ix] = sequential[parent2Revision]; | 92 secondParent[ix] = sequential[parent2Revision]; |
| 93 heads.set(parent2Revision); |
89 } | 94 } |
90 } | 95 } |
91 | 96 |
92 /** | 97 /** |
93 * Prepare the map | 98 * Prepare the map |
95 * @throws HgRuntimeException subclass thereof to indicate other issues with the library. <em>Runtime exception</em> | 100 * @throws HgRuntimeException subclass thereof to indicate other issues with the library. <em>Runtime exception</em> |
96 */ | 101 */ |
97 public void init() throws HgRuntimeException { | 102 public void init() throws HgRuntimeException { |
98 final int revisionCount = revlog.getRevisionCount(); | 103 final int revisionCount = revlog.getRevisionCount(); |
99 firstParent = new Nodeid[revisionCount]; | 104 firstParent = new Nodeid[revisionCount]; |
100 // TODO [post 1.0] Branches/merges are less frequent, and most of secondParent would be -1/null, hence | 105 // TODO [post 1.1] Branches/merges are less frequent, and most of secondParent would be -1/null, hence |
101 // IntMap might be better alternative here, but need to carefully analyze (test) whether this brings | 106 // IntMap might be better alternative here, but need to carefully analyze (test) whether this brings |
102 // real improvement (IntMap has 2n capacity, and element lookup is log(n) instead of array's constant) | 107 // real improvement (IntMap has 2n capacity, and element lookup is log(n) instead of array's constant) |
103 secondParent = new Nodeid[revisionCount]; | 108 secondParent = new Nodeid[revisionCount]; |
104 // | 109 // |
105 sequential = new Nodeid[revisionCount]; | 110 sequential = new Nodeid[revisionCount]; |
106 sorted = new Nodeid[revisionCount]; | 111 sorted = new Nodeid[revisionCount]; |
| 112 heads = new BitSet(revisionCount); |
107 revlog.indexWalk(0, TIP, this); | 113 revlog.indexWalk(0, TIP, this); |
108 Arrays.sort(sorted); | 114 Arrays.sort(sorted); |
109 sorted2natural = new int[revisionCount]; | 115 sorted2natural = new int[revisionCount]; |
110 for (int i = 0; i < revisionCount; i++) { | 116 for (int i = 0; i < revisionCount; i++) { |
111 Nodeid n = sequential[i]; | 117 Nodeid n = sequential[i]; |
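
The heads bitset allocated above is filled during the same index walk: whenever a revision reports a parent index, that parent's bit gets set, so after the walk a clear bit means no children were recorded for that revision and hasChildren reduces to a single bit test. A self-contained sketch of the idea, with plain int indices instead of Nodeid and illustrative names:

```java
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

// Illustrative sketch only: track "has children" per revision with a BitSet.
// parent1/parent2 hold parent indices in natural order, -1 meaning "no parent".
class ChildrenBitsSketch {

    static BitSet markRevisionsWithChildren(int[] parent1, int[] parent2) {
        BitSet hasChild = new BitSet(parent1.length);
        for (int i = 0; i < parent1.length; i++) {
            if (parent1[i] != -1) {
                hasChild.set(parent1[i]); // parent1[i] has at least one child, namely i
            }
            if (parent2[i] != -1) {
                hasChild.set(parent2[i]);
            }
        }
        return hasChild;
    }

    // revisions nobody named as a parent, i.e. the heads
    static List<Integer> headIndexes(BitSet hasChild, int revisionCount) {
        List<Integer> heads = new ArrayList<Integer>();
        for (int i = hasChild.nextClearBit(0); i < revisionCount; i = hasChild.nextClearBit(i + 1)) {
            heads.add(i);
        }
        return heads;
    }
}
```
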
231 */ | 237 */ |
232 public boolean hasChildren(Nodeid nid) { | 238 public boolean hasChildren(Nodeid nid) { |
233 int x = Arrays.binarySearch(sorted, nid); | 239 int x = Arrays.binarySearch(sorted, nid); |
234 assertSortedIndex(x); | 240 assertSortedIndex(x); |
235 int i = sorted2natural[x]; | 241 int i = sorted2natural[x]; |
236 assert firstParent.length == secondParent.length; // just in case later I implement sparse array for secondParent | 242 return hasChildren(i); |
237 assert firstParent.length == sequential.length; | |
238 // to use == instead of equals, take the same Nodeid instance we used to fill all the arrays. | |
239 final Nodeid canonicalNode = sequential[i]; | |
240 i++; // no need to check node itself. child nodes may appear in sequential only after revision in question | |
241 for (; i < sequential.length; i++) { | |
242 // TODO [post 1.0] likely, not very effective. | |
243 // May want to optimize it with another (Tree|Hash)Set, created on demand on first use, | |
244 // however, need to be careful with memory usage | |
245 if (firstParent[i] == canonicalNode || secondParent[i] == canonicalNode) { | |
246 return true; | |
247 } | |
248 } | |
249 return false; | |
250 } | 243 } |
251 | 244 |
252 /** | 245 /** |
253 * @return all revisions this map knows about | 246 * @return all revisions this map knows about |
254 */ | 247 */ |
265 */ | 258 */ |
266 public boolean isChild(Nodeid root, Nodeid wannaBeChild) { | 259 public boolean isChild(Nodeid root, Nodeid wannaBeChild) { |
267 int x = Arrays.binarySearch(sorted, root); | 260 int x = Arrays.binarySearch(sorted, root); |
268 assertSortedIndex(x); | 261 assertSortedIndex(x); |
269 root = sorted[x]; // canonical instance | 262 root = sorted[x]; // canonical instance |
| 263 final int start = sorted2natural[x]; |
| 264 if (!hasChildren(start)) { |
| 265 return false; // root got no children at all |
| 266 } |
270 int y = Arrays.binarySearch(sorted, wannaBeChild); | 267 int y = Arrays.binarySearch(sorted, wannaBeChild); |
271 if (y < 0 || y <= x) { | 268 if (y < 0) { |
272 // not found or comes earlier than root | 269 return false; // not found |
273 return false; | |
274 } | 270 } |
275 wannaBeChild = sorted[y]; // canonicalize | 271 wannaBeChild = sorted[y]; // canonicalize |
276 final int start = sorted2natural[x]; | |
277 final int end = sorted2natural[y]; | 272 final int end = sorted2natural[y]; |
| 273 if (end <= start) { |
| 274 return false; // potential child was in repository earlier than root |
| 275 } |
278 HashSet<Nodeid> parents = new HashSet<Nodeid>(); | 276 HashSet<Nodeid> parents = new HashSet<Nodeid>(); |
279 parents.add(root); | 277 parents.add(root); |
280 for (int i = start + 1; i < end; i++) { | 278 for (int i = start + 1; i < end; i++) { |
281 if (parents.contains(firstParent[i]) || parents.contains(secondParent[i])) { | 279 if (parents.contains(firstParent[i]) || parents.contains(secondParent[i])) { |
282 parents.add(sequential[i]); // collect ancestors line | 280 parents.add(sequential[i]); // collect ancestors line |
283 } | 281 } |
284 } | 282 } |
285 return parents.contains(firstParent[end]) || parents.contains(secondParent[end]); | 283 return parents.contains(firstParent[end]) || parents.contains(secondParent[end]); |
286 } | 284 } |
| 285 |
| 286 /** |
| 287 * @return elements of this map that do not have a child recorded therein. |
| 288 */ |
| 289 public Collection<Nodeid> heads() { |
| 290 ArrayList<Nodeid> result = new ArrayList<Nodeid>(); |
| 291 int index = 0; |
| 292 do { |
| 293 index = heads.nextClearBit(index); |
| 294 result.add(sequential[index]); |
| 295 index++; |
| 296 } while (index < sequential.length); |
| 297 return result; |
| 298 } |
| 299 |
| 300 private boolean hasChildren(int sequentialIndex) { |
| 301 return heads.get(sequentialIndex); |
| 302 } |
287 } | 303 } |
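
Finally, a possible usage sketch of the reworked map, not part of the changeset: only init(), heads(), hasChildren() and isChild() come from the file above, HgRepository.getChangelog() is assumed from the wider hg4j API, and obtaining the repository and the two Nodeid arguments is left to the caller.

```java
import org.tmatesoft.hg.core.Nodeid;
import org.tmatesoft.hg.repo.HgChangelog;
import org.tmatesoft.hg.repo.HgParentChildMap;
import org.tmatesoft.hg.repo.HgRepository;

// Usage sketch, not from the changeset; getChangelog() is an assumption
// about the surrounding hg4j API, everything else is shown in the file above.
public class ParentChildMapSketch {

    public static void report(HgRepository hgRepo, Nodeid root, Nodeid candidate) {
        HgParentChildMap<HgChangelog> map = new HgParentChildMap<HgChangelog>(hgRepo.getChangelog());
        map.init(); // single pass over the changelog index fills the arrays and the heads bitset
        for (Nodeid head : map.heads()) {
            System.out.println("head: " + head); // revisions with no child recorded in the map
        }
        System.out.println("root has children: " + map.hasChildren(root)); // now a constant-time bit test
        System.out.println("candidate descends from root: " + map.isChild(root, candidate));
    }
}
```
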