hg4j: comparison of src/org/tmatesoft/hg/repo/Revlog.java @ 171:2c3e96674e2a
Towards outgoing changes - initial detection logic, get connected with remote repo stub
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
|---|---|
| date | Fri, 25 Mar 2011 00:05:52 +0100 |
| parents | d5268ca7715b |
| children | 9807bf8f3a9c |
comparison
| 170:71ddbf8603e8 | 171:2c3e96674e2a |
|---|---|
| 23 import java.nio.ByteBuffer; | 23 import java.nio.ByteBuffer; |
| 24 import java.util.Arrays; | 24 import java.util.Arrays; |
| 25 import java.util.Collection; | 25 import java.util.Collection; |
| 26 import java.util.Collections; | 26 import java.util.Collections; |
| 27 import java.util.HashMap; | 27 import java.util.HashMap; |
| | 28 import java.util.HashSet; |
| 28 import java.util.LinkedHashSet; | 29 import java.util.LinkedHashSet; |
| | 30 import java.util.LinkedList; |
| | 31 import java.util.List; |
| 29 import java.util.Map; | 32 import java.util.Map; |
| 30 import java.util.Set; | 33 import java.util.Set; |
| 31 | 34 |
| 32 import org.tmatesoft.hg.core.HgBadStateException; | 35 import org.tmatesoft.hg.core.HgBadStateException; |
| 33 import org.tmatesoft.hg.core.HgException; | 36 import org.tmatesoft.hg.core.HgException; |
| 197 * and yes, walker is not a proper name | 200 * and yes, walker is not a proper name |
| 198 */ | 201 */ |
| 199 public final class ParentWalker { | 202 public final class ParentWalker { |
| 200 private Map<Nodeid, Nodeid> firstParent; | 203 private Map<Nodeid, Nodeid> firstParent; |
| 201 private Map<Nodeid, Nodeid> secondParent; | 204 private Map<Nodeid, Nodeid> secondParent; |
| 202 private Set<Nodeid> allNodes; | 205 private final LinkedHashSet<Nodeid> allNodes = new LinkedHashSet<Nodeid>(); // childrenOf rely on ordering |
| 203 | 206 |
| 204 public ParentWalker() { | 207 public ParentWalker() { |
| 205 firstParent = secondParent = Collections.emptyMap(); | 208 firstParent = secondParent = Collections.emptyMap(); |
| 206 allNodes = Collections.emptySet(); | |
| 207 } | 209 } |
| 208 | 210 |
| 209 public void init() { | 211 public void init() { |
| 210 final RevlogStream stream = Revlog.this.content; | 212 final RevlogStream stream = Revlog.this.content; |
| 211 final int revisionCount = stream.revisionCount(); | 213 final int revisionCount = stream.revisionCount(); |
| 212 firstParent = new HashMap<Nodeid, Nodeid>(revisionCount); | 214 firstParent = new HashMap<Nodeid, Nodeid>(revisionCount); |
| 213 secondParent = new HashMap<Nodeid, Nodeid>(firstParent.size() >> 1); // assume branches/merges are less frequent | 215 secondParent = new HashMap<Nodeid, Nodeid>(firstParent.size() >> 1); // assume branches/merges are less frequent |
| 214 allNodes = new LinkedHashSet<Nodeid>(); | |
| 215 | 216 |
| 216 RevlogStream.Inspector insp = new RevlogStream.Inspector() { | 217 RevlogStream.Inspector insp = new RevlogStream.Inspector() { |
| 217 final Nodeid[] sequentialRevisionNodeids = new Nodeid[revisionCount]; | 218 final Nodeid[] sequentialRevisionNodeids = new Nodeid[revisionCount]; |
| 218 int ix = 0; | 219 int ix = 0; |
| 219 public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess da) { | 220 public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess da) { |
| 276 if (p2 != null) { | 277 if (p2 != null) { |
| 277 modified = c.add(p2) || modified; | 278 modified = c.add(p2) || modified; |
| 278 } | 279 } |
| 279 } | 280 } |
| 280 return modified; | 281 return modified; |
| | 282 } |
| | 283 |
| | 284 // XXX alternative (and perhaps more reliable) approach would be to make a copy of allNodes and remove |
| | 285 // nodes, their parents and so on. |
| | 286 |
| | 287 // @return ordered collection of all children rooted at supplied nodes. Nodes shall not be descendants of each other! |
| | 288 public List<Nodeid> childrenOf(List<Nodeid> nodes) { |
| | 289 HashSet<Nodeid> parents = new HashSet<Nodeid>(); |
| | 290 LinkedHashSet<Nodeid> result = new LinkedHashSet<Nodeid>(); |
| | 291 LinkedList<Nodeid> orderedResult = new LinkedList<Nodeid>(); |
| | 292 for(Nodeid next : allNodes) { |
| | 293 // i assume allNodes is sorted, hence do not check any parents unless we hit any common known node first |
| | 294 if (nodes.contains(next)) { |
| | 295 parents.add(next); |
| | 296 } else { |
| | 297 if (parents.isEmpty()) { |
| | 298 // didn't scroll up to any known yet |
| | 299 continue; |
| | 300 } |
| | 301 // record each and every node reported after first common known node hit |
| | 302 orderedResult.addLast(next); |
| | 303 if (parents.contains(firstParent(next)) || parents.contains(secondParent(next))) { |
| | 304 result.add(next); |
| | 305 parents.add(next); // to find next's children |
| | 306 } |
| | 307 } |
| | 308 } |
| | 309 // leave only those of interest in ordered sequence |
| | 310 orderedResult.retainAll(result); |
| | 311 return orderedResult; |
| 281 } | 312 } |
| 282 } | 313 } |
| 283 | 314 |
| 284 protected static class ContentPipe implements RevlogStream.Inspector, CancelSupport { | 315 protected static class ContentPipe implements RevlogStream.Inspector, CancelSupport { |
| 285 private final ByteChannel sink; | 316 private final ByteChannel sink; |
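
The init() excerpt above ends at the inspector's signature, so here is a minimal, self-contained sketch of the parent-map construction it implies: revisions are reported in index order, parents arrive as revision indices (-1 meaning no parent), and an array in the style of sequentialRevisionNodeids resolves those indices to ids already seen. All names below are illustrative, not hg4j API, and ids are plain strings instead of Nodeid for brevity.

```java
import java.util.HashMap;
import java.util.Map;

// Toy model of building firstParent/secondParent maps from sequential revision records.
final class ParentMapSketch {
    final Map<String, String> firstParent = new HashMap<String, String>();
    final Map<String, String> secondParent = new HashMap<String, String>();
    private final String[] seen; // ids in revision-index order, like sequentialRevisionNodeids
    private int ix = 0;

    ParentMapSketch(int revisionCount) {
        seen = new String[revisionCount];
    }

    // roughly what an Inspector.next() callback would do with its
    // (nodeid, parent1Revision, parent2Revision) arguments; -1 = no parent
    void next(String nodeid, int parent1Revision, int parent2Revision) {
        seen[ix++] = nodeid;
        if (parent1Revision != -1) {
            firstParent.put(nodeid, seen[parent1Revision]);
        }
        if (parent2Revision != -1) {
            secondParent.put(nodeid, seen[parent2Revision]);
        }
    }
}
```

In the real inspector the id arrives as a byte[] and is wrapped into a Nodeid; the sketch skips that detail.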

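The switch of allNodes to a LinkedHashSet matters because the new childrenOf walks it in revision order: nothing before the first supplied node can be a descendant, and past that point a node is collected as soon as one of its parents is already in the collected set. Below is a hedged, stand-alone sketch of that idea over a toy graph; the types, names and the simplified bookkeeping (no orderedResult/retainAll step) are mine, not hg4j's.

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Toy version of the ordered scan behind childrenOf(): allNodes is assumed to
// be in revision order, parentsOf maps a node to its (up to two) parents.
final class ChildrenOfSketch {
    static List<String> childrenOf(LinkedHashSet<String> allNodes,
                                   Map<String, String[]> parentsOf,
                                   List<String> roots) {
        Set<String> known = new HashSet<String>(roots); // roots plus descendants found so far
        List<String> result = new LinkedList<String>();
        boolean pastFirstRoot = false;
        for (String n : allNodes) {
            if (roots.contains(n)) {
                pastFirstRoot = true;   // descendants can only appear after their root
                continue;
            }
            if (!pastFirstRoot) {
                continue;               // nothing of interest before the first root
            }
            String[] p = parentsOf.get(n);
            if (p != null && (known.contains(p[0]) || known.contains(p[1]))) {
                result.add(n);          // a child, reported in revision order
                known.add(n);           // so its own children get picked up too
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // chain a -- b -- c, plus a separate child d of a
        LinkedHashSet<String> all = new LinkedHashSet<String>(Arrays.asList("a", "b", "c", "d"));
        Map<String, String[]> parents = new HashMap<String, String[]>();
        parents.put("b", new String[] { "a", null });
        parents.put("c", new String[] { "b", null });
        parents.put("d", new String[] { "a", null });
        System.out.println(childrenOf(all, parents, Arrays.asList("b"))); // prints [c]
    }
}
```

Keeping allNodes insertion-ordered is what lets the scan skip the whole prefix before the first supplied node; with an unordered set the same walk would have to consult the parents of every node.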