Mercurial > hg4j
comparison src/org/tmatesoft/hg/core/HgLogCommand.java @ 516:0ae5768081aa
Allow walking file rename history independently from file ancestry (native hg log --follow does both at once)
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
|---|---|
| date | Tue, 18 Dec 2012 18:57:03 +0100 |
| parents | e6c8b9b654b2 |
| children | 9922d1f7cb2a |
comparison
equal
deleted
inserted
replaced
| 515:e6c8b9b654b2 | 516:0ae5768081aa |
|---|---|
| 186 throw new HgBadArgumentException("Can't find revision", ex).setRevision(nid); | 186 throw new HgBadArgumentException("Can't find revision", ex).setRevision(nid); |
| 187 } | 187 } |
| 188 } | 188 } |
| 189 | 189 |
| 190 /** | 190 /** |
| 191 * Visit history of a given file only. | 191 * Visit history of a given file only. Note, unlike native <code>hg log</code> command argument <code>--follow</code>, this method doesn't |
| 192 * @param file path relative to repository root. Pass <code>null</code> to reset. | 192 * follow file ancestry, but reports complete file history (with <code>followCopyRenames == true</code>, for each |
| 193 * @param followCopyRename true to report changesets of the original file(-s), if copy/rename ever occurred to the file. | 193 name of the file known in sequence). To achieve output similar to that of <code>hg log --follow filePath</code>, use |
| 194 */ | 194 * {@link #file(Path, boolean, boolean) file(filePath, true, true)} alternative. |
| 195 public HgLogCommand file(Path file, boolean followCopyRename) { | 195 * |
| 196 // multiple? Bad idea, would need to include extra method into Handler to tell start of next file | 196 * @param filePath path relative to repository root. Pass <code>null</code> to reset. |
| 197 this.file = file; | 197 * @param followCopyRename true to report changesets of the original file(-s), if copy/rename ever occurred to the file. |
| 198 followRenames = followAncestry = followCopyRename; | 198 * @return <code>this</code> for convenience |
| 199 */ | |
| 200 public HgLogCommand file(Path filePath, boolean followCopyRename) { | |
| 201 return file(filePath, followCopyRename, false); | |
| 202 } | |
| 203 | |
| 204 /** | |
| 205 * Full control over file history iteration. | |
| 206 * | |
| 207 * @param filePath path relative to repository root. Pass <code>null</code> to reset. | |
| 208 * @param followCopyRename true to report changesets of the original file(-s), if copy/rename ever occurred to the file. |
| 209 * @param followFileAncestry true to follow file history starting from revision at working copy parent. Note, only revisions | |
| 210 * accessible (i.e. on direct parent line) from the selected one will be reported. This is how <code>hg log --follow filePath</code> | |
| 211 * behaves, with the difference that this method allows separate control whether to follow renames or not. | |
| 212 * | |
| 213 * @return <code>this</code> for convenience | |
| 214 */ | |
| 215 public HgLogCommand file(Path filePath, boolean followCopyRename, boolean followFileAncestry) { | |
| 216 file = filePath; | |
| 217 followRenames = followCopyRename; | |
| 218 followAncestry = followFileAncestry; | |
| 199 return this; | 219 return this; |
| 200 } | 220 } |
| 201 | 221 |
| 202 /** | 222 /** |
| 203 * Handy analog of {@link #file(Path, boolean)} when clients' paths come from filesystem and need conversion to repository's | 223 * Handy analog to {@link #file(Path, boolean)} when clients' paths come from filesystem and need conversion to repository's |
| 224 * @return <code>this</code> for convenience | |
| 204 */ | 225 */ |
| 205 public HgLogCommand file(String file, boolean followCopyRename) { | 226 public HgLogCommand file(String file, boolean followCopyRename) { |
| 206 return file(Path.create(repo.getToRepoPathHelper().rewrite(file)), followCopyRename); | 227 return file(Path.create(repo.getToRepoPathHelper().rewrite(file)), followCopyRename); |
| 228 } | |
| 229 | |
| 230 /** | |
| 231 * Handy analog to {@link #file(Path, boolean, boolean)} when clients' paths come from filesystem and need conversion to repository's | |
| 232 * @return <code>this</code> for convenience | |
| 233 */ | |
| 234 public HgLogCommand file(String file, boolean followCopyRename, boolean followFileAncestry) { | |
| 235 return file(Path.create(repo.getToRepoPathHelper().rewrite(file)), followCopyRename, followFileAncestry); | |
| 207 } | 236 } |
| 208 | 237 |
| 209 /** | 238 /** |
| 210 * Similar to {@link #execute(HgChangesetHandler)}, collects and return result as a list. | 239 * Similar to {@link #execute(HgChangesetHandler)}, collects and return result as a list. |
| 211 * | 240 * |
| 321 } | 350 } |
| 322 final ProgressSupport progressHelper = getProgressSupport(handler); | 351 final ProgressSupport progressHelper = getProgressSupport(handler); |
| 323 final CancelSupport cancelHelper = getCancelSupport(handler, true); | 352 final CancelSupport cancelHelper = getCancelSupport(handler, true); |
| 324 final HgFileRenameHandlerMixin renameHandler = Adaptable.Factory.getAdapter(handler, HgFileRenameHandlerMixin.class, null); | 353 final HgFileRenameHandlerMixin renameHandler = Adaptable.Factory.getAdapter(handler, HgFileRenameHandlerMixin.class, null); |
| 325 | 354 |
| 326 // builds tree of nodes according to parents in file's revlog | 355 |
| 327 final TreeBuildInspector treeBuildInspector = new TreeBuildInspector(followRenames); | 356 final HandlerDispatcher dispatcher = new HandlerDispatcher() { |
| 328 // we iterate separate histories of each filename, need to connect | 357 |
| 329 // last node of historyA with first node of historyB (A renamed to B case) | 358 @Override |
| 330 // to make overall history smooth. | 359 protected void once(HistoryNode n) throws HgCallbackTargetException, CancelledException { |
| 331 HistoryNode lastFromPrevIteration = null; | |
| 332 | |
| 333 class HandlerDispatcher { | |
| 334 private final int CACHE_CSET_IN_ADVANCE_THRESHOLD = 100; /* XXX is it really worth it? */ | |
| 335 private ElementImpl ei = null; | |
| 336 private ProgressSupport progress; | |
| 337 private HgDataFile currentFileNode; | |
| 338 | |
| 339 public void prepare(ProgressSupport parentProgress, int historyNodeCount, TreeBuildInspector treeBuildInspector) { | |
| 340 if (ei == null) { | |
| 341 // when follow is true, changeHistory.size() of the first revision might be quite short | |
| 342 // (e.g. bad fname recognized soon), hence ensure at least cache size at once | |
| 343 ei = new ElementImpl(Math.max(CACHE_CSET_IN_ADVANCE_THRESHOLD, historyNodeCount)); | |
| 344 } | |
| 345 if (historyNodeCount < CACHE_CSET_IN_ADVANCE_THRESHOLD ) { | |
| 346 int[] commitRevisions = treeBuildInspector.getCommitRevisions(); | |
| 347 // read bunch of changesets at once and cache 'em | |
| 348 ei.initTransform(); | |
| 349 repo.getChangelog().range(ei, commitRevisions); | |
| 350 parentProgress.worked(1); | |
| 351 progress = new ProgressSupport.Sub(parentProgress, 2); | |
| 352 } else { | |
| 353 progress = new ProgressSupport.Sub(parentProgress, 3); | |
| 354 } | |
| 355 progress.start(historyNodeCount); | |
| 356 } | |
| 357 public void once(HistoryNode n) throws HgCallbackTargetException, CancelledException { | |
| 358 handler.treeElement(ei.init(n, currentFileNode)); | 360 handler.treeElement(ei.init(n, currentFileNode)); |
| 359 progress.worked(1); | |
| 360 cancelHelper.checkCancelled(); | 361 cancelHelper.checkCancelled(); |
| 361 } | 362 } |
| 362 | |
| 363 public void switchTo(HgDataFile df) { | |
| 364 // from now on, use df in TreeElement | |
| 365 currentFileNode = df; | |
| 366 } | |
| 367 }; | 363 }; |
| 368 final HandlerDispatcher dispatcher = new HandlerDispatcher(); | |
| 369 | 364 |
| 370 // renamed files in the queue are placed with respect to #iterateDirection | 365 // renamed files in the queue are placed with respect to #iterateDirection |
| 371 // i.e. if we iterate from new to old, recent filenames come first | 366 // i.e. if we iterate from new to old, recent filenames come first |
| 372 List<Pair<HgDataFile, Nodeid>> fileRenamesQueue = buildFileRenamesQueue(); | 367 List<Pair<HgDataFile, Nodeid>> fileRenamesQueue = buildFileRenamesQueue(); |
| 373 progressHelper.start(4 * fileRenamesQueue.size()); | 368 progressHelper.start(4 * fileRenamesQueue.size()); |
| 374 for (int namesIndex = 0, renamesQueueSize = fileRenamesQueue.size(); namesIndex < renamesQueueSize; namesIndex++) { | 369 for (int namesIndex = 0, renamesQueueSize = fileRenamesQueue.size(); namesIndex < renamesQueueSize; namesIndex++) { |
| 375 | 370 |
| 376 final Pair<HgDataFile, Nodeid> renameInfo = fileRenamesQueue.get(namesIndex); | 371 final Pair<HgDataFile, Nodeid> renameInfo = fileRenamesQueue.get(namesIndex); |
| 372 dispatcher.prepare(progressHelper, renameInfo); | |
| 377 cancelHelper.checkCancelled(); | 373 cancelHelper.checkCancelled(); |
| 378 final List<HistoryNode> changeHistory = treeBuildInspector.go(renameInfo.first(), renameInfo.second()); | 374 if (namesIndex > 0) { |
| 379 assert changeHistory.size() > 0; | 375 dispatcher.connectWithLastJunctionPoint(renameInfo, fileRenamesQueue.get(namesIndex - 1), renameHandler); |
| 380 progressHelper.worked(1); | |
| 381 cancelHelper.checkCancelled(); | |
| 382 dispatcher.prepare(progressHelper, changeHistory.size(), treeBuildInspector); | |
| 383 if (lastFromPrevIteration != null) { | |
| 384 if (iterateDirection == IterateDirection.FromOldToNew) { | |
| 385 // forward, from old to new: | |
| 386 // A(0..n) -> B(0..m). First, report A(0)..A(n-1) | |
| 387 // then A(n).bind(B(0)) | |
| 388 HistoryNode oldestOfTheNextChunk = changeHistory.get(0); // B(0) | |
| 389 lastFromPrevIteration.bindChild(oldestOfTheNextChunk); // lastFromPrevIteration is A(n) | |
| 390 dispatcher.once(lastFromPrevIteration); | |
| 391 if (renameHandler != null) { // shall report renames | |
| 392 assert namesIndex > 0; | |
| 393 HgDataFile lastIterationFileNode = fileRenamesQueue.get(namesIndex-1).first(); // A | |
| 394 HgFileRevision copiedFrom = new HgFileRevision(lastIterationFileNode, lastFromPrevIteration.fileRevision, null); | |
| 395 HgFileRevision copiedTo = new HgFileRevision(renameInfo.first(), oldestOfTheNextChunk.fileRevision, copiedFrom.getPath()); | |
| 396 renameHandler.copy(copiedFrom, copiedTo); | |
| 397 } | |
| 398 } else { | |
| 399 assert iterateDirection == IterateDirection.FromNewToOld; | |
| 400 // A renamed to B. A(0..n) -> B(0..m). | |
| 401 // First, report B(m), B(m-1)...B(1), then A(n).bind(B(0)), report B(0), A(n)... | |
| 402 HistoryNode newestOfNextChunk = changeHistory.get(changeHistory.size() - 1); // A(n) | |
| 403 newestOfNextChunk.bindChild(lastFromPrevIteration); | |
| 404 dispatcher.once(lastFromPrevIteration); | |
| 405 if (renameHandler != null) { | |
| 406 assert namesIndex > 0; | |
| 407 // renameInfo points to chunk of name A now, and lastFromPrevIteration (from namesIndex-1) is B | |
| 408 HgFileRevision copiedFrom = new HgFileRevision(renameInfo.first(), newestOfNextChunk.fileRevision, null); | |
| 409 HgDataFile lastIterationFileNode = fileRenamesQueue.get(namesIndex-1).first(); // B | |
| 410 HgFileRevision copiedTo = new HgFileRevision(lastIterationFileNode, lastFromPrevIteration.fileRevision, copiedFrom.getPath()); | |
| 411 renameHandler.copy(copiedFrom, copiedTo); | |
| 412 } | |
| 413 } | |
| 414 } | 376 } |
| 415 if (namesIndex + 1 < renamesQueueSize) { | 377 if (namesIndex + 1 < renamesQueueSize) { |
| 416 // there's at least one more name we are going to look at, save | 378 // there's at least one more name we are going to look at |
| 417 // one element for later binding | 379 dispatcher.updateJunctionPoint(renameInfo, fileRenamesQueue.get(namesIndex+1)); |
| 418 // | 380 } else { |
| 419 if (iterateDirection == IterateDirection.FromOldToNew) { | 381 dispatcher.clearJunctionPoint(); |
| 420 // save newest, and exclude it from this iteration (postpone for next) | 382 } |
| 421 lastFromPrevIteration = changeHistory.remove(changeHistory.size()-1); | 383 dispatcher.dispatchAllChanges(); |
| 422 } else { | |
| 423 assert iterateDirection == IterateDirection.FromNewToOld; | |
| 424 // save oldest, and exclude it from this iteration (postpone for next) | |
| 425 lastFromPrevIteration = changeHistory.remove(0); | |
| 426 } | |
| 427 } else { | |
| 428 lastFromPrevIteration = null; // just for the sake of no references to old items | |
| 429 } | |
| 430 dispatcher.switchTo(renameInfo.first()); | |
| 431 // XXX shall sort changeHistory according to changeset numbers? | |
| 432 Iterator<HistoryNode> it; | |
| 433 if (iterateDirection == IterateDirection.FromOldToNew) { | |
| 434 it = changeHistory.listIterator(); | |
| 435 } else { | |
| 436 assert iterateDirection == IterateDirection.FromNewToOld; | |
| 437 it = new ReverseIterator<HistoryNode>(changeHistory); | |
| 438 } | |
| 439 while(it.hasNext()) { | |
| 440 HistoryNode n = it.next(); | |
| 441 dispatcher.once(n); | |
| 442 } | |
| 443 } // for fileRenamesQueue; | 384 } // for fileRenamesQueue; |
| 444 progressHelper.done(); | 385 progressHelper.done(); |
| 445 } | 386 } |
| 446 | 387 |
| 447 private IterateDirection iterateDirection = IterateDirection.FromOldToNew; | 388 private IterateDirection iterateDirection = IterateDirection.FromOldToNew; |
| 527 commitRevisions[revisionNumber] = linkedRevision; | 468 commitRevisions[revisionNumber] = linkedRevision; |
| 528 } | 469 } |
| 529 | 470 |
| 530 public void next(int revisionNumber, Nodeid revision, int parent1, int parent2, Nodeid nidParent1, Nodeid nidParent2) { | 471 public void next(int revisionNumber, Nodeid revision, int parent1, int parent2, Nodeid nidParent1, Nodeid nidParent2) { |
| 531 HistoryNode p1 = null, p2 = null; | 472 HistoryNode p1 = null, p2 = null; |
| 473 // IMPORTANT: method #one(), below, doesn't expect this; this code expects reasonable values at parent indexes |
| 532 if (parent1 != -1) { | 474 if (parent1 != -1) { |
| 533 p1 = completeHistory[parent1]; | 475 p1 = completeHistory[parent1]; |
| 534 } | 476 } |
| 535 if (parent2!= -1) { | 477 if (parent2!= -1) { |
| 536 p2 = completeHistory[parent2]; | 478 p2 = completeHistory[parent2]; |
| 537 } | 479 } |
| 538 completeHistory[revisionNumber] = new HistoryNode(commitRevisions[revisionNumber], revision, p1, p2); | 480 completeHistory[revisionNumber] = new HistoryNode(commitRevisions[revisionNumber], revision, p1, p2); |
| 481 } | |
| 482 | |
| 483 HistoryNode one(HgDataFile fileNode, Nodeid fileRevision) throws HgInvalidControlFileException { | |
| 484 int fileRevIndexToVisit = fileNode.getRevisionIndex(fileRevision); | |
| 485 return one(fileNode, fileRevIndexToVisit); | |
| 486 } | |
| 487 | |
| 488 HistoryNode one(HgDataFile fileNode, int fileRevIndexToVisit) throws HgInvalidControlFileException { | |
| 489 resultHistory = null; | |
| 490 if (fileRevIndexToVisit == HgRepository.TIP) { | |
| 491 fileRevIndexToVisit = fileNode.getLastRevision(); | |
| 492 } | |
| 493 // still, allocate whole array, for #next to be able to get null parent values | |
| 494 completeHistory = new HistoryNode[fileRevIndexToVisit+1]; | |
| 495 commitRevisions = new int[completeHistory.length]; | |
| 496 fileNode.indexWalk(fileRevIndexToVisit, fileRevIndexToVisit, this); | |
| 497 // it's only single revision, no need to care about followAncestry | |
| 498 // but won't hurt to keep resultHistory != null and commitRevisions initialized just in case | |
| 499 HistoryNode rv = completeHistory[fileRevIndexToVisit]; | |
| 500 commitRevisions = new int[] { commitRevisions[fileRevIndexToVisit] }; | |
| 501 completeHistory = null; // no need to keep almost empty array in memory | |
| 502 resultHistory = Collections.singletonList(rv); | |
| 503 return rv; | |
| 539 } | 504 } |
| 540 | 505 |
| 541 /** | 506 /** |
| 542 * Builds history of file changes (in natural order, from oldest to newest) up to (and including) file revision specified. | 507 * Builds history of file changes (in natural order, from oldest to newest) up to (and including) file revision specified. |
| 543 * If {@link TreeBuildInspector} follows ancestry, only elements that are on the line of ancestry of the revision at | 508 * If {@link TreeBuildInspector} follows ancestry, only elements that are on the line of ancestry of the revision at |
| 626 } | 591 } |
| 627 return commitRevisions; | 592 return commitRevisions; |
| 628 } | 593 } |
| 629 }; | 594 }; |
| 630 | 595 |
| 596 private abstract class HandlerDispatcher { | |
| 597 private final int CACHE_CSET_IN_ADVANCE_THRESHOLD = 100; /* XXX is it really worth it? */ | |
| 598 // builds tree of nodes according to parents in file's revlog | |
| 599 private final TreeBuildInspector treeBuildInspector = new TreeBuildInspector(followAncestry); | |
| 600 private List<HistoryNode> changeHistory; | |
| 601 protected ElementImpl ei = null; | |
| 602 private ProgressSupport progress; | |
| 603 protected HgDataFile currentFileNode; | |
| 604 // node where current file history chunk intersects with same file under other name history | |
| 605 // either mock of B(0) or A(k), depending on iteration order | |
| 606 private HistoryNode junctionNode; | |
| 607 | |
| 608 // parentProgress shall be initialized with 4 XXX refactor all this stuff with parentProgress | |
| 609 public void prepare(ProgressSupport parentProgress, Pair<HgDataFile, Nodeid> renameInfo) { | |
| 610 // if we don't followAncestry, take complete history | |
| 611 // XXX treeBuildInspector knows followAncestry, perhaps the logic | |
| 612 // whether to take specific revision or the last one shall be there? | |
| 613 changeHistory = treeBuildInspector.go(renameInfo.first(), followAncestry ? renameInfo.second() : null); | |
| 614 assert changeHistory.size() > 0; | |
| 615 parentProgress.worked(1); | |
| 616 int historyNodeCount = changeHistory.size(); | |
| 617 if (ei == null) { | |
| 618 // when follow is true, changeHistory.size() of the first revision might be quite short | |
| 619 // (e.g. bad fname recognized soon), hence ensure at least cache size at once | |
| 620 ei = new ElementImpl(Math.max(CACHE_CSET_IN_ADVANCE_THRESHOLD, historyNodeCount)); | |
| 621 } | |
| 622 if (historyNodeCount < CACHE_CSET_IN_ADVANCE_THRESHOLD ) { | |
| 623 int[] commitRevisions = treeBuildInspector.getCommitRevisions(); | |
| 624 assert commitRevisions.length == changeHistory.size(); | |
| 625 // read bunch of changesets at once and cache 'em | |
| 626 ei.initTransform(); | |
| 627 repo.getChangelog().range(ei, commitRevisions); | |
| 628 parentProgress.worked(1); | |
| 629 progress = new ProgressSupport.Sub(parentProgress, 2); | |
| 630 } else { | |
| 631 progress = new ProgressSupport.Sub(parentProgress, 3); | |
| 632 } | |
| 633 progress.start(historyNodeCount); | |
| 634 // switch to present chunk's file node | |
| 635 switchTo(renameInfo.first()); | |
| 636 } | |
| 637 | |
| 638 public void updateJunctionPoint(Pair<HgDataFile, Nodeid> curRename, Pair<HgDataFile, Nodeid> nextRename) { | |
| 639 // A (old) renamed to B(new). A(0..k..n) -> B(0..m). If followAncestry, k == n | |
| 640 // curRename.second() points to A(k) | |
| 641 if (iterateDirection == IterateDirection.FromOldToNew) { | |
| 642 // looking at A chunk (curRename), nextRename points to B | |
| 643 HistoryNode junctionSrc = findJunctionPointInCurrentChunk(curRename.second()); // A(k) | |
| 644 HistoryNode junctionDestMock = treeBuildInspector.one(nextRename.first(), 0); // B(0) | |
| 645 // junctionDestMock is mock object, once we iterate next rename, there'd be different HistoryNode |
| 646 // for B's first revision. This means we read it twice, but this seems to be reasonable | |
| 647 // price for simplicity of the code (and opportunity to follow renames while not following ancestry) | |
| 648 junctionSrc.bindChild(junctionDestMock); | |
| 649 // Save mock A(k) 1) not to keep whole A history in memory 2) Don't need its parent and children once get to B |
| 650 // moreover, children of original A(k) (junctionSrc) would list mock B(0) which is undesired once we iterate over real B | |
| 651 junctionNode = new HistoryNode(junctionSrc.changeset, junctionSrc.fileRevision, null, null); | |
| 652 } else { | |
| 653 assert iterateDirection == IterateDirection.FromNewToOld; | |
| 654 // looking at B chunk (curRename), nextRename points at A | |
| 655 HistoryNode junctionDest = changeHistory.get(0); // B(0) | |
| 656 // prepare mock A(k) | |
| 657 HistoryNode junctionSrcMock = treeBuildInspector.one(nextRename.first(), nextRename.second()); // A(k) | |
| 658 // B(0) to list A(k) as its parent | |
| 659 // NOTE, A(k) would be different when we reach A chunk on the next iteration, | |
| 660 // but we do not care as long as TreeElement needs only parent/child changesets | |
| 661 // and not other TreeElements; so that it's enough to have mock parent node (just | |
| 662 // for the sake of parent cset revisions). We have to, indeed, update real A(k), | |
| 663 // once we get to iteration over A, with B(0) (junctionDest) as one more child. | |
| 664 junctionSrcMock.bindChild(junctionDest); | |
| 665 // Save mock B(0), for reasons see above for opposite direction | |
| 666 junctionNode = new HistoryNode(junctionDest.changeset, junctionDest.fileRevision, null, null); | |
| 667 } | |
| 668 } | |
| 669 | |
| 670 public void clearJunctionPoint() { | |
| 671 junctionNode = null; | |
| 672 } | |
| 673 | |
| 674 public void connectWithLastJunctionPoint(Pair<HgDataFile, Nodeid> curRename, Pair<HgDataFile, Nodeid> prevRename, HgFileRenameHandlerMixin renameHandler) throws HgCallbackTargetException { | |
| 675 assert junctionNode != null; | |
| 676 // A renamed to B. A(0..k..n) -> B(0..m). If followAncestry: k == n | |
| 677 if (iterateDirection == IterateDirection.FromOldToNew) { | |
| 678 // forward, from old to new: | |
| 679 // changeHistory points to B | |
| 680 // Already reported: A(0)..A(n), A(k) is in junctionNode | |
| 681 // Shall connect histories: A(k).bind(B(0)) | |
| 682 HistoryNode junctionDest = changeHistory.get(0); // B(0) | |
| 683 // junctionNode is A(k) | |
| 684 junctionNode.bindChild(junctionDest); | |
| 685 if (renameHandler != null) { // shall report renames | |
| 686 HgFileRevision copiedFrom = new HgFileRevision(prevRename.first(), junctionNode.fileRevision, null); // "A", A(k) | |
| 687 HgFileRevision copiedTo = new HgFileRevision(curRename.first(), junctionDest.fileRevision, copiedFrom.getPath()); // "B", B(0) | |
| 688 renameHandler.copy(copiedFrom, copiedTo); | |
| 689 } | |
| 690 } else { | |
| 691 assert iterateDirection == IterateDirection.FromNewToOld; | |
| 692 // changeHistory points to A | |
| 693 // Already reported B(m), B(m-1)...B(0), B(0) is in junctionNode | |
| 694 // Shall connect histories A(k).bind(B(0)) | |
| 695 // if followAncestry: A(k) is latest in changeHistory (k == n) | |
| 696 HistoryNode junctionSrc = findJunctionPointInCurrentChunk(curRename.second()); // A(k) | |
| 697 junctionSrc.bindChild(junctionNode); | |
| 698 if (renameHandler != null) { | |
| 699 HgFileRevision copiedFrom = new HgFileRevision(curRename.first(), junctionSrc.fileRevision, null); // "A", A(k) | |
| 700 HgFileRevision copiedTo = new HgFileRevision(prevRename.first(), junctionNode.fileRevision, copiedFrom.getPath()); // "B", B(0) | |
| 701 renameHandler.copy(copiedFrom, copiedTo); | |
| 702 } | |
| 703 } | |
| 704 } | |
| 705 | |
| 706 private HistoryNode findJunctionPointInCurrentChunk(Nodeid fileRevision) { | |
| 707 if (followAncestry) { | |
| 708 // use the fact we don't go past junction point when followAncestry == true | |
| 709 HistoryNode rv = changeHistory.get(changeHistory.size() - 1); | |
| 710 assert rv.fileRevision.equals(fileRevision); | |
| 711 return rv; | |
| 712 } | |
| 713 for (HistoryNode n : changeHistory) { | |
| 714 if (n.fileRevision.equals(fileRevision)) { | |
| 715 return n; | |
| 716 } | |
| 717 } | |
| 718 int csetStart = changeHistory.get(0).changeset; | |
| 719 int csetEnd = changeHistory.get(changeHistory.size() - 1).changeset; | |
| 720 throw new HgInvalidStateException(String.format("For change history (cset[%d..%d]) could not find node for file change %s", csetStart, csetEnd, fileRevision.shortNotation())); | |
| 721 } | |
| 722 | |
| 723 protected abstract void once(HistoryNode n) throws HgCallbackTargetException, CancelledException; | |
| 724 | |
| 725 public void dispatchAllChanges() throws HgCallbackTargetException, CancelledException { | |
| 726 // XXX shall sort changeHistory according to changeset numbers? | |
| 727 Iterator<HistoryNode> it; | |
| 728 if (iterateDirection == IterateDirection.FromOldToNew) { | |
| 729 it = changeHistory.listIterator(); | |
| 730 } else { | |
| 731 assert iterateDirection == IterateDirection.FromNewToOld; | |
| 732 it = new ReverseIterator<HistoryNode>(changeHistory); | |
| 733 } | |
| 734 while(it.hasNext()) { | |
| 735 HistoryNode n = it.next(); | |
| 736 once(n); | |
| 737 progress.worked(1); | |
| 738 } | |
| 739 changeHistory = null; | |
| 740 } | |
| 741 | |
| 742 public void switchTo(HgDataFile df) { | |
| 743 // from now on, use df in TreeElement | |
| 744 currentFileNode = df; | |
| 745 } | |
| 746 } | |
| 747 | |
| 631 | 748 |
| 632 // | 749 // |
| 633 | 750 |
| 634 public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) { | 751 public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) { |
| 635 if (limit > 0 && count >= limit) { | 752 if (limit > 0 && count >= limit) { |
