comparison src/org/tmatesoft/hg/core/HgLogCommand.java @ 508:ca5202afea90
Support follow history option when walking file history tree
author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
date | Wed, 12 Dec 2012 20:52:10 +0100 |
parents | a6435c1a42d0 |
children | a30e74dca193 |
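For context on what this change enables at the API level, the sketch below shows how a client might switch the new follow behaviour on and consume the file history tree. It is only a rough illustration: the facade and handler names (HgRepoFacade, HgLogCommand.file(Path, boolean), execute(HgChangesetTreeHandler), the TreeElement accessors) are taken from the library's public surface of roughly this period and from identifiers visible in the diff below, so treat them as assumptions rather than API verified against this exact revision.

```java
import java.io.File;

import org.tmatesoft.hg.core.HgChangesetTreeHandler;
import org.tmatesoft.hg.core.HgLogCommand;
import org.tmatesoft.hg.core.HgRepoFacade;
import org.tmatesoft.hg.util.Path;

public class FollowFileTreeSketch {

	public static void main(String[] args) throws Exception {
		// open an existing repository (location passed as the first argument)
		HgRepoFacade repoFacade = new HgRepoFacade();
		repoFacade.initFrom(new File(args[0]));

		HgLogCommand cmd = repoFacade.createLogCommand();
		// the boolean is the follow-copy/rename flag; this changeset teaches the
		// tree walk (field 'followHistory' in the diff below) to honour it
		cmd.file(Path.create("src/org/tmatesoft/hg/core/HgLogCommand.java"), true);

		// walk the file history tree; treeElement(..) is the callback the patched
		// method invokes once per HistoryNode
		cmd.execute(new HgChangesetTreeHandler() {
			public void treeElement(HgChangesetTreeHandler.TreeElement entry) {
				// accessor names (fileRevision(), changeset(), getNodeid()) are assumed
				System.out.println(entry.fileRevision() + " @ " + entry.changeset().getNodeid());
			}
		});
	}
}
```

If those assumptions hold, running the sketch against a repository where the file was renamed should report the whole chain of names, oldest first, which is what the buildFileRenamesQueue() and fileLastRevToVisit logic in the diff below arranges.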
507:a6435c1a42d0 | 508:ca5202afea90 |
---|---|
298 throw new ConcurrentModificationException(); | 298 throw new ConcurrentModificationException(); |
299 } | 299 } |
300 if (file == null) { | 300 if (file == null) { |
301 throw new IllegalArgumentException("History tree is supported for files only (at least now), please specify file"); | 301 throw new IllegalArgumentException("History tree is supported for files only (at least now), please specify file"); |
302 } | 302 } |
303 if (followHistory) { | |
304 throw new UnsupportedOperationException("Can't follow file history when building tree (yet?)"); | |
305 } | |
306 class TreeBuildInspector implements HgChangelog.ParentInspector, HgChangelog.RevisionInspector { | |
307 HistoryNode[] completeHistory; | |
308 int[] commitRevisions; | |
309 | |
310 public void next(int revisionNumber, Nodeid revision, int linkedRevision) { | |
311 commitRevisions[revisionNumber] = linkedRevision; | |
312 } | |
313 | |
314 public void next(int revisionNumber, Nodeid revision, int parent1, int parent2, Nodeid nidParent1, Nodeid nidParent2) { | |
315 HistoryNode p1 = null, p2 = null; | |
316 if (parent1 != -1) { | |
317 p1 = completeHistory[parent1]; | |
318 } | |
319 if (parent2 != -1) { |
320 p2 = completeHistory[parent2]; | |
321 } | |
322 completeHistory[revisionNumber] = new HistoryNode(commitRevisions[revisionNumber], revision, p1, p2); | |
323 } | |
324 | |
325 HistoryNode[] go(HgDataFile fileNode) throws HgInvalidControlFileException { | |
326 completeHistory = new HistoryNode[fileNode.getRevisionCount()]; | |
327 commitRevisions = new int[completeHistory.length]; | |
328 fileNode.indexWalk(0, TIP, this); | |
329 return completeHistory; | |
330 } | |
331 }; | |
332 final ProgressSupport progressHelper = getProgressSupport(handler); | 303 final ProgressSupport progressHelper = getProgressSupport(handler); |
333 final CancelSupport cancelHelper = getCancelSupport(handler, true); | 304 final CancelSupport cancelHelper = getCancelSupport(handler, true); |
334 | 305 |
335 LinkedList<HgDataFile> fileRenamesQueue = buildFileRenamesQueue(); | 306 // builds tree of nodes according to parents in file's revlog |
307 final TreeBuildInspector treeBuildInspector = new TreeBuildInspector(followHistory); | |
308 | |
309 // most recent file is first in the queue | |
310 LinkedList<Pair<HgDataFile, Nodeid>> fileRenamesQueue = buildFileRenamesQueue(); | |
336 progressHelper.start(4 * fileRenamesQueue.size()); | 311 progressHelper.start(4 * fileRenamesQueue.size()); |
337 do { | 312 do { |
338 HgDataFile fileNode = fileRenamesQueue.removeLast(); | 313 // to maintain iteration order from older to newer, take oldest name (in case of renames) first |
314 Pair<HgDataFile, Nodeid> renameInfo = fileRenamesQueue.removeLast(); | |
339 cancelHelper.checkCancelled(); | 315 cancelHelper.checkCancelled(); |
340 // build tree of nodes according to parents in file's revlog | 316 HgDataFile fileNode = renameInfo.first(); |
341 final TreeBuildInspector treeBuildInspector = new TreeBuildInspector(); | 317 Nodeid fileLastRevToVisit = null; |
342 final HistoryNode[] completeHistory = treeBuildInspector.go(fileNode); | 318 if (followHistory) { |
319 fileLastRevToVisit = renameInfo.second(); | |
320 if (fileLastRevToVisit == null) { | |
321 // only the most recent file name is expected to come without a changeset when a rename has happened. |
322 assert fileRenamesQueue.isEmpty(); | |
323 // TODO subject to dedicated method either in HgRepository (getWorkingCopyParentRevisionIndex) | |
324 // or in the HgDataFile (getWorkingCopyOriginRevision) | |
325 Nodeid wdParentChangeset = repo.getWorkingCopyParents().first(); | |
326 if (!wdParentChangeset.isNull()) { | |
327 int wdParentRevIndex = repo.getChangelog().getRevisionIndex(wdParentChangeset); | |
328 fileLastRevToVisit = repo.getManifest().getFileRevision(wdParentRevIndex, fileNode.getPath()); | |
329 } | |
330 // else fall-through, assume lastRevision() is ok here | |
331 } | |
332 } | |
333 int fileLastRevIndexToVisit = fileLastRevToVisit == null ? fileNode.getLastRevision() : fileNode.getRevisionIndex(fileLastRevToVisit); | |
334 final HistoryNode[] changeHistory = treeBuildInspector.go(fileNode, fileLastRevIndexToVisit); | |
335 int[] commitRevisions = treeBuildInspector.getCommitRevisions(); | |
336 assert changeHistory.length == commitRevisions.length; | |
343 progressHelper.worked(1); | 337 progressHelper.worked(1); |
344 cancelHelper.checkCancelled(); | 338 cancelHelper.checkCancelled(); |
345 ElementImpl ei = new ElementImpl(treeBuildInspector.commitRevisions.length); | 339 ElementImpl ei = new ElementImpl(commitRevisions.length); |
346 final ProgressSupport ph2; | 340 final ProgressSupport ph2; |
347 if (treeBuildInspector.commitRevisions.length < 100 /*XXX is it really worth it? */) { | 341 if (commitRevisions.length < 100 /*XXX is it really worth it? */) { |
342 // read bunch of changesets at once and cache 'em | |
348 ei.initTransform(); | 343 ei.initTransform(); |
349 repo.getChangelog().range(ei, treeBuildInspector.commitRevisions); | 344 repo.getChangelog().range(ei, commitRevisions); |
350 progressHelper.worked(1); | 345 progressHelper.worked(1); |
351 ph2 = new ProgressSupport.Sub(progressHelper, 2); | 346 ph2 = new ProgressSupport.Sub(progressHelper, 2); |
352 } else { | 347 } else { |
353 ph2 = new ProgressSupport.Sub(progressHelper, 3); | 348 ph2 = new ProgressSupport.Sub(progressHelper, 3); |
354 } | 349 } |
355 ph2.start(completeHistory.length); | 350 ph2.start(changeHistory.length); |
356 // XXX shall sort completeHistory according to changeset numbers? | 351 // XXX shall sort changeHistory according to changeset numbers? |
357 for (int i = 0; i < completeHistory.length; i++ ) { | 352 for (int i = 0; i < changeHistory.length; i++ ) { |
358 final HistoryNode n = completeHistory[i]; | 353 final HistoryNode n = changeHistory[i]; |
359 handler.treeElement(ei.init(n)); | 354 handler.treeElement(ei.init(n)); |
360 ph2.worked(1); | 355 ph2.worked(1); |
361 cancelHelper.checkCancelled(); | 356 cancelHelper.checkCancelled(); |
362 } | 357 } |
363 } while (!fileRenamesQueue.isEmpty()); | 358 } while (!fileRenamesQueue.isEmpty()); |
364 progressHelper.done(); | 359 progressHelper.done(); |
365 } | 360 } |
366 | 361 |
367 /** | 362 /** |
368 * Follows file renames and build a list of all corresponding file nodes. If {@link #followHistory} is <code>false</code>, | 363 * Follows file renames and builds a list of all corresponding file nodes and revisions they were |
369 * the list contains one element only, file node with the name of the file as it was specified by the user. | 364 * copied/renamed/branched at (IOW, their latest revision to look at). |
365 * | |
366 * If {@link #followHistory} is <code>false</code>, the list contains one element only, | |
367 * file node with the name of the file as it was specified by the user. | |
368 * | |
369 * For the most recent file, the revision is null. |
370 * | |
371 * TODO may use HgFileRevision (after some refactoring to accept HgDataFile and Nodeid) instead of Pair | |
372 * and possibly reuse this functionality | |
370 * | 373 * |
371 * @return list of file renames, with most recent file first | 374 * @return list of file renames, with most recent file first |
372 */ | 375 */ |
373 private LinkedList<HgDataFile> buildFileRenamesQueue() { | 376 private LinkedList<Pair<HgDataFile, Nodeid>> buildFileRenamesQueue() { |
374 LinkedList<HgDataFile> rv = new LinkedList<HgDataFile>(); | 377 LinkedList<Pair<HgDataFile, Nodeid>> rv = new LinkedList<Pair<HgDataFile, Nodeid>>(); |
375 if (!followHistory) { | 378 if (!followHistory) { |
376 rv.add(repo.getFileNode(file)); | 379 rv.add(new Pair<HgDataFile, Nodeid>(repo.getFileNode(file), null)); |
377 return rv; | 380 return rv; |
378 } | 381 } |
379 HgDataFile fileNode; | |
380 Path fp = file; | 382 Path fp = file; |
383 Nodeid copyRev = null; | |
381 boolean isCopy; | 384 boolean isCopy; |
382 do { | 385 do { |
383 fileNode = repo.getFileNode(fp); | 386 HgDataFile fileNode = repo.getFileNode(fp); |
384 rv.addLast(fileNode); | 387 rv.addLast(new Pair<HgDataFile, Nodeid>(fileNode, copyRev)); |
385 if (isCopy = fileNode.isCopy()) { | 388 if (isCopy = fileNode.isCopy()) { |
386 fp = fileNode.getCopySourceName(); | 389 fp = fileNode.getCopySourceName(); |
390 copyRev = fileNode.getCopySourceRevision(); | |
387 } | 391 } |
388 } while (isCopy); | 392 } while (isCopy); |
389 return rv; | 393 return rv; |
390 } | 394 } |
395 | |
396 private static class TreeBuildInspector implements HgChangelog.ParentInspector, HgChangelog.RevisionInspector { | |
397 private final boolean followAncestry; | |
398 | |
399 private HistoryNode[] completeHistory; | |
400 private int[] commitRevisions; | |
401 | |
402 TreeBuildInspector(boolean _followAncestry) { | |
403 followAncestry = _followAncestry; | |
404 } | |
405 | |
406 public void next(int revisionNumber, Nodeid revision, int linkedRevision) { | |
407 commitRevisions[revisionNumber] = linkedRevision; | |
408 } | |
409 | |
410 public void next(int revisionNumber, Nodeid revision, int parent1, int parent2, Nodeid nidParent1, Nodeid nidParent2) { | |
411 HistoryNode p1 = null, p2 = null; | |
412 if (parent1 != -1) { | |
413 p1 = completeHistory[parent1]; | |
414 } | |
415 if (parent2 != -1) { |
416 p2 = completeHistory[parent2]; | |
417 } | |
418 completeHistory[revisionNumber] = new HistoryNode(commitRevisions[revisionNumber], revision, p1, p2); | |
419 } | |
420 | |
421 /** | |
422 * Builds the history of file changes (in natural order, from oldest to newest) up to (and including) the specified file revision. |
423 * If {@link TreeBuildInspector} follows ancestry, only elements that are on the line of ancestry of the revision at |
424 * lastRevisionIndex are included. |
425 */ | |
426 HistoryNode[] go(HgDataFile fileNode, int lastRevisionIndex) throws HgInvalidControlFileException { | |
427 completeHistory = new HistoryNode[lastRevisionIndex+1]; | |
428 commitRevisions = new int[completeHistory.length]; | |
429 fileNode.indexWalk(0, lastRevisionIndex, this); | |
430 if (!followAncestry) { | |
431 return completeHistory; | |
432 } | |
433 /* | |
434 * Changesets: | |
435 * o <-- cset from working dir parent (as in dirstate), file not changed (file revision recorded points to that from A) | |
436 * | x <-- revision with file changed (B') | |
437 * x / <-- revision with file changed (A) | |
438 * | x <-- revision with file changed (B) | |
439 * |/ | |
440 * o <-- another changeset, where file wasn't changed | |
441 * | | |
442 * x <-- revision with file changed (C) | |
443 * | |
444 * File history: B', A, B, C | |
445 * | |
446 * When "follow", SHALL NOT report B and B', but A and C | |
447 */ | |
448 // strippedHistory: only those HistoryNodes from completeHistory that are on the same | |
449 // line of descent, in order from older to newer |
450 LinkedList<HistoryNode> strippedHistoryList = new LinkedList<HistoryNode>(); | |
451 LinkedList<HistoryNode> queue = new LinkedList<HistoryNode>(); | |
452 // look for ancestors of the selected history node | |
453 queue.add(completeHistory[lastRevisionIndex]); | |
454 do { | |
455 HistoryNode withFileChange = queue.removeFirst(); | |
456 if (withFileChange.children != null) { | |
457 withFileChange.children.retainAll(strippedHistoryList); | |
458 } | |
459 strippedHistoryList.addFirst(withFileChange); | |
460 if (withFileChange.parent1 != null) { | |
461 queue.addLast(withFileChange.parent1); | |
462 } | |
463 if (withFileChange.parent2 != null) { | |
464 queue.addLast(withFileChange.parent2); | |
465 } | |
466 } while (!queue.isEmpty()); | |
467 HistoryNode[] strippedHistory = strippedHistoryList.toArray(new HistoryNode[strippedHistoryList.size()]); | |
468 completeHistory = null; | |
469 commitRevisions = null; | |
470 // collected values are no longer valid - shall | |
471 // strip off elements for missing HistoryNodes, but it's easier just to re-create the array | |
472 commitRevisions = new int[strippedHistory.length]; | |
473 for (int i = 0; i < commitRevisions.length; i++) { | |
474 commitRevisions[i] = strippedHistory[i].changeset; | |
475 } | |
476 return strippedHistory; | |
477 } | |
478 | |
479 /** | |
480 * handy access to all HistoryNode[i].changeset values | |
481 */ | |
482 int[] getCommitRevisions() { | |
483 return commitRevisions; | |
484 } | |
485 }; | |
486 | |
391 | 487 |
392 // | 488 // |
393 | 489 |
394 public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) { | 490 public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) { |
395 if (limit > 0 && count >= limit) { | 491 if (limit > 0 && count >= limit) { |