changeset 596:43cfa08ff3fd
HgBlameFacility refactoring: extract code to build file history that spans renames
author      Artem Tikhomirov <tikhomirov.artem@gmail.com>
date        Thu, 02 May 2013 19:23:53 +0200
parents     92c3ad9c2a51
children    c895b5556937
files       src/org/tmatesoft/hg/core/HgLogCommand.java
            src/org/tmatesoft/hg/internal/FileHistory.java
            src/org/tmatesoft/hg/internal/FileRevisionHistoryChunk.java
            src/org/tmatesoft/hg/internal/ReverseIterator.java
            src/org/tmatesoft/hg/repo/HgBlameFacility.java
diffstat    5 files changed, 371 insertions(+), 234 deletions(-)
--- a/src/org/tmatesoft/hg/core/HgLogCommand.java	Thu May 02 19:23:35 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgLogCommand.java	Thu May 02 19:23:53 2013 +0200
@@ -30,7 +30,6 @@
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.ListIterator;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -42,6 +41,7 @@
 import org.tmatesoft.hg.internal.Internals;
 import org.tmatesoft.hg.internal.Lifecycle;
 import org.tmatesoft.hg.internal.LifecycleProxy;
+import org.tmatesoft.hg.internal.ReverseIterator;
 import org.tmatesoft.hg.repo.HgChangelog;
 import org.tmatesoft.hg.repo.HgChangelog.RawChangeset;
 import org.tmatesoft.hg.repo.HgDataFile;
@@ -446,12 +446,7 @@
 		}
 		
 		public Iterable<BatchRecord> iterate(final boolean reverse) {
-			return new Iterable<BatchRecord>() {
-				
-				public Iterator<BatchRecord> iterator() {
-					return reverse ? new ReverseIterator<BatchRecord>(batch) : batch.iterator();
-				}
-			};
+			return reverse ? ReverseIterator.reversed(batch) : batch;
 		}
 		
 		// alternative would be dispatch(HgChangelog.Inspector) and dispatchReverse()
@@ -563,24 +558,6 @@
 		progressHelper.done();
 	}
 
-	private static class ReverseIterator<E> implements Iterator<E> {
-		private final ListIterator<E> listIterator;
-		
-		public ReverseIterator(List<E> list) {
-			listIterator = list.listIterator(list.size());
-		}
-
-		public boolean hasNext() {
-			return listIterator.hasPrevious();
-		}
-		public E next() {
-			return listIterator.previous();
-		}
-		public void remove() {
-			listIterator.remove();
-		}
-	}
-
 	/**
 	 * Utility to build sequence of file renames
 	 */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/FileHistory.java	Thu May 02 19:23:53 2013 +0200
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.core.HgIterateDirection.NewToOld;
+
+import java.util.Collections;
+import java.util.LinkedList;
+
+import org.tmatesoft.hg.core.HgIterateDirection;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgRepository;
+
+/**
+ * History of a file, with copy/renames, and corresponding revision information.
+ * Facility for file history iteration.
+ * 
+ * FIXME [post-1.1] Utilize in HgLogCommand and anywhere else we need to follow file history
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class FileHistory {
+	
+	private LinkedList<FileRevisionHistoryChunk> fileCompleteHistory = new LinkedList<FileRevisionHistoryChunk>();
+	private final HgDataFile df;
+	private final int csetTo;
+	private final int csetFrom;
+	
+	public FileHistory(HgDataFile file, int fromChangeset, int toChangeset) {
+		df = file;
+		csetFrom = fromChangeset;
+		csetTo = toChangeset;
+	}
+	
+	public int getStartChangeset() {
+		return csetFrom;
+	}
+	
+	public int getEndChangeset() {
+		return csetTo;
+	}
+	
+	public void build() {
+		assert fileCompleteHistory.isEmpty();
+		HgDataFile currentFile = df;
+		final int changelogRevIndexEnd = csetTo;
+		final int changelogRevIndexStart = csetFrom;
+		int fileLastClogRevIndex = changelogRevIndexEnd;
+		FileRevisionHistoryChunk nextChunk = null;
+		fileCompleteHistory.clear(); // just in case, #build() is not expected to be called more than once
+		do {
+			FileRevisionHistoryChunk fileHistory = new FileRevisionHistoryChunk(currentFile);
+			fileHistory.init(fileLastClogRevIndex);
+			fileHistory.linkTo(nextChunk);
+			fileCompleteHistory.addFirst(fileHistory); // to get the list in old-to-new order
+			nextChunk = fileHistory;
+			if (fileHistory.changeset(0) > changelogRevIndexStart && currentFile.isCopy()) {
+				// fileHistory.changeset(0) is the earliest revision we know about so far,
+				// once we get to revisions earlier than the requested start, stop digging.
+				// The reason there's NO == (i.e. not >=) because:
+				// (easy): once it's equal, we've reached our intended start
+				// (hard): if changelogRevIndexStart happens to be exact start of one of renames in the 
+				// chain of renames (test-annotate2 repository, file1->file1a->file1b, i.e. points 
+				// to the very start of file1a or file1 history), presence of == would get us to the next 
+				// chunk and hence changed parents of present chunk's first element. Our annotate alg 
+				// relies on parents only (i.e. knows nothing about 'last iteration element') to find out 
+				// what to compare, and hence won't report all lines of 'last iteration element' (which is the
+				// first revision of the renamed file) as "added in this revision", leaving gaps in annotate
+				HgRepository repo = currentFile.getRepo();
+				Nodeid originLastRev = currentFile.getCopySourceRevision();
+				currentFile = repo.getFileNode(currentFile.getCopySourceName());
+				fileLastClogRevIndex = currentFile.getChangesetRevisionIndex(currentFile.getRevisionIndex(originLastRev));
+				// XXX perhaps, shall fail with meaningful exception if new file doesn't exist (.i/.d not found for whatever reason)
+				// or source revision is missing?
+			} else {
+				fileHistory.chopAtChangeset(changelogRevIndexStart);
+				currentFile = null; // stop iterating
+			}
+		} while (currentFile != null && fileLastClogRevIndex > changelogRevIndexStart);
+		// fileCompleteHistory is in (origin, intermediate target, ultimate target) order
+	}
+	
+	public Iterable<FileRevisionHistoryChunk> iterate(HgIterateDirection order) {
+		if (order == NewToOld) {
+			return ReverseIterator.reversed(fileCompleteHistory);
+		}
+		return Collections.unmodifiableList(fileCompleteHistory);
+	}
+}
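Usage note (not part of the changeset): a minimal sketch of how the new internal FileHistory class could be driven once this changeset is applied. The repository location and file name are made up, HgLookup is used only to obtain a repository handle, and exception handling is elided via throws Exception.

import org.tmatesoft.hg.core.HgIterateDirection;
import org.tmatesoft.hg.core.HgLookup;
import org.tmatesoft.hg.internal.FileHistory;
import org.tmatesoft.hg.internal.FileRevisionHistoryChunk;
import org.tmatesoft.hg.repo.HgDataFile;
import org.tmatesoft.hg.repo.HgRepository;

public class FileHistoryExample {
	public static void main(String[] args) throws Exception {
		HgRepository repo = new HgLookup().detect("/path/to/repo"); // hypothetical location
		HgDataFile df = repo.getFileNode("src/Example.java");       // hypothetical file
		int tipRevIndex = repo.getChangelog().getRevisionCount() - 1;
		FileHistory fh = new FileHistory(df, 0, tipRevIndex); // complete history of the file
		fh.build();
		for (FileRevisionHistoryChunk chunk : fh.iterate(HgIterateDirection.OldToNew)) {
			// one chunk per path the file was known under, in (origin .. ultimate target) order
			System.out.println(chunk.getFile().getPath() + " up to cset " + chunk.getEndChangeset());
		}
	}
}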
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/FileRevisionHistoryChunk.java	Thu May 02 19:23:53 2013 +0200
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import static org.tmatesoft.hg.core.HgIterateDirection.OldToNew;
+import static org.tmatesoft.hg.repo.HgRepository.BAD_REVISION;
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
+
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.LinkedList;
+
+import org.tmatesoft.hg.core.HgIterateDirection;
+import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgRepository;
+
+/**
+ * Piece of file history, identified by path, limited to file revisions from range [chop..init] of changesets,
+ * can be linked to another piece.
+ * 
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public final class FileRevisionHistoryChunk {
+	private final HgDataFile df;
+	// change ancestry, sequence of file revisions
+	private IntVector fileRevsToVisit;
+	// parent pairs of complete file history
+	private IntVector fileParentRevs;
+	// map file revision to changelog revision (sparse array, only file revisions to visit are set)
+	private int[] file2changelog;
+	private int originChangelogRev = BAD_REVISION, originFileRev = BAD_REVISION;
+	private int csetRangeStart = NO_REVISION, csetRangeEnd = BAD_REVISION;
+	
+	
+	public FileRevisionHistoryChunk(HgDataFile file) {
+		df = file;
+	}
+	
+	/**
+	 * @return file at this specific chunk of history (i.e. its path may be different from the paths of other chunks)
+	 */
+	public HgDataFile getFile() {
+		return df;
+	}
+	
+	/**
+	 * @return changeset this file history chunk was chopped at, or {@link HgRepository#NO_REVISION} if none specified
+	 */
+	public int getStartChangeset() {
+		return csetRangeStart;
+	}
+	
+	/**
+	 * @return changeset this file history chunk ends at
+	 */
+	public int getEndChangeset() {
+		return csetRangeEnd;
+	}
+	
+	public void init(int changelogRevisionIndex) {
+		csetRangeEnd = changelogRevisionIndex;
+		// XXX df.indexWalk(0, fileRevIndex, ) might be more effective
+		Nodeid fileRev = df.getRepo().getManifest().getFileRevision(changelogRevisionIndex, df.getPath());
+		int fileRevIndex = df.getRevisionIndex(fileRev);
+		int[] fileRevParents = new int[2];
+		fileParentRevs = new IntVector((fileRevIndex+1) * 2, 0);
+		fileParentRevs.add(NO_REVISION, NO_REVISION); // parents of fileRevIndex == 0
+		for (int i = 1; i <= fileRevIndex; i++) {
+			df.parents(i, fileRevParents, null, null);
+			fileParentRevs.add(fileRevParents[0], fileRevParents[1]);
+		}
+		// fileRevsToVisit keep file change ancestry from new to old
+		fileRevsToVisit = new IntVector(fileRevIndex + 1, 0);
+		// keep map of file revision to changelog revision
+		file2changelog = new int[fileRevIndex+1];
+		// only elements worth visit would get mapped, so there would be unfilled areas in the file2changelog,
+		// prevent from error (make it explicit) by bad value
+		Arrays.fill(file2changelog, BAD_REVISION);
+		LinkedList<Integer> queue = new LinkedList<Integer>();
+		BitSet seen = new BitSet(fileRevIndex + 1);
+		queue.add(fileRevIndex);
+		do {
+			int x = queue.removeFirst();
+			if (seen.get(x)) {
+				continue;
+			}
+			seen.set(x);
+			fileRevsToVisit.add(x);
+			file2changelog[x] = df.getChangesetRevisionIndex(x);
+			int p1 = fileParentRevs.get(2*x);
+			int p2 = fileParentRevs.get(2*x + 1);
+			if (p1 != NO_REVISION) {
+				queue.addLast(p1);
+			}
+			if (p2 != NO_REVISION) {
+				queue.addLast(p2);
+			}
+		} while (!queue.isEmpty());
+		// make sure no child is processed before we handled all (grand-)parents of the element
+		fileRevsToVisit.sort(false);
+	}
+	
+	public void linkTo(FileRevisionHistoryChunk target) {
+		// assume that target.init() has been called already 
+		if (target == null) {
+			return;
+		}
+		target.originFileRev = fileRevsToVisit.get(0); // files to visit are new to old
+		target.originChangelogRev = changeset(target.originFileRev);
+	}
+	
+	/**
+	 * Mark revision closest(ceil) to specified as the very first one (no parents)
+	 */
+	public void chopAtChangeset(int firstChangelogRevOfInterest) {
+		csetRangeStart = firstChangelogRevOfInterest;
+		if (firstChangelogRevOfInterest == 0) {
+			return; // nothing to do
+		}
+		int i = 0, x = fileRevsToVisit.size(), fileRev = BAD_REVISION;
+		// fileRevsToVisit is new to old, greater numbers to smaller
+		while (i < x && changeset(fileRev = fileRevsToVisit.get(i)) >= firstChangelogRevOfInterest) {
+			i++;
+		}
+		assert fileRev != BAD_REVISION; // there's at least 1 revision in fileRevsToVisit
+		if (i == x && changeset(fileRev) != firstChangelogRevOfInterest) {
+			assert false : "Requested changeset shall belong to the chunk";
+			return;
+		}
+		fileRevsToVisit.trimTo(i); // no need to iterate more
+		// pretend fileRev got no parents
+		fileParentRevs.set(fileRev * 2, NO_REVISION);
+		fileParentRevs.set(fileRev, NO_REVISION);
+	}
+	
+	public int[] fileRevisions(HgIterateDirection iterateOrder) {
+		// fileRevsToVisit is { r10, r7, r6, r5, r0 }, new to old
+		int[] rv = fileRevsToVisit.toArray();
+		if (iterateOrder == OldToNew) {
+			// reverse return value
+			for (int a = 0, b = rv.length-1; a < b; a++, b--) {
+				int t = rv[b];
+				rv[b] = rv[a];
+				rv[a] = t;
+			}
+		}
+		return rv;
+	}
+	
+	public int changeset(int fileRevIndex) {
+		return file2changelog[fileRevIndex];
+	}
+	
+	public void fillFileParents(int fileRevIndex, int[] fileParents) {
+		if (fileRevIndex == 0 && originFileRev != BAD_REVISION) {
+			// this chunk continues another file
+			assert originFileRev != NO_REVISION;
+			fileParents[0] = originFileRev;
+			fileParents[1] = NO_REVISION;
+			return;
+		}
+		fileParents[0] = fileParentRevs.get(fileRevIndex * 2);
+		fileParents[1] = fileParentRevs.get(fileRevIndex * 2 + 1);
+	}
+	
+	public void fillCsetParents(int fileRevIndex, int[] csetParents) {
+		if (fileRevIndex == 0 && originFileRev != BAD_REVISION) {
+			assert originFileRev != NO_REVISION;
+			csetParents[0] = originChangelogRev;
+			csetParents[1] = NO_REVISION; // I wonder if possible to start a copy with two parents?
+			return;
+		}
+		int fp1 = fileParentRevs.get(fileRevIndex * 2);
+		int fp2 = fileParentRevs.get(fileRevIndex * 2 + 1);
+		csetParents[0] = fp1 == NO_REVISION ? NO_REVISION : changeset(fp1);
+		csetParents[1] = fp2 == NO_REVISION ? NO_REVISION : changeset(fp2);
+	}
+}
\ No newline at end of file
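Aside (not from the changeset): for a file with no renames involved, a single chunk can also be inspected on its own. A hedged fragment, assuming the relevant org.tmatesoft.hg.* imports and that df is an existing HgDataFile and csetIndex a valid changelog revision index:

// Hypothetical helper, only to illustrate the chunk API introduced above.
static void dumpChunk(HgDataFile df, int csetIndex) {
	FileRevisionHistoryChunk chunk = new FileRevisionHistoryChunk(df);
	chunk.init(csetIndex);    // collect file revisions reachable from csetIndex
	chunk.chopAtChangeset(0); // 0 == keep complete ancestry, nothing to chop
	int[] fileParents = new int[2];
	int[] csetParents = new int[2];
	for (int fileRev : chunk.fileRevisions(HgIterateDirection.OldToNew)) {
		chunk.fillFileParents(fileRev, fileParents);
		chunk.fillCsetParents(fileRev, csetParents);
		System.out.printf("file rev %d -> cset %d (parent csets: %d, %d)%n",
				fileRev, chunk.changeset(fileRev), csetParents[0], csetParents[1]);
	}
}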
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/ReverseIterator.java	Thu May 02 19:23:53 2013 +0200
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+
+/**
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class ReverseIterator<E> implements Iterator<E> {
+	private final ListIterator<E> listIterator;
+	
+	public ReverseIterator(List<E> list) {
+		listIterator = list.listIterator(list.size());
+	}
+
+	public boolean hasNext() {
+		return listIterator.hasPrevious();
+	}
+	public E next() {
+		return listIterator.previous();
+	}
+	public void remove() {
+		listIterator.remove();
+	}
+
+	public static <T> Iterable<T> reversed(final List<T> list) {
+		return new Iterable<T>() {
+
+			public Iterator<T> iterator() {
+				return new ReverseIterator<T>(list);
+			}
+		};
+	}
+}
\ No newline at end of file
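The static reversed() factory is what HgLogCommand.iterate(boolean) above now delegates to. A small standalone sketch (not part of the changeset) of the helper in isolation:

import java.util.Arrays;
import java.util.List;
import org.tmatesoft.hg.internal.ReverseIterator;

public class ReverseIteratorExample {
	public static void main(String[] args) {
		List<String> revs = Arrays.asList("r0", "r1", "r2");
		// reversed() wraps the list lazily, no copy is made
		for (String r : ReverseIterator.reversed(revs)) {
			System.out.println(r); // prints r2, r1, r0
		}
	}
}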
--- a/src/org/tmatesoft/hg/repo/HgBlameFacility.java	Thu May 02 19:23:35 2013 +0200
+++ b/src/org/tmatesoft/hg/repo/HgBlameFacility.java	Thu May 02 19:23:53 2013 +0200
@@ -16,15 +16,10 @@
  */
 package org.tmatesoft.hg.repo;
 
-import static org.tmatesoft.hg.core.HgIterateDirection.NewToOld;
 import static org.tmatesoft.hg.core.HgIterateDirection.OldToNew;
 import static org.tmatesoft.hg.repo.HgInternals.wrongRevisionIndex;
-import static org.tmatesoft.hg.repo.HgRepository.*;
-
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.LinkedList;
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
+import static org.tmatesoft.hg.repo.HgRepository.TIP;
 
 import org.tmatesoft.hg.core.HgCallbackTargetException;
 import org.tmatesoft.hg.core.HgIterateDirection;
@@ -32,7 +27,8 @@
 import org.tmatesoft.hg.internal.BlameHelper;
 import org.tmatesoft.hg.internal.Callback;
 import org.tmatesoft.hg.internal.Experimental;
-import org.tmatesoft.hg.internal.IntVector;
+import org.tmatesoft.hg.internal.FileHistory;
+import org.tmatesoft.hg.internal.FileRevisionHistoryChunk;
 import org.tmatesoft.hg.util.Adaptable;
 
 /**
@@ -89,72 +85,23 @@
 		if (!df.exists()) {
 			return;
 		}
+		FileHistory fileHistory = new FileHistory(df, changelogRevIndexStart, changelogRevIndexEnd);
+		fileHistory.build();
 		BlameHelper bh = new BlameHelper(insp, 10);
-		HgDataFile currentFile = df;
-		int fileLastClogRevIndex = changelogRevIndexEnd;
-		FileRevisionHistoryChunk nextChunk = null;
-		LinkedList<FileRevisionHistoryChunk> fileCompleteHistory = new LinkedList<FileRevisionHistoryChunk>();
-		do {
-			FileRevisionHistoryChunk fileHistory = new FileRevisionHistoryChunk(currentFile);
-			fileHistory.init(fileLastClogRevIndex);
-			fileHistory.linkTo(nextChunk);
-			fileCompleteHistory.addFirst(fileHistory); // to get the list in old-to-new order
-			nextChunk = fileHistory;
-			bh.useFileUpTo(currentFile, fileLastClogRevIndex);
-			if (fileHistory.changeset(0) > changelogRevIndexStart && currentFile.isCopy()) {
-				// fileHistory.changeset(0) is the earliest revision we know about so far,
-				// once we get to revisions earlier than the requested start, stop digging.
-				// The reason there's NO == (i.e. not >=) because:
-				// (easy): once it's equal, we've reached our intended start
-				// (hard): if changelogRevIndexStart happens to be exact start of one of renames in the 
-				// chain of renames (test-annotate2 repository, file1->file1a->file1b, i.e. points 
-				// to the very start of file1a or file1 history), presence of == would get us to the next 
-				// chunk and hence changed parents of present chunk's first element. Our annotate alg 
-				// relies on parents only (i.e. knows nothing about 'last iteration element') to find out 
-				// what to compare, and hence won't report all lines of 'last iteration element' (which is the
-				// first revision of the renamed file) as "added in this revision", leaving gaps in annotate
-				HgRepository repo = currentFile.getRepo();
-				Nodeid originLastRev = currentFile.getCopySourceRevision();
-				currentFile = repo.getFileNode(currentFile.getCopySourceName());
-				fileLastClogRevIndex = currentFile.getChangesetRevisionIndex(currentFile.getRevisionIndex(originLastRev));
-				// XXX perhaps, shall fail with meaningful exception if new file doesn't exist (.i/.d not found for whatever reason)
-				// or source revision is missing?
-			} else {
-				fileHistory.chopAtChangeset(changelogRevIndexStart);
-				currentFile = null; // stop iterating
-			}
-		} while (currentFile != null && fileLastClogRevIndex > changelogRevIndexStart);
-		// fileCompleteHistory is in (origin, intermediate target, ultimate target) order
-		
+		for (FileRevisionHistoryChunk fhc : fileHistory.iterate(OldToNew)) {
+			// iteration order is not important here
+			bh.useFileUpTo(fhc.getFile(), fhc.getEndChangeset());
+		}
 		int[] fileClogParentRevs = new int[2];
 		int[] fileParentRevs = new int[2];
-		if (iterateOrder == NewToOld) {
-			Collections.reverse(fileCompleteHistory);
-		}
-		boolean shallFilterStart = changelogRevIndexStart != 0; // no reason if complete history is walked
-		for (FileRevisionHistoryChunk fileHistory : fileCompleteHistory) {
-			for (int fri : fileHistory.fileRevisions(iterateOrder)) {
-				int clogRevIndex = fileHistory.changeset(fri);
-				if (shallFilterStart) {
-					if (iterateOrder == NewToOld) {
-						// clogRevIndex decreases
-						if (clogRevIndex < changelogRevIndexStart) {
-							break;
-						}
-						// fall-through, clogRevIndex is in the [start..end] range
-					} else { // old to new
-						// the way we built fileHistory ensures we won't walk past changelogRevIndexEnd
-						// here we ensure we start from the right one, the one indicated with changelogRevIndexStart
-						if (clogRevIndex < changelogRevIndexStart) {
-							continue;
-						} else {
-							shallFilterStart = false; // once boundary is crossed, no need to check
-							// fall-through
-						}
-					}
-				}
-				fileHistory.fillFileParents(fri, fileParentRevs);
-				fileHistory.fillCsetParents(fri, fileClogParentRevs);
+		for (FileRevisionHistoryChunk fhc : fileHistory.iterate(iterateOrder)) {
+			for (int fri : fhc.fileRevisions(iterateOrder)) {
+				int clogRevIndex = fhc.changeset(fri);
+				// the way we built fileHistory ensures we won't walk past [changelogRevIndexStart..changelogRevIndexEnd]
+				assert clogRevIndex >= changelogRevIndexStart;
+				assert clogRevIndex <= changelogRevIndexEnd;
+				fhc.fillFileParents(fri, fileParentRevs);
+				fhc.fillCsetParents(fri, fileClogParentRevs);
 				bh.annotateChange(fri, clogRevIndex, fileParentRevs, fileClogParentRevs);
 			}
 		}
@@ -198,13 +145,6 @@
 	}
 
 	/**
-	 * No need to keep "Block" prefix as long as there's only one {@link Inspector} 
-	 */
-	@Deprecated
-	public interface BlockInspector extends Inspector {
-	}
-	
-	/**
 	 * Represents content of a block, either as a sequence of bytes or a 
 	 * sequence of smaller blocks (lines), if appropriate (according to usage context). 
 	 *
@@ -353,135 +293,4 @@
 		Nodeid fileRev = df.getRepo().getManifest().getFileRevision(csetRevIndex, df.getPath());
 		return df.getRevisionIndex(fileRev);
 	}
-
-	private static class FileRevisionHistoryChunk {
-		private final HgDataFile df;
-		// change ancestry, sequence of file revisions
-		private IntVector fileRevsToVisit;
-		// parent pairs of complete file history
-		private IntVector fileParentRevs;
-		// map file revision to changelog revision (sparse array, only file revisions to visit are set)
-		private int[] file2changelog;
-		private int originChangelogRev = BAD_REVISION, originFileRev = BAD_REVISION;
-		
-		public FileRevisionHistoryChunk(HgDataFile file) {
-			df = file;
-		}
-		
-		public void init(int changelogRevisionIndex) {
-			// XXX df.indexWalk(0, fileRevIndex, ) might be more effective
-			int fileRevIndex = fileRevIndex(df, changelogRevisionIndex);
-			int[] fileRevParents = new int[2];
-			fileParentRevs = new IntVector((fileRevIndex+1) * 2, 0);
-			fileParentRevs.add(NO_REVISION, NO_REVISION); // parents of fileRevIndex == 0
-			for (int i = 1; i <= fileRevIndex; i++) {
-				df.parents(i, fileRevParents, null, null);
-				fileParentRevs.add(fileRevParents[0], fileRevParents[1]);
-			}
-			// fileRevsToVisit keep file change ancestry from new to old
-			fileRevsToVisit = new IntVector(fileRevIndex + 1, 0);
-			// keep map of file revision to changelog revision
-			file2changelog = new int[fileRevIndex+1];
-			// only elements worth visit would get mapped, so there would be unfilled areas in the file2changelog,
-			// prevent from error (make it explicit) by bad value
-			Arrays.fill(file2changelog, BAD_REVISION);
-			LinkedList<Integer> queue = new LinkedList<Integer>();
-			BitSet seen = new BitSet(fileRevIndex + 1);
-			queue.add(fileRevIndex);
-			do {
-				int x = queue.removeFirst();
-				if (seen.get(x)) {
-					continue;
-				}
-				seen.set(x);
-				fileRevsToVisit.add(x);
-				file2changelog[x] = df.getChangesetRevisionIndex(x);
-				int p1 = fileParentRevs.get(2*x);
-				int p2 = fileParentRevs.get(2*x + 1);
-				if (p1 != NO_REVISION) {
-					queue.addLast(p1);
-				}
-				if (p2 != NO_REVISION) {
-					queue.addLast(p2);
-				}
-			} while (!queue.isEmpty());
-			// make sure no child is processed before we handled all (grand-)parents of the element
-			fileRevsToVisit.sort(false);
-		}
-		
-		public void linkTo(FileRevisionHistoryChunk target) {
-			// assume that target.init() has been called already 
-			if (target == null) {
-				return;
-			}
-			target.originFileRev = fileRevsToVisit.get(0); // files to visit are new to old
-			target.originChangelogRev = changeset(target.originFileRev);
-		}
-		
-		/**
-		 * Mark revision closest(ceil) to specified as the very first one (no parents)
-		 */
-		public void chopAtChangeset(int firstChangelogRevOfInterest) {
-			if (firstChangelogRevOfInterest == 0) {
-				return; // nothing to do
-			}
-			int i = 0, x = fileRevsToVisit.size(), fileRev = BAD_REVISION;
-			// fileRevsToVisit is new to old, greater numbers to smaller
-			while (i < x && changeset(fileRev = fileRevsToVisit.get(i)) >= firstChangelogRevOfInterest) {
-				i++;
-			}
-			assert fileRev != BAD_REVISION; // there's at least 1 revision in fileRevsToVisit
-			if (i == x && changeset(fileRev) != firstChangelogRevOfInterest) {
-				assert false : "Requested changeset shall belong to the chunk";
-				return;
-			}
-			fileRevsToVisit.trimTo(i); // no need to iterate more
-			// pretend fileRev got no parents
-			fileParentRevs.set(fileRev * 2, NO_REVISION);
-			fileParentRevs.set(fileRev, NO_REVISION);
-		}
-		
-		public int[] fileRevisions(HgIterateDirection iterateOrder) {
-			// fileRevsToVisit is { r10, r7, r6, r5, r0 }, new to old
-			int[] rv = fileRevsToVisit.toArray();
-			if (iterateOrder == OldToNew) {
-				// reverse return value
-				for (int a = 0, b = rv.length-1; a < b; a++, b--) {
-					int t = rv[b];
-					rv[b] = rv[a];
-					rv[a] = t;
-				}
-			}
-			return rv;
-		}
-		
-		public int changeset(int fileRevIndex) {
-			return file2changelog[fileRevIndex];
-		}
-		
-		public void fillFileParents(int fileRevIndex, int[] fileParents) {
-			if (fileRevIndex == 0 && originFileRev != BAD_REVISION) {
-				// this chunk continues another file
-				assert originFileRev != NO_REVISION;
-				fileParents[0] = originFileRev;
-				fileParents[1] = NO_REVISION;
-				return;
-			}
-			fileParents[0] = fileParentRevs.get(fileRevIndex * 2);
-			fileParents[1] = fileParentRevs.get(fileRevIndex * 2 + 1);
-		}
-		
-		public void fillCsetParents(int fileRevIndex, int[] csetParents) {
-			if (fileRevIndex == 0 && originFileRev != BAD_REVISION) {
-				assert originFileRev != NO_REVISION;
-				csetParents[0] = originChangelogRev;
-				csetParents[1] = NO_REVISION; // I wonder if possible to start a copy with two parents?
-				return;
-			}
-			int fp1 = fileParentRevs.get(fileRevIndex * 2);
-			int fp2 = fileParentRevs.get(fileRevIndex * 2 + 1);
-			csetParents[0] = fp1 == NO_REVISION ? NO_REVISION : changeset(fp1);
-			csetParents[1] = fp2 == NO_REVISION ? NO_REVISION : changeset(fp2);
-		}
-	}
 }
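Summary (reviewer note, not part of the changeset): with FileHistory prebuilt, the annotate body reduces to two passes, first registering every chunk's file node with BlameHelper (order irrelevant), then walking revisions in the caller's iterateOrder. A condensed paraphrase of the new code path; the enclosing method signature and its parameter names are assumed from the diff context above, the calls themselves are those visible in the diff:

// Paraphrase only, to show the shape of the refactored body.
FileHistory fileHistory = new FileHistory(df, changelogRevIndexStart, changelogRevIndexEnd);
fileHistory.build();
BlameHelper bh = new BlameHelper(insp, 10);
for (FileRevisionHistoryChunk fhc : fileHistory.iterate(OldToNew)) {
	bh.useFileUpTo(fhc.getFile(), fhc.getEndChangeset()); // register each path the file was known under
}
int[] fileParentRevs = new int[2], fileClogParentRevs = new int[2];
for (FileRevisionHistoryChunk fhc : fileHistory.iterate(iterateOrder)) {
	for (int fri : fhc.fileRevisions(iterateOrder)) {
		fhc.fillFileParents(fri, fileParentRevs);
		fhc.fillCsetParents(fri, fileClogParentRevs);
		bh.annotateChange(fri, fhc.changeset(fri), fileParentRevs, fileClogParentRevs);
	}
}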