Mercurial > hg4j
diff src/org/tmatesoft/hg/internal/RevlogStreamWriter.java @ 660:4fd317a2fecf
Pull: phase1 get remote changes and add local revisions
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
|---|---|
| date | Tue, 09 Jul 2013 21:46:45 +0200 |
| parents | 14dac192aa26 |
| children | 46b56864b483 |
line wrap: on
line diff
--- a/src/org/tmatesoft/hg/internal/RevlogStreamWriter.java Thu Jul 04 21:09:33 2013 +0200 +++ b/src/org/tmatesoft/hg/internal/RevlogStreamWriter.java Tue Jul 09 21:46:45 2013 +0200 @@ -25,13 +25,16 @@ import org.tmatesoft.hg.core.HgIOException; import org.tmatesoft.hg.core.Nodeid; import org.tmatesoft.hg.core.SessionContext; +import org.tmatesoft.hg.internal.DataSerializer.ByteArrayDataSource; import org.tmatesoft.hg.internal.DataSerializer.ByteArraySerializer; -import org.tmatesoft.hg.internal.DataSerializer.ByteArrayDataSource; import org.tmatesoft.hg.internal.DataSerializer.DataSource; +import org.tmatesoft.hg.repo.HgBundle.GroupElement; import org.tmatesoft.hg.repo.HgInvalidControlFileException; import org.tmatesoft.hg.repo.HgInvalidRevisionException; import org.tmatesoft.hg.repo.HgInvalidStateException; +import org.tmatesoft.hg.repo.HgRepository; import org.tmatesoft.hg.repo.HgRuntimeException; +import org.tmatesoft.hg.util.Pair; /** * @@ -45,8 +48,10 @@ private final DigestHelper dh = new DigestHelper(); private final RevlogCompressor revlogDataZip; private final Transaction transaction; - private int lastEntryBase, lastEntryIndex; - private byte[] lastEntryContent; + private int lastEntryBase, lastEntryIndex, lastEntryActualLen; + // record revision and its full content + // the name might be misleading, it does not necessarily match lastEntryIndex + private Pair<Integer, byte[]> lastFullContent; private Nodeid lastEntryRevision; private IntMap<Nodeid> revisionCache = new IntMap<Nodeid>(32); private RevlogStream revlogStream; @@ -61,22 +66,98 @@ transaction = tr; } + public Pair<Integer,Nodeid> addPatchRevision(GroupElement ge, RevisionToIndexMap clogRevs, RevisionToIndexMap revlogRevs) throws HgIOException, HgRuntimeException { + populateLastEntryIndex(); + // + final Nodeid nodeRev = ge.node(); + final Nodeid csetRev = ge.cset(); + int linkRev; + if (nodeRev.equals(csetRev)) { + linkRev = lastEntryIndex+1; + } else { + linkRev = 
clogRevs.revisionIndex(csetRev); + } + assert linkRev >= 0; + final Nodeid p1Rev = ge.firstParent(); + int p1 = p1Rev.isNull() ? NO_REVISION : revlogRevs.revisionIndex(p1Rev); + final Nodeid p2Rev = ge.secondParent(); + int p2 = p2Rev.isNull() ? NO_REVISION : revlogRevs.revisionIndex(p2Rev); + Patch p = new Patch(); + final byte[] patchBytes; + try { + // XXX there's ge.rawData(), to avoid extra array wrap + patchBytes = ge.rawDataByteArray(); + p.read(new ByteArrayDataAccess(patchBytes)); + } catch (IOException ex) { + throw new HgIOException("Failed to read patch information", ex, null); + } + // + final Nodeid patchBase = ge.patchBase(); + int patchBaseRev = patchBase.isNull() ? NO_REVISION : revlogRevs.revisionIndex(patchBase); + int baseRev = lastEntryIndex == NO_REVISION ? 0 : revlogStream.baseRevision(patchBaseRev); + int revLen; + DataSource ds; + byte[] complete = null; + if (patchBaseRev == lastEntryIndex && lastEntryIndex != NO_REVISION) { + // we may write patch from GroupElement as is + int patchBaseLen = dataLength(patchBaseRev); + revLen = patchBaseLen + p.patchSizeDelta(); + ds = new ByteArrayDataSource(patchBytes); + } else { + // read baseRev, unless it's the pull to empty repository + try { + if (lastEntryIndex == NO_REVISION) { + complete = p.apply(new ByteArrayDataAccess(new byte[0]), -1); + baseRev = 0; // it's done above, but doesn't hurt + } else { + ReadContentInspector insp = new ReadContentInspector().read(revlogStream, baseRev); + complete = p.apply(new ByteArrayDataAccess(insp.content), -1); + baseRev = lastEntryIndex + 1; + } + ds = new ByteArrayDataSource(complete); + revLen = complete.length; + } catch (IOException ex) { + // unlikely to happen, as ByteArrayDataSource doesn't throw IOException + throw new HgIOException("Failed to reconstruct revision", ex, null); + } + } + doAdd(nodeRev, p1, p2, linkRev, baseRev, revLen, ds); + if (complete != null) { + lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, complete); + } + 
return new Pair<Integer, Nodeid>(lastEntryIndex, lastEntryRevision); + } + /** * @return nodeid of added revision * @throws HgRuntimeException */ - public Nodeid addRevision(DataSource content, int linkRevision, int p1, int p2) throws HgIOException, HgRuntimeException { - lastEntryRevision = Nodeid.NULL; - int revCount = revlogStream.revisionCount(); - lastEntryIndex = revCount == 0 ? NO_REVISION : revCount - 1; - populateLastEntry(); + public Pair<Integer,Nodeid> addRevision(DataSource content, int linkRevision, int p1, int p2) throws HgIOException, HgRuntimeException { + populateLastEntryIndex(); + populateLastEntryContent(); // byte[] contentByteArray = toByteArray(content); - Patch patch = GeneratePatchInspector.delta(lastEntryContent, contentByteArray); + Patch patch = GeneratePatchInspector.delta(lastFullContent.second(), contentByteArray); int patchSerializedLength = patch.serializedLength(); final boolean writeComplete = preferCompleteOverPatch(patchSerializedLength, contentByteArray.length); DataSerializer.DataSource dataSource = writeComplete ? new ByteArrayDataSource(contentByteArray) : patch.new PatchDataSource(); + // + Nodeid p1Rev = revision(p1); + Nodeid p2Rev = revision(p2); + Nodeid newRev = Nodeid.fromBinary(dh.sha1(p1Rev, p2Rev, contentByteArray).asBinary(), 0); + doAdd(newRev, p1, p2, linkRevision, writeComplete ? 
lastEntryIndex+1 : lastEntryBase, contentByteArray.length, dataSource); + lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, contentByteArray); + return new Pair<Integer, Nodeid>(lastEntryIndex, lastEntryRevision); + } + + private Nodeid doAdd(Nodeid rev, int p1, int p2, int linkRevision, int baseRevision, int revLen, DataSerializer.DataSource dataSource) throws HgIOException, HgRuntimeException { + assert linkRevision >= 0; + assert baseRevision >= 0; + assert p1 == NO_REVISION || p1 >= 0; + assert p2 == NO_REVISION || p2 >= 0; + assert !rev.isNull(); + assert revLen >= 0; revlogDataZip.reset(dataSource); final int compressedLen; final boolean useCompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), dataSource.serializeLength()); @@ -87,11 +168,6 @@ compressedLen = dataSource.serializeLength() + 1 /*1 byte for 'u' - uncompressed prefix byte*/; } // - Nodeid p1Rev = revision(p1); - Nodeid p2Rev = revision(p2); - byte[] revisionNodeidBytes = dh.sha1(p1Rev, p2Rev, contentByteArray).asBinary(); - // - DataSerializer indexFile, dataFile; indexFile = dataFile = null; try { @@ -99,11 +175,11 @@ indexFile = revlogStream.getIndexStreamWriter(transaction); final boolean isInlineData = revlogStream.isInlineData(); HeaderWriter revlogHeader = new HeaderWriter(isInlineData); - revlogHeader.length(contentByteArray.length, compressedLen); - revlogHeader.nodeid(revisionNodeidBytes); + revlogHeader.length(revLen, compressedLen); + revlogHeader.nodeid(rev.toByteArray()); revlogHeader.linkRevision(linkRevision); revlogHeader.parents(p1, p2); - revlogHeader.baseRevision(writeComplete ? 
lastEntryIndex+1 : lastEntryBase); + revlogHeader.baseRevision(baseRevision); long lastEntryOffset = revlogStream.newEntryOffset(); revlogHeader.offset(lastEntryOffset); // @@ -124,11 +200,10 @@ dataSource.serialize(dataFile); } - - lastEntryContent = contentByteArray; lastEntryBase = revlogHeader.baseRevision(); lastEntryIndex++; - lastEntryRevision = Nodeid.fromBinary(revisionNodeidBytes, 0); + lastEntryActualLen = revLen; + lastEntryRevision = rev; revisionCache.put(lastEntryIndex, lastEntryRevision); revlogStream.revisionAdded(lastEntryIndex, lastEntryRevision, lastEntryBase, lastEntryOffset); @@ -159,32 +234,38 @@ return n; } - private void populateLastEntry() throws HgRuntimeException { - if (lastEntryContent != null) { + private int dataLength(int revisionIndex) throws HgInvalidControlFileException, HgInvalidRevisionException { + assert revisionIndex >= 0; + if (revisionIndex == lastEntryIndex) { + return lastEntryActualLen; + } + if (lastFullContent != null && lastFullContent.first() == revisionIndex) { + return lastFullContent.second().length; + } + return revlogStream.dataLength(revisionIndex); + } + + private void populateLastEntryIndex() throws HgRuntimeException { + int revCount = revlogStream.revisionCount(); + lastEntryIndex = revCount == 0 ? 
NO_REVISION : revCount - 1; + } + + private void populateLastEntryContent() throws HgRuntimeException { + if (lastFullContent != null && lastFullContent.first() == lastEntryIndex) { + // we have last entry cached return; } + lastEntryRevision = Nodeid.NULL; if (lastEntryIndex != NO_REVISION) { - assert lastEntryIndex >= 0; - final IOException[] failure = new IOException[1]; - revlogStream.iterate(lastEntryIndex, lastEntryIndex, true, new RevlogStream.Inspector() { - - public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) { - try { - lastEntryBase = baseRevision; - lastEntryRevision = Nodeid.fromBinary(nodeid, 0); - lastEntryContent = data.byteArray(); - } catch (IOException ex) { - failure[0] = ex; - } - } - }); - if (failure[0] != null) { - String m = String.format("Failed to get content of most recent revision %d", lastEntryIndex); - throw revlogStream.initWithDataFile(new HgInvalidControlFileException(m, failure[0], null)); - } + ReadContentInspector insp = new ReadContentInspector().read(revlogStream, lastEntryIndex); + lastEntryBase = insp.baseRev; + lastEntryRevision = insp.rev; + lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, insp.content); } else { - lastEntryContent = new byte[0]; + lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, new byte[0]); } + assert lastFullContent.first() == lastEntryIndex; + assert lastFullContent.second() != null; } public static boolean preferCompleteOverPatch(int patchLength, int fullContentLength) { @@ -290,4 +371,40 @@ return header.capacity(); } } -} + + // XXX part of HgRevisionMap contract, need public counterparts (along with IndexToRevisionMap) + public interface RevisionToIndexMap { + + /** + * @return {@link HgRepository#NO_REVISION} if unknown revision + */ + int revisionIndex(Nodeid revision); + } + + private static class ReadContentInspector implements RevlogStream.Inspector { + public 
int baseRev; + public Nodeid rev; + public byte[] content; + private IOException failure; + + public ReadContentInspector read(RevlogStream rs, int revIndex) throws HgInvalidControlFileException { + assert revIndex >= 0; + rs.iterate(revIndex, revIndex, true, this); + if (failure != null) { + String m = String.format("Failed to get content of revision %d", revIndex); + throw rs.initWithDataFile(new HgInvalidControlFileException(m, failure, null)); + } + return this; + } + + public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) { + try { + baseRev = baseRevision; + rev = Nodeid.fromBinary(nodeid, 0); + content = data.byteArray(); + } catch (IOException ex) { + failure = ex; + } + } + } +} \ No newline at end of file