changeset 532:688c1ab113bb
Introduce an explicit reference to the base patch in the bundle's group element, and use it when cloning to fix a defect where a few revisions list null,null parents
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
| --- | --- |
| date | Wed, 23 Jan 2013 19:14:15 +0100 |
| parents | 95c2f43008bd |
| children | e6f72c9829a6 |
| files | src/org/tmatesoft/hg/core/HgCloneCommand.java, src/org/tmatesoft/hg/internal/Patch.java, src/org/tmatesoft/hg/internal/RevlogStreamWriter.java, src/org/tmatesoft/hg/repo/HgBundle.java |
| diffstat | 4 files changed, 78 insertions(+), 48 deletions(-) |
--- a/src/org/tmatesoft/hg/core/HgCloneCommand.java	Wed Jan 23 18:03:13 2013 +0100
+++ b/src/org/tmatesoft/hg/core/HgCloneCommand.java	Wed Jan 23 19:14:15 2013 +0100
@@ -147,7 +147,8 @@
 	private DataAccess prevRevContent;
 	private final DigestHelper dh = new DigestHelper();
-	private final ArrayList<Nodeid> revisionSequence = new ArrayList<Nodeid>(); // last visited nodes first
+	// recently processed nodes last, so that index in the array may be used as a linkRevision or baseRevision
+	private final ArrayList<Nodeid> revisionSequence = new ArrayList<Nodeid>();
 	private final LinkedList<String> fncacheFiles = new LinkedList<String>();
 	private RepoInitializer repoInit;
@@ -183,23 +184,18 @@
 			indexFile = new FileOutputStream(new File(hgDir, filename = "store/00changelog.i"));
 			collectChangelogIndexes = true;
 		} catch (IOException ex) {
-			throw new HgInvalidControlFileException("Failed to write changelog", ex, new File(filename));
+			throw new HgInvalidControlFileException("Failed to write changelog", ex, new File(hgDir, filename));
 		}
 		stopIfCancelled();
 	}
 
 	public void changelogEnd() {
 		try {
-			if (prevRevContent != null) {
-				prevRevContent.done();
-				prevRevContent = null;
-			}
+			clearPreviousContent();
 			collectChangelogIndexes = false;
-			indexFile.close();
-			indexFile = null;
-			filename = null;
+			closeIndexFile();
 		} catch (IOException ex) {
-			throw new HgInvalidControlFileException("Failed to write changelog", ex, new File(filename));
+			throw new HgInvalidControlFileException("Failed to write changelog", ex, new File(hgDir, filename));
 		}
 		progressSupport.worked(1);
 		stopIfCancelled();
@@ -211,22 +207,17 @@
 			revisionSequence.clear();
 			indexFile = new FileOutputStream(new File(hgDir, filename = "store/00manifest.i"));
 		} catch (IOException ex) {
-			throw new HgInvalidControlFileException("Failed to write manifest", ex, new File(filename));
+			throw new HgInvalidControlFileException("Failed to write manifest", ex, new File(hgDir, filename));
 		}
 		stopIfCancelled();
 	}
 
 	public void manifestEnd() {
 		try {
-			if (prevRevContent != null) {
-				prevRevContent.done();
-				prevRevContent = null;
-			}
-			indexFile.close();
-			indexFile = null;
-			filename = null;
+			clearPreviousContent();
+			closeIndexFile();
 		} catch (IOException ex) {
-			throw new HgInvalidControlFileException("Failed to write changelog", ex, new File(filename));
+			throw new HgInvalidControlFileException("Failed to write manifest", ex, new File(hgDir, filename));
 		}
 		progressSupport.worked(1);
 		stopIfCancelled();
@@ -250,13 +241,8 @@
 
 	public void fileEnd(String name) {
 		try {
-			if (prevRevContent != null) {
-				prevRevContent.done();
-				prevRevContent = null;
-			}
-			indexFile.close();
-			indexFile = null;
-			filename = null;
+			clearPreviousContent();
+			closeIndexFile();
 		} catch (IOException ex) {
 			String m = String.format("Failed to write file %s", filename);
 			throw new HgInvalidControlFileException(m, ex, new File(filename));
@@ -264,6 +250,19 @@
 		progressSupport.worked(1);
 		stopIfCancelled();
 	}
+
+	private void clearPreviousContent() {
+		if (prevRevContent != null) {
+			prevRevContent.done();
+			prevRevContent = null;
+		}
+	}
+
+	private void closeIndexFile() throws IOException {
+		indexFile.close();
+		indexFile = null;
+		filename = null;
+	}
 
 	private int knownRevision(Nodeid p) {
 		if (p.isNull()) {
@@ -276,7 +275,7 @@
 			}
 		}
 		String m = String.format("Can't find index of %s for file %s", p.shortNotation(), filename);
-		throw new HgInvalidControlFileException(m, null, null).setRevision(p);
+		throw new HgInvalidControlFileException(m, null, new File(hgDir, filename)).setRevision(p);
 	}
 
 	private RevlogStreamWriter.HeaderWriter revlogHeader = new RevlogStreamWriter.HeaderWriter(true);
@@ -286,15 +285,29 @@
 		try {
 			assert indexFile != null;
 			boolean writeComplete = false;
+			Nodeid deltaBase = ge.patchBase();
+			if (deltaBase.isNull()) {
+				// NOTE, can't use both parents isNull == true to empty prevRevContent
+				// see build.gradle sample below why.
+				prevRevContent = new DataAccess(); // empty data
+				writeComplete = true;
+				// if (writeComplete) would set baseRevision correctly,
+			} else {
+				Nodeid prevRevision = revisionSequence.size() > 0 ? revisionSequence.get(revisionSequence.size()-1) : Nodeid.NULL;
+				if (!prevRevision.equals(deltaBase)) {
+					// presently, bundle group elements always patch previous, see
+					// (a) changegroup.py#builddeltaheader(): # do nothing with basenode, it is implicitly the previous one in HG10
+					// (b) revlog.py#group(): prev, curr = revs[r], revs[r + 1]
+					//     for c in bundler.revchunk(self, curr, prev):
+					// so there's no reason to have code here to extract contents of deltaBase revision
+					String m = String.format("Revision %s import failed: delta base %s is not the last node we've handled (and know content for) %s", ge.node(), deltaBase, prevRevision);
+					throw new HgInvalidStateException(m);
+				}
+			}
+			//
+			byte[] content = ge.apply(prevRevContent.byteArray());
 			Nodeid p1 = ge.firstParent();
 			Nodeid p2 = ge.secondParent();
-			if (p1.isNull() && p2.isNull() /* or forced flag, does REVIDX_PUNCHED_FLAG indicate that? */) {
-				// FIXME NOTE, both parents isNull == true doesn't necessarily mean
-				// empty prevContent, see build.gradle sample below
-				prevRevContent = new ByteArrayDataAccess(new byte[0]);
-				writeComplete = true;
-			}
-			byte[] content = ge.apply(prevRevContent.byteArray());
 			byte[] calculated = dh.sha1(p1, p2, content).asBinary();
 			final Nodeid node = ge.node();
 			if (!node.equalsTo(calculated)) {
@@ -302,6 +315,7 @@
 				throw new HgRevisionIntegrityException(m, null, new File(hgDir, filename));
 			}
 			revlogHeader.nodeid(node);
+			//
 			if (collectChangelogIndexes) {
 				changelogIndexes.put(node, revisionSequence.size());
 				revlogHeader.linkRevision(revisionSequence.size());
@@ -312,12 +326,19 @@
 				}
 				revlogHeader.linkRevision(csRev.intValue());
 			}
+			//
 			revlogHeader.parents(knownRevision(p1), knownRevision(p2));
+			//
 			byte[] patchContent = ge.rawDataByteArray();
+			// no reason to keep patch if it's close (here, >75%) in size to the complete contents,
+			// save patching effort in this case
 			writeComplete = writeComplete || patchContent.length >= (/* 3/4 of actual */content.length - (content.length >>> 2));
+
 			if (writeComplete) {
 				revlogHeader.baseRevision(revisionSequence.size());
 			}
+			assert revlogHeader.baseRevision() >= 0;
+
 			final byte[] sourceData = writeComplete ? content : patchContent;
 			revlogDataZip.reset(sourceData);
 			final int compressedLen;
@@ -378,12 +399,7 @@
 a3f374fbf33aba1cc3b4f472db022b5185880f5d 3ddd456244a08f81779163d9faf922a6dcd9e53e 0000000000000000000000000000000000000000 3ca4ae7bdd3890b8ed89bfea1b42af593e04b373 3ddd456244a08f81779163d9faf922a6dcd9e53e 195
 0227d28e0db69afebee34cd5a4151889fb6271da a3f374fbf33aba1cc3b4f472db022b5185880f5d 0000000000000000000000000000000000000000 31bd09da0dcfe48e1fc662143f91ff402238aa84 a3f374fbf33aba1cc3b4f472db022b5185880f5d 145
-but there's no delta base information in the bundle file, it's merely a hard-coded convention (always patches previous version, see
-(a) changegroup.py#builddeltaheader(): # do nothing with basenode, it is implicitly the previous one in HG10
-(b) revlog.py#group(): prev, curr = revs[r], revs[r + 1]
-      for c in bundler.revchunk(self, curr, prev):
-)
-
+but there's no delta base information in the bundle file, it's merely a hard-coded convention
 
 It's unclear where the first chunk (identified 62a101b7...) comes from (by the way, there's no such changeset as 6ec4af... as specified in the chunk, while 7dcc920e.. IS changeset 454)
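An aside on the `writeComplete` arithmetic in the hunk above: `content.length - (content.length >>> 2)` is the full length minus a quarter of it, i.e. roughly 75% of the complete content, computed with integer math only. Below is a minimal stand-alone sketch of that heuristic; the wrapper class and method names are illustrative and not part of hg4j.

```java
/**
 * Sketch of the store-full-revision heuristic from the hunk above: once a patch
 * grows to three quarters of the complete content or more, writing the full
 * content is preferred over storing the patch.
 */
final class PatchSizeHeuristic {
	static boolean storeFullRevision(int patchLen, int contentLen) {
		// contentLen - contentLen/4 == ~75% of contentLen, no floating point involved
		return patchLen >= contentLen - (contentLen >>> 2);
	}

	public static void main(String[] args) {
		// e.g. for a 1000-byte revision the threshold is 750 bytes:
		System.out.println(storeFullRevision(749, 1000)); // false -> keep the patch
		System.out.println(storeFullRevision(750, 1000)); // true  -> write complete content
	}
}
```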
--- a/src/org/tmatesoft/hg/internal/Patch.java	Wed Jan 23 18:03:13 2013 +0100
+++ b/src/org/tmatesoft/hg/internal/Patch.java	Wed Jan 23 19:14:15 2013 +0100
@@ -114,11 +114,7 @@
 			destIndex += start - prevEnd;
 			// insert new data from the patch, if any
 			byte[] d = data.get(i);
-			try {
-				System.arraycopy(d, 0, rv, destIndex, d.length);
-			} catch (ArrayIndexOutOfBoundsException ex) {
-				ex.printStackTrace();
-			}
+			System.arraycopy(d, 0, rv, destIndex, d.length);
 			destIndex += d.length;
 			prevEnd = ends.get(i);
 		}
--- a/src/org/tmatesoft/hg/internal/RevlogStreamWriter.java	Wed Jan 23 18:03:13 2013 +0100
+++ b/src/org/tmatesoft/hg/internal/RevlogStreamWriter.java	Wed Jan 23 19:14:15 2013 +0100
@@ -26,6 +26,8 @@
 
 /**
  *
+ * TODO check if index is too big and split into index+data
+ *
  * @author Artem Tikhomirov
 * @author TMate Software Ltd.
 */
@@ -50,6 +52,10 @@
 			return this;
 		}
 
+		public int baseRevision() {
+			return baseRev;
+		}
+
 		public HeaderWriter baseRevision(int baseRevision) {
 			this.baseRev = baseRevision;
 			return this;
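For context, the new `baseRevision()` getter pairs with the existing fluent setters and is what allows the `assert revlogHeader.baseRevision() >= 0` check in HgCloneCommand above. The sketch below only strings together the HeaderWriter calls visible in this changeset; the wrapper class, method and parameter names are assumptions made for illustration.

```java
import org.tmatesoft.hg.core.Nodeid;
import org.tmatesoft.hg.internal.RevlogStreamWriter;

// Illustrative only: mirrors how the clone code drives the revlog header builder.
final class HeaderWriterSample {
	static void fillHeader(Nodeid node, int linkRev, int p1Index, int p2Index, int baseIndex) {
		RevlogStreamWriter.HeaderWriter header = new RevlogStreamWriter.HeaderWriter(true);
		header.nodeid(node);                 // revision being written
		header.linkRevision(linkRev);        // index of the linked changeset
		header.parents(p1Index, p2Index);    // parent indexes, -1-style handling is up to the caller
		header.baseRevision(baseIndex);      // fluent setter (existing)
		assert header.baseRevision() >= 0;   // check made possible by the getter added here
	}
}
```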
--- a/src/org/tmatesoft/hg/repo/HgBundle.java	Wed Jan 23 18:03:13 2013 +0100
+++ b/src/org/tmatesoft/hg/repo/HgBundle.java	Wed Jan 23 19:14:15 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 TMate Software Ltd
+ * Copyright (c) 2011-2013 TMate Software Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -409,6 +409,7 @@
 	private static void readGroup(DataAccess da, Inspector inspector) throws IOException {
 		int len = da.readInt();
 		boolean good2go = true;
+		Nodeid prevNodeid = Nodeid.NULL;
 		while (len > 4 && !da.isEmpty() && good2go) {
 			byte[] nb = new byte[80];
 			da.readBytes(nb, 0, 80);
@@ -418,11 +419,12 @@
 			DataAccess slice = new ByteArrayDataAccess(data); // XXX in fact, may pass a slicing DataAccess.
 			// Just need to make sure that we seek to proper location afterwards (where next GroupElement starts),
 			// regardless whether that slice has read it or not.
-			GroupElement ge = new GroupElement(nb, slice);
+			GroupElement ge = new GroupElement(nb, prevNodeid, slice);
 			good2go = inspector.element(ge);
 			slice.done(); // BADA doesn't implement done(), but it could (e.g. free array)
 			/// and we'd better tell it we are not going to use it any more. However, it's important to ensure Inspector
 			// implementations out there do not retain GroupElement.rawData()
+			prevNodeid = ge.node();
 			len = da.isEmpty() ? 0 : da.readInt();
 		}
 		// need to skip up to group end if inspector told he don't want to continue with the group,
@@ -446,10 +448,12 @@
 		private final byte[] header; // byte[80] takes 120 bytes, 4 Nodeids - 192
 		private final DataAccess dataAccess;
 		private Patch patches;
+		private final Nodeid deltaBase;
 
-		GroupElement(byte[] fourNodeids, DataAccess rawDataAccess) {
+		GroupElement(byte[] fourNodeids, Nodeid deltaBaseRev, DataAccess rawDataAccess) {
 			assert fourNodeids != null && fourNodeids.length == 80;
 			header = fourNodeids;
+			deltaBase = deltaBaseRev;
 			dataAccess = rawDataAccess;
 		}
 
@@ -485,6 +489,14 @@
 			return Nodeid.fromBinary(header, 60);
 		}
 
+		/**
+		 * Revision this element keeps patches against. For the patches of the very first revision returns {@link Nodeid#NULL}.
+		 * @return revision of delta base, never <code>null</code>
+		 */
+		public Nodeid patchBase() {
+			return deltaBase;
+		}
+
 		public byte[] rawDataByteArray() throws IOException { // XXX IOException or HgInvalidFileException?
 			return rawData().byteArray();
 		}
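To illustrate how a consumer of this API can rely on the new `patchBase()`: the helper below is not part of hg4j (class and method names are ours); it expands group elements one by one under the HG10 convention that each element patches the previous one, using `patchBase()` to verify the chain before applying each delta, much like HgCloneCommand now does.

```java
import java.io.IOException;

import org.tmatesoft.hg.core.Nodeid;
import org.tmatesoft.hg.repo.HgBundle.GroupElement;

/**
 * Illustrative sketch: rebuilds complete revision contents from a sequence of
 * bundle group elements. patchBase(), node() and apply() are the accessors
 * visible in this changeset; everything else is assumed for the example.
 */
final class GroupChainReplayer {
	private Nodeid lastNode = Nodeid.NULL;     // node of the element expanded last
	private byte[] lastContent = new byte[0];  // its complete (already expanded) content

	byte[] next(GroupElement ge) throws IOException {
		Nodeid base = ge.patchBase();
		if (!base.isNull() && !base.equals(lastNode)) {
			// same condition the clone command now enforces
			throw new IllegalStateException("Element " + ge.node() + " patches " + base
					+ ", but the last expanded revision is " + lastNode);
		}
		// NULL base means the element's patch applies to empty content
		byte[] expanded = ge.apply(base.isNull() ? new byte[0] : lastContent);
		lastNode = ge.node();
		lastContent = expanded;
		return expanded;
	}
}
```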