changeset 584:ed243b668502
Conditionally enable effective patch merge alternative for revlog reading
author      Artem Tikhomirov <tikhomirov.artem@gmail.com>
date        Thu, 25 Apr 2013 16:08:17 +0200
parents     47dfa0ec7e35
children    b47ef0d2777b
files       src/org/tmatesoft/hg/core/HgAnnotateCommand.java
            src/org/tmatesoft/hg/internal/DataAccessProvider.java
            src/org/tmatesoft/hg/internal/InflaterDataAccess.java
            src/org/tmatesoft/hg/internal/Internals.java
            src/org/tmatesoft/hg/internal/Patch.java
            src/org/tmatesoft/hg/internal/RevlogStream.java
            test/org/tmatesoft/hg/test/TestRevlog.java
diffstat    7 files changed, 225 insertions(+), 120 deletions(-)
```diff
--- a/src/org/tmatesoft/hg/core/HgAnnotateCommand.java	Wed Apr 24 15:39:53 2013 +0200
+++ b/src/org/tmatesoft/hg/core/HgAnnotateCommand.java	Thu Apr 25 16:08:17 2013 +0200
@@ -116,6 +116,7 @@
 		c.throwIfCancelled();
 		cancellation.checkCancelled();
 		ProgressSupport.Sub subProgress = new ProgressSupport.Sub(progress, 1);
+		subProgress.start(c.lineRevisions.length);
 		LineImpl li = new LineImpl();
 		for (int i = 0; i < c.lineRevisions.length; i++) {
 			li.init(i+1, c.lineRevisions[i], c.line(i));
```
```diff
--- a/src/org/tmatesoft/hg/internal/DataAccessProvider.java	Wed Apr 24 15:39:53 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/DataAccessProvider.java	Thu Apr 25 16:08:17 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2012 TMate Software Ltd
+ * Copyright (c) 2010-2013 TMate Software Ltd
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -54,13 +54,16 @@
 	private final int mapioMagicBoundary;
 	private final int bufferSize, mapioBufSize;
 	private final SessionContext context;
-	
+	// not the right place for the property, but DAP is the only place currently available to RevlogStream to get the value
+	private final boolean shallMergePatches;
+	
 	public DataAccessProvider(SessionContext ctx) {
 		context = ctx;
 		PropertyMarshal pm = new PropertyMarshal(ctx);
 		mapioMagicBoundary = mapioBoundaryValue(pm.getInt(CFG_PROPERTY_MAPIO_LIMIT, DEFAULT_MAPIO_LIMIT));
 		bufferSize = pm.getInt(CFG_PROPERTY_FILE_BUFFER_SIZE, DEFAULT_FILE_BUFFER);
 		mapioBufSize = pm.getInt(CFG_PROPERTY_MAPIO_BUFFER_SIZE, DEFAULT_MAPIO_BUFFER);
+		shallMergePatches = pm.getBoolean(Internals.CFG_PROPERTY_PATCH_MERGE, false);
 	}
 
 	public DataAccessProvider(SessionContext ctx, int mapioBoundary, int regularBufferSize, int mapioBufferSize) {
@@ -68,6 +71,12 @@
 		mapioMagicBoundary = mapioBoundaryValue(mapioBoundary);
 		bufferSize = regularBufferSize;
 		mapioBufSize = mapioBufferSize;
+		shallMergePatches = new PropertyMarshal(ctx).getBoolean(Internals.CFG_PROPERTY_PATCH_MERGE, false);
+	}
+	
+	// TODO [post-1.1] find a better place for this option, it's unrelated to the DAP
+	public boolean shallMergePatches() {
+		return shallMergePatches;
 	}
 
 	// ensure contract of CFG_PROPERTY_MAPIO_LIMIT, for mapioBoundary == 0 use MAX_VALUE so that no file is memmap-ed
```
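The new flag travels through PropertyMarshal, i.e. through whatever configuration the SessionContext exposes. A minimal sketch of how a client could flip it on, using a hypothetical map-backed context (only the property key `hg4j.repo.merge_revlog_patches` comes from this changeset; the map-backed context below is a toy stand-in, not the real SessionContext API):

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration: PropertyMarshal resolves configuration via the
// session context, so any context that answers
// "hg4j.repo.merge_revlog_patches" -> true enables the merged-patch path.
public class PatchMergeConfigExample {
	// toy stand-in for a real SessionContext implementation
	static class MapBackedContext {
		private final Map<String, Object> props = new HashMap<String, Object>();

		MapBackedContext with(String key, Object value) {
			props.put(key, value);
			return this;
		}

		public Object getProperty(String name, Object defaultValue) {
			Object v = props.get(name);
			return v == null ? defaultValue : v;
		}
	}

	public static void main(String[] args) {
		MapBackedContext ctx = new MapBackedContext()
				.with("hg4j.repo.merge_revlog_patches", Boolean.TRUE);
		// DataAccessProvider reads the flag once, at construction time
		boolean shallMergePatches = Boolean.TRUE.equals(
				ctx.getProperty("hg4j.repo.merge_revlog_patches", Boolean.FALSE));
		System.out.println("patch merge enabled: " + shallMergePatches);
	}
}
```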
```diff
--- a/src/org/tmatesoft/hg/internal/InflaterDataAccess.java	Wed Apr 24 15:39:53 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/InflaterDataAccess.java	Thu Apr 25 16:08:17 2013 +0200
@@ -16,6 +16,7 @@
  */
 package org.tmatesoft.hg.internal;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.zip.DataFormatException;
@@ -153,8 +154,9 @@
 	@Override
 	public void readBytes(byte[] b, int off, int len) throws IOException {
+		int fromBuffer;
 		do {
-			int fromBuffer = outBuffer.remaining();
+			fromBuffer = outBuffer.remaining();
 			if (fromBuffer > 0) {
 				if (fromBuffer >= len) {
 					outBuffer.get(b, off, len);
@@ -166,8 +168,12 @@
 					// fall-through
 				}
 			}
-			fillOutBuffer();
-		} while (len > 0);
+			fromBuffer = fillOutBuffer();
+		} while (len > 0 && fromBuffer > 0);
+		if (len > 0) {
+			// prevent hang up in this cycle if no more data is available, see Issue 25
+			throw new EOFException(String.format("No more compressed data is available to satisfy request for %d bytes. [finished:%b, needDict:%b, needInp:%b, available:%d", len, inflater.finished(), inflater.needsDictionary(), inflater.needsInput(), super.available()));
+		}
 	}
 
 	@Override
@@ -220,8 +226,6 @@
 				assert inflater.finished();
 				assert toRead <= 0;
 				break;
-				// prevent hang up in this cycle if no more data is available, see Issue 25
-//				throw new EOFException(String.format("No more compressed data is available to satisfy request for %d bytes. [finished:%b, needDict:%b, needInp:%b, available:%d", len, inflater.finished(), inflater.needsDictionary(), inflater.needsInput(), toRead));
 			}
 		}
 		off += n;
```
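The readBytes fix terminates the fill loop as soon as a fill pass produces nothing, reporting EOF instead of spinning. The same guard can be shown in isolation with plain java.util.zip — a self-contained sketch of the Issue 25 pattern, not the hg4j class itself:

```java
import java.io.EOFException;
import java.io.IOException;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;

// Read exactly out.length bytes of decompressed data, failing fast instead of
// looping forever when the compressed stream is exhausted.
public class SafeInflateExample {
	public static int inflateFully(Inflater inflater, byte[] out) throws IOException {
		int total = 0;
		try {
			while (total < out.length) {
				int n = inflater.inflate(out, total, out.length - total);
				if (n == 0 && (inflater.finished() || inflater.needsInput() || inflater.needsDictionary())) {
					// no progress and no way to make any: report EOF, don't hang
					throw new EOFException(String.format(
							"No more compressed data to satisfy request for %d bytes",
							out.length - total));
				}
				total += n;
			}
		} catch (DataFormatException ex) {
			throw new IOException("Corrupt zlib stream", ex);
		}
		return total;
	}

	public static void main(String[] args) throws IOException {
		Deflater d = new Deflater();
		d.setInput("hello".getBytes());
		d.finish();
		byte[] compressed = new byte[64];
		int clen = d.deflate(compressed);
		Inflater inf = new Inflater();
		inf.setInput(compressed, 0, clen);
		byte[] out = new byte[5];
		inflateFully(inf, out);
		System.out.println(new String(out)); // hello
	}
}
```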
```diff
--- a/src/org/tmatesoft/hg/internal/Internals.java	Wed Apr 24 15:39:53 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/Internals.java	Thu Apr 25 16:08:17 2013 +0200
@@ -88,6 +88,26 @@
 	 * Integer value, use negative for attempts to acquire lock until success, and zero to try once and fail immediately.
 	 */
 	public static final String CFG_PROPERTY_FS_LOCK_TIMEOUT = "hg4j.fs.lock.timeout";
+	
+	/**
+	 * Alternative, more effective approach to build revision text from revlog patches - collect all the
+	 * patches one by one, starting at the revision next to base, apply them against each other to get
+	 * one final patch, which in turn is applied to the base revision.
+	 * <p>
+	 * The original approach is to apply each patch to the previous revision, so that with a base revision
+	 * of 1M and three patches, each altering just a tiny fraction of the origin, we consume
+	 * 1M (original) + 1M (first patch applied) + 1M (second patch applied) + 1M (third patch applied).
+	 * <p>
+	 * The alternative approach, controlled with this option, first combines these three patches into one,
+	 * and only then applies it to the base revision, eliminating 2 intermediate elements.
+	 * <p>
+	 * Present default value for this option is <b>FALSE</b>; it will be changed in future, once
+	 * tests prove the support is fully functional (likely in v1.2).
+	 * 
+	 * @since 1.1
+	 */
+	public static final String CFG_PROPERTY_PATCH_MERGE = "hg4j.repo.merge_revlog_patches";
 	
 	public static final int REVLOGV1_RECORD_SIZE = 64;
```
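A back-of-the-envelope model of the memory figures quoted in that javadoc (the numbers are illustrative, matching the 1M example, not measurements from this changeset):

```java
// Rough cost of the two strategies for a 1 MB base revision and k small deltas:
// sequential application materializes every intermediate revision in full,
// while folding the deltas first touches the full text only once.
public class PatchCostEstimate {
	public static void main(String[] args) {
		final int baseBytes = 1 << 20; // 1M base revision
		final int k = 3;               // three small patches
		// original approach: base + one full revision per patch applied
		long sequential = (long) baseBytes * (1 + k);
		// merged approach: base + final text (the folded patch itself stays small)
		long merged = (long) baseBytes * 2;
		System.out.printf("sequential: %d bytes, merged: %d bytes%n", sequential, merged);
	}
}
```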
```diff
--- a/src/org/tmatesoft/hg/internal/Patch.java	Wed Apr 24 15:39:53 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/Patch.java	Thu Apr 25 16:08:17 2013 +0200
@@ -33,6 +33,7 @@
 public final class Patch {
 	private final IntVector starts, ends;
 	private final ArrayList<byte[]> data;
+	private final boolean shallNormalize;
 
 	private static byte[] generate(int c) {
 		byte[] rv = new byte[c];
@@ -65,8 +66,13 @@
 		Patch r = p1.apply(p2);
 		System.out.println("r: " + r);
 	}
+	
+	public Patch() {
+		this(false);
+	}
 
-	public Patch() {
+	public Patch(boolean normalizeOnChange) {
+		shallNormalize = normalizeOnChange;
 		starts = new IntVector();
 		ends = new IntVector();
 		data = new ArrayList<byte[]>();
@@ -182,11 +188,29 @@
 	}
 
 	/*package-local*/ void add(int start, int end, byte[] d) {
-		// FIXME if start == end(-1), merge data
 		if (start == end && d.length == 0) {
 			System.currentTimeMillis();
 			return;
 		}
+		int last;
+		if (shallNormalize && (last = starts.size()) > 0) {
+			last--;
+			if (ends.get(last) == start) {
+				byte[] d1 = data.get(last);
+				byte[] nd;
+				if (d1.length > 0 && d.length > 0) {
+					nd = new byte[d1.length + d.length];
+					System.arraycopy(d1, 0, nd, 0, d1.length);
+					System.arraycopy(d, 0, nd, d1.length, d.length);
+				} else {
+					nd = d1.length == 0 ? d : d1;
+				}
+				ends.set(last, end);
+				data.set(last, nd);
+				return;
+			}
+			// fall-through
+		}
 		starts.add(start);
 		ends.add(end);
 		data.add(d);
@@ -203,7 +227,7 @@
 	 * Modify this patch with subsequent patch
 	 */
 	public /*SHALL BE PUBLIC ONCE TESTING ENDS*/ Patch apply(Patch another) {
-		Patch r = new Patch();
+		Patch r = new Patch(shallNormalize);
 		int p1TotalAppliedDelta = 0; // value to add to start and end indexes of the older patch to get their values as if
 		// in the patched text, iow, directly comparable with respective indexes from the newer patch.
 		int p1EntryStart = 0, p1EntryEnd = 0, p1EntryLen = 0;
```
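What the new normalizeOnChange mode does in add() is coalesce a hunk that starts exactly where the previous one ended. A standalone sketch of that rule, with a simplified Hunk stand-in for the (starts, ends, data) triple the real Patch keeps (the real code also avoids the copy when either side is empty):

```java
import java.util.ArrayList;
import java.util.List;

// When a new hunk begins exactly at the previous hunk's end, extend the
// previous hunk in place instead of appending a second entry.
public class HunkCoalesceSketch {
	static final class Hunk {
		int start, end;
		byte[] data;
		Hunk(int s, int e, byte[] d) { start = s; end = e; data = d; }
	}

	static void add(List<Hunk> hunks, int start, int end, byte[] d) {
		if (!hunks.isEmpty()) {
			Hunk last = hunks.get(hunks.size() - 1);
			if (last.end == start) { // adjacent: merge data, widen the range
				byte[] nd = new byte[last.data.length + d.length];
				System.arraycopy(last.data, 0, nd, 0, last.data.length);
				System.arraycopy(d, 0, nd, last.data.length, d.length);
				last.end = end;
				last.data = nd;
				return;
			}
		}
		hunks.add(new Hunk(start, end, d));
	}

	public static void main(String[] args) {
		List<Hunk> hunks = new ArrayList<Hunk>();
		add(hunks, 0, 5, "abc".getBytes());
		add(hunks, 5, 9, "de".getBytes()); // starts where the previous ended -> merged
		System.out.println(hunks.size() + " hunk(s), first covers ["
				+ hunks.get(0).start + "," + hunks.get(0).end + ")"); // 1 hunk(s), [0,9)
	}
}
```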
```diff
--- a/src/org/tmatesoft/hg/internal/RevlogStream.java	Wed Apr 24 15:39:53 2013 +0200
+++ b/src/org/tmatesoft/hg/internal/RevlogStream.java	Thu Apr 25 16:08:17 2013 +0200
@@ -267,7 +267,7 @@
 		HgInternals.checkRevlogRange(start, end, indexSize-1);
 		// XXX may cache [start .. end] from index with a single read (pre-read)
 		
-		ReaderN1 r = new ReaderN1(needData, inspector);
+		ReaderN1 r = new ReaderN1(needData, inspector, dataAccess.shallMergePatches());
 		try {
 			r.start(end - start + 1);
 			r.range(start, end);
@@ -299,7 +299,7 @@
 			throw new HgInvalidRevisionException(String.format("Can't iterate [%d, %d] in range [0..%d]", sortedRevisions[0], sortedRevisions[sortedRevisions.length - 1], indexSize), null, sortedRevisions[sortedRevisions.length - 1]);
 		}
 		
-		ReaderN1 r = new ReaderN1(needData, inspector);
+		ReaderN1 r = new ReaderN1(needData, inspector, dataAccess.shallMergePatches());
 		try {
 			r.start(sortedRevisions.length);
 			for (int i = 0; i < sortedRevisions.length; ) {
@@ -382,7 +382,7 @@
 		return baseRevisions != null && baseRevisions.length > 0;
 	}
 	
-	// translate 6-byte offset field value to pysical file offset for inline revlogs
+	// translate 6-byte offset field value to physical file offset for inline revlogs
 	// DOESN'T MAKE SENSE if revlog with data is separate
 	private static int offsetFieldToInlineFileOffset(long offset, int recordIndex) throws HgInvalidStateException {
 		int o = Internals.ltoi(offset);
@@ -463,22 +463,39 @@
 	 * operation with single file open/close and multiple diverse reads.
 	 * XXX initOutline might need similar extraction to keep N1 format knowledge
 	 */
-	class ReaderN1 {
+	final class ReaderN1 {
 		private final Inspector inspector;
 		private final boolean needData;
+		private final boolean mergePatches;
 		private DataAccess daIndex = null, daData = null;
 		private Lifecycle.BasicCallback cb = null;
 		private Lifecycle lifecycleListener = null;
 		private int lastRevisionRead = BAD_REVISION;
 		private DataAccess lastUserData;
+		//
+		// next are transient values, for range() use only
+		private final Inflater inflater = new Inflater();
+		// can share buffer between instances of InflaterDataAccess as I never read any two of them in parallel
+		private final byte[] inflaterBuffer = new byte[10 * 1024]; // TODO consider using DAP.DEFAULT_FILE_BUFFER
+		private final byte[] nodeidBuf = new byte[20];
+		// revlog record fields
+		private long offset;
+		@SuppressWarnings("unused")
+		private int flags;
+		private int compressedLen;
+		private int actualLen;
+		private int baseRevision;
+		private int linkRevision;
+		private int parent1Revision;
+		private int parent2Revision;
 		// next are to track two major bottlenecks - patch application and actual time spent in inspector
 //		private long applyTime, inspectorTime; // TIMING
-		
-		
-		public ReaderN1(boolean needData, Inspector insp) {
+		
+		public ReaderN1(boolean dataRequested, Inspector insp, boolean usePatchMerge) {
 			assert insp != null;
-			this.needData = needData;
+			needData = dataRequested;
 			inspector = insp;
+			mergePatches = usePatchMerge;
 		}
 
 		public void start(int totalWork) {
@@ -513,10 +530,66 @@
 			}
 //			System.out.printf("applyTime:%d ms, inspectorTime: %d ms\n", applyTime, inspectorTime); // TIMING
 		}
+		
+		private void readHeaderRecord(int i) throws IOException {
+			if (inline && needData) {
+				// inspector reading data (though FilterDataAccess) may have affected index position
+				daIndex.seek(getIndexOffsetInt(i));
+			}
+			long l = daIndex.readLong(); // 0
+			offset = i == 0 ? 0 : (l >>> 16);
+			flags = (int) (l & 0x0FFFF);
+			compressedLen = daIndex.readInt(); // +8
+			actualLen = daIndex.readInt(); // +12
+			baseRevision = daIndex.readInt(); // +16
+			linkRevision = daIndex.readInt(); // +20
+			parent1Revision = daIndex.readInt();
+			parent2Revision = daIndex.readInt();
+			// Hg has 32 bytes here, uses 20 for nodeid, and keeps 12 last bytes empty
+			daIndex.readBytes(nodeidBuf, 0, 20); // +32
+			daIndex.skip(12);
+		}
+		
+		private boolean isPatch(int i) {
+			return baseRevision != i; // the only way I found to tell if it's a patch
+		}
+		
+		private DataAccess getStoredData(int i) throws IOException {
+			DataAccess userDataAccess = null;
+			DataAccess streamDataAccess;
+			long streamOffset;
+			if (inline) {
+				streamOffset = getIndexOffsetInt(i) + REVLOGV1_RECORD_SIZE;
+				streamDataAccess = daIndex;
+				// don't need to do seek as it's actual position in the index stream, but it's safe to seek, just in case
+				daIndex.longSeek(streamOffset);
+			} else {
+				streamOffset = offset;
+				streamDataAccess = daData;
+				daData.longSeek(streamOffset);
+			}
+			if (streamDataAccess.isEmpty() || compressedLen == 0) {
+				userDataAccess = new DataAccess(); // empty
+			} else {
+				final byte firstByte = streamDataAccess.readByte();
+				if (firstByte == 0x78 /* 'x' */) {
+					inflater.reset();
+					userDataAccess = new InflaterDataAccess(streamDataAccess, streamOffset, compressedLen, isPatch(i) ? -1 : actualLen, inflater, inflaterBuffer);
+				} else if (firstByte == 0x75 /* 'u' */) {
+					userDataAccess = new FilterDataAccess(streamDataAccess, streamOffset+1, compressedLen-1);
+				} else {
+					// XXX Python impl in fact throws exception when there's not 'x', 'u' or '0' but I don't see reason not to return data as is
+					//
+					// although firstByte is already read from the streamDataAccess, FilterDataAccess#readByte would seek to
+					// initial offset before first attempt to read a byte
+					userDataAccess = new FilterDataAccess(streamDataAccess, streamOffset, compressedLen);
+				}
+			}
+			return userDataAccess;
+		}
 
 		// may be invoked few times per instance life
 		public boolean range(int start, int end) throws IOException {
-			byte[] nodeidBuf = new byte[20];
 			int i;
 			// it (i.e. replace with i >= start)
 			if (needData && (i = getBaseRevision(start)) < start) {
@@ -537,63 +610,42 @@
 			daIndex.seek(getIndexOffsetInt(i));
 			//
-			// reuse some instances
-			final Patch patch = new Patch();
-			final Inflater inflater = new Inflater();
-			// can share buffer between instances of InflaterDataAccess as I never read any two of them in parallel
-			final byte[] inflaterBuffer = new byte[10 * 1024]; // TODO consider using DAP.DEFAULT_FILE_BUFFER
+			// reuse instance, do not normalize it as patches from the stream are unlikely to need it
+			final Patch patch = new Patch(false);
+			//
+			if (needData && mergePatches && start-i > 2) {
+				// i+1 == start just reads lastUserData, i+2 == start applies one patch - not worth dedicated effort
+				Patch ultimatePatch = new Patch(true);
+				for ( ; i < start; i++) {
+					readHeaderRecord(i);
+					DataAccess userDataAccess = getStoredData(i);
+					if (lastUserData == null) {
+						assert !isPatch(i);
+						lastUserData = userDataAccess;
+					} else {
+						assert isPatch(i); // i < start and i == getBaseRevision()
+						patch.read(userDataAccess);
+						userDataAccess.done();
+						// I assume empty patches are applied ok
+						ultimatePatch = ultimatePatch.apply(patch);
+						patch.clear();
+					}
+				}
+				lastUserData.reset();
+				byte[] userData = ultimatePatch.apply(lastUserData, actualLen);
+				ultimatePatch.clear();
+				lastUserData.done();
+				lastUserData = new ByteArrayDataAccess(userData);
+			}
 			//
 			for (; i <= end; i++ ) {
-				if (inline && needData) {
-					// inspector reading data (though FilterDataAccess) may have affected index position
-					daIndex.seek(getIndexOffsetInt(i));
-				}
-				long l = daIndex.readLong(); // 0
-				long offset = i == 0 ? 0 : (l >>> 16);
-				@SuppressWarnings("unused")
-				int flags = (int) (l & 0x0FFFF);
-				int compressedLen = daIndex.readInt(); // +8
-				int actualLen = daIndex.readInt(); // +12
-				int baseRevision = daIndex.readInt(); // +16
-				int linkRevision = daIndex.readInt(); // +20
-				int parent1Revision = daIndex.readInt();
-				int parent2Revision = daIndex.readInt();
-				// Hg has 32 bytes here, uses 20 for nodeid, and keeps 12 last bytes empty
-				daIndex.readBytes(nodeidBuf, 0, 20); // +32
-				daIndex.skip(12);
+				readHeaderRecord(i);
 				DataAccess userDataAccess = null;
 				if (needData) {
-					long streamOffset;
-					DataAccess streamDataAccess;
-					if (inline) {
-						streamDataAccess = daIndex;
-						streamOffset = getIndexOffsetInt(i) + REVLOGV1_RECORD_SIZE; // don't need to do seek as it's actual position in the index stream
-					} else {
-						streamOffset = offset;
-						streamDataAccess = daData;
-						daData.longSeek(streamOffset);
-					}
-					final boolean patchToPrevious = baseRevision != i; // the only way I found to tell if it's a patch
-					if (streamDataAccess.isEmpty() || compressedLen == 0) {
-						userDataAccess = new DataAccess(); // empty
-					} else {
-						final byte firstByte = streamDataAccess.readByte();
-						if (firstByte == 0x78 /* 'x' */) {
-							inflater.reset();
-							userDataAccess = new InflaterDataAccess(streamDataAccess, streamOffset, compressedLen, patchToPrevious ? -1 : actualLen, inflater, inflaterBuffer);
-						} else if (firstByte == 0x75 /* 'u' */) {
-							userDataAccess = new FilterDataAccess(streamDataAccess, streamOffset+1, compressedLen-1);
-						} else {
-							// XXX Python impl in fact throws exception when there's not 'x', 'u' or '0' but I don't see reason not to return data as is
-							//
-							// although firstByte is already read from the streamDataAccess, FilterDataAccess#readByte would seek to
-							// initial offset before first attempt to read a byte
-							userDataAccess = new FilterDataAccess(streamDataAccess, streamOffset, compressedLen);
-						}
-					}
+					userDataAccess = getStoredData(i);
 					// userDataAccess is revision content, either complete revision, patch of a previous content, or an empty patch
-					if (patchToPrevious) {
+					if (isPatch(i)) {
 						// this is a patch
 						if (userDataAccess.isEmpty()) {
 							// Issue 22, empty patch to an empty base revision
```
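The fast path added to range() boils down to: walk from the base revision up to the requested one, fold each delta into an accumulated patch (`ultimatePatch = ultimatePatch.apply(patch)`), and rebuild the full text exactly once at the end. The sketch below shows that shape with a deliberately trivial append-only delta; `AppendDelta`, `fold` and `applyTo` are illustrative names, not the hg4j API:

```java
import java.util.Arrays;
import java.util.List;

// Fold all deltas for revisions (base..start] into one, then apply once.
// AppendDelta is just enough of a delta to exercise the flow; the real Patch
// composes arbitrary replace-hunks, which is the hard part this toy skips.
public class MergedReadSketch {
	static final class AppendDelta {
		private final byte[] suffix;
		AppendDelta(byte[] suffix) { this.suffix = suffix; }

		AppendDelta fold(AppendDelta next) { // analogue of Patch.apply(Patch)
			byte[] combined = Arrays.copyOf(suffix, suffix.length + next.suffix.length);
			System.arraycopy(next.suffix, 0, combined, suffix.length, next.suffix.length);
			return new AppendDelta(combined);
		}

		byte[] applyTo(byte[] base) { // analogue of applying the merged patch to the base text
			byte[] full = Arrays.copyOf(base, base.length + suffix.length);
			System.arraycopy(suffix, 0, full, base.length, suffix.length);
			return full;
		}
	}

	public static void main(String[] args) {
		byte[] base = "base".getBytes();
		List<AppendDelta> deltas = Arrays.asList(
				new AppendDelta("+r1".getBytes()),
				new AppendDelta("+r2".getBytes()),
				new AppendDelta("+r3".getBytes()));
		AppendDelta merged = deltas.get(0);
		for (int i = 1; i < deltas.size(); i++) {
			merged = merged.fold(deltas.get(i)); // no full revision materialized here
		}
		System.out.println(new String(merged.applyTo(base))); // base+r1+r2+r3
	}
}
```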
```diff
--- a/test/org/tmatesoft/hg/test/TestRevlog.java	Wed Apr 24 15:39:53 2013 +0200
+++ b/test/org/tmatesoft/hg/test/TestRevlog.java	Thu Apr 25 16:08:17 2013 +0200
@@ -51,14 +51,15 @@
 	private void run(File indexFile) throws Exception {
 		final boolean shallDumpDiff = Boolean.TRUE.booleanValue();
-		final boolean thoroughCheck = Boolean.TRUE.booleanValue();
+		final boolean thoroughCheck = Boolean.FALSE.booleanValue();
 		//
 		RevlogReader rr = new RevlogReader(indexFile);
 		rr.init(true);
 		rr.needData(true);
-		int startEntryIndex = 76507; // 150--87
+		int startEntryIndex = 76507 + 100; // 150--87
 		rr.startFrom(startEntryIndex);
 		rr.readNext();
+		final long s0 = System.currentTimeMillis();
 		ByteBuffer baseRevision = null;
 		if (rr.isPatch()) {
 			byte[] cc = getRevisionTrueContent(indexFile.getParentFile(), rr.entryIndex, rr.linkRevision);
@@ -72,7 +73,7 @@
 		//
 		final long start = System.currentTimeMillis();
 		int n = 1419;
-		Patch seqPatch = null, normalizedPatch = null;
+		Patch seqPatch = new Patch(false), normalizedPatch = new Patch(true);
 		while (rr.hasMore() && n-- > 0) {
 			rr.readNext();
 			if (!rr.isPatch()) {
@@ -83,65 +84,59 @@
 				continue;
 			}
 			Patch p1 = createPatch(rr);
-			if (seqPatch != null) {
-				if (n < 1) {
-					System.out.println("+" + p1);
-					System.currentTimeMillis();
-				}
+			if (n < 1) {
+				System.out.println("+" + p1);
+				System.currentTimeMillis();
+			}
 			seqPatch = seqPatch.apply(p1);
-				Patch ppp = normalizedPatch.apply(p1);
-				normalizedPatch = ppp.normalize();
-				if (n <= 1) {
-					System.out.println("=" + seqPatch);
+			normalizedPatch = normalizedPatch.apply(p1);
+//			if (n <= 1) {
+//				System.out.println("=" + seqPatch);
+//			}
+//			if (n == 0) {
+//				System.out.println("A" + ppp);
+//				System.out.println("N" + normalizedPatch);
+//				normalizedPatch = ppp;
+//			}
+			//
+			if (!thoroughCheck) {
+				if (baseRevisionContent.length() + seqPatch.patchSizeDelta() != rr.actualLen) {
+					System.out.printf("Sequential patches:\tPatchRevision #%d (+%d, cset:%d) failed\n", rr.entryIndex, rr.entryIndex - startEntryIndex, rr.linkRevision);
 				}
-				if (n == 0) {
-					System.out.println("A" + ppp);
-					System.out.println("N" + normalizedPatch);
-					normalizedPatch = ppp;
+				if (baseRevisionContent.length() + normalizedPatch.patchSizeDelta() != rr.actualLen) {
+					System.out.printf("Normalized patches:\tPatchRevision #%d (+%d, cset:%d) failed\n", rr.entryIndex, rr.entryIndex - startEntryIndex, rr.linkRevision);
 				}
-				//
-				if (!thoroughCheck) {
-					if (baseRevisionContent.length() + seqPatch.patchSizeDelta() != rr.actualLen) {
+			} else {
+				byte[] origin = getRevisionTrueContent(indexFile.getParentFile(), rr.entryIndex, rr.linkRevision);
+				try {
+					byte[] result1 = seqPatch.apply(baseRevisionContent, rr.actualLen);
+					if (!Arrays.equals(result1, origin)) {
 						System.out.printf("Sequential patches:\tPatchRevision #%d (+%d, cset:%d) failed\n", rr.entryIndex, rr.entryIndex - startEntryIndex, rr.linkRevision);
 					}
-					if (baseRevisionContent.length() + normalizedPatch.patchSizeDelta() != rr.actualLen) {
-						System.out.printf("Normalized patches:\tPatchRevision #%d (+%d, cset:%d) failed\n", rr.entryIndex, rr.entryIndex - startEntryIndex, rr.linkRevision);
-					}
-				} else {
-					byte[] origin = getRevisionTrueContent(indexFile.getParentFile(), rr.entryIndex, rr.linkRevision);
-					try {
-						byte[] result1 = seqPatch.apply(baseRevisionContent, rr.actualLen);
-						if (!Arrays.equals(result1, origin)) {
-							System.out.printf("Sequential patches:\tPatchRevision #%d (+%d, cset:%d) failed\n", rr.entryIndex, rr.entryIndex - startEntryIndex, rr.linkRevision);
-						}
-					} catch (ArrayIndexOutOfBoundsException ex) {
-						System.err.printf("Failure at entry %d (+%d)\n", rr.entryIndex, rr.entryIndex - startEntryIndex);
-						ex.printStackTrace();
-					}
-					try {
-						byte[] result2 = normalizedPatch.apply(baseRevisionContent, rr.actualLen);
-						if (!Arrays.equals(result2, origin)) {
-							System.out.printf("Normalized patches:\tPatchRevision #%d (+%d, cset:%d) failed\n", rr.entryIndex, rr.entryIndex - startEntryIndex, rr.linkRevision);
-						}
-					} catch (ArrayIndexOutOfBoundsException ex) {
-						System.err.printf("Failure at entry %d (+%d)\n", rr.entryIndex, rr.entryIndex - startEntryIndex);
-						ex.printStackTrace();
-					}
+				} catch (ArrayIndexOutOfBoundsException ex) {
+					System.err.printf("Failure at entry %d (+%d)\n", rr.entryIndex, rr.entryIndex - startEntryIndex);
+					ex.printStackTrace();
 				}
-			} else {
-				seqPatch = p1;
-				normalizedPatch = p1.normalize();
+//				try {
+//					byte[] result2 = normalizedPatch.apply(baseRevisionContent, rr.actualLen);
+//					if (!Arrays.equals(result2, origin)) {
+//						System.out.printf("Normalized patches:\tPatchRevision #%d (+%d, cset:%d) failed\n", rr.entryIndex, rr.entryIndex - startEntryIndex, rr.linkRevision);
+//					}
+//				} catch (ArrayIndexOutOfBoundsException ex) {
+//					System.err.printf("Failure at entry %d (+%d)\n", rr.entryIndex, rr.entryIndex - startEntryIndex);
+//					ex.printStackTrace();
+//				}
 			}
 		}
 		final long end1 = System.currentTimeMillis();
 		//
-//		byte[] result = seqPatch.apply(baseRevisionContent, rr.actualLen);
-		byte[] result = normalizedPatch.apply(baseRevisionContent, rr.actualLen);
+		byte[] result = seqPatch.apply(baseRevisionContent, rr.actualLen);
+//		byte[] result = normalizedPatch.apply(baseRevisionContent, rr.actualLen);
 		final long end2 = System.currentTimeMillis();
 		byte[] origin = getRevisionTrueContent(indexFile.getParentFile(), rr.entryIndex, rr.linkRevision);
 		final long end3 = System.currentTimeMillis();
 		rr.done();
-		System.out.printf("Collected patches up to revision %d. Patches total: %d, last contains %d elements\n", rr.entryIndex, rr.entryIndex - startEntryIndex + 1, seqPatch.count());
+		System.out.printf("Collected patches up to revision %d. Patches total: %d, sequentialPatch contains %d elements, normalized: %d\n", rr.entryIndex, rr.entryIndex - startEntryIndex + 1, seqPatch.count(), normalizedPatch.count());
 		if (!Arrays.equals(result, origin)) {
 			if (shallDumpDiff) {
 				diff(result, origin);
@@ -151,9 +146,9 @@
 			}
 		} else {
 			System.out.println("OK!");
-			System.out.printf("Iterate: %d ms, apply collected: %d ms, total=%d ms; Conventional: %d ms\n", (end1-start), (end2-end1), (end2-start), (end3-end2));
+			System.out.printf("Iterate: %d ms, read base:%d, apply collected: %d ms, total=%d ms; Conventional: %d ms\n", (end1-start), (start-s0), (end2-end1), (end2-s0), (end3-end2));
 		}
-		Patch normalized = normalizedPatch; //seqPatch.normalize();
+		Patch normalized = seqPatch.normalize();
 		System.out.printf("N%s\n%d => %d patch elements\n", normalized, seqPatch.count(), normalized.count());
-//		System.out.println(rs);
 	}
```
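The quick check in the non-thorough branch relies on a length invariant: a hunk replacing bytes [start, end) with data of length d changes the text length by d - (end - start), so the base length plus the summed delta must equal the recorded actualLen. A tiny sketch of that invariant (the helper below is hypothetical, mirroring what Patch#patchSizeDelta computes):

```java
// Sum, over all hunks, of (inserted length - removed length); adding this to
// the base text length must yield the target revision's recorded length.
public class PatchSizeDeltaSketch {
	static int patchSizeDelta(int[][] hunks /* {start, end, dataLen} per hunk */) {
		int delta = 0;
		for (int[] h : hunks) {
			delta += h[2] - (h[1] - h[0]);
		}
		return delta;
	}

	public static void main(String[] args) {
		int baseLen = 100;
		int[][] hunks = { {10, 20, 5}, {40, 40, 7} }; // shrink by 5, insert 7
		System.out.println("expected result length: " + (baseLen + patchSizeDelta(hunks))); // 102
	}
}
```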