hg4j: changeset 534:243202f1bda5
Commit: refactor revision creation code from clone command to work separately, fit into existing library structure
Author: Artem Tikhomirov <tikhomirov.artem@gmail.com>
Date: Mon, 04 Feb 2013 18:00:55 +0100
Parents: e6f72c9829a6
Children: d9c07e1432c4
Files: src/org/tmatesoft/hg/core/HgCloneCommand.java, src/org/tmatesoft/hg/internal/DataAccessProvider.java, src/org/tmatesoft/hg/internal/DataSerializer.java, src/org/tmatesoft/hg/internal/DeflaterDataSerializer.java, src/org/tmatesoft/hg/internal/DirstateReader.java, src/org/tmatesoft/hg/internal/Patch.java, src/org/tmatesoft/hg/internal/RevlogCompressor.java, src/org/tmatesoft/hg/internal/RevlogStream.java, src/org/tmatesoft/hg/internal/RevlogStreamWriter.java, src/org/tmatesoft/hg/repo/HgBundle.java, src/org/tmatesoft/hg/repo/Revlog.java, test/org/tmatesoft/hg/test/TestCommit.java
Diffstat: 12 files changed, 591 insertions(+), 74 deletions(-)
```diff
--- a/src/org/tmatesoft/hg/core/HgCloneCommand.java	Wed Jan 30 15:48:36 2013 +0100
+++ b/src/org/tmatesoft/hg/core/HgCloneCommand.java	Mon Feb 04 18:00:55 2013 +0100
@@ -18,6 +18,8 @@
 import static org.tmatesoft.hg.core.Nodeid.NULL;
 import static org.tmatesoft.hg.internal.RequiresFile.*;
+import static org.tmatesoft.hg.internal.RevlogStreamWriter.preferCompleteOverPatch;
+import static org.tmatesoft.hg.internal.RevlogStreamWriter.preferCompressedOverComplete;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -29,6 +31,8 @@
 import org.tmatesoft.hg.internal.ByteArrayDataAccess;
 import org.tmatesoft.hg.internal.DataAccess;
+import org.tmatesoft.hg.internal.DataAccessProvider;
+import org.tmatesoft.hg.internal.DataSerializer;
 import org.tmatesoft.hg.internal.DigestHelper;
 import org.tmatesoft.hg.internal.Lifecycle;
 import org.tmatesoft.hg.internal.RepoInitializer;
@@ -139,6 +143,7 @@
     private final PathRewrite storagePathHelper;
     private final ProgressSupport progressSupport;
     private final CancelSupport cancelSupport;
+    private final SessionContext ctx;
     private FileOutputStream indexFile;
     private String filename; // human-readable name of the file being written, for log/exception purposes
@@ -155,13 +160,18 @@
     private Lifecycle.Callback lifecycleCallback;
     private CancelledException cancelException;
 
-    public WriteDownMate(SessionContext ctx, File destDir, ProgressSupport progress, CancelSupport cancel) {
+    private RevlogStreamWriter.HeaderWriter revlogHeader = new RevlogStreamWriter.HeaderWriter(true);
+    private RevlogCompressor revlogDataZip;
+
+    public WriteDownMate(SessionContext sessionCtx, File destDir, ProgressSupport progress, CancelSupport cancel) {
+        ctx = sessionCtx;
         hgDir = new File(destDir, ".hg");
         repoInit = new RepoInitializer();
         repoInit.setRequires(STORE | FNCACHE | DOTENCODE);
-        storagePathHelper = repoInit.buildDataFilesHelper(ctx);
+        storagePathHelper = repoInit.buildDataFilesHelper(sessionCtx);
         progressSupport = progress;
         cancelSupport = cancel;
+        revlogDataZip = new RevlogCompressor(sessionCtx);
     }
 
     public void initEmptyRepository() throws IOException {
@@ -278,9 +288,6 @@
         throw new HgInvalidControlFileException(m, null, new File(hgDir, filename)).setRevision(p);
     }
 
-    private RevlogStreamWriter.HeaderWriter revlogHeader = new RevlogStreamWriter.HeaderWriter(true);
-    private RevlogCompressor revlogDataZip = new RevlogCompressor();
-
     public boolean element(GroupElement ge) {
         try {
             assert indexFile != null;
@@ -332,7 +339,7 @@
             byte[] patchContent = ge.rawDataByteArray();
             // no reason to keep patch if it's close (here, >75%) in size to the complete contents,
             // save patching effort in this case
-            writeComplete = writeComplete || patchContent.length >= (/* 3/4 of actual */content.length - (content.length >>> 2));
+            writeComplete = writeComplete || preferCompleteOverPatch(patchContent.length, content.length);
 
             if (writeComplete) {
                 revlogHeader.baseRevision(revisionSequence.size());
@@ -340,29 +347,37 @@
             assert revlogHeader.baseRevision() >= 0;
 
             final byte[] sourceData = writeComplete ? content : patchContent;
-            revlogDataZip.reset(sourceData);
+            revlogDataZip.reset(new DataSerializer.ByteArrayDataSource(sourceData));
             final int compressedLen;
-            final boolean useUncompressedData = revlogDataZip.getCompressedLengthEstimate() >= (sourceData.length - (sourceData.length >>> 2));
+            final boolean useUncompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), sourceData.length);
             if (useUncompressedData) {
                 // compression wasn't too effective,
                 compressedLen = sourceData.length + 1 /*1 byte for 'u' - uncompressed prefix byte*/;
             } else {
-                compressedLen= revlogDataZip.getCompressedLengthEstimate();
+                compressedLen= revlogDataZip.getCompressedLength();
             }
 
             revlogHeader.length(content.length, compressedLen);
 
-            revlogHeader.write(indexFile);
+            // XXX may be wise not to create DataSerializer for each revision, but for a file
+            DataAccessProvider.StreamDataSerializer sds = new DataAccessProvider.StreamDataSerializer(ctx.getLog(), indexFile) {
+                @Override
+                public void done() {
+                    // override parent behavior not to close stream in use
+                }
+            };
+            revlogHeader.serialize(sds);
 
             if (useUncompressedData) {
                 indexFile.write((byte) 'u');
                 indexFile.write(sourceData);
             } else {
-                int actualCompressedLenWritten = revlogDataZip.writeCompressedData(indexFile);
+                int actualCompressedLenWritten = revlogDataZip.writeCompressedData(sds);
                 if (actualCompressedLenWritten != compressedLen) {
                     throw new HgInvalidStateException(String.format("Expected %d bytes of compressed data, but actually wrote %d in %s", compressedLen, actualCompressedLenWritten, filename));
                 }
             }
+            sds.done();
             //
             revisionSequence.add(node);
             prevRevContent.done();
```
```diff
--- a/src/org/tmatesoft/hg/internal/DataAccessProvider.java	Wed Jan 30 15:48:36 2013 +0100
+++ b/src/org/tmatesoft/hg/internal/DataAccessProvider.java	Mon Feb 04 18:00:55 2013 +0100
@@ -21,7 +21,10 @@
 
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.nio.ByteBuffer;
 import java.nio.MappedByteBuffer;
 import java.nio.channels.FileChannel;
@@ -72,7 +75,7 @@
         return mapioBoundary == 0 ? Integer.MAX_VALUE : mapioBoundary;
     }
 
-    public DataAccess create(File f) {
+    public DataAccess createReader(File f) {
         if (!f.exists()) {
             return new DataAccess();
         }
@@ -96,6 +99,22 @@
         }
         return new DataAccess(); // non-null, empty.
     }
+
+    public DataSerializer createWriter(File f, boolean createNewIfDoesntExist) {
+        if (!f.exists() && !createNewIfDoesntExist) {
+            return new DataSerializer();
+        }
+        try {
+            return new StreamDataSerializer(context.getLog(), new FileOutputStream(f));
+        } catch (final FileNotFoundException ex) {
+            context.getLog().dump(getClass(), Error, ex, null);
+            return new DataSerializer() {
+                public void write(byte[] data, int offset, int length) throws IOException {
+                    throw ex;
+                }
+            };
+        }
+    }
 
     private static class MemoryMapFileAccess extends DataAccess {
         private FileChannel fileChannel;
@@ -374,4 +393,57 @@
             }
         }
     }
+
+    public/*XXX, private, once HgCloneCommand stops using it */ static class StreamDataSerializer extends DataSerializer {
+        private final OutputStream out;
+        private final LogFacility log;
+        private byte[] buffer;
+
+        public StreamDataSerializer(LogFacility logFacility, OutputStream os) {
+            assert os != null;
+            out = os;
+            log = logFacility;
+        }
+
+        @Override
+        public void write(byte[] data, int offset, int length) throws IOException {
+            out.write(data, offset, length);
+        }
+
+        @Override
+        public void writeInt(int... values) throws IOException {
+            ensureBufferSize(4*values.length); // sizeof(int)
+            int idx = 0;
+            for (int v : values) {
+                DataSerializer.bigEndian(v, buffer, idx);
+                idx += 4;
+            }
+            out.write(buffer, 0, idx);
+        }
+
+        @Override
+        public void writeByte(byte... values) throws IOException {
+            if (values.length == 1) {
+                out.write(values[0]);
+            } else {
+                out.write(values, 0, values.length);
+            }
+        }
+
+        private void ensureBufferSize(int bytesNeeded) {
+            if (buffer == null || buffer.length < bytesNeeded) {
+                buffer = new byte[bytesNeeded];
+            }
+        }
+
+        @Override
+        public void done() {
+            try {
+                out.flush();
+                out.close();
+            } catch (IOException ex) {
+                log.dump(getClass(), Error, ex, "Failure to close stream");
+            }
+        }
+    }
 }
```
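The old `create()` becomes `createReader()`, with `createWriter()` as its new counterpart; note that with `createNewIfDoesntExist == false` a missing file yields a silent no-op serializer, while a failure to open the file yields one that rethrows the original exception on every `write()`. A minimal usage sketch of the pairing (illustration only; `provider` and `someFile` are stand-ins, not names from this changeset):

```java
import java.io.File;
import java.io.IOException;

import org.tmatesoft.hg.internal.DataAccess;
import org.tmatesoft.hg.internal.DataAccessProvider;
import org.tmatesoft.hg.internal.DataSerializer;

class ReaderWriterSketch {
    void demo(DataAccessProvider provider, File someFile) throws IOException {
        DataAccess in = provider.createReader(someFile);            // non-null, empty DataAccess when the file is missing
        DataSerializer out = provider.createWriter(someFile, true); // true: create the file if absent
        try {
            out.writeInt(1, 2, 3);      // written big-endian through the shared buffer
            out.writeByte((byte) 'u');
        } finally {
            out.done();                 // StreamDataSerializer flushes and closes the stream
        }
    }
}
```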
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/DataSerializer.java	Mon Feb 04 18:00:55 2013 +0100
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.IOException;
+
+/**
+ * Serialization friend of {@link DataAccess}
+ *
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+@Experimental(reason="Work in progress")
+public class DataSerializer {
+
+    public void writeByte(byte... values) throws IOException {
+        write(values, 0, values.length);
+    }
+
+    public void writeInt(int... values) throws IOException {
+        byte[] buf = new byte[4];
+        for (int v : values) {
+            bigEndian(v, buf, 0);
+            write(buf, 0, buf.length);
+        }
+    }
+
+    public void write(byte[] data, int offset, int length) throws IOException {
+        throw new IOException("Attempt to write to non-existent file");
+    }
+
+    public void done() {
+        // FIXME perhaps, shall allow IOException, too
+        // no-op
+    }
+
+    /**
+     * Writes 4 bytes of supplied value into the buffer at given offset, big-endian.
+     */
+    public static final void bigEndian(int value, byte[] buffer, int offset) {
+        assert offset + 4 <= buffer.length;
+        buffer[offset++] = (byte) ((value >>> 24) & 0x0ff);
+        buffer[offset++] = (byte) ((value >>> 16) & 0x0ff);
+        buffer[offset++] = (byte) ((value >>> 8) & 0x0ff);
+        buffer[offset++] = (byte) (value & 0x0ff);
+    }
+
+    @Experimental(reason="Work in progress")
+    interface DataSource {
+        public void serialize(DataSerializer out) throws IOException;
+
+        /**
+         * Hint of data length it would like to writes
+         * @return -1 if can't answer
+         */
+        public int serializeLength();
+    }
+
+    public static class ByteArrayDataSource implements DataSource {
+
+        private final byte[] data;
+
+        public ByteArrayDataSource(byte[] bytes) {
+            data = bytes;
+        }
+
+        public void serialize(DataSerializer out) throws IOException {
+            if (data != null) {
+                out.write(data, 0, data.length);
+            }
+        }
+
+        public int serializeLength() {
+            return data == null ? 0 : data.length;
+        }
+    }
+}
```
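The new class pairs with its `DataSource` callback to separate what gets written from where it goes; a subclass only has to override `write()`, since `writeByte()` and `writeInt()` funnel through it by default. A minimal sketch of that contract (illustration only, not part of the changeset) — a serializer that buffers into memory:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.tmatesoft.hg.internal.DataSerializer;

// Illustration: collect everything a DataSource emits into a byte array.
class ByteArrayDataSerializer extends DataSerializer {
    private final ByteArrayOutputStream out = new ByteArrayOutputStream();

    @Override
    public void write(byte[] data, int offset, int length) throws IOException {
        out.write(data, offset, length);
    }

    byte[] toByteArray() {
        return out.toByteArray();
    }
}
```

Feeding it `new DataSerializer.ByteArrayDataSource(bytes).serialize(...)` round-trips the original bytes.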
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/DeflaterDataSerializer.java	Mon Feb 04 18:00:55 2013 +0100
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.IOException;
+import java.util.zip.Deflater;
+import java.util.zip.DeflaterOutputStream;
+
+/**
+ * {@link DeflaterOutputStream} counterpart for {@link DataSerializer} API
+ *
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+class DeflaterDataSerializer extends DataSerializer {
+
+    private static final int AUX_BUFFER_CAPACITY = 5; // room for 5 ints
+
+    private final DataSerializer delegate;
+    private final Deflater deflater;
+    private final byte[] deflateOutBuffer;
+    private final byte[] auxBuffer;
+
+    public DeflaterDataSerializer(DataSerializer delegateSerializer, Deflater def, int bufferSizeHint) {
+        delegate = delegateSerializer;
+        deflater = def;
+        deflateOutBuffer = new byte[bufferSizeHint <= 0 ? 2048 : bufferSizeHint];
+        auxBuffer = new byte[4 * AUX_BUFFER_CAPACITY]; // sizeof(int) * capacity
+    }
+
+    @Override
+    public void writeInt(int... values) throws IOException {
+        for (int i = 0; i < values.length; i+= AUX_BUFFER_CAPACITY) {
+            int idx = 0;
+            for (int j = i, x = Math.min(values.length, i + AUX_BUFFER_CAPACITY); j < x; j++) {
+                int v = values[j];
+                auxBuffer[idx++] = (byte) ((v >>> 24) & 0x0ff);
+                auxBuffer[idx++] = (byte) ((v >>> 16) & 0x0ff);
+                auxBuffer[idx++] = (byte) ((v >>> 8) & 0x0ff);
+                auxBuffer[idx++] = (byte) (v & 0x0ff);
+            }
+            internalWrite(auxBuffer, 0, idx);
+        }
+    }
+
+    @Override
+    public void write(byte[] data, int offset, int length) throws IOException {
+        // @see DeflaterOutputStream#write(byte[], int, int)
+        int stride = deflateOutBuffer.length;
+        for (int i = 0; i < length; i += stride) {
+            internalWrite(data, offset + i, Math.min(stride, length - i));
+        }
+    }
+
+    private void internalWrite(byte[] data, int offset, int length) throws IOException {
+        deflater.setInput(data, offset, length);
+        while (!deflater.needsInput()) {
+            deflate();
+        }
+    }
+
+    @Override
+    public void done() {
+        delegate.done();
+    }
+
+    public void finish() throws IOException {
+        if (!deflater.finished()) {
+            deflater.finish();
+            while (!deflater.finished()) {
+                deflate();
+            }
+        }
+    }
+
+    protected void deflate() throws IOException {
+        int len = deflater.deflate(deflateOutBuffer, 0, deflateOutBuffer.length);
+        if (len > 0) {
+            delegate.write(deflateOutBuffer, 0, len);
+        }
+    }
+}
```
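A sketch of the intended chaining, mirroring what `RevlogCompressor.writeCompressedData()` does below: the deflating serializer sits in front of a delegate and must be `finish()`ed to flush the last compressed block. Since the class is package-private, this hypothetical snippet assumes it lives in `org.tmatesoft.hg.internal`:

```java
package org.tmatesoft.hg.internal;

import java.io.IOException;
import java.util.zip.Deflater;

class DeflateSketch {
    // delegate and data are stand-ins for illustration
    int compress(DataSerializer delegate, byte[] data) throws IOException {
        Deflater zip = new Deflater();
        DeflaterDataSerializer dds = new DeflaterDataSerializer(delegate, zip, data.length);
        dds.write(data, 0, data.length); // deflated in bufferSizeHint-sized strides
        dds.finish();                    // flush the final compressed block to the delegate
        return zip.getTotalOut();        // compressed byte count, as RevlogCompressor reports it
    }
}
```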
```diff
--- a/src/org/tmatesoft/hg/internal/DirstateReader.java	Wed Jan 30 15:48:36 2013 +0100
+++ b/src/org/tmatesoft/hg/internal/DirstateReader.java	Mon Feb 04 18:00:55 2013 +0100
@@ -66,7 +66,7 @@
         if (dirstateFile == null || !dirstateFile.exists()) {
             return;
         }
-        DataAccess da = repo.getDataAccess().create(dirstateFile);
+        DataAccess da = repo.getDataAccess().createReader(dirstateFile);
         try {
             if (da.isEmpty()) {
                 return;
@@ -142,7 +142,7 @@
         if (dirstateFile == null || !dirstateFile.exists()) {
             return new Pair<Nodeid,Nodeid>(NULL, NULL);
         }
-        DataAccess da = internalRepo.getDataAccess().create(dirstateFile);
+        DataAccess da = internalRepo.getDataAccess().createReader(dirstateFile);
         try {
             if (da.isEmpty()) {
                 return new Pair<Nodeid,Nodeid>(NULL, NULL);
```
```diff
--- a/src/org/tmatesoft/hg/internal/Patch.java	Wed Jan 30 15:48:36 2013 +0100
+++ b/src/org/tmatesoft/hg/internal/Patch.java	Mon Feb 04 18:00:55 2013 +0100
@@ -154,7 +154,29 @@
         ends.add(e);
         data.add(src);
     }
-
+
+    /**
+     * @return how many bytes the patch would take if written down using BundleFormat structure (start, end, length, data)
+     */
+    public int serializedLength() {
+        int totalDataLen = 0;
+        for (byte[] d : data) {
+            totalDataLen += d.length;
+        }
+        int prefix = 3 * 4 * count(); // 3 integer fields per entry * sizeof(int) * number of entries
+        return prefix + totalDataLen;
+    }
+
+    /*package-local*/ void serialize(DataSerializer out) throws IOException {
+        for (int i = 0, x = data.size(); i < x; i++) {
+            final int start = starts.get(i);
+            final int end = ends.get(i);
+            byte[] d = data.get(i);
+            out.writeInt(start, end, d.length);
+            out.write(d, 0, d.length);
+        }
+    }
+
     private void add(Patch p, int i) {
         add(p.starts.get(i), p.ends.get(i), p.data.get(i));
     }
@@ -363,4 +385,15 @@
         };
         return r;
     }
+
+    public class PatchDataSource implements DataSerializer.DataSource {
+
+        public void serialize(DataSerializer out) throws IOException {
+            Patch.this.serialize(out);
+        }
+
+        public int serializeLength() {
+            return Patch.this.serializedLength();
+        }
+    }
 }
\ No newline at end of file
```
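Worked example of `serializedLength()`: a patch of two hunks carrying 10 and 20 bytes of replacement data takes 2 × (3 × 4) = 24 bytes of (start, end, length) prefixes plus 30 bytes of data, i.e. 54 bytes in bundle form.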
```diff
--- a/src/org/tmatesoft/hg/internal/RevlogCompressor.java	Wed Jan 30 15:48:36 2013 +0100
+++ b/src/org/tmatesoft/hg/internal/RevlogCompressor.java	Mon Feb 04 18:00:55 2013 +0100
@@ -17,9 +17,10 @@
 package org.tmatesoft.hg.internal;
 
 import java.io.IOException;
-import java.io.OutputStream;
 import java.util.zip.Deflater;
-import java.util.zip.DeflaterOutputStream;
+
+import org.tmatesoft.hg.core.SessionContext;
+import org.tmatesoft.hg.util.LogFacility.Severity;
 
 /**
  *
@@ -27,45 +28,59 @@
  * @author TMate Software Ltd.
  */
 public class RevlogCompressor {
+    private final SessionContext ctx;
     private final Deflater zip;
-    private byte[] sourceData;
-    private int compressedLenEstimate;
+    private DataSerializer.DataSource sourceData;
+    private int compressedLen;
 
-    public RevlogCompressor() {
+    public RevlogCompressor(SessionContext sessionCtx) {
+        ctx = sessionCtx;
         zip = new Deflater();
     }
 
-    public void reset(byte[] source) {
+    public void reset(DataSerializer.DataSource source) {
         sourceData = source;
-        compressedLenEstimate = -1;
+        compressedLen = -1;
     }
 
-    public int writeCompressedData(OutputStream out) throws IOException {
+    // out stream is not closed!
+    public int writeCompressedData(DataSerializer out) throws IOException {
         zip.reset();
-        DeflaterOutputStream dos = new DeflaterOutputStream(out, zip, Math.min(2048, sourceData.length));
-        dos.write(sourceData);
-        dos.finish();
+        DeflaterDataSerializer dds = new DeflaterDataSerializer(out, zip, sourceData.serializeLength());
+        sourceData.serialize(dds);
+        dds.finish();
         return zip.getTotalOut();
     }
 
-    public int getCompressedLengthEstimate() {
-        if (compressedLenEstimate != -1) {
-            return compressedLenEstimate;
+    public int getCompressedLength() {
+        if (compressedLen != -1) {
+            return compressedLen;
         }
-        zip.reset();
-        int rv = 0;
-        // from DeflaterOutputStream:
-        byte[] buffer = new byte[Math.min(2048, sourceData.length)];
-        for (int i = 0, stride = buffer.length; i < sourceData.length; i+= stride) {
-            zip.setInput(sourceData, i, Math.min(stride, sourceData.length - i));
-            while (!zip.needsInput()) {
-                rv += zip.deflate(buffer, 0, buffer.length);
-            }
-        }
-        zip.finish();
-        while (!zip.finished()) {
-            rv += zip.deflate(buffer, 0, buffer.length);
-        }
-        return compressedLenEstimate = rv;
+        Counter counter = new Counter();
+        try {
+            compressedLen = writeCompressedData(counter);
+            assert counter.totalWritten == compressedLen;
+            return compressedLen;
+        } catch (IOException ex) {
+            // can't happen provided we write to our stream that does nothing but byte counting
+            ctx.getLog().dump(getClass(), Severity.Error, ex, "Failed estimating compressed length of revlog data");
+            return counter.totalWritten; // use best known value so far
+        }
+    }
+
+    private static class Counter extends DataSerializer {
+        public int totalWritten = 0;
+
+        public void writeByte(byte... values) throws IOException {
+            totalWritten += values.length;
+        }
+
+        public void writeInt(int... values) throws IOException {
+            totalWritten += 4 * values.length;
+        }
+
+        public void write(byte[] data, int offset, int length) throws IOException {
+            totalWritten += length;
+        }
    }
 }
```
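With the byte-counting `Counter` sink, the former estimate becomes exact: `getCompressedLength()` runs a real deflate pass into the counter and caches the result, so a later `writeCompressedData()` should emit exactly that many bytes (the callers in this changeset treat a mismatch as `HgInvalidStateException`). A hypothetical caller, with `sessionContext`, `revisionContent` and `target` as stand-ins:

```java
package org.tmatesoft.hg.internal;

import java.io.IOException;

import org.tmatesoft.hg.core.SessionContext;

class CompressorSketch {
    void demo(SessionContext sessionContext, byte[] revisionContent, DataSerializer target) throws IOException {
        RevlogCompressor zip = new RevlogCompressor(sessionContext);
        zip.reset(new DataSerializer.ByteArrayDataSource(revisionContent));
        int compressedLen = zip.getCompressedLength();     // deflate pass #1, into the byte-counting sink
        if (compressedLen < revisionContent.length) {
            int written = zip.writeCompressedData(target); // deflate pass #2, into the real sink
            assert written == compressedLen;
        }
    }
}
```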
```diff
--- a/src/org/tmatesoft/hg/internal/RevlogStream.java	Wed Jan 30 15:48:36 2013 +0100
+++ b/src/org/tmatesoft/hg/internal/RevlogStream.java	Mon Feb 04 18:00:55 2013 +0100
@@ -67,13 +67,21 @@
     }
 
     /*package*/ DataAccess getIndexStream() {
-        // TODO post 1.0 may supply a hint that I'll need really few bytes of data (perhaps, at some offset)
+        // FIXME post 1.0 must supply a hint that I'll need really few bytes of data (perhaps, at some offset)
         // to avoid mmap files when only few bytes are to be read (i.e. #dataLength())
-        return dataAccess.create(indexFile);
+        return dataAccess.createReader(indexFile);
     }
 
     /*package*/ DataAccess getDataStream() {
-        return dataAccess.create(getDataFile());
+        return dataAccess.createReader(getDataFile());
+    }
+
+    /*package*/ DataSerializer getIndexStreamWriter() {
+        return dataAccess.createWriter(indexFile, true);
+    }
+
+    /*package*/ DataSerializer getDataStreamWriter() {
+        return dataAccess.createWriter(getDataFile(), true);
     }
 
     /**
@@ -99,7 +107,21 @@
         // although honest approach is to call #initOutline() first
         return ex.setFile(inline ? indexFile : getDataFile());
     }
+
+    /*package-private*/String getDataFileName() {
+        // XXX a temporary solution to provide more info to fill in exceptions other than
+        // HgInvalidControlFileException (those benefit from initWith* methods above)
+        //
+        // Besides, since RevlogStream represents both revlogs with user data (those with WC representative and
+        // system data under store/data) and system-only revlogs (like changelog and manifest), there's no
+        // easy way to supply human-friendly name of the active file (independent from whether it's index of data)
+        return inline ? indexFile.getPath() : getDataFile().getPath();
+    }
 
+    public boolean isInlineData() {
+        initOutline();
+        return inline;
+    }
 
     public int revisionCount() {
         initOutline();
```
```diff
--- a/src/org/tmatesoft/hg/internal/RevlogStreamWriter.java	Wed Jan 30 15:48:36 2013 +0100
+++ b/src/org/tmatesoft/hg/internal/RevlogStreamWriter.java	Mon Feb 04 18:00:55 2013 +0100
@@ -17,16 +17,19 @@
 package org.tmatesoft.hg.internal;
 
 import static org.tmatesoft.hg.internal.Internals.REVLOGV1_RECORD_SIZE;
+import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;
 
 import java.io.IOException;
-import java.io.OutputStream;
 import java.nio.ByteBuffer;
 
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.core.SessionContext;
+import org.tmatesoft.hg.repo.HgInvalidControlFileException;
+import org.tmatesoft.hg.repo.HgInvalidStateException;
 
 /**
  *
- * TODO check if index is too big and split into index+data
+ * TODO separate operation to check if index is too big and split into index+data
  *
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
@@ -34,13 +37,14 @@
 public class RevlogStreamWriter {
 
-    public static class HeaderWriter {
+    /*XXX public because HgCloneCommand uses it*/
+    public static class HeaderWriter implements DataSerializer.DataSource {
         private final ByteBuffer header;
         private final boolean isInline;
         private long offset;
         private int length, compressedLength;
         private int baseRev, linkRev, p1, p2;
-        private Nodeid nodeid;
+        private byte[] nodeid;
 
         public HeaderWriter(boolean inline) {
             isInline = inline;
@@ -74,16 +78,21 @@
         }
 
         public HeaderWriter linkRevision(int linkRevision) {
-            this.linkRev = linkRevision;
+            linkRev = linkRevision;
             return this;
         }
 
         public HeaderWriter nodeid(Nodeid n) {
-            this.nodeid = n;
+            nodeid = n.toByteArray();
             return this;
         }
-
-        public void write(OutputStream out) throws IOException {
+
+        public HeaderWriter nodeid(byte[] nodeidBytes) {
+            nodeid = nodeidBytes;
+            return this;
+        }
+
+        public void serialize(DataSerializer out) throws IOException {
             header.clear();
             if (offset == 0) {
                 int version = 1 /* RevlogNG */;
@@ -102,34 +111,161 @@
             header.putInt(linkRev);
             header.putInt(p1);
             header.putInt(p2);
-            header.put(nodeid.toByteArray());
+            header.put(nodeid);
             // assume 12 bytes left are zeros
-            out.write(header.array());
+            out.write(header.array(), 0, header.capacity());
 
             // regardless whether it's inline or separate data,
             // offset field always represent cumulative compressedLength
             // (while offset in the index file with inline==true differs by n*sizeof(header), where n is entry's position in the file)
             offset += compressedLength;
         }
+
+        public int serializeLength() {
+            return header.capacity();
+        }
+    }
+
+    private final DigestHelper dh = new DigestHelper();
+    private final RevlogCompressor revlogDataZip;
+
+
+    public RevlogStreamWriter(SessionContext ctx, RevlogStream stream) {
+        revlogDataZip = new RevlogCompressor(ctx);
     }
-
-    private final DigestHelper dh = new DigestHelper();
+    private int lastEntryBase, lastEntryIndex;
+    private byte[] lastEntryContent;
+    private Nodeid lastEntryRevision;
+    private IntMap<Nodeid> revisionCache = new IntMap<Nodeid>(32);
 
     public void addRevision(byte[] content, int linkRevision, int p1, int p2) {
-        Nodeid p1Rev = parent(p1);
-        Nodeid p2Rev = parent(p2);
-        byte[] revisionBytes = dh.sha1(p1Rev, p2Rev, content).asBinary();
-        //final Nodeid revision = Nodeid.fromBinary(revisionBytes, 0);
-        // cache last revision (its delta and baseRev)
+        int revCount = revlogStream.revisionCount();
+        lastEntryIndex = revCount == 0 ? NO_REVISION : revCount - 1;
+        populateLastEntry();
+        //
         PatchGenerator pg = new PatchGenerator();
-        byte[] prev = null;
-        Patch patch = pg.delta(prev, content);
-        byte[] patchContent;
-        // rest as in HgCloneCommand
+        Patch patch = pg.delta(lastEntryContent, content);
+        int patchSerializedLength = patch.serializedLength();
+
+        final boolean writeComplete = preferCompleteOverPatch(patchSerializedLength, content.length);
+        DataSerializer.DataSource dataSource = writeComplete ? new DataSerializer.ByteArrayDataSource(content) : patch.new PatchDataSource();
+        revlogDataZip.reset(dataSource);
+        final int compressedLen;
+        final boolean useUncompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), dataSource.serializeLength());
+        if (useUncompressedData) {
+            // compression wasn't too effective,
+            compressedLen = dataSource.serializeLength() + 1 /*1 byte for 'u' - uncompressed prefix byte*/;
+        } else {
+            compressedLen= revlogDataZip.getCompressedLength();
+        }
+        //
+        Nodeid p1Rev = revision(p1);
+        Nodeid p2Rev = revision(p2);
+        byte[] revisionNodeidBytes = dh.sha1(p1Rev, p2Rev, content).asBinary();
+        //
+
+        DataSerializer indexFile, dataFile, activeFile;
+        indexFile = dataFile = activeFile = null;
+        try {
+            //
+            activeFile = indexFile = revlogStream.getIndexStreamWriter();
+            final boolean isInlineData = revlogStream.isInlineData();
+            HeaderWriter revlogHeader = new HeaderWriter(isInlineData);
+            revlogHeader.length(content.length, compressedLen);
+            revlogHeader.nodeid(revisionNodeidBytes);
+            revlogHeader.linkRevision(linkRevision);
+            revlogHeader.parents(p1, p2);
+            revlogHeader.baseRevision(writeComplete ? lastEntryIndex+1 : lastEntryBase);
+            //
+            revlogHeader.serialize(indexFile);
+
+            if (isInlineData) {
+                dataFile = indexFile;
+            } else {
+                dataFile = revlogStream.getDataStreamWriter();
+            }
+            activeFile = dataFile;
+            if (useUncompressedData) {
+                dataFile.writeByte((byte) 'u');
+                dataSource.serialize(dataFile);
+            } else {
+                int actualCompressedLenWritten = revlogDataZip.writeCompressedData(dataFile);
+                if (actualCompressedLenWritten != compressedLen) {
+                    throw new HgInvalidStateException(String.format("Expected %d bytes of compressed data, but actually wrote %d in %s", compressedLen, actualCompressedLenWritten, revlogStream.getDataFileName()));
+                }
+            }
+
+            lastEntryContent = content;
+            lastEntryBase = revlogHeader.baseRevision();
+            lastEntryIndex++;
+            lastEntryRevision = Nodeid.fromBinary(revisionNodeidBytes, 0);
+            revisionCache.put(lastEntryIndex, lastEntryRevision);
+        } catch (IOException ex) {
+            String m = String.format("Failed to write revision %d", lastEntryIndex+1, null);
+            HgInvalidControlFileException t = new HgInvalidControlFileException(m, ex, null);
+            if (activeFile == dataFile) {
+                throw revlogStream.initWithDataFile(t);
+            } else {
+                throw revlogStream.initWithIndexFile(t);
+            }
+        } finally {
+            indexFile.done();
+            if (dataFile != null && dataFile != indexFile) {
+                dataFile.done();
+            }
+        }
     }
 
-    private Nodeid parent(int parentIndex) {
-        return null;
+    private RevlogStream revlogStream;
+
+    private Nodeid revision(int revisionIndex) {
+        if (revisionIndex == NO_REVISION) {
+            return Nodeid.NULL;
+        }
+        Nodeid n = revisionCache.get(revisionIndex);
+        if (n == null) {
+            n = Nodeid.fromBinary(revlogStream.nodeid(revisionIndex), 0);
+            revisionCache.put(revisionIndex, n);
+        }
+        return n;
+    }
+
+    private void populateLastEntry() throws HgInvalidControlFileException {
+        if (lastEntryIndex != NO_REVISION && lastEntryContent == null) {
+            assert lastEntryIndex >= 0;
+            final IOException[] failure = new IOException[1];
+            revlogStream.iterate(lastEntryIndex, lastEntryIndex, true, new RevlogStream.Inspector() {
+
+                public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
+                    try {
+                        lastEntryBase = baseRevision;
+                        lastEntryRevision = Nodeid.fromBinary(nodeid, 0);
+                        lastEntryContent = data.byteArray();
+                    } catch (IOException ex) {
+                        failure[0] = ex;
+                    }
+                }
+            });
+            if (failure[0] != null) {
+                String m = String.format("Failed to get content of most recent revision %d", lastEntryIndex);
+                throw revlogStream.initWithDataFile(new HgInvalidControlFileException(m, failure[0], null));
+            }
+        }
+    }
+
+    public static boolean preferCompleteOverPatch(int patchLength, int fullContentLength) {
+        return !decideWorthEffort(patchLength, fullContentLength);
+    }
+
+    public static boolean preferCompressedOverComplete(int compressedLen, int fullContentLength) {
+        if (compressedLen <= 0) { // just in case, meaningless otherwise
+            return false;
+        }
+        return decideWorthEffort(compressedLen, fullContentLength);
+    }
+
+    // true if length obtained with effort is worth it
+    private static boolean decideWorthEffort(int lengthWithExtraEffort, int lengthWithoutEffort) {
+        return lengthWithExtraEffort < (/* 3/4 of original */lengthWithoutEffort - (lengthWithoutEffort >>> 2));
+    }
 }
```
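The two `prefer*` helpers encode the same 75% rule the clone code previously spelled out inline: the extra effort (a patch, or a deflate pass) only pays off when it shaves more than a quarter off the plain length. A quick illustration with assumed sizes:

```java
import org.tmatesoft.hg.internal.RevlogStreamWriter;

class HeuristicSketch {
    void demo() {
        // For a 1000-byte revision the cutoff is 1000 - (1000 >>> 2) = 750 bytes.
        assert RevlogStreamWriter.preferCompleteOverPatch(800, 1000);       // patch saves too little: store full text
        assert !RevlogStreamWriter.preferCompleteOverPatch(500, 1000);      // patch is worth keeping
        assert RevlogStreamWriter.preferCompressedOverComplete(700, 1000);  // compression pays off
        assert !RevlogStreamWriter.preferCompressedOverComplete(0, 1000);   // guard against meaningless lengths
    }
}
```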
```diff
--- a/src/org/tmatesoft/hg/repo/HgBundle.java	Wed Jan 30 15:48:36 2013 +0100
+++ b/src/org/tmatesoft/hg/repo/HgBundle.java	Mon Feb 04 18:00:55 2013 +0100
@@ -60,7 +60,7 @@
     }
 
     private DataAccess getDataStream() throws IOException {
-        DataAccess da = accessProvider.create(bundleFile);
+        DataAccess da = accessProvider.createReader(bundleFile);
         byte[] signature = new byte[6];
         if (da.length() > 6) {
             da.readBytes(signature, 0, 6);
```
```diff
--- a/src/org/tmatesoft/hg/repo/Revlog.java	Wed Jan 30 15:48:36 2013 +0100
+++ b/src/org/tmatesoft/hg/repo/Revlog.java	Mon Feb 04 18:00:55 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2012 TMate Software Ltd
+ * Copyright (c) 2010-2013 TMate Software Ltd
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -84,11 +84,14 @@
     }
 
     /**
-     * @return index of last known revision, a.k.a. {@link HgRepository#TIP}
+     * @return index of last known revision, a.k.a. {@link HgRepository#TIP}, or {@link HgRepository#NO_REVISION} if revlog is empty
      * @throws HgRuntimeException subclass thereof to indicate issues with the library. <em>Runtime exception</em>
      */
     public final int getLastRevision() throws HgRuntimeException {
-        return content.revisionCount() - 1;
+        // although old code gives correct result when revlog is empty (NO_REVISION deliberately == -1),
+        // it's still better to be explicit
+        int revCount = content.revisionCount();
+        return revCount == 0 ? NO_REVISION : revCount - 1;
     }
 
     /**
```
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/org/tmatesoft/hg/test/TestCommit.java	Mon Feb 04 18:00:55 2013 +0100
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 TMate Software Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ *
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class TestCommit {
+
+    @Test
+    public void testCommitToEmpty() throws Exception {
+        Assert.fail();
+    }
+}
```