comparison src/org/tmatesoft/hg/internal/RevlogStreamWriter.java @ 660:4fd317a2fecf

Pull: phase 1, get remote changes and add local revisions
author Artem Tikhomirov <tikhomirov.artem@gmail.com>
date Tue, 09 Jul 2013 21:46:45 +0200
parents 14dac192aa26
children 46b56864b483
658:d10399f80f4e 660:4fd317a2fecf
23 import java.nio.ByteBuffer; 23 import java.nio.ByteBuffer;
24 24
25 import org.tmatesoft.hg.core.HgIOException; 25 import org.tmatesoft.hg.core.HgIOException;
26 import org.tmatesoft.hg.core.Nodeid; 26 import org.tmatesoft.hg.core.Nodeid;
27 import org.tmatesoft.hg.core.SessionContext; 27 import org.tmatesoft.hg.core.SessionContext;
28 import org.tmatesoft.hg.internal.DataSerializer.ByteArrayDataSource;
28 import org.tmatesoft.hg.internal.DataSerializer.ByteArraySerializer; 29 import org.tmatesoft.hg.internal.DataSerializer.ByteArraySerializer;
29 import org.tmatesoft.hg.internal.DataSerializer.ByteArrayDataSource;
30 import org.tmatesoft.hg.internal.DataSerializer.DataSource; 30 import org.tmatesoft.hg.internal.DataSerializer.DataSource;
31 import org.tmatesoft.hg.repo.HgBundle.GroupElement;
31 import org.tmatesoft.hg.repo.HgInvalidControlFileException; 32 import org.tmatesoft.hg.repo.HgInvalidControlFileException;
32 import org.tmatesoft.hg.repo.HgInvalidRevisionException; 33 import org.tmatesoft.hg.repo.HgInvalidRevisionException;
33 import org.tmatesoft.hg.repo.HgInvalidStateException; 34 import org.tmatesoft.hg.repo.HgInvalidStateException;
35 import org.tmatesoft.hg.repo.HgRepository;
34 import org.tmatesoft.hg.repo.HgRuntimeException; 36 import org.tmatesoft.hg.repo.HgRuntimeException;
37 import org.tmatesoft.hg.util.Pair;
35 38
36 /** 39 /**
37 * 40 *
38 * TODO [post-1.1] separate operation to check if index is too big and split into index+data 41 * TODO [post-1.1] separate operation to check if index is too big and split into index+data
39 * 42 *
43 public class RevlogStreamWriter { 46 public class RevlogStreamWriter {
44 47
45 private final DigestHelper dh = new DigestHelper(); 48 private final DigestHelper dh = new DigestHelper();
46 private final RevlogCompressor revlogDataZip; 49 private final RevlogCompressor revlogDataZip;
47 private final Transaction transaction; 50 private final Transaction transaction;
48 private int lastEntryBase, lastEntryIndex; 51 private int lastEntryBase, lastEntryIndex, lastEntryActualLen;
49 private byte[] lastEntryContent; 52 // records a revision index and its full content
53 // the name might be misleading: the recorded index does not necessarily match lastEntryIndex
54 private Pair<Integer, byte[]> lastFullContent;
50 private Nodeid lastEntryRevision; 55 private Nodeid lastEntryRevision;
51 private IntMap<Nodeid> revisionCache = new IntMap<Nodeid>(32); 56 private IntMap<Nodeid> revisionCache = new IntMap<Nodeid>(32);
52 private RevlogStream revlogStream; 57 private RevlogStream revlogStream;
53 58
54 public RevlogStreamWriter(SessionContext.Source ctxSource, RevlogStream stream, Transaction tr) { 59 public RevlogStreamWriter(SessionContext.Source ctxSource, RevlogStream stream, Transaction tr) {
59 revlogDataZip = new RevlogCompressor(ctxSource.getSessionContext()); 64 revlogDataZip = new RevlogCompressor(ctxSource.getSessionContext());
60 revlogStream = stream; 65 revlogStream = stream;
61 transaction = tr; 66 transaction = tr;
62 } 67 }
63 68
69 public Pair<Integer,Nodeid> addPatchRevision(GroupElement ge, RevisionToIndexMap clogRevs, RevisionToIndexMap revlogRevs) throws HgIOException, HgRuntimeException {
70 populateLastEntryIndex();
71 //
72 final Nodeid nodeRev = ge.node();
73 final Nodeid csetRev = ge.cset();
74 int linkRev;
75 if (nodeRev.equals(csetRev)) {
76 linkRev = lastEntryIndex+1;
77 } else {
78 linkRev = clogRevs.revisionIndex(csetRev);
79 }
80 assert linkRev >= 0;
81 final Nodeid p1Rev = ge.firstParent();
82 int p1 = p1Rev.isNull() ? NO_REVISION : revlogRevs.revisionIndex(p1Rev);
83 final Nodeid p2Rev = ge.secondParent();
84 int p2 = p2Rev.isNull() ? NO_REVISION : revlogRevs.revisionIndex(p2Rev);
85 Patch p = new Patch();
86 final byte[] patchBytes;
87 try {
88 // XXX ge.rawData() could be used here to avoid the extra array wrap
89 patchBytes = ge.rawDataByteArray();
90 p.read(new ByteArrayDataAccess(patchBytes));
91 } catch (IOException ex) {
92 throw new HgIOException("Failed to read patch information", ex, null);
93 }
94 //
95 final Nodeid patchBase = ge.patchBase();
96 int patchBaseRev = patchBase.isNull() ? NO_REVISION : revlogRevs.revisionIndex(patchBase);
97 int baseRev = lastEntryIndex == NO_REVISION ? 0 : revlogStream.baseRevision(patchBaseRev);
98 int revLen;
99 DataSource ds;
100 byte[] complete = null;
101 if (patchBaseRev == lastEntryIndex && lastEntryIndex != NO_REVISION) {
102 // we may write the patch from the GroupElement as is
103 int patchBaseLen = dataLength(patchBaseRev);
104 revLen = patchBaseLen + p.patchSizeDelta();
105 ds = new ByteArrayDataSource(patchBytes);
106 } else {
107 // read baseRev, unless it's a pull into an empty repository
108 try {
109 if (lastEntryIndex == NO_REVISION) {
110 complete = p.apply(new ByteArrayDataAccess(new byte[0]), -1);
111 baseRev = 0; // already set above, but doesn't hurt
112 } else {
113 ReadContentInspector insp = new ReadContentInspector().read(revlogStream, baseRev);
114 complete = p.apply(new ByteArrayDataAccess(insp.content), -1);
115 baseRev = lastEntryIndex + 1;
116 }
117 ds = new ByteArrayDataSource(complete);
118 revLen = complete.length;
119 } catch (IOException ex) {
120 // unlikely to happen, as ByteArrayDataAccess doesn't really throw IOException
121 throw new HgIOException("Failed to reconstruct revision", ex, null);
122 }
123 }
124 doAdd(nodeRev, p1, p2, linkRev, baseRev, revLen, ds);
125 if (complete != null) {
126 lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, complete);
127 }
128 return new Pair<Integer, Nodeid>(lastEntryIndex, lastEntryRevision);
129 }
130
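
Editorial usage sketch for the new addPatchRevision entry point: during pull phase 1 each GroupElement of a changegroup carries a patch plus the nodeids of its parents, its changeset and its patch base, and the writer resolves those nodeids through the two RevisionToIndexMap arguments (changelog indexes and indexes of the revlog being written). The helper below is hypothetical and only illustrates the call pattern; the writer, the group elements and both maps are assumed to be prepared by the bundle-reading code.

import java.util.List;

import org.tmatesoft.hg.core.HgIOException;
import org.tmatesoft.hg.core.Nodeid;
import org.tmatesoft.hg.internal.RevlogStreamWriter;
import org.tmatesoft.hg.internal.RevlogStreamWriter.RevisionToIndexMap;
import org.tmatesoft.hg.repo.HgBundle.GroupElement;
import org.tmatesoft.hg.repo.HgRuntimeException;
import org.tmatesoft.hg.util.Pair;

final class ChangegroupWriterSketch { // hypothetical, not part of the library

	// Adds every element of one changegroup to the revlog behind the writer and
	// returns the nodeid of the last revision written (the new tip of that revlog).
	static Nodeid addGroup(RevlogStreamWriter writer, List<GroupElement> group,
			RevisionToIndexMap clogRevs, RevisionToIndexMap revlogRevs)
			throws HgIOException, HgRuntimeException {
		Nodeid last = Nodeid.NULL;
		for (GroupElement ge : group) {
			// each call reports the local revision index and the nodeid just recorded
			Pair<Integer, Nodeid> added = writer.addPatchRevision(ge, clogRevs, revlogRevs);
			last = added.second();
		}
		return last;
	}
}
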
64 /** 131 /**
65 * @return nodeid of added revision 132 * @return revision index and nodeid of the added revision
66 * @throws HgRuntimeException 133 * @throws HgRuntimeException
67 */ 134 */
68 public Nodeid addRevision(DataSource content, int linkRevision, int p1, int p2) throws HgIOException, HgRuntimeException { 135 public Pair<Integer,Nodeid> addRevision(DataSource content, int linkRevision, int p1, int p2) throws HgIOException, HgRuntimeException {
69 lastEntryRevision = Nodeid.NULL; 136 populateLastEntryIndex();
70 int revCount = revlogStream.revisionCount(); 137 populateLastEntryContent();
71 lastEntryIndex = revCount == 0 ? NO_REVISION : revCount - 1;
72 populateLastEntry();
73 // 138 //
74 byte[] contentByteArray = toByteArray(content); 139 byte[] contentByteArray = toByteArray(content);
75 Patch patch = GeneratePatchInspector.delta(lastEntryContent, contentByteArray); 140 Patch patch = GeneratePatchInspector.delta(lastFullContent.second(), contentByteArray);
76 int patchSerializedLength = patch.serializedLength(); 141 int patchSerializedLength = patch.serializedLength();
77 142
78 final boolean writeComplete = preferCompleteOverPatch(patchSerializedLength, contentByteArray.length); 143 final boolean writeComplete = preferCompleteOverPatch(patchSerializedLength, contentByteArray.length);
79 DataSerializer.DataSource dataSource = writeComplete ? new ByteArrayDataSource(contentByteArray) : patch.new PatchDataSource(); 144 DataSerializer.DataSource dataSource = writeComplete ? new ByteArrayDataSource(contentByteArray) : patch.new PatchDataSource();
145 //
146 Nodeid p1Rev = revision(p1);
147 Nodeid p2Rev = revision(p2);
148 Nodeid newRev = Nodeid.fromBinary(dh.sha1(p1Rev, p2Rev, contentByteArray).asBinary(), 0);
149 doAdd(newRev, p1, p2, linkRevision, writeComplete ? lastEntryIndex+1 : lastEntryBase, contentByteArray.length, dataSource);
150 lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, contentByteArray);
151 return new Pair<Integer, Nodeid>(lastEntryIndex, lastEntryRevision);
152 }
153
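
For comparison, a sketch of the full-content path through addRevision, which now also returns the (index, nodeid) pair. ByteArrayDataSource and NO_REVISION are the same class and constant used above; the helper name and the assumption that the caller already knows the changelog link revision and the parent indexes are editorial.

import org.tmatesoft.hg.core.HgIOException;
import org.tmatesoft.hg.core.Nodeid;
import org.tmatesoft.hg.internal.DataSerializer.ByteArrayDataSource;
import org.tmatesoft.hg.internal.RevlogStreamWriter;
import org.tmatesoft.hg.repo.HgRuntimeException;
import org.tmatesoft.hg.util.Pair;

final class FullRevisionSketch { // hypothetical, not part of the library

	// Writes one complete revision; the writer itself decides whether to store it
	// as a delta against the previous entry or as full (possibly compressed) data.
	static Pair<Integer, Nodeid> addFullRevision(RevlogStreamWriter writer, byte[] content,
			int linkRevision, int p1, int p2) throws HgIOException, HgRuntimeException {
		return writer.addRevision(new ByteArrayDataSource(content), linkRevision, p1, p2);
	}

	// A root revision (e.g. the first entry of a brand new revlog) has no parents:
	// addFullRevision(writer, content, linkRev, HgRepository.NO_REVISION, HgRepository.NO_REVISION);
}
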
154 private Nodeid doAdd(Nodeid rev, int p1, int p2, int linkRevision, int baseRevision, int revLen, DataSerializer.DataSource dataSource) throws HgIOException, HgRuntimeException {
155 assert linkRevision >= 0;
156 assert baseRevision >= 0;
157 assert p1 == NO_REVISION || p1 >= 0;
158 assert p2 == NO_REVISION || p2 >= 0;
159 assert !rev.isNull();
160 assert revLen >= 0;
80 revlogDataZip.reset(dataSource); 161 revlogDataZip.reset(dataSource);
81 final int compressedLen; 162 final int compressedLen;
82 final boolean useCompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), dataSource.serializeLength()); 163 final boolean useCompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), dataSource.serializeLength());
83 if (useCompressedData) { 164 if (useCompressedData) {
84 compressedLen = revlogDataZip.getCompressedLength(); 165 compressedLen = revlogDataZip.getCompressedLength();
85 } else { 166 } else {
86 // compression wasn't effective enough, fall back to uncompressed data 167 // compression wasn't effective enough, fall back to uncompressed data
87 compressedLen = dataSource.serializeLength() + 1 /*1 byte for 'u' - uncompressed prefix byte*/; 168 compressedLen = dataSource.serializeLength() + 1 /*1 byte for 'u' - uncompressed prefix byte*/;
88 } 169 }
89 // 170 //
90 Nodeid p1Rev = revision(p1);
91 Nodeid p2Rev = revision(p2);
92 byte[] revisionNodeidBytes = dh.sha1(p1Rev, p2Rev, contentByteArray).asBinary();
93 //
94
95 DataSerializer indexFile, dataFile; 171 DataSerializer indexFile, dataFile;
96 indexFile = dataFile = null; 172 indexFile = dataFile = null;
97 try { 173 try {
98 // 174 //
99 indexFile = revlogStream.getIndexStreamWriter(transaction); 175 indexFile = revlogStream.getIndexStreamWriter(transaction);
100 final boolean isInlineData = revlogStream.isInlineData(); 176 final boolean isInlineData = revlogStream.isInlineData();
101 HeaderWriter revlogHeader = new HeaderWriter(isInlineData); 177 HeaderWriter revlogHeader = new HeaderWriter(isInlineData);
102 revlogHeader.length(contentByteArray.length, compressedLen); 178 revlogHeader.length(revLen, compressedLen);
103 revlogHeader.nodeid(revisionNodeidBytes); 179 revlogHeader.nodeid(rev.toByteArray());
104 revlogHeader.linkRevision(linkRevision); 180 revlogHeader.linkRevision(linkRevision);
105 revlogHeader.parents(p1, p2); 181 revlogHeader.parents(p1, p2);
106 revlogHeader.baseRevision(writeComplete ? lastEntryIndex+1 : lastEntryBase); 182 revlogHeader.baseRevision(baseRevision);
107 long lastEntryOffset = revlogStream.newEntryOffset(); 183 long lastEntryOffset = revlogStream.newEntryOffset();
108 revlogHeader.offset(lastEntryOffset); 184 revlogHeader.offset(lastEntryOffset);
109 // 185 //
110 revlogHeader.serialize(indexFile); 186 revlogHeader.serialize(indexFile);
111 187
122 } else { 198 } else {
123 dataFile.writeByte((byte) 'u'); 199 dataFile.writeByte((byte) 'u');
124 dataSource.serialize(dataFile); 200 dataSource.serialize(dataFile);
125 } 201 }
126 202
127
128 lastEntryContent = contentByteArray;
129 lastEntryBase = revlogHeader.baseRevision(); 203 lastEntryBase = revlogHeader.baseRevision();
130 lastEntryIndex++; 204 lastEntryIndex++;
131 lastEntryRevision = Nodeid.fromBinary(revisionNodeidBytes, 0); 205 lastEntryActualLen = revLen;
206 lastEntryRevision = rev;
132 revisionCache.put(lastEntryIndex, lastEntryRevision); 207 revisionCache.put(lastEntryIndex, lastEntryRevision);
133 208
134 revlogStream.revisionAdded(lastEntryIndex, lastEntryRevision, lastEntryBase, lastEntryOffset); 209 revlogStream.revisionAdded(lastEntryIndex, lastEntryRevision, lastEntryBase, lastEntryOffset);
135 } finally { 210 } finally {
136 indexFile.done(); 211 indexFile.done();
157 revisionCache.put(revisionIndex, n); 232 revisionCache.put(revisionIndex, n);
158 } 233 }
159 return n; 234 return n;
160 } 235 }
161 236
162 private void populateLastEntry() throws HgRuntimeException { 237 private int dataLength(int revisionIndex) throws HgInvalidControlFileException, HgInvalidRevisionException {
163 if (lastEntryContent != null) { 238 assert revisionIndex >= 0;
239 if (revisionIndex == lastEntryIndex) {
240 return lastEntryActualLen;
241 }
242 if (lastFullContent != null && lastFullContent.first() == revisionIndex) {
243 return lastFullContent.second().length;
244 }
245 return revlogStream.dataLength(revisionIndex);
246 }
247
248 private void populateLastEntryIndex() throws HgRuntimeException {
249 int revCount = revlogStream.revisionCount();
250 lastEntryIndex = revCount == 0 ? NO_REVISION : revCount - 1;
251 }
252
253 private void populateLastEntryContent() throws HgRuntimeException {
254 if (lastFullContent != null && lastFullContent.first() == lastEntryIndex) {
255 // the last entry is already cached
164 return; 256 return;
165 } 257 }
258 lastEntryRevision = Nodeid.NULL;
166 if (lastEntryIndex != NO_REVISION) { 259 if (lastEntryIndex != NO_REVISION) {
167 assert lastEntryIndex >= 0; 260 ReadContentInspector insp = new ReadContentInspector().read(revlogStream, lastEntryIndex);
168 final IOException[] failure = new IOException[1]; 261 lastEntryBase = insp.baseRev;
169 revlogStream.iterate(lastEntryIndex, lastEntryIndex, true, new RevlogStream.Inspector() { 262 lastEntryRevision = insp.rev;
170 263 lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, insp.content);
171 public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
172 try {
173 lastEntryBase = baseRevision;
174 lastEntryRevision = Nodeid.fromBinary(nodeid, 0);
175 lastEntryContent = data.byteArray();
176 } catch (IOException ex) {
177 failure[0] = ex;
178 }
179 }
180 });
181 if (failure[0] != null) {
182 String m = String.format("Failed to get content of most recent revision %d", lastEntryIndex);
183 throw revlogStream.initWithDataFile(new HgInvalidControlFileException(m, failure[0], null));
184 }
185 } else { 264 } else {
186 lastEntryContent = new byte[0]; 265 lastFullContent = new Pair<Integer, byte[]>(lastEntryIndex, new byte[0]);
187 } 266 }
267 assert lastFullContent.first() == lastEntryIndex;
268 assert lastFullContent.second() != null;
188 } 269 }
189 270
190 public static boolean preferCompleteOverPatch(int patchLength, int fullContentLength) { 271 public static boolean preferCompleteOverPatch(int patchLength, int fullContentLength) {
191 return !decideWorthEffort(patchLength, fullContentLength); 272 return !decideWorthEffort(patchLength, fullContentLength);
192 } 273 }
288 369
289 public int serializeLength() { 370 public int serializeLength() {
290 return header.capacity(); 371 return header.capacity();
291 } 372 }
292 } 373 }
374
375 // XXX part of the HgRevisionMap contract; needs public counterparts (along with IndexToRevisionMap)
376 public interface RevisionToIndexMap {
377
378 /**
379 * @return {@link HgRepository#NO_REVISION} if the revision is unknown
380 */
381 int revisionIndex(Nodeid revision);
382 }
383
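
A minimal map-backed implementation sketch of the contract above; the library is expected to supply its own implementations (the XXX note points at HgRevisionMap), so the class below is purely illustrative.

import java.util.HashMap;
import java.util.Map;

import org.tmatesoft.hg.core.Nodeid;
import org.tmatesoft.hg.internal.RevlogStreamWriter;
import org.tmatesoft.hg.repo.HgRepository;

// Hypothetical: collects nodeid -> local index mappings as revisions get written.
final class SimpleRevisionToIndexMap implements RevlogStreamWriter.RevisionToIndexMap {

	private final Map<Nodeid, Integer> known = new HashMap<Nodeid, Integer>();

	void record(Nodeid revision, int revisionIndex) {
		known.put(revision, revisionIndex);
	}

	public int revisionIndex(Nodeid revision) {
		Integer v = known.get(revision);
		return v == null ? HgRepository.NO_REVISION : v;
	}
}
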
384 private static class ReadContentInspector implements RevlogStream.Inspector {
385 public int baseRev;
386 public Nodeid rev;
387 public byte[] content;
388 private IOException failure;
389
390 public ReadContentInspector read(RevlogStream rs, int revIndex) throws HgInvalidControlFileException {
391 assert revIndex >= 0;
392 rs.iterate(revIndex, revIndex, true, this);
393 if (failure != null) {
394 String m = String.format("Failed to get content of revision %d", revIndex);
395 throw rs.initWithDataFile(new HgInvalidControlFileException(m, failure, null));
396 }
397 return this;
398 }
399
400 public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
401 try {
402 baseRev = baseRevision;
403 rev = Nodeid.fromBinary(nodeid, 0);
404 content = data.byteArray();
405 } catch (IOException ex) {
406 failure = ex;
407 }
408 }
409 }
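
ReadContentInspector adapts the callback-style RevlogStream.iterate() into a one-shot read, which is what both addPatchRevision and populateLastEntryContent rely on to reconstruct a full revision. A hypothetical private helper, written as it could sit inside RevlogStreamWriter itself (the inspector is a private nested class, so it is not visible outside):

	// reconstructs the complete content of one revision of the revlog being written
	private byte[] contentOf(int revisionIndex) throws HgInvalidControlFileException {
		ReadContentInspector insp = new ReadContentInspector().read(revlogStream, revisionIndex);
		// insp.rev and insp.baseRev carry the nodeid and the delta-chain base, if needed
		return insp.content;
	}
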
293 } 410 }