hg4j: comparison of src/org/tmatesoft/hg/internal/RevlogStreamWriter.java @ 534:243202f1bda5

commit:   refactor revision creation code from clone command to work separately, fit into existing library structure
author:   Artem Tikhomirov <tikhomirov.artem@gmail.com>
date:     Mon, 04 Feb 2013 18:00:55 +0100
parents:  e6f72c9829a6
children: dd4f6311af52
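
This change makes revision writing usable outside of HgCloneCommand. A minimal usage sketch, assuming a SessionContext and a RevlogStream are already at hand; only the two-argument constructor and addRevision() are taken from this change, the surrounding variables are hypothetical:

    RevlogStreamWriter writer = new RevlogStreamWriter(sessionContext, revlogStream);
    // append one revision: full content, changelog link revision, parent revision indexes
    writer.addRevision(revisionContent, linkRevision, parent1Index, parent2Index);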
533:e6f72c9829a6 (old) | 534:243202f1bda5 (new) |
15 * contact TMate Software at support@hg4j.com | 15 * contact TMate Software at support@hg4j.com |
16 */ | 16 */ |
17 package org.tmatesoft.hg.internal; | 17 package org.tmatesoft.hg.internal; |
18 | 18 |
19 import static org.tmatesoft.hg.internal.Internals.REVLOGV1_RECORD_SIZE; | 19 import static org.tmatesoft.hg.internal.Internals.REVLOGV1_RECORD_SIZE; |
| 20 import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION; |
20 | 21 |
21 import java.io.IOException; | 22 import java.io.IOException; |
22 import java.io.OutputStream; | |
23 import java.nio.ByteBuffer; | 23 import java.nio.ByteBuffer; |
24 | 24 |
25 import org.tmatesoft.hg.core.Nodeid; | 25 import org.tmatesoft.hg.core.Nodeid; |
| 26 import org.tmatesoft.hg.core.SessionContext; |
| 27 import org.tmatesoft.hg.repo.HgInvalidControlFileException; |
| 28 import org.tmatesoft.hg.repo.HgInvalidStateException; |
26 | 29 |
27 /** | 30 /** |
28 * | 31 * |
29 * TODO check if index is too big and split into index+data | 32 * TODO separate operation to check if index is too big and split into index+data |
30 * | 33 * |
31 * @author Artem Tikhomirov | 34 * @author Artem Tikhomirov |
32 * @author TMate Software Ltd. | 35 * @author TMate Software Ltd. |
33 */ | 36 */ |
34 public class RevlogStreamWriter { | 37 public class RevlogStreamWriter { |
35 | 38 |
36 | 39 |
| 40 /*XXX public because HgCloneCommand uses it*/ |
37 public static class HeaderWriter { | 41 public static class HeaderWriter implements DataSerializer.DataSource { |
38 private final ByteBuffer header; | 42 private final ByteBuffer header; |
39 private final boolean isInline; | 43 private final boolean isInline; |
40 private long offset; | 44 private long offset; |
41 private int length, compressedLength; | 45 private int length, compressedLength; |
42 private int baseRev, linkRev, p1, p2; | 46 private int baseRev, linkRev, p1, p2; |
43 private Nodeid nodeid; | 47 private byte[] nodeid; |
44 | 48 |
45 public HeaderWriter(boolean inline) { | 49 public HeaderWriter(boolean inline) { |
46 isInline = inline; | 50 isInline = inline; |
47 header = ByteBuffer.allocate(REVLOGV1_RECORD_SIZE); | 51 header = ByteBuffer.allocate(REVLOGV1_RECORD_SIZE); |
48 } | 52 } |
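
Aside: HeaderWriter fills one fixed-size index record per revision. The byte layout below is an assumption inferred from the RevlogNG format and the writes visible in serialize() further down; REVLOGV1_RECORD_SIZE is presumably 64:

    // assumed RevlogNG (revlog v1) index record, 64 bytes:
    //  0- 5  offset of the chunk in the data file (first record overlays version+flags)
    //  6- 7  flags
    //  8-11  compressed chunk length
    // 12-15  uncompressed length
    // 16-19  base revision of the delta chain
    // 20-23  link revision
    // 24-27  first parent revision index
    // 28-31  second parent revision index
    // 32-51  20-byte SHA-1 nodeid
    // 52-63  zero padding ("assume 12 bytes left are zeros" in serialize())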
72 p2 = parent2; | 76 p2 = parent2; |
73 return this; | 77 return this; |
74 } | 78 } |
75 | 79 |
76 public HeaderWriter linkRevision(int linkRevision) { | 80 public HeaderWriter linkRevision(int linkRevision) { |
77 this.linkRev = linkRevision; | 81 linkRev = linkRevision; |
78 return this; | 82 return this; |
79 } | 83 } |
80 | 84 |
81 public HeaderWriter nodeid(Nodeid n) { | 85 public HeaderWriter nodeid(Nodeid n) { |
82 this.nodeid = n; | 86 nodeid = n.toByteArray(); |
83 return this; | 87 return this; |
84 } | 88 } |
85 | 89 |
| 90 public HeaderWriter nodeid(byte[] nodeidBytes) { |
| 91 nodeid = nodeidBytes; |
| 92 return this; |
| 93 } |
| 94 |
86 public void write(OutputStream out) throws IOException { | 95 public void serialize(DataSerializer out) throws IOException { |
87 header.clear(); | 96 header.clear(); |
88 if (offset == 0) { | 97 if (offset == 0) { |
89 int version = 1 /* RevlogNG */; | 98 int version = 1 /* RevlogNG */; |
90 if (isInline) { | 99 if (isInline) { |
91 final int INLINEDATA = 1 << 16; // FIXME extract constant | 100 final int INLINEDATA = 1 << 16; // FIXME extract constant |
100 header.putInt(length); | 109 header.putInt(length); |
101 header.putInt(baseRev); | 110 header.putInt(baseRev); |
102 header.putInt(linkRev); | 111 header.putInt(linkRev); |
103 header.putInt(p1); | 112 header.putInt(p1); |
104 header.putInt(p2); | 113 header.putInt(p2); |
105 header.put(nodeid.toByteArray()); | 114 header.put(nodeid); |
106 // assume 12 bytes left are zeros | 115 // assume 12 bytes left are zeros |
107 out.write(header.array()); | 116 out.write(header.array(), 0, header.capacity()); |
108 | 117 |
109 // regardless whether it's inline or separate data, | 118 // regardless whether it's inline or separate data, |
110 // offset field always represent cumulative compressedLength | 119 // offset field always represent cumulative compressedLength |
111 // (while offset in the index file with inline==true differs by n*sizeof(header), where n is entry's position in the file) | 120 // (while offset in the index file with inline==true differs by n*sizeof(header), where n is entry's position in the file) |
112 offset += compressedLength; | 121 offset += compressedLength; |
113 } | 122 } |
| 123 |
| 124 public int serializeLength() { |
| 125 return header.capacity(); |
| 126 } |
114 } | 127 } |
115 | 128 |
116 | |
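
With this change HeaderWriter implements DataSerializer.DataSource, so a header can be pushed through the same sink as patch data (Patch.PatchDataSource) or full content (DataSerializer.ByteArrayDataSource). The interface shape below is inferred from its uses in this diff, not copied from the hg4j sources:

    // inferred from usage, not verbatim hg4j code:
    interface DataSource {
        void serialize(DataSerializer out) throws IOException; // push all bytes into the sink
        int serializeLength(); // exact number of bytes serialize() will write
    }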
117 private final DigestHelper dh = new DigestHelper(); | 129 private final DigestHelper dh = new DigestHelper(); |
| 130 private final RevlogCompressor revlogDataZip; |
| 131 |
| 132 |
| 133 public RevlogStreamWriter(SessionContext ctx, RevlogStream stream) { |
| 134 revlogDataZip = new RevlogCompressor(ctx); |
| 135 } |
| 136 |
| 137 private int lastEntryBase, lastEntryIndex; |
| 138 private byte[] lastEntryContent; |
| 139 private Nodeid lastEntryRevision; |
| 140 private IntMap<Nodeid> revisionCache = new IntMap<Nodeid>(32); |
118 | 141 |
119 public void addRevision(byte[] content, int linkRevision, int p1, int p2) { | 142 public void addRevision(byte[] content, int linkRevision, int p1, int p2) { |
120 Nodeid p1Rev = parent(p1); | 143 int revCount = revlogStream.revisionCount(); |
121 Nodeid p2Rev = parent(p2); | 144 lastEntryIndex = revCount == 0 ? NO_REVISION : revCount - 1; |
122 byte[] revisionBytes = dh.sha1(p1Rev, p2Rev, content).asBinary(); | 145 populateLastEntry(); |
123 //final Nodeid revision = Nodeid.fromBinary(revisionBytes, 0); | 146 // |
124 // cache last revision (its delta and baseRev) | |
125 PatchGenerator pg = new PatchGenerator(); | 147 PatchGenerator pg = new PatchGenerator(); |
126 byte[] prev = null; | 148 Patch patch = pg.delta(lastEntryContent, content); |
127 Patch patch = pg.delta(prev, content); | 149 int patchSerializedLength = patch.serializedLength(); |
128 byte[] patchContent; | 150 |
129 // rest as in HgCloneCommand | 151 final boolean writeComplete = preferCompleteOverPatch(patchSerializedLength, content.length); |
130 } | 152 DataSerializer.DataSource dataSource = writeComplete ? new DataSerializer.ByteArrayDataSource(content) : patch.new PatchDataSource(); |
131 | 153 revlogDataZip.reset(dataSource); |
132 private Nodeid parent(int parentIndex) { | 154 final int compressedLen; |
133 return null; | 155 final boolean useUncompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), dataSource.serializeLength()); |
| 156 if (useUncompressedData) { |
| 157 // compression wasn't too effective, |
| 158 compressedLen = dataSource.serializeLength() + 1 /*1 byte for 'u' - uncompressed prefix byte*/; |
| 159 } else { |
| 160 compressedLen = revlogDataZip.getCompressedLength(); |
| 161 } |
| 162 // |
| 163 Nodeid p1Rev = revision(p1); |
| 164 Nodeid p2Rev = revision(p2); |
| 165 byte[] revisionNodeidBytes = dh.sha1(p1Rev, p2Rev, content).asBinary(); |
| 166 // |
| 167 |
| 168 DataSerializer indexFile, dataFile, activeFile; |
| 169 indexFile = dataFile = activeFile = null; |
| 170 try { |
| 171 // |
| 172 activeFile = indexFile = revlogStream.getIndexStreamWriter(); |
| 173 final boolean isInlineData = revlogStream.isInlineData(); |
| 174 HeaderWriter revlogHeader = new HeaderWriter(isInlineData); |
| 175 revlogHeader.length(content.length, compressedLen); |
| 176 revlogHeader.nodeid(revisionNodeidBytes); |
| 177 revlogHeader.linkRevision(linkRevision); |
| 178 revlogHeader.parents(p1, p2); |
| 179 revlogHeader.baseRevision(writeComplete ? lastEntryIndex+1 : lastEntryBase); |
| 180 // |
| 181 revlogHeader.serialize(indexFile); |
| 182 |
| 183 if (isInlineData) { |
| 184 dataFile = indexFile; |
| 185 } else { |
| 186 dataFile = revlogStream.getDataStreamWriter(); |
| 187 } |
| 188 activeFile = dataFile; |
| 189 if (useUncompressedData) { |
| 190 dataFile.writeByte((byte) 'u'); |
| 191 dataSource.serialize(dataFile); |
| 192 } else { |
| 193 int actualCompressedLenWritten = revlogDataZip.writeCompressedData(dataFile); |
| 194 if (actualCompressedLenWritten != compressedLen) { |
| 195 throw new HgInvalidStateException(String.format("Expected %d bytes of compressed data, but actually wrote %d in %s", compressedLen, actualCompressedLenWritten, revlogStream.getDataFileName())); |
| 196 } |
| 197 } |
| 198 |
| 199 lastEntryContent = content; |
| 200 lastEntryBase = revlogHeader.baseRevision(); |
| 201 lastEntryIndex++; |
| 202 lastEntryRevision = Nodeid.fromBinary(revisionNodeidBytes, 0); |
| 203 revisionCache.put(lastEntryIndex, lastEntryRevision); |
| 204 } catch (IOException ex) { |
| 205 String m = String.format("Failed to write revision %d", lastEntryIndex+1, null); |
| 206 HgInvalidControlFileException t = new HgInvalidControlFileException(m, ex, null); |
| 207 if (activeFile == dataFile) { |
| 208 throw revlogStream.initWithDataFile(t); |
| 209 } else { |
| 210 throw revlogStream.initWithIndexFile(t); |
| 211 } |
| 212 } finally { |
| 213 indexFile.done(); |
| 214 if (dataFile != null && dataFile != indexFile) { |
| 215 dataFile.done(); |
| 216 } |
| 217 } |
| 218 } |
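
The write branch above follows Mercurial's chunk convention: a chunk is either a zlib stream or, when compression is not worth it, the literal content prefixed with a single 'u' byte (hence the +1 when computing compressedLen). A reader-side illustration of that convention, not hg4j API; inflate() is left abstract:

    static byte[] decodeChunk(byte[] chunk) {
        if (chunk.length == 0) {
            return chunk; // empty chunk, empty content
        }
        if (chunk[0] == 'u') {
            return java.util.Arrays.copyOfRange(chunk, 1, chunk.length); // literal content after the marker
        }
        return inflate(chunk); // zlib stream, first byte typically 'x' (0x78)
    }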
| 219 |
| 220 private RevlogStream revlogStream; |
| 221 private Nodeid revision(int revisionIndex) { |
| 222 if (revisionIndex == NO_REVISION) { |
| 223 return Nodeid.NULL; |
| 224 } |
| 225 Nodeid n = revisionCache.get(revisionIndex); |
| 226 if (n == null) { |
| 227 n = Nodeid.fromBinary(revlogStream.nodeid(revisionIndex), 0); |
| 228 revisionCache.put(revisionIndex, n); |
| 229 } |
| 230 return n; |
| 231 } |
| 232 |
| 233 private void populateLastEntry() throws HgInvalidControlFileException { |
| 234 if (lastEntryIndex != NO_REVISION && lastEntryContent == null) { |
| 235 assert lastEntryIndex >= 0; |
| 236 final IOException[] failure = new IOException[1]; |
| 237 revlogStream.iterate(lastEntryIndex, lastEntryIndex, true, new RevlogStream.Inspector() { |
| 238 |
| 239 public void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) { |
| 240 try { |
| 241 lastEntryBase = baseRevision; |
| 242 lastEntryRevision = Nodeid.fromBinary(nodeid, 0); |
| 243 lastEntryContent = data.byteArray(); |
| 244 } catch (IOException ex) { |
| 245 failure[0] = ex; |
| 246 } |
| 247 } |
| 248 }); |
| 249 if (failure[0] != null) { |
| 250 String m = String.format("Failed to get content of most recent revision %d", lastEntryIndex); |
| 251 throw revlogStream.initWithDataFile(new HgInvalidControlFileException(m, failure[0], null)); |
| 252 } |
| 253 } |
| 254 } |
| 255 |
| 256 public static boolean preferCompleteOverPatch(int patchLength, int fullContentLength) { |
| 257 return !decideWorthEffort(patchLength, fullContentLength); |
| 258 } |
| 259 |
| 260 public static boolean preferCompressedOverComplete(int compressedLen, int fullContentLength) { |
| 261 if (compressedLen <= 0) { // just in case, meaningless otherwise |
| 262 return false; |
| 263 } |
| 264 return decideWorthEffort(compressedLen, fullContentLength); |
| 265 } |
| 266 |
| 267 // true if length obtained with effort is worth it |
| 268 private static boolean decideWorthEffort(int lengthWithExtraEffort, int lengthWithoutEffort) { |
| 269 return lengthWithExtraEffort < (/* 3/4 of original */lengthWithoutEffort - (lengthWithoutEffort >>> 2)); |
134 } | 270 } |
135 } | 271 } |
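
The two prefer* helpers share a single 3/4 rule: the extra effort (delta generation or compression) pays off only if the result is smaller than three quarters of the full content. Worked numbers for fullContentLength = 1000, where the threshold is 1000 - (1000 >>> 2) = 750:

    // decideWorthEffort(600, 1000) == true  -> preferCompleteOverPatch(600, 1000) == false: keep the patch
    // decideWorthEffort(800, 1000) == false -> preferCompressedOverComplete(800, 1000) == false:
    //                                          compression saved too little, store content with the 'u' prefix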