comparison src/org/tmatesoft/hg/core/HgCloneCommand.java @ 534:243202f1bda5
Commit: refactor revision creation code from the clone command to work separately and fit into the existing library structure
author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
---|---|
date | Mon, 04 Feb 2013 18:00:55 +0100 |
parents | 688c1ab113bb |
children | 9edfd5a223b8 |
comparison
533:e6f72c9829a6 | 534:243202f1bda5 |
---|---|
16 */ | 16 */ |
17 package org.tmatesoft.hg.core; | 17 package org.tmatesoft.hg.core; |
18 | 18 |
19 import static org.tmatesoft.hg.core.Nodeid.NULL; | 19 import static org.tmatesoft.hg.core.Nodeid.NULL; |
20 import static org.tmatesoft.hg.internal.RequiresFile.*; | 20 import static org.tmatesoft.hg.internal.RequiresFile.*; |
| 21 import static org.tmatesoft.hg.internal.RevlogStreamWriter.preferCompleteOverPatch; |
| 22 import static org.tmatesoft.hg.internal.RevlogStreamWriter.preferCompressedOverComplete; |
21 | 23 |
22 import java.io.File; | 24 import java.io.File; |
23 import java.io.FileOutputStream; | 25 import java.io.FileOutputStream; |
24 import java.io.IOException; | 26 import java.io.IOException; |
25 import java.util.ArrayList; | 27 import java.util.ArrayList; |
27 import java.util.LinkedList; | 29 import java.util.LinkedList; |
28 import java.util.TreeMap; | 30 import java.util.TreeMap; |
29 | 31 |
30 import org.tmatesoft.hg.internal.ByteArrayDataAccess; | 32 import org.tmatesoft.hg.internal.ByteArrayDataAccess; |
31 import org.tmatesoft.hg.internal.DataAccess; | 33 import org.tmatesoft.hg.internal.DataAccess; |
| 34 import org.tmatesoft.hg.internal.DataAccessProvider; |
| 35 import org.tmatesoft.hg.internal.DataSerializer; |
32 import org.tmatesoft.hg.internal.DigestHelper; | 36 import org.tmatesoft.hg.internal.DigestHelper; |
33 import org.tmatesoft.hg.internal.Lifecycle; | 37 import org.tmatesoft.hg.internal.Lifecycle; |
34 import org.tmatesoft.hg.internal.RepoInitializer; | 38 import org.tmatesoft.hg.internal.RepoInitializer; |
35 import org.tmatesoft.hg.internal.RevlogCompressor; | 39 import org.tmatesoft.hg.internal.RevlogCompressor; |
36 import org.tmatesoft.hg.internal.RevlogStreamWriter; | 40 import org.tmatesoft.hg.internal.RevlogStreamWriter; |
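
The two static imports added in this hunk (preferCompleteOverPatch, preferCompressedOverComplete) are the other half of the refactoring: the inline threshold arithmetic in element() below now delegates to named helpers on RevlogStreamWriter. The following is a hypothetical reconstruction of what they compute, derived only from the inline expressions this commit replaces; the actual implementations in org.tmatesoft.hg.internal.RevlogStreamWriter may differ.

```java
// Reconstructed sketch, not the library source. Both checks ask the question
// the replaced inline code asked: is the "smaller" representation still at
// least ~75% of the full content size?

// True when the patch is >= 3/4 the size of the complete content, i.e. storing
// the complete revision is preferable to storing (and later applying) the delta.
static boolean preferCompleteOverPatch(int patchLength, int fullContentLength) {
    return patchLength >= fullContentLength - (fullContentLength >>> 2);
}

// True when compression saved less than ~25%. Despite the name, the diff feeds
// the result into useUncompressedData, matching the old >= 3/4 check, so "true"
// means the caller writes the plain data with the 'u' prefix byte instead.
static boolean preferCompressedOverComplete(int compressedLength, int fullContentLength) {
    return compressedLength >= fullContentLength - (fullContentLength >>> 2);
}
```
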
137 private static class WriteDownMate implements HgBundle.Inspector, Lifecycle { | 141 private static class WriteDownMate implements HgBundle.Inspector, Lifecycle { |
138 private final File hgDir; | 142 private final File hgDir; |
139 private final PathRewrite storagePathHelper; | 143 private final PathRewrite storagePathHelper; |
140 private final ProgressSupport progressSupport; | 144 private final ProgressSupport progressSupport; |
141 private final CancelSupport cancelSupport; | 145 private final CancelSupport cancelSupport; |
| 146 private final SessionContext ctx; |
142 private FileOutputStream indexFile; | 147 private FileOutputStream indexFile; |
143 private String filename; // human-readable name of the file being written, for log/exception purposes | 148 private String filename; // human-readable name of the file being written, for log/exception purposes |
144 | 149 |
145 private final TreeMap<Nodeid, Integer> changelogIndexes = new TreeMap<Nodeid, Integer>(); | 150 private final TreeMap<Nodeid, Integer> changelogIndexes = new TreeMap<Nodeid, Integer>(); |
146 private boolean collectChangelogIndexes = false; | 151 private boolean collectChangelogIndexes = false; |
153 private final LinkedList<String> fncacheFiles = new LinkedList<String>(); | 158 private final LinkedList<String> fncacheFiles = new LinkedList<String>(); |
154 private RepoInitializer repoInit; | 159 private RepoInitializer repoInit; |
155 private Lifecycle.Callback lifecycleCallback; | 160 private Lifecycle.Callback lifecycleCallback; |
156 private CancelledException cancelException; | 161 private CancelledException cancelException; |
157 | 162 |
158 public WriteDownMate(SessionContext ctx, File destDir, ProgressSupport progress, CancelSupport cancel) { | 163 private RevlogStreamWriter.HeaderWriter revlogHeader = new RevlogStreamWriter.HeaderWriter(true); |
| 164 private RevlogCompressor revlogDataZip; |
| 165 |
| 166 public WriteDownMate(SessionContext sessionCtx, File destDir, ProgressSupport progress, CancelSupport cancel) { |
| 167 ctx = sessionCtx; |
159 hgDir = new File(destDir, ".hg"); | 168 hgDir = new File(destDir, ".hg"); |
160 repoInit = new RepoInitializer(); | 169 repoInit = new RepoInitializer(); |
161 repoInit.setRequires(STORE | FNCACHE | DOTENCODE); | 170 repoInit.setRequires(STORE | FNCACHE | DOTENCODE); |
162 storagePathHelper = repoInit.buildDataFilesHelper(ctx); | 171 storagePathHelper = repoInit.buildDataFilesHelper(sessionCtx); |
163 progressSupport = progress; | 172 progressSupport = progress; |
164 cancelSupport = cancel; | 173 cancelSupport = cancel; |
| 174 revlogDataZip = new RevlogCompressor(sessionCtx); |
165 } | 175 } |
166 | 176 |
167 public void initEmptyRepository() throws IOException { | 177 public void initEmptyRepository() throws IOException { |
168 repoInit.initEmptyRepository(hgDir); | 178 repoInit.initEmptyRepository(hgDir); |
169 } | 179 } |
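
Two things change in this hunk besides the constructor signature: WriteDownMate now keeps the SessionContext it is given, and the HeaderWriter/RevlogCompressor pair moves from field declarations buried next to element() (removed in the hunk below, old lines 281-282) up to the class header, with the compressor now constructed against the session context. Here is a condensed sketch of the resulting shape, using the names from the diff (an outline, not the full class):

```java
// Outline of WriteDownMate after this commit: one context, one header writer
// and one compressor per file being written, allocated once in the constructor.
private static class WriteDownMate implements HgBundle.Inspector, Lifecycle {
    private final SessionContext ctx;
    // the boolean argument presumably selects the inline revlog layout (.i carries data)
    private RevlogStreamWriter.HeaderWriter revlogHeader = new RevlogStreamWriter.HeaderWriter(true);
    private RevlogCompressor revlogDataZip;

    public WriteDownMate(SessionContext sessionCtx, File destDir,
            ProgressSupport progress, CancelSupport cancel) {
        ctx = sessionCtx;
        // ... storage path helper, progress/cancel wiring as in the diff ...
        revlogDataZip = new RevlogCompressor(sessionCtx);
    }
}
```
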
276 } | 286 } |
277 String m = String.format("Can't find index of %s for file %s", p.shortNotation(), filename); | 287 String m = String.format("Can't find index of %s for file %s", p.shortNotation(), filename); |
278 throw new HgInvalidControlFileException(m, null, new File(hgDir, filename)).setRevision(p); | 288 throw new HgInvalidControlFileException(m, null, new File(hgDir, filename)).setRevision(p); |
279 } | 289 } |
280 | 290 |
281 private RevlogStreamWriter.HeaderWriter revlogHeader = new RevlogStreamWriter.HeaderWriter(true); | |
282 private RevlogCompressor revlogDataZip = new RevlogCompressor(); | |
283 | |
284 public boolean element(GroupElement ge) { | 291 public boolean element(GroupElement ge) { |
285 try { | 292 try { |
286 assert indexFile != null; | 293 assert indexFile != null; |
287 boolean writeComplete = false; | 294 boolean writeComplete = false; |
288 Nodeid deltaBase = ge.patchBase(); | 295 Nodeid deltaBase = ge.patchBase(); |
330 revlogHeader.parents(knownRevision(p1), knownRevision(p2)); | 337 revlogHeader.parents(knownRevision(p1), knownRevision(p2)); |
331 // | 338 // |
332 byte[] patchContent = ge.rawDataByteArray(); | 339 byte[] patchContent = ge.rawDataByteArray(); |
333 // no reason to keep patch if it's close (here, >75%) in size to the complete contents, | 340 // no reason to keep patch if it's close (here, >75%) in size to the complete contents, |
334 // save patching effort in this case | 341 // save patching effort in this case |
335 writeComplete = writeComplete || patchContent.length >= (/* 3/4 of actual */content.length - (content.length >>> 2)); | 342 writeComplete = writeComplete || preferCompleteOverPatch(patchContent.length, content.length); |
336 | 343 |
337 if (writeComplete) { | 344 if (writeComplete) { |
338 revlogHeader.baseRevision(revisionSequence.size()); | 345 revlogHeader.baseRevision(revisionSequence.size()); |
339 } | 346 } |
340 assert revlogHeader.baseRevision() >= 0; | 347 assert revlogHeader.baseRevision() >= 0; |
341 | 348 |
342 final byte[] sourceData = writeComplete ? content : patchContent; | 349 final byte[] sourceData = writeComplete ? content : patchContent; |
343 revlogDataZip.reset(sourceData); | 350 revlogDataZip.reset(new DataSerializer.ByteArrayDataSource(sourceData)); |
344 final int compressedLen; | 351 final int compressedLen; |
345 final boolean useUncompressedData = revlogDataZip.getCompressedLengthEstimate() >= (sourceData.length - (sourceData.length >>> 2)); | 352 final boolean useUncompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), sourceData.length); |
346 if (useUncompressedData) { | 353 if (useUncompressedData) { |
347 // compression wasn't too effective, | 354 // compression wasn't too effective, |
348 compressedLen = sourceData.length + 1 /*1 byte for 'u' - uncompressed prefix byte*/; | 355 compressedLen = sourceData.length + 1 /*1 byte for 'u' - uncompressed prefix byte*/; |
349 } else { | 356 } else { |
350 compressedLen= revlogDataZip.getCompressedLengthEstimate(); | 357 compressedLen= revlogDataZip.getCompressedLength(); |
351 } | 358 } |
352 | 359 |
353 revlogHeader.length(content.length, compressedLen); | 360 revlogHeader.length(content.length, compressedLen); |
354 | 361 |
355 revlogHeader.write(indexFile); | 362 // XXX may be wise not to create DataSerializer for each revision, but for a file |
| 363 DataAccessProvider.StreamDataSerializer sds = new DataAccessProvider.StreamDataSerializer(ctx.getLog(), indexFile) { |
| 364 @Override |
| 365 public void done() { |
| 366 // override parent behavior not to close stream in use |
| 367 } |
| 368 }; |
| 369 revlogHeader.serialize(sds); |
356 | 370 |
357 if (useUncompressedData) { | 371 if (useUncompressedData) { |
358 indexFile.write((byte) 'u'); | 372 indexFile.write((byte) 'u'); |
359 indexFile.write(sourceData); | 373 indexFile.write(sourceData); |
360 } else { | 374 } else { |
361 int actualCompressedLenWritten = revlogDataZip.writeCompressedData(indexFile); | 375 int actualCompressedLenWritten = revlogDataZip.writeCompressedData(sds); |
362 if (actualCompressedLenWritten != compressedLen) { | 376 if (actualCompressedLenWritten != compressedLen) { |
363 throw new HgInvalidStateException(String.format("Expected %d bytes of compressed data, but actually wrote %d in %s", compressedLen, actualCompressedLenWritten, filename)); | 377 throw new HgInvalidStateException(String.format("Expected %d bytes of compressed data, but actually wrote %d in %s", compressedLen, actualCompressedLenWritten, filename)); |
364 } | 378 } |
365 } | 379 } |
| 380 sds.done(); |
366 // | 381 // |
367 revisionSequence.add(node); | 382 revisionSequence.add(node); |
368 prevRevContent.done(); | 383 prevRevContent.done(); |
369 prevRevContent = new ByteArrayDataAccess(content); | 384 prevRevContent = new ByteArrayDataAccess(content); |
370 } catch (IOException ex) { | 385 } catch (IOException ex) { |
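
The last hunk replaces direct writes of the revlog header to the FileOutputStream with serialization through a DataAccessProvider.StreamDataSerializer. The anonymous subclass overrides done() as a no-op because the serializer's default completion behavior would close the stream, while the index file must stay open for the remaining revisions of the bundle; the file's lifecycle is managed elsewhere. Below is a self-contained sketch of that pattern with hypothetical stand-in names (the real StreamDataSerializer API may differ):

```java
import java.io.IOException;
import java.io.OutputStream;

// Stand-in for DataAccessProvider.StreamDataSerializer: a sink whose done()
// normally closes the underlying stream once serialization is complete.
class StreamSinkSketch {
    protected final OutputStream out;

    StreamSinkSketch(OutputStream out) {
        this.out = out;
    }

    public void write(byte[] data, int offset, int length) throws IOException {
        out.write(data, offset, length);
    }

    public void done() throws IOException {
        out.close(); // default: completion closes the stream
    }
}

class RevisionWriterSketch {
    // Write one revision's header and payload without closing indexFile,
    // mirroring the override in the diff above.
    void writeOneRevision(OutputStream indexFile, byte[] header, byte[] payload) throws IOException {
        StreamSinkSketch sds = new StreamSinkSketch(indexFile) {
            @Override
            public void done() {
                // no-op: indexFile stays open for the next revision in the bundle
            }
        };
        sds.write(header, 0, header.length);
        sds.write(payload, 0, payload.length);
        sds.done(); // safe per revision; the file is closed by the caller's lifecycle
    }
}
```
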