comparison src/org/tmatesoft/hg/core/HgCloneCommand.java @ 673:545b1d4cc11d
Refactor HgBundle.GroupElement (clear experimental mark), resolve a few technical debt issues
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> | 
|---|---|
| date | Fri, 12 Jul 2013 20:14:24 +0200 | 
| parents | ae2d439fbed3 | 
| children | |
| 672:d2552e6a5af6 (before) | 673:545b1d4cc11d (after) | 
|---|---|
| 29 import java.util.TreeMap; | 29 import java.util.TreeMap; | 
| 30 | 30 | 
| 31 import org.tmatesoft.hg.internal.ByteArrayDataAccess; | 31 import org.tmatesoft.hg.internal.ByteArrayDataAccess; | 
| 32 import org.tmatesoft.hg.internal.DataAccess; | 32 import org.tmatesoft.hg.internal.DataAccess; | 
| 33 import org.tmatesoft.hg.internal.DataSerializer; | 33 import org.tmatesoft.hg.internal.DataSerializer; | 
| | 34 import org.tmatesoft.hg.internal.DataSerializer.ByteArrayDataSource; | 
| 34 import org.tmatesoft.hg.internal.DigestHelper; | 35 import org.tmatesoft.hg.internal.DigestHelper; | 
| 35 import org.tmatesoft.hg.internal.FNCacheFile; | 36 import org.tmatesoft.hg.internal.FNCacheFile; | 
| 36 import org.tmatesoft.hg.internal.Internals; | 37 import org.tmatesoft.hg.internal.Internals; | 
| 37 import org.tmatesoft.hg.internal.Lifecycle; | 38 import org.tmatesoft.hg.internal.Lifecycle; | 
| | 39 import org.tmatesoft.hg.internal.Patch; | 
| 38 import org.tmatesoft.hg.internal.RepoInitializer; | 40 import org.tmatesoft.hg.internal.RepoInitializer; | 
| 39 import org.tmatesoft.hg.internal.RevlogCompressor; | 41 import org.tmatesoft.hg.internal.RevlogCompressor; | 
| 40 import org.tmatesoft.hg.internal.RevlogStreamWriter; | 42 import org.tmatesoft.hg.internal.RevlogStreamWriter; | 
| 41 import org.tmatesoft.hg.internal.Transaction; | 43 import org.tmatesoft.hg.internal.Transaction; | 
| 42 import org.tmatesoft.hg.repo.HgBundle; | 44 import org.tmatesoft.hg.repo.HgBundle; | 
| 43 import org.tmatesoft.hg.repo.HgBundle.GroupElement; | 45 import org.tmatesoft.hg.repo.HgBundle.GroupElement; | 
| | 46 import org.tmatesoft.hg.repo.HgInternals; | 
| 44 import org.tmatesoft.hg.repo.HgInvalidControlFileException; | 47 import org.tmatesoft.hg.repo.HgInvalidControlFileException; | 
| 45 import org.tmatesoft.hg.repo.HgInvalidStateException; | 48 import org.tmatesoft.hg.repo.HgInvalidStateException; | 
| 46 import org.tmatesoft.hg.repo.HgLookup; | 49 import org.tmatesoft.hg.repo.HgLookup; | 
| 47 import org.tmatesoft.hg.repo.HgRemoteRepository; | 50 import org.tmatesoft.hg.repo.HgRemoteRepository; | 
| 48 import org.tmatesoft.hg.repo.HgRepository; | 51 import org.tmatesoft.hg.repo.HgRepository; | 
| 314 String m = String.format("Revision %s import failed: delta base %s is not the last node we've handled (and know content for) %s", ge.node(), deltaBase, prevRevision); | 317 String m = String.format("Revision %s import failed: delta base %s is not the last node we've handled (and know content for) %s", ge.node(), deltaBase, prevRevision); | 
| 315 throw new HgInvalidStateException(m); | 318 throw new HgInvalidStateException(m); | 
| 316 } | 319 } | 
| 317 } | 320 } | 
| 318 // | 321 // | 
| 319 byte[] content = ge.apply(prevRevContent.byteArray()); | 322 Patch patch = HgInternals.patchFromData(ge); | 
| | 323 byte[] content = patch.apply(prevRevContent, -1); | 
| 320 Nodeid p1 = ge.firstParent(); | 324 Nodeid p1 = ge.firstParent(); | 
| 321 Nodeid p2 = ge.secondParent(); | 325 Nodeid p2 = ge.secondParent(); | 
| 322 byte[] calculated = dh.sha1(p1, p2, content).asBinary(); | 326 byte[] calculated = dh.sha1(p1, p2, content).asBinary(); | 
| 323 final Nodeid node = ge.node(); | 327 final Nodeid node = ge.node(); | 
| 324 if (!node.equalsTo(calculated)) { | 328 if (!node.equalsTo(calculated)) { | 
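
Worth noting in this hunk: the old code expanded the group element to full content up front (`ge.apply(prevRevContent.byteArray())`), while the new code keeps the delta as a `Patch` (via `HgInternals.patchFromData(ge)`) and applies it to the previous revision, so the same patch object can be reused below when deciding how to store the revision. The `dh.sha1(p1, p2, content)` check that follows implements Mercurial's nodeid rule: SHA-1 over the two 20-byte parent nodeids, smaller one first, followed by the content. A minimal standalone sketch of that check, assuming binary nodeids (only `MessageDigest` is real here; the class and helper names are illustrative, not the library's):

```java
import java.security.MessageDigest;
import java.util.Arrays;

// Illustrative sketch: a Mercurial nodeid is SHA-1(min(p1,p2) + max(p1,p2) + content),
// with the 20-byte parent nodeids fed smaller-first.
class NodeidCheck {
	static boolean nodeidMatches(byte[] p1, byte[] p2, byte[] content, byte[] declared) throws Exception {
		MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
		if (compare(p1, p2) <= 0) {
			sha1.update(p1);
			sha1.update(p2);
		} else {
			sha1.update(p2);
			sha1.update(p1);
		}
		sha1.update(content);
		// a mismatch means the revision was corrupted or mis-assembled
		return Arrays.equals(sha1.digest(), declared);
	}

	private static int compare(byte[] a, byte[] b) {
		for (int i = 0; i < a.length && i < b.length; i++) {
			int d = (a[i] & 0xFF) - (b[i] & 0xFF);
			if (d != 0) return d;
		}
		return a.length - b.length;
	}
}
```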
| 338 revlogHeader.linkRevision(csRev.intValue()); | 342 revlogHeader.linkRevision(csRev.intValue()); | 
| 339 } | 343 } | 
| 340 // | 344 // | 
| 341 revlogHeader.parents(knownRevision(p1), knownRevision(p2)); | 345 revlogHeader.parents(knownRevision(p1), knownRevision(p2)); | 
| 342 // | 346 // | 
| 343 byte[] patchContent = ge.rawDataByteArray(); | 347 int patchSerializedLength = patch.serializedLength(); | 
| 344 // no reason to keep patch if it's close (here, >75%) in size to the complete contents, | 348 // no reason to keep patch if it's close (here, >75%) in size to the complete contents, | 
| 345 // save patching effort in this case | 349 // save patching effort in this case | 
| 346 writeComplete = writeComplete || preferCompleteOverPatch(patchContent.length, content.length); | 350 writeComplete = writeComplete || preferCompleteOverPatch(patchSerializedLength, content.length); | 
| 347 | 351 | 
| 348 if (writeComplete) { | 352 if (writeComplete) { | 
| 349 revlogHeader.baseRevision(revisionSequence.size()); | 353 revlogHeader.baseRevision(revisionSequence.size()); | 
| 350 } | 354 } | 
| 351 assert revlogHeader.baseRevision() >= 0; | 355 assert revlogHeader.baseRevision() >= 0; | 
| 352 | 356 | 
| 353 final byte[] sourceData = writeComplete ? content : patchContent; | 357 DataSerializer.DataSource dataSource = writeComplete ? new ByteArrayDataSource(content) : patch.new PatchDataSource(); | 
| 354 revlogDataZip.reset(new DataSerializer.ByteArrayDataSource(sourceData)); | 358 revlogDataZip.reset(dataSource); | 
| 355 final int compressedLen; | 359 final int compressedLen; | 
| 356 final boolean useUncompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), sourceData.length); | 360 final boolean useUncompressedData = preferCompressedOverComplete(revlogDataZip.getCompressedLength(), dataSource.serializeLength()); | 
| 357 if (useUncompressedData) { | 361 if (useUncompressedData) { | 
| 358 // compression wasn't too effective, | 362 // compression wasn't too effective, | 
| 359 compressedLen = sourceData.length + 1 /*1 byte for 'u' - uncompressed prefix byte*/; | 363 compressedLen = dataSource.serializeLength() + 1 /*1 byte for 'u' - uncompressed prefix byte*/; | 
| 360 } else { | 364 } else { | 
| 361 compressedLen= revlogDataZip.getCompressedLength(); | 365 compressedLen= revlogDataZip.getCompressedLength(); | 
| 362 } | 366 } | 
| 363 | 367 | 
| 364 revlogHeader.length(content.length, compressedLen); | 368 revlogHeader.length(content.length, compressedLen); | 
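
Two size heuristics drive this hunk. First, `preferCompleteOverPatch` drops the delta when it is close in size to the full text (the inline comment says >75%): storing the patch would save little space yet still cost patching effort on every read. Second, `preferCompressedOverComplete` feeds `useUncompressedData`, so a `true` result means the raw bytes win over zlib output (the raw form carries a one-byte 'u' prefix, visible in the `compressedLen` arithmetic above). The bodies below are hedged guesses at those comparisons, reconstructed from the diff's own comments rather than taken from the library:

```java
// Hedged sketch of the two thresholds referenced in the diff; the actual
// cut-offs inside HgCloneCommand may differ in detail.
class StorageHeuristics {
	// true when the delta is too close in size to the full content to be worth
	// keeping (here, larger than 75% of it, per the diff's inline comment)
	static boolean preferCompleteOverPatch(int patchLength, int fullContentLength) {
		return patchLength > (fullContentLength * 3) / 4;
	}

	// in the diff this result is assigned to useUncompressedData, so true means
	// "store raw": compression must beat the raw bytes plus the 1-byte 'u'
	// marker to pay off
	static boolean preferCompressedOverComplete(int compressedLength, int uncompressedLength) {
		return compressedLength >= uncompressedLength + 1;
	}
}
```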
| 375 } | 379 } | 
| 376 }; | 380 }; | 
| 377 revlogHeader.serialize(sds); | 381 revlogHeader.serialize(sds); | 
| 378 | 382 | 
| 379 if (useUncompressedData) { | 383 if (useUncompressedData) { | 
| 380 indexFile.write((byte) 'u'); | 384 sds.writeByte((byte) 'u'); | 
| 381 indexFile.write(sourceData); | 385 dataSource.serialize(sds); | 
| 382 } else { | 386 } else { | 
| 383 int actualCompressedLenWritten = revlogDataZip.writeCompressedData(sds); | 387 int actualCompressedLenWritten = revlogDataZip.writeCompressedData(sds); | 
| 384 if (actualCompressedLenWritten != compressedLen) { | 388 if (actualCompressedLenWritten != compressedLen) { | 
| 385 throw new HgInvalidStateException(String.format("Expected %d bytes of compressed data, but actually wrote %d in %s", compressedLen, actualCompressedLenWritten, filename)); | 389 throw new HgInvalidStateException(String.format("Expected %d bytes of compressed data, but actually wrote %d in %s", compressedLen, actualCompressedLenWritten, filename)); | 
| 386 } | 390 } | 
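
The final hunk routes both branches through the shared `DataSerializer` (`sds`) instead of writing to `indexFile` directly, which is what lets the patch-backed `dataSource` serialize itself without first materializing a byte array. The `'u'` byte written in the uncompressed branch is the standard revlog marker: a zlib stream begins with `'x'` (0x78), so a chunk stored verbatim is flagged with `'u'` to keep readers from misinterpreting it. A small self-contained sketch of that convention (`encodeChunk` and the class name are mine, not the library's):

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;

// Sketch of the revlog chunk convention the diff relies on: zlib output starts
// with 'x' (0x78), so a chunk stored as-is gets a leading 'u' marker instead.
class ChunkEncoding {
	static byte[] encodeChunk(byte[] data, boolean storeUncompressed) throws IOException {
		ByteArrayOutputStream out = new ByteArrayOutputStream();
		if (storeUncompressed) {
			out.write('u');        // marker byte: data follows verbatim
			out.write(data);
		} else {
			DeflaterOutputStream z = new DeflaterOutputStream(out, new Deflater());
			z.write(data);         // zlib-wrapped; first emitted byte is 0x78 ('x')
			z.finish();
		}
		return out.toByteArray();
	}
}
```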
