Mercurial > jhg
comparison src/org/tmatesoft/hg/internal/RevlogStream.java @ 539:9edfd5a223b8
Commit: handle empty repository case
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
|---|---|
| date | Wed, 13 Feb 2013 18:44:58 +0100 |
| parents | dd4f6311af52 |
| children | 6ca3d0c5b4bc |
comparison
equal
deleted
inserted
replaced
| 538:dd4f6311af52 | 539:9edfd5a223b8 |
|---|---|
| 222 daIndex.done(); | 222 daIndex.done(); |
| 223 } | 223 } |
| 224 return BAD_REVISION; | 224 return BAD_REVISION; |
| 225 } | 225 } |
| 226 | 226 |
| 227 /** | |
| 228 * @return value suitable for the corresponding field in the new revision's header, not physical offset in the file | |
| 229 * (which is different in case of inline revlogs) | |
| 230 */ | |
| 227 public long newEntryOffset() { | 231 public long newEntryOffset() { |
| 228 if (revisionCount() == 0) { | 232 if (revisionCount() == 0) { |
| 229 return 0; | 233 return 0; |
| 230 } | 234 } |
| 231 DataAccess daIndex = getIndexStream(); | 235 DataAccess daIndex = getIndexStream(); |
| 322 } finally { | 326 } finally { |
| 323 r.finish(); | 327 r.finish(); |
| 324 } | 328 } |
| 325 } | 329 } |
| 326 | 330 |
| 331 void revisionAdded(int revisionIndex, Nodeid revision, int baseRevisionIndex, long revisionOffset) throws HgInvalidControlFileException { | |
| 332 if (!outlineCached()) { | |
| 333 return; | |
| 334 } | |
| 335 if (baseRevisions.length != revisionIndex) { | |
| 336 throw new HgInvalidControlFileException(String.format("New entry's index shall be %d, not %d", baseRevisions.length, revisionIndex), null, indexFile); | |
| 337 } | |
| 338 if (baseRevisionIndex < 0 || baseRevisionIndex > baseRevisions.length) { | |
| 339 // baseRevisionIndex MAY be == to baseRevisions.length, it's when new revision is based on itself | |
| 340 throw new HgInvalidControlFileException(String.format("Base revision index %d doesn't fit [0..%d] range", baseRevisionIndex, baseRevisions.length), null, indexFile); | |
| 341 } | |
| 342 assert revision != null; | |
| 343 assert !revision.isNull(); | |
| 344 int[] baseRevisionsCopy = new int[baseRevisions.length + 1]; | |
| 345 System.arraycopy(baseRevisions, 0, baseRevisionsCopy, 0, baseRevisions.length); | |
| 346 baseRevisionsCopy[baseRevisions.length] = baseRevisionIndex; | |
| 347 baseRevisions = baseRevisionsCopy; | |
| 348 if (inline && indexRecordOffset != null) { | |
| 349 assert indexRecordOffset.length == revisionIndex; | |
| 350 int[] indexRecordOffsetCopy = new int[indexRecordOffset.length + 1]; | |
| 351 indexRecordOffsetCopy[indexRecordOffset.length] = offsetFieldToInlineFileOffset(revisionOffset, revisionIndex); | |
| 352 indexRecordOffset = indexRecordOffsetCopy; | |
| 353 } | |
| 354 } | |
| 355 | |
| 327 private int getBaseRevision(int revision) { | 356 private int getBaseRevision(int revision) { |
| 328 return baseRevisions[revision]; | 357 return baseRevisions[revision]; |
| 329 } | 358 } |
| 330 | 359 |
| 331 /** | 360 /** |
| 345 if (revisionIndex < 0 || revisionIndex > last) { | 374 if (revisionIndex < 0 || revisionIndex > last) { |
| 346 throw new HgInvalidRevisionException(revisionIndex).setRevisionIndex(revisionIndex, 0, last); | 375 throw new HgInvalidRevisionException(revisionIndex).setRevisionIndex(revisionIndex, 0, last); |
| 347 } | 376 } |
| 348 return revisionIndex; | 377 return revisionIndex; |
| 349 } | 378 } |
| 379 | |
| 380 private boolean outlineCached() { | |
| 381 return baseRevisions != null && baseRevisions.length > 0; | |
| 382 } | |
| 383 | |
| 384 // translate 6-byte offset field value to physical file offset for inline revlogs | |
| 385 // DOESN'T MAKE SENSE if revlog with data is separate | |
| 386 private static int offsetFieldToInlineFileOffset(long offset, int recordIndex) throws HgInvalidStateException { | |
| 387 int o = Internals.ltoi(offset); | |
| 388 if (o != offset) { | |
| 389 // just in case, can't happen, ever, unless HG (or some other bad tool) produces index file | |
| 390 // with inlined data of size greater than 2 Gb. | |
| 391 throw new HgInvalidStateException("Data too big, offset didn't fit to sizeof(int)"); | |
| 392 } | |
| 393 return o + REVLOGV1_RECORD_SIZE * recordIndex; | |
| 394 } | |
| 350 | 395 |
| 351 private void initOutline() throws HgInvalidControlFileException { | 396 private void initOutline() throws HgInvalidControlFileException { |
| 352 if (baseRevisions != null && baseRevisions.length > 0) { | 397 if (outlineCached()) { |
| 353 return; | 398 return; |
| 354 } | 399 } |
| 355 DataAccess da = getIndexStream(); | 400 DataAccess da = getIndexStream(); |
| 356 try { | 401 try { |
| 357 if (da.isEmpty()) { | 402 if (da.isEmpty()) { |
| 358 // do not fail with exception if stream is empty, it's likely intentional | 403 // do not fail with exception if stream is empty, it's likely intentional |
| 359 baseRevisions = new int[0]; | 404 baseRevisions = new int[0]; |
| 405 // empty revlog, likely to be populated, indicate we start with a single file | |
| 406 inline = true; | |
| 360 return; | 407 return; |
| 361 } | 408 } |
| 362 int versionField = da.readInt(); | 409 int versionField = da.readInt(); |
| 363 da.readInt(); // just to skip next 4 bytes of offset + flags | 410 da.readInt(); // just to skip next 4 bytes of offset + flags |
| 364 final int INLINEDATA = 1 << 16; | 411 final int INLINEDATA = 1 << 16; |
| 383 // int parent1Revision = di.readInt(); | 430 // int parent1Revision = di.readInt(); |
| 384 // int parent2Revision = di.readInt(); | 431 // int parent2Revision = di.readInt(); |
| 385 // byte[] nodeid = new byte[32]; | 432 // byte[] nodeid = new byte[32]; |
| 386 resBases.add(baseRevision); | 433 resBases.add(baseRevision); |
| 387 if (inline) { | 434 if (inline) { |
| 388 int o = Internals.ltoi(offset); | 435 int o = offsetFieldToInlineFileOffset(offset, resOffsets.size()); |
| 389 if (o != offset) { | 436 resOffsets.add(o); |
| 390 // just in case, can't happen, ever, unless HG (or some other bad tool) produces index file | |
| 391 // with inlined data of size greater than 2 Gb. | |
| 392 throw new HgInvalidStateException("Data too big, offset didn't fit to sizeof(int)"); | |
| 393 } | |
| 394 resOffsets.add(o + REVLOGV1_RECORD_SIZE * resOffsets.size()); | |
| 395 da.skip(3*4 + 32 + compressedLen); // Check: 44 (skip) + 20 (read) = 64 (total RevlogNG record size) | 437 da.skip(3*4 + 32 + compressedLen); // Check: 44 (skip) + 20 (read) = 64 (total RevlogNG record size) |
| 396 } else { | 438 } else { |
| 397 da.skip(3*4 + 32); | 439 da.skip(3*4 + 32); |
| 398 } | 440 } |
| 399 if (da.isEmpty()) { | 441 if (da.isEmpty()) { |
| 609 // XXX boolean retVal to indicate whether to continue? | 651 // XXX boolean retVal to indicate whether to continue? |
| 610 // TODO specify nodeid and data length, and reuse policy (i.e. if revlog stream doesn't reuse nodeid[] for each call) | 652 // TODO specify nodeid and data length, and reuse policy (i.e. if revlog stream doesn't reuse nodeid[] for each call) |
| 611 // implementers shall not invoke DataAccess.done(), it's accomplished by #iterate at appropriate moment | 653 // implementers shall not invoke DataAccess.done(), it's accomplished by #iterate at appropriate moment |
| 612 void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[/*20*/] nodeid, DataAccess data); | 654 void next(int revisionIndex, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[/*20*/] nodeid, DataAccess data); |
| 613 } | 655 } |
| 656 | |
| 614 } | 657 } |
