comparison src/org/tmatesoft/hg/repo/HgDataFile.java @ 277:74e7493a042a

Favor delegation over generalization
author Artem Tikhomirov <tikhomirov.artem@gmail.com>
date Mon, 29 Aug 2011 23:14:59 +0200
parents 6355ecda1f08
children 981f9f50bb6c
comparison of 276:6355ecda1f08 with 277:74e7493a042a
@@ -32,10 +32,11 @@
 import org.tmatesoft.hg.core.HgDataStreamException;
 import org.tmatesoft.hg.core.HgException;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.internal.DataAccess;
 import org.tmatesoft.hg.internal.FilterByteChannel;
+import org.tmatesoft.hg.internal.FilterDataAccess;
 import org.tmatesoft.hg.internal.IntMap;
 import org.tmatesoft.hg.internal.RevlogStream;
 import org.tmatesoft.hg.util.ByteChannel;
 import org.tmatesoft.hg.util.CancelSupport;
 import org.tmatesoft.hg.util.CancelledException;
@@ -199,18 +200,18 @@
             throw new IllegalArgumentException();
         }
         if (metadata == null) {
             metadata = new Metadata();
         }
-        ContentPipe insp;
+        ErrorHandlingInspector insp;
         if (metadata.none(revision)) {
             insp = new ContentPipe(sink, 0);
         } else if (metadata.known(revision)) {
             insp = new ContentPipe(sink, metadata.dataOffset(revision));
         } else {
             // do not know if there's metadata
-            insp = new MetadataContentPipe(sink, metadata, getPath());
+            insp = new MetadataInspector(metadata, getPath(), new ContentPipe(sink, 0));
         }
         insp.checkCancelled();
         super.content.iterate(revision, revision, true, insp);
         try {
             insp.checkFailed(); // XXX is there real need to throw IOException from ContentPipe?
@@ -407,40 +408,59 @@
             }
             return null;
         }
     }
 
-    private static class MetadataContentPipe extends ContentPipe {
-
+    private static class MetadataInspector extends ErrorHandlingInspector implements RevlogStream.Inspector {
         private final Metadata metadata;
         private final Path fname; // need this only for error reporting
-
-        public MetadataContentPipe(ByteChannel sink, Metadata _metadata, Path file) {
-            super(sink, 0);
+        private final RevlogStream.Inspector delegate;
+
+        public MetadataInspector(Metadata _metadata, Path file, RevlogStream.Inspector chain) {
             metadata = _metadata;
             fname = file;
-        }
-
-        @Override
-        protected void prepare(int revisionNumber, DataAccess da) throws HgException, IOException {
-            final int daLength = da.length();
-            if (daLength < 4 || da.readByte() != 1 || da.readByte() != 10) {
-                metadata.recordNone(revisionNumber);
-                da.reset();
-                return;
-            }
+            delegate = chain;
+            setCancelSupport(CancelSupport.Factory.get(chain));
+        }
+
+        public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
+            try {
+                final int daLength = data.length();
+                if (daLength < 4 || data.readByte() != 1 || data.readByte() != 10) {
+                    metadata.recordNone(revisionNumber);
+                    data.reset();
+                } else {
+                    ArrayList<MetadataEntry> _metadata = new ArrayList<MetadataEntry>();
+                    int offset = parseMetadata(data, daLength, _metadata);
+                    metadata.add(revisionNumber, offset, _metadata);
+                    // da is in prepared state (i.e. we consumed all bytes up to metadata end).
+                    // However, it's not safe to assume delegate won't call da.reset() for some reason,
+                    // and we need to ensure predictable result.
+                    data.reset();
+                    data = new FilterDataAccess(data, offset, daLength - offset);
+                }
+                if (delegate != null) {
+                    delegate.next(revisionNumber, actualLen, baseRevision, linkRevision, parent1Revision, parent2Revision, nodeid, data);
+                }
+            } catch (IOException ex) {
+                recordFailure(ex);
+            } catch (HgDataStreamException ex) {
+                recordFailure(ex.setRevisionNumber(revisionNumber));
+            }
+        }
+
+        private int parseMetadata(DataAccess data, final int daLength, ArrayList<MetadataEntry> _metadata) throws IOException, HgDataStreamException {
             int lastEntryStart = 2;
             int lastColon = -1;
-            ArrayList<MetadataEntry> _metadata = new ArrayList<MetadataEntry>();
             // XXX in fact, need smth like ByteArrayBuilder, similar to StringBuilder,
             // which can't be used here because we can't convert bytes to chars as we read them
             // (there might be multi-byte encoding), and we need to collect all bytes before converting to string
             ByteArrayOutputStream bos = new ByteArrayOutputStream();
             String key = null, value = null;
             boolean byteOne = false;
             for (int i = 2; i < daLength; i++) {
-                byte b = da.readByte();
+                byte b = data.readByte();
                 if (b == '\n') {
                     if (byteOne) { // i.e. \n follows 1
                         lastEntryStart = i+1;
                         // XXX is it possible to have here incomplete key/value (i.e. if last pair didn't end with \n)
                         break;
@@ -454,16 +474,16 @@
                     key = value = null;
                     lastColon = -1;
                     lastEntryStart = i+1;
                     continue;
                 }
-                // byteOne has to be consumed up to this line, if not jet, consume it
+                // byteOne has to be consumed up to this line, if not yet, consume it
                 if (byteOne) {
                     // insert 1 we've read on previous step into the byte builder
                     bos.write(1);
+                    byteOne = false;
                     // fall-through to consume current byte
-                    byteOne = false;
                 }
                 if (b == (int) ':') {
                     assert value == null;
                     key = new String(bos.toByteArray());
                     bos.reset();
@@ -472,13 +492,12 @@
                     byteOne = true;
                 } else {
                     bos.write(b);
                 }
             }
-            metadata.add(revisionNumber, lastEntryStart, _metadata);
-            if (da.isEmpty() || !byteOne) {
-                throw new HgDataStreamException(fname, String.format("Metadata for revision %d is not closed properly", revisionNumber), null);
-            }
-            // da is in prepared state (i.e. we consumed all bytes up to metadata end).
+            if (data.isEmpty() || !byteOne) {
+                throw new HgDataStreamException(fname, "Metadata is not closed properly", null);
+            }
+            return lastEntryStart;
         }
     }
 }
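
The refactoring above leans on the filelog metadata framing: a revision whose data opens with the two bytes 0x01 0x0A ("\1\n") carries key:value lines up to a matching closing "\1\n", and only the bytes after that closing marker are the actual file content, which is why MetadataInspector re-windows the DataAccess with a FilterDataAccess before delegating. The snippet below is a standalone sketch of that delegation shape, not project code: the Inspector, MetadataStripper and ContentSink names and the simplified next() signature are invented for illustration, and copy/copyrev are just example metadata keys.

import java.nio.charset.StandardCharsets;

public class DelegationSketch {

    // Stand-in for RevlogStream.Inspector, radically simplified for the example.
    interface Inspector {
        void next(int revision, byte[] data);
    }

    // Downstream consumer that only ever sees plain file content (plays the role of ContentPipe).
    static class ContentSink implements Inspector {
        public void next(int revision, byte[] data) {
            System.out.println("rev " + revision + ": " + new String(data, StandardCharsets.ISO_8859_1));
        }
    }

    // Metadata-aware decorator (plays the role of MetadataInspector): it strips the
    // "\1\n ... \1\n" header, if present, and delegates the remaining bytes unchanged.
    static class MetadataStripper implements Inspector {
        private final Inspector delegate;

        MetadataStripper(Inspector delegate) {
            this.delegate = delegate;
        }

        public void next(int revision, byte[] data) {
            int offset = 0;
            if (data.length >= 4 && data[0] == 1 && data[1] == '\n') {
                // scan for the closing 0x01 0x0A pair; key:value lines sit in between
                for (int i = 2; i + 1 < data.length; i++) {
                    if (data[i] == 1 && data[i + 1] == '\n') {
                        offset = i + 2;
                        break;
                    }
                }
            }
            byte[] content = new byte[data.length - offset];
            System.arraycopy(data, offset, content, 0, content.length);
            delegate.next(revision, content); // delegation: downstream never handles metadata
        }
    }

    public static void main(String[] args) {
        byte[] payload = "\u0001\ncopy:some/old/path.txt\ncopyrev:0123456789abcdef0123456789abcdef01234567\n\u0001\nreal file content"
                .getBytes(StandardCharsets.ISO_8859_1);
        new MetadataStripper(new ContentSink()).next(0, payload); // prints: rev 0: real file content
    }
}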