changeset 157:d5268ca7715b

Merged branch wrap-data-access into default for resource-friendly data access. Updated API to promote that friendliness to clients (channels, not byte[]). More exceptions
author Artem Tikhomirov <tikhomirov.artem@gmail.com>
date Wed, 09 Mar 2011 05:22:17 +0100
parents 643ddec3be36 (current diff) a6f39e595b2b (diff)
children b413b16d10a5
files build.xml cmdline/org/tmatesoft/hg/console/Cat.java cmdline/org/tmatesoft/hg/console/Main.java cmdline/org/tmatesoft/hg/console/Status.java src/org/tmatesoft/hg/core/HgBadStateException.java src/org/tmatesoft/hg/core/HgCatCommand.java src/org/tmatesoft/hg/core/HgLogCommand.java src/org/tmatesoft/hg/internal/ByteArrayDataAccess.java src/org/tmatesoft/hg/internal/DataAccess.java src/org/tmatesoft/hg/internal/DataAccessProvider.java src/org/tmatesoft/hg/internal/FilterByteChannel.java src/org/tmatesoft/hg/internal/FilterDataAccess.java src/org/tmatesoft/hg/internal/InflaterDataAccess.java src/org/tmatesoft/hg/internal/RevlogDump.java src/org/tmatesoft/hg/internal/RevlogStream.java src/org/tmatesoft/hg/repo/HgBundle.java src/org/tmatesoft/hg/repo/HgChangelog.java src/org/tmatesoft/hg/repo/HgDataFile.java src/org/tmatesoft/hg/repo/HgManifest.java src/org/tmatesoft/hg/repo/HgStatusCollector.java src/org/tmatesoft/hg/repo/HgWorkingCopyStatusCollector.java src/org/tmatesoft/hg/repo/Revlog.java src/org/tmatesoft/hg/util/ProgressSupport.java test/org/tmatesoft/hg/test/StatusOutputParser.java test/org/tmatesoft/hg/test/TestByteChannel.java
diffstat 24 files changed, 963 insertions(+), 247 deletions(-) [+]
line wrap: on
line diff
--- a/build.xml	Wed Mar 02 01:06:09 2011 +0100
+++ b/build.xml	Wed Mar 09 05:22:17 2011 +0100
@@ -76,6 +76,7 @@
 			<test name="org.tmatesoft.hg.test.TestManifest" />
 			<test name="org.tmatesoft.hg.test.TestStatus" />
 			<test name="org.tmatesoft.hg.test.TestStorePath" />
+			<test name="org.tmatesoft.hg.test.TestByteChannel" />
 		</junit>
 	</target>
 
--- a/cmdline/org/tmatesoft/hg/console/Cat.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/cmdline/org/tmatesoft/hg/console/Cat.java	Wed Mar 09 05:22:17 2011 +0100
@@ -46,7 +46,7 @@
 			System.out.println(fname);
 			HgDataFile fn = hgRepo.getFileNode(fname);
 			if (fn.exists()) {
-				fn.content(rev, out, true);
+				fn.contentWithFilters(rev, out);
 				System.out.println();
 			} else {
 				System.out.printf("%s not found!\n", fname);
--- a/cmdline/org/tmatesoft/hg/console/Main.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/cmdline/org/tmatesoft/hg/console/Main.java	Wed Mar 09 05:22:17 2011 +0100
@@ -26,6 +26,7 @@
 import org.tmatesoft.hg.core.HgLogCommand.FileRevision;
 import org.tmatesoft.hg.core.HgManifestCommand;
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.ByteArrayChannel;
 import org.tmatesoft.hg.internal.DigestHelper;
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgInternals;
@@ -59,13 +60,24 @@
 
 	public static void main(String[] args) throws Exception {
 		Main m = new Main(args);
-		m.dumpIgnored();
-		m.dumpDirstate();
-		m.testStatusInternals();
-		m.catCompleteHistory();
-		m.dumpCompleteManifestLow();
-		m.dumpCompleteManifestHigh();
-		m.bunchOfTests();
+		m.inflaterLengthException();
+//		m.dumpIgnored();
+//		m.dumpDirstate();
+//		m.testStatusInternals();
+//		m.catCompleteHistory();
+//		m.dumpCompleteManifestLow();
+//		m.dumpCompleteManifestHigh();
+//		m.bunchOfTests();
+	}
+	
+	private void inflaterLengthException() throws Exception {
+		HgDataFile f1 = hgRepo.getFileNode("src/com/tmate/hgkit/console/Bundle.java");
+		HgDataFile f2 = hgRepo.getFileNode("test-repos.jar");
+		System.out.println(f1.isCopy());
+		System.out.println(f2.isCopy());
+		ByteArrayChannel bac = new ByteArrayChannel();
+		f1.content(0, bac);
+		System.out.println(bac.toArray().length);
 	}
 	
 	private void dumpIgnored() {
@@ -82,7 +94,7 @@
 	}
 
 	
-	private void catCompleteHistory() {
+	private void catCompleteHistory() throws Exception {
 		DigestHelper dh = new DigestHelper();
 		for (String fname : cmdLineOpts.getList("")) {
 			System.out.println(fname);
@@ -91,8 +103,10 @@
 				int total = fn.getRevisionCount();
 				System.out.printf("Total revisions: %d\n", total);
 				for (int i = 0; i < total; i++) {
-					byte[] content = fn.content(i);
+					ByteArrayChannel sink = new ByteArrayChannel();
+					fn.content(i, sink);
 					System.out.println("==========>");
+					byte[] content = sink.toArray();
 					System.out.println(new String(content));
 					int[] parentRevisions = new int[2];
 					byte[] parent1 = new byte[20];
--- a/cmdline/org/tmatesoft/hg/console/Status.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/cmdline/org/tmatesoft/hg/console/Status.java	Wed Mar 09 05:22:17 2011 +0100
@@ -48,7 +48,7 @@
 		}
 		//
 		HgStatusCommand cmd = hgRepo.createStatusCommand();
-		if (cmdLineOpts.getBoolean("-A", "-all")) {
+		if (cmdLineOpts.getBoolean("-A", "--all")) {
 			cmd.all();
 		} else {
 			// default: mardu
--- a/src/org/tmatesoft/hg/core/HgBadStateException.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/core/HgBadStateException.java	Wed Mar 09 05:22:17 2011 +0100
@@ -25,4 +25,16 @@
 @SuppressWarnings("serial")
 public class HgBadStateException extends RuntimeException {
 
+	// FIXME quick-n-dirty fix, don't allow exceptions without a cause
+	public HgBadStateException() {
+		super("Internal error");
+	}
+
+	public HgBadStateException(String message) {
+		super(message);
+	}
+
+	public HgBadStateException(Throwable cause) {
+		super(cause);
+	}
 }
--- a/src/org/tmatesoft/hg/core/HgCatCommand.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/core/HgCatCommand.java	Wed Mar 09 05:22:17 2011 +0100
@@ -118,6 +118,6 @@
 		} else {
 			revToExtract = localRevision;
 		}
-		dataFile.content(revToExtract, sink, true);
+		dataFile.contentWithFilters(revToExtract, sink);
 	}
 }
--- a/src/org/tmatesoft/hg/core/HgLogCommand.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/core/HgLogCommand.java	Wed Mar 09 05:22:17 2011 +0100
@@ -18,6 +18,7 @@
 
 import static org.tmatesoft.hg.repo.HgRepository.TIP;
 
+import java.io.IOException;
 import java.util.Calendar;
 import java.util.Collections;
 import java.util.ConcurrentModificationException;
@@ -31,6 +32,8 @@
 import org.tmatesoft.hg.repo.HgDataFile;
 import org.tmatesoft.hg.repo.HgRepository;
 import org.tmatesoft.hg.repo.HgStatusCollector;
+import org.tmatesoft.hg.util.ByteChannel;
+import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.Path;
 import org.tmatesoft.hg.util.PathPool;
 import org.tmatesoft.hg.util.PathRewrite;
@@ -164,7 +167,7 @@
 	/**
 	 * Similar to {@link #execute(org.tmatesoft.hg.repo.RawChangeset.Inspector)}, collects and return result as a list.
 	 */
-	public List<HgChangeset> execute() {
+	public List<HgChangeset> execute() throws HgException {
 		CollectHandler collector = new CollectHandler();
 		execute(collector);
 		return collector.getChanges();
@@ -176,7 +179,7 @@
 	 * @throws IllegalArgumentException when inspector argument is null
 	 * @throws ConcurrentModificationException if this log command instance is already running
 	 */
-	public void execute(Handler handler) {
+	public void execute(Handler handler) throws HgException {
 		if (handler == null) {
 			throw new IllegalArgumentException();
 		}
@@ -309,9 +312,10 @@
 		public Nodeid getRevision() {
 			return revision;
 		}
-		public byte[] getContent() {
-			// XXX Content wrapper, to allow formats other than byte[], e.g. Stream, DataAccess, etc?
-			return repo.getFileNode(path).content(revision);
+		public void putContentTo(ByteChannel sink) throws HgDataStreamException, IOException, CancelledException {
+			HgDataFile fn = repo.getFileNode(path);
+			int localRevision = fn.getLocalRevision(revision);
+			fn.contentWithFilters(localRevision, sink);
 		}
 	}
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/ByteArrayDataAccess.java	Wed Mar 09 05:22:17 2011 +0100
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2011 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.IOException;
+
+
+/**
+ *   
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class ByteArrayDataAccess extends DataAccess {
+
+	private final byte[] data;
+	private final int offset;
+	private final int length;
+	private int pos;
+
+	public ByteArrayDataAccess(byte[] data) {
+		this(data, 0, data.length);
+	}
+
+	public ByteArrayDataAccess(byte[] data, int offset, int length) {
+		this.data = data;
+		this.offset = offset;
+		this.length = length;
+		pos = 0;
+	}
+	
+	@Override
+	public byte readByte() throws IOException {
+		if (pos >= length) {
+			throw new IOException();
+		}
+		return data[offset + pos++];
+	}
+	@Override
+	public void readBytes(byte[] buf, int off, int len) throws IOException {
+		if (len > (this.length - pos)) {
+			throw new IOException();
+		}
+		System.arraycopy(data, pos, buf, off, len);
+		pos += len;
+	}
+
+	@Override
+	public ByteArrayDataAccess reset() {
+		pos = 0;
+		return this;
+	}
+	@Override
+	public long length() {
+		return length;
+	}
+	@Override
+	public void seek(long offset) {
+		pos = (int) offset;
+	}
+	@Override
+	public void skip(int bytes) throws IOException {
+		seek(pos + bytes);
+	}
+	@Override
+	public boolean isEmpty() {
+		return pos >= length;
+	}
+	
+	//
+	
+	// when byte[] needed from DA, we may save a few cycles and some memory by giving this (otherwise unsafe) access to underlying data
+	@Override
+	public byte[] byteArray() {
+		return data;
+	}
+}
--- a/src/org/tmatesoft/hg/internal/DataAccess.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/internal/DataAccess.java	Wed Mar 09 05:22:17 2011 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010 TMate Software Ltd
+ * Copyright (c) 2010-2011 TMate Software Ltd
  *  
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -17,6 +17,7 @@
 package org.tmatesoft.hg.internal;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 
 /**
  * relevant parts of DataInput, non-stream nature (seek operation), explicit check for end of data.
@@ -31,6 +32,18 @@
 	public boolean isEmpty() {
 		return true;
 	}
+	public long length() {
+		return 0;
+	}
+	/**
+	 * get this instance into initial state
+	 * @throws IOException
+	 * @return <code>this</code> for convenience
+	 */
+	public DataAccess reset() throws IOException {
+		// nop, empty instance is always in the initial state
+		return this;
+	}
 	// absolute positioning
 	public void seek(long offset) throws IOException {
 		throw new UnsupportedOperationException();
@@ -58,7 +71,32 @@
 	public void readBytes(byte[] buf, int offset, int length) throws IOException {
 		throw new UnsupportedOperationException();
 	}
+	// reads bytes into ByteBuffer, up to its limit or total data length, whichever smaller
+	// FIXME perhaps, in DataAccess paradigm (when we read known number of bytes, we shall pass specific byte count to read) 
+	public void readBytes(ByteBuffer buf) throws IOException {
+//		int toRead = Math.min(buf.remaining(), (int) length());
+//		if (buf.hasArray()) {
+//			readBytes(buf.array(), buf.arrayOffset(), toRead);
+//		} else {
+//			byte[] bb = new byte[toRead];
+//			readBytes(bb, 0, bb.length);
+//			buf.put(bb);
+//		}
+		// FIXME optimize to read as much as possible at once
+		while (!isEmpty() && buf.hasRemaining()) {
+			buf.put(readByte());
+		}
+	}
 	public byte readByte() throws IOException {
 		throw new UnsupportedOperationException();
 	}
-}
\ No newline at end of file
+
+	// XXX decide whether may or may not change position in the DataAccess
+	// FIXME exception handling is not right, just for the sake of quick test
+	public byte[] byteArray() throws IOException {
+		reset();
+		byte[] rv = new byte[(int) length()];
+		readBytes(rv, 0, rv.length);
+		return rv;
+	}
+}
--- a/src/org/tmatesoft/hg/internal/DataAccessProvider.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/internal/DataAccessProvider.java	Wed Mar 09 05:22:17 2011 +0100
@@ -86,6 +86,17 @@
 		}
 		
 		@Override
+		public long length() {
+			return size;
+		}
+		
+		@Override
+		public DataAccess reset() throws IOException {
+			seek(0);
+			return this;
+		}
+		
+		@Override
 		public void seek(long offset) {
 			assert offset >= 0;
 			// offset may not necessarily be further than current position in the file (e.g. rewind) 
@@ -188,6 +199,17 @@
 		}
 		
 		@Override
+		public long length() {
+			return size;
+		}
+		
+		@Override
+		public DataAccess reset() throws IOException {
+			seek(0);
+			return this;
+		}
+		
+		@Override
 		public void seek(long offset) throws IOException {
 			if (offset > size) {
 				throw new IllegalArgumentException();
--- a/src/org/tmatesoft/hg/internal/FilterByteChannel.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/internal/FilterByteChannel.java	Wed Mar 09 05:22:17 2011 +0100
@@ -20,6 +20,7 @@
 import java.nio.ByteBuffer;
 import java.util.Collection;
 
+import org.tmatesoft.hg.util.Adaptable;
 import org.tmatesoft.hg.util.ByteChannel;
 import org.tmatesoft.hg.util.CancelledException;
 
@@ -28,7 +29,7 @@
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
-public class FilterByteChannel implements ByteChannel {
+public class FilterByteChannel implements ByteChannel, Adaptable {
 	private final Filter[] filters;
 	private final ByteChannel delegate;
 	
@@ -52,4 +53,14 @@
 		return buffer.position() - srcPos; // consumed as much from original buffer
 	}
 
+	// adapters or implemented interfaces of the original class shall not be obfuscated by filter
+	public <T> T getAdapter(Class<T> adapterClass) {
+		if (delegate instanceof Adaptable) {
+			return ((Adaptable) delegate).getAdapter(adapterClass);
+		}
+		if (adapterClass != null && adapterClass.isInstance(delegate)) {
+			return adapterClass.cast(delegate);
+		}
+		return null;
+	}
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/FilterDataAccess.java	Wed Mar 09 05:22:17 2011 +0100
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2011 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.IOException;
+
+
+/**
+ * XXX Perhaps, DataAccessSlice? Unlike FilterInputStream, we limit amount of data read from DataAccess being filtered.
+ *   
+ *   
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class FilterDataAccess extends DataAccess {
+	private final DataAccess dataAccess;
+	private final long offset;
+	private final int length;
+	private int count;
+
+	public FilterDataAccess(DataAccess dataAccess, long offset, int length) {
+		this.dataAccess = dataAccess;
+		this.offset = offset;
+		this.length = length;
+		count = length;
+	}
+
+	protected int available() {
+		return count;
+	}
+
+	@Override
+	public FilterDataAccess reset() throws IOException {
+		count = length;
+		return this;
+	}
+	
+	@Override
+	public boolean isEmpty() {
+		return count <= 0;
+	}
+	
+	@Override
+	public long length() {
+		return length;
+	}
+
+	@Override
+	public void seek(long localOffset) throws IOException {
+		if (localOffset < 0 || localOffset > length) {
+			throw new IllegalArgumentException();
+		}
+		dataAccess.seek(offset + localOffset);
+		count = (int) (length - localOffset);
+	}
+
+	@Override
+	public void skip(int bytes) throws IOException {
+		int newCount = count - bytes;
+		if (newCount < 0 || newCount > length) {
+			throw new IllegalArgumentException();
+		}
+		seek(length - newCount);
+		/*
+		 can't use next code because don't want to rewind backing DataAccess on reset()
+		 i.e. this.reset() modifies state of this instance only, while filtered DA may go further.
+		 Only actual this.skip/seek/read would rewind it to desired position 
+	  		dataAccess.skip(bytes);
+			count = newCount;
+		 */
+
+	}
+
+	@Override
+	public byte readByte() throws IOException {
+		if (count <= 0) {
+			throw new IllegalArgumentException("Underflow"); // XXX be descriptive
+		}
+		if (count == length) {
+			dataAccess.seek(offset);
+		}
+		count--;
+		return dataAccess.readByte();
+	}
+
+	@Override
+	public void readBytes(byte[] b, int off, int len) throws IOException {
+		if (count <= 0 || len > count) {
+			throw new IllegalArgumentException("Underflow"); // XXX be descriptive
+		}
+		if (count == length) {
+			dataAccess.seek(offset);
+		}
+		dataAccess.readBytes(b, off, len);
+		count -= len;
+	}
+
+	// done shall be no-op, as we have no idea what's going on with DataAccess we filter
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/tmatesoft/hg/internal/InflaterDataAccess.java	Wed Mar 09 05:22:17 2011 +0100
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2011 TMate Software Ltd
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For information on how to redistribute this software under
+ * the terms of a license other than GNU General Public License
+ * contact TMate Software at support@hg4j.com
+ */
+package org.tmatesoft.hg.internal;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.util.zip.DataFormatException;
+import java.util.zip.Inflater;
+import java.util.zip.ZipException;
+
+
+/**
+ * DataAccess counterpart for InflaterInputStream.
+ * XXX is it really needed to be subclass of FilterDataAccess? 
+ *   
+ * @author Artem Tikhomirov
+ * @author TMate Software Ltd.
+ */
+public class InflaterDataAccess extends FilterDataAccess {
+
+	private final Inflater inflater;
+	private final byte[] buffer;
+	private final byte[] singleByte = new byte[1];
+	private int decompressedPos = 0;
+	private int decompressedLength = -1;
+
+	public InflaterDataAccess(DataAccess dataAccess, long offset, int length) {
+		this(dataAccess, offset, length, new Inflater(), 512);
+	}
+
+	public InflaterDataAccess(DataAccess dataAccess, long offset, int length, Inflater inflater, int bufSize) {
+		super(dataAccess, offset, length);
+		this.inflater = inflater;
+		buffer = new byte[bufSize];
+	}
+	
+	@Override
+	public InflaterDataAccess reset() throws IOException {
+		super.reset();
+		inflater.reset();
+		decompressedPos = 0;
+		return this;
+	}
+	
+	@Override
+	protected int available() {
+		throw new IllegalStateException("Can't tell how much uncompressed data left");
+	}
+	
+	@Override
+	public boolean isEmpty() {
+		return super.available() <= 0 && inflater.finished(); // and/or inflater.getRemaining() <= 0 ?
+	}
+	
+	@Override
+	public long length() {
+		if (decompressedLength != -1) {
+			return decompressedLength;
+		}
+		int c = 0;
+		try {
+			int oldPos = decompressedPos;
+			while (!isEmpty()) {
+				readByte();
+				c++;
+			}
+			decompressedLength = c + oldPos;
+			reset();
+			seek(oldPos);
+			return decompressedLength;
+		} catch (IOException ex) {
+			ex.printStackTrace(); // FIXME log error
+			decompressedLength = -1; // better luck next time?
+			return 0;
+		}
+	}
+	
+	@Override
+	public void seek(long localOffset) throws IOException {
+		if (localOffset < 0 /* || localOffset >= length() */) {
+			throw new IllegalArgumentException();
+		}
+		if (localOffset >= decompressedPos) {
+			skip((int) (localOffset - decompressedPos));
+		} else {
+			reset();
+			skip((int) localOffset);
+		}
+	}
+	
+	@Override
+	public void skip(int bytes) throws IOException {
+		if (bytes < 0) {
+			bytes += decompressedPos;
+			if (bytes < 0) {
+				throw new IOException("Underflow. Rewind past start of the slice.");
+			}
+			reset();
+			// fall-through
+		}
+		while (!isEmpty() && bytes > 0) {
+			readByte();
+			bytes--;
+		}
+		if (bytes != 0) {
+			throw new IOException("Underflow. Rewind past end of the slice");
+		}
+	}
+
+	@Override
+	public byte readByte() throws IOException {
+		readBytes(singleByte, 0, 1);
+		return singleByte[0];
+	}
+
+	@Override
+	public void readBytes(byte[] b, int off, int len) throws IOException {
+		try {
+		    int n;
+		    while (len > 0) {
+			    while ((n = inflater.inflate(b, off, len)) == 0) {
+			    	// FIXME few last bytes (checksum?) may be ignored by inflater, thus inflate may return 0 in
+			    	// perfectly legal conditions (when all data already expanded, but there are still some bytes
+			    	// in the input stream)
+					if (inflater.finished() || inflater.needsDictionary()) {
+	                    throw new EOFException();
+					}
+					if (inflater.needsInput()) {
+						// fill:
+						int toRead = super.available();
+						if (toRead > buffer.length) {
+							toRead = buffer.length;
+						}
+						super.readBytes(buffer, 0, toRead);
+						inflater.setInput(buffer, 0, toRead);
+					}
+			    }
+				off += n;
+				len -= n;
+				decompressedPos += n;
+				if (len == 0) {
+					return; // filled
+				}
+		    }
+		} catch (DataFormatException e) {
+		    String s = e.getMessage();
+		    throw new ZipException(s != null ? s : "Invalid ZLIB data format");
+		}
+    }
+}
--- a/src/org/tmatesoft/hg/internal/RevlogStream.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/internal/RevlogStream.java	Wed Mar 09 05:22:17 2011 +0100
@@ -25,8 +25,6 @@
 import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.zip.DataFormatException;
-import java.util.zip.Inflater;
 
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.repo.HgRepository;
@@ -186,10 +184,10 @@
 			start = indexSize - 1;
 		}
 		if (start < 0 || start >= indexSize) {
-			throw new IllegalArgumentException("Bad left range boundary " + start);
+			throw new IllegalArgumentException(String.format("Bad left range boundary %d in [0..%d]", start, indexSize-1));
 		}
 		if (end < start || end >= indexSize) {
-			throw new IllegalArgumentException("Bad right range boundary " + end);
+			throw new IllegalArgumentException(String.format("Bad right range boundary %d in [0..%d]", end, indexSize-1));
 		}
 		// XXX may cache [start .. end] from index with a single read (pre-read)
 		
@@ -200,7 +198,7 @@
 		}
 		try {
 			byte[] nodeidBuf = new byte[20];
-			byte[] lastData = null;
+			DataAccess lastUserData = null;
 			int i;
 			boolean extraReadsToBaseRev = false;
 			if (needData && index.get(start).baseRevision < start) {
@@ -210,9 +208,13 @@
 				i = start;
 			}
 			
-			daIndex.seek(inline ? (int) index.get(i).offset : i * REVLOGV1_RECORD_SIZE);
+			daIndex.seek(inline ? index.get(i).offset : i * REVLOGV1_RECORD_SIZE);
 			for (; i <= end; i++ ) {
-				long l = daIndex.readLong();  // 0
+				if (inline && needData) {
+					// inspector reading data (though FilterDataAccess) may have affected index position
+					daIndex.seek(index.get(i).offset);
+				}
+				long l = daIndex.readLong(); // 0
 				@SuppressWarnings("unused")
 				long offset = l >>> 16;
 				@SuppressWarnings("unused")
@@ -226,49 +228,41 @@
 				// Hg has 32 bytes here, uses 20 for nodeid, and keeps 12 last bytes empty
 				daIndex.readBytes(nodeidBuf, 0, 20); // +32
 				daIndex.skip(12);
-				byte[] data = null;
+				DataAccess userDataAccess = null;
 				if (needData) {
-					byte[] dataBuf = new byte[compressedLen];
+					final byte firstByte;
+					long streamOffset = index.get(i).offset;
+					DataAccess streamDataAccess;
 					if (inline) {
-						daIndex.readBytes(dataBuf, 0, compressedLen);
+						streamDataAccess = daIndex;
+						streamOffset += REVLOGV1_RECORD_SIZE; // don't need to do seek as it's actual position in the index stream
 					} else {
-						daData.seek(index.get(i).offset);
-						daData.readBytes(dataBuf, 0, compressedLen);
+						streamDataAccess = daData;
+						daData.seek(streamOffset);
 					}
-					if (dataBuf[0] == 0x78 /* 'x' */) {
-						try {
-							Inflater zlib = new Inflater(); // XXX Consider reuse of Inflater, and/or stream alternative
-							zlib.setInput(dataBuf, 0, compressedLen);
-							byte[] result = new byte[actualLen*2]; // FIXME need to use zlib.finished() instead 
-							int resultLen = zlib.inflate(result);
-							zlib.end();
-							data = new byte[resultLen];
-							System.arraycopy(result, 0, data, 0, resultLen);
-						} catch (DataFormatException ex) {
-							ex.printStackTrace();
-							data = new byte[0]; // FIXME need better failure strategy
-						}
-					} else if (dataBuf[0] == 0x75 /* 'u' */) {
-						data = new byte[dataBuf.length - 1];
-						System.arraycopy(dataBuf, 1, data, 0, data.length);
+					firstByte = streamDataAccess.readByte();
+					if (firstByte == 0x78 /* 'x' */) {
+						userDataAccess = new InflaterDataAccess(streamDataAccess, streamOffset, compressedLen);
+					} else if (firstByte == 0x75 /* 'u' */) {
+						userDataAccess = new FilterDataAccess(streamDataAccess, streamOffset+1, compressedLen-1);
 					} else {
 						// XXX Python impl in fact throws exception when there's not 'x', 'u' or '0'
 						// but I don't see reason not to return data as is 
-						data = dataBuf;
+						userDataAccess = new FilterDataAccess(streamDataAccess, streamOffset, compressedLen);
 					}
 					// XXX 
 					if (baseRevision != i) { // XXX not sure if this is the right way to detect a patch
 						// this is a patch
 						LinkedList<PatchRecord> patches = new LinkedList<PatchRecord>();
-						int patchElementIndex = 0;
-						do {
-							PatchRecord pr = PatchRecord.read(data, patchElementIndex);
+						while (!userDataAccess.isEmpty()) {
+							PatchRecord pr = PatchRecord.read(userDataAccess);
+//							System.out.printf("PatchRecord:%d %d %d\n", pr.start, pr.end, pr.len);
 							patches.add(pr);
-							patchElementIndex += 12 + pr.len;
-						} while (patchElementIndex < data.length);
+						}
+						userDataAccess.done();
 						//
-						byte[] baseRevContent = lastData;
-						data = apply(baseRevContent, actualLen, patches);
+						byte[] userData = apply(lastUserData, actualLen, patches);
+						userDataAccess = new ByteArrayDataAccess(userData);
 					}
 				} else {
 					if (inline) {
@@ -276,9 +270,15 @@
 					}
 				}
 				if (!extraReadsToBaseRev || i >= start) {
-					inspector.next(i, actualLen, baseRevision, linkRevision, parent1Revision, parent2Revision, nodeidBuf, data);
+					inspector.next(i, actualLen, baseRevision, linkRevision, parent1Revision, parent2Revision, nodeidBuf, userDataAccess);
 				}
-				lastData = data;
+				if (userDataAccess != null) {
+					userDataAccess.reset();
+					if (lastUserData != null) {
+						lastUserData.done();
+					}
+					lastUserData = userDataAccess;
+				}
 			}
 		} catch (IOException ex) {
 			throw new IllegalStateException(ex); // FIXME need better handling
@@ -357,10 +357,10 @@
 
 	// mpatch.c : apply()
 	// FIXME need to implement patch merge (fold, combine, gather and discard from aforementioned mpatch.[c|py]), also see Revlog and Mercurial PDF
-	public/*for HgBundle; until moved to better place*/static byte[] apply(byte[] baseRevisionContent, int outcomeLen, List<PatchRecord> patch) {
+	public/*for HgBundle; until moved to better place*/static byte[] apply(DataAccess baseRevisionContent, int outcomeLen, List<PatchRecord> patch) throws IOException {
 		int last = 0, destIndex = 0;
 		if (outcomeLen == -1) {
-			outcomeLen = baseRevisionContent.length;
+			outcomeLen = (int) baseRevisionContent.length();
 			for (PatchRecord pr : patch) {
 				outcomeLen += pr.start - last + pr.len;
 				last = pr.end;
@@ -370,13 +370,15 @@
 		}
 		byte[] rv = new byte[outcomeLen];
 		for (PatchRecord pr : patch) {
-			System.arraycopy(baseRevisionContent, last, rv, destIndex, pr.start-last);
+			baseRevisionContent.seek(last);
+			baseRevisionContent.readBytes(rv, destIndex, pr.start-last);
 			destIndex += pr.start - last;
 			System.arraycopy(pr.data, 0, rv, destIndex, pr.data.length);
 			destIndex += pr.data.length;
 			last = pr.end;
 		}
-		System.arraycopy(baseRevisionContent, last, rv, destIndex, baseRevisionContent.length - last);
+		baseRevisionContent.seek(last);
+		baseRevisionContent.readBytes(rv, destIndex, (int) (baseRevisionContent.length() - last));
 		return rv;
 	}
 
@@ -422,7 +424,8 @@
 	// instantly - e.g. calculate hash, or comparing two revisions
 	public interface Inspector {
 		// XXX boolean retVal to indicate whether to continue?
-		// TODO specify nodeid and data length, and reuse policy (i.e. if revlog stream doesn't reuse nodeid[] for each call) 
-		void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[/*20*/] nodeid, byte[] data);
+		// TODO specify nodeid and data length, and reuse policy (i.e. if revlog stream doesn't reuse nodeid[] for each call)
+		// implementers shall not invoke DataAccess.done(), it's accomplished by #iterate at appropriate moment
+		void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[/*20*/] nodeid, DataAccess data);
 	}
 }
--- a/src/org/tmatesoft/hg/repo/HgBundle.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/repo/HgBundle.java	Wed Mar 09 05:22:17 2011 +0100
@@ -21,12 +21,16 @@
 import java.util.LinkedList;
 import java.util.List;
 
+import org.tmatesoft.hg.core.HgException;
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.ByteArrayChannel;
+import org.tmatesoft.hg.internal.ByteArrayDataAccess;
 import org.tmatesoft.hg.internal.DataAccess;
 import org.tmatesoft.hg.internal.DataAccessProvider;
 import org.tmatesoft.hg.internal.DigestHelper;
 import org.tmatesoft.hg.internal.RevlogStream;
 import org.tmatesoft.hg.repo.HgChangelog.RawChangeset;
+import org.tmatesoft.hg.util.CancelledException;
 
 
 /**
@@ -45,7 +49,7 @@
 		bundleFile = bundle;
 	}
 
-	public void changes(HgRepository hgRepo) throws IOException {
+	public void changes(HgRepository hgRepo) throws HgException, IOException {
 		DataAccess da = accessProvider.create(bundleFile);
 		DigestHelper dh = new DigestHelper();
 		try {
@@ -62,17 +66,23 @@
 			// BundleFormat wiki says:
 			// Each Changelog entry patches the result of all previous patches 
 			// (the previous, or parent patch of a given patch p is the patch that has a node equal to p's p1 field)
-			byte[] baseRevContent = hgRepo.getChangelog().content(base);
+			ByteArrayChannel bac = new ByteArrayChannel();
+			hgRepo.getChangelog().rawContent(base, bac); // FIXME get DataAccess directly, to avoid
+			// extra byte[] (inside ByteArrayChannel) duplication just for the sake of subsequent ByteArrayDataAccess wrap.
+			ByteArrayDataAccess baseRevContent = new ByteArrayDataAccess(bac.toArray());
 			for (GroupElement ge : changelogGroup) {
 				byte[] csetContent = RevlogStream.apply(baseRevContent, -1, ge.patches);
 				dh = dh.sha1(ge.firstParent(), ge.secondParent(), csetContent); // XXX ge may give me access to byte[] content of nodeid directly, perhaps, I don't need DH to be friend of Nodeid?
 				if (!ge.node().equalsTo(dh.asBinary())) {
 					throw new IllegalStateException("Integrity check failed on " + bundleFile + ", node:" + ge.node());
 				}
-				RawChangeset cs = RawChangeset.parse(csetContent, 0, csetContent.length);
+				ByteArrayDataAccess csetDataAccess = new ByteArrayDataAccess(csetContent);
+				RawChangeset cs = RawChangeset.parse(csetDataAccess);
 				System.out.println(cs.toString());
-				baseRevContent = csetContent;
+				baseRevContent = csetDataAccess.reset();
 			}
+		} catch (CancelledException ex) {
+			System.out.println("Operation cancelled");
 		} finally {
 			da.done();
 		}
--- a/src/org/tmatesoft/hg/repo/HgChangelog.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/repo/HgChangelog.java	Wed Mar 09 05:22:17 2011 +0100
@@ -16,6 +16,7 @@
  */
 package org.tmatesoft.hg.repo;
 
+import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -30,6 +31,7 @@
 import java.util.TimeZone;
 
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.DataAccess;
 import org.tmatesoft.hg.internal.RevlogStream;
 
 /**
@@ -51,8 +53,8 @@
 	public void range(int start, int end, final HgChangelog.Inspector inspector) {
 		RevlogStream.Inspector i = new RevlogStream.Inspector() {
 
-			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, byte[] data) {
-				RawChangeset cset = RawChangeset.parse(data, 0, data.length);
+			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess da) {
+				RawChangeset cset = RawChangeset.parse(da);
 				// XXX there's no guarantee for Changeset.Callback that distinct instance comes each time, consider instance reuse
 				inspector.next(revisionNumber, Nodeid.fromBinary(nodeid, 0), cset);
 			}
@@ -64,8 +66,8 @@
 		final ArrayList<RawChangeset> rv = new ArrayList<RawChangeset>(end - start + 1);
 		RevlogStream.Inspector i = new RevlogStream.Inspector() {
 
-			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, byte[] data) {
-				RawChangeset cset = RawChangeset.parse(data, 0, data.length);
+			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess da) {
+				RawChangeset cset = RawChangeset.parse(da);
 				rv.add(cset);
 			}
 		};
@@ -79,9 +81,9 @@
 		}
 		RevlogStream.Inspector i = new RevlogStream.Inspector() {
 
-			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, byte[] data) {
+			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess da) {
 				if (Arrays.binarySearch(revisions, revisionNumber) >= 0) {
-					RawChangeset cset = RawChangeset.parse(data, 0, data.length);
+					RawChangeset cset = RawChangeset.parse(da);
 					inspector.next(revisionNumber, Nodeid.fromBinary(nodeid, 0), cset);
 				}
 			}
@@ -198,10 +200,15 @@
 			}
 		}
 
-		public static RawChangeset parse(byte[] data, int offset, int length) {
-			RawChangeset rv = new RawChangeset();
-			rv.init(data, offset, length);
-			return rv;
+		public static RawChangeset parse(DataAccess da) {
+			try {
+				byte[] data = da.byteArray();
+				RawChangeset rv = new RawChangeset();
+				rv.init(data, 0, data.length);
+				return rv;
+			} catch (IOException ex) {
+				throw new IllegalArgumentException(ex); // FIXME better handling of IOExc
+			}
 		}
 
 		/* package-local */void init(byte[] data, int offset, int length) {
--- a/src/org/tmatesoft/hg/repo/HgDataFile.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/repo/HgDataFile.java	Wed Mar 09 05:22:17 2011 +0100
@@ -18,9 +18,8 @@
 
 import static org.tmatesoft.hg.repo.HgInternals.wrongLocalRevision;
 import static org.tmatesoft.hg.repo.HgRepository.*;
-import static org.tmatesoft.hg.repo.HgRepository.TIP;
-import static org.tmatesoft.hg.repo.HgRepository.WORKING_COPY;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -28,14 +27,14 @@
 import java.util.TreeMap;
 
 import org.tmatesoft.hg.core.HgDataStreamException;
+import org.tmatesoft.hg.core.HgException;
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.DataAccess;
 import org.tmatesoft.hg.internal.FilterByteChannel;
 import org.tmatesoft.hg.internal.RevlogStream;
 import org.tmatesoft.hg.util.ByteChannel;
-import org.tmatesoft.hg.util.CancelSupport;
 import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.Path;
-import org.tmatesoft.hg.util.ProgressSupport;
 
 
 
@@ -71,106 +70,86 @@
 
 	// human-readable (i.e. "COPYING", not "store/data/_c_o_p_y_i_n_g.i")
 	public Path getPath() {
-		return path; // hgRepo.backresolve(this) -> name?
+		return path; // hgRepo.backresolve(this) -> name? In this case, what about hashed long names?
 	}
 
 	public int length(Nodeid nodeid) {
 		return content.dataLength(getLocalRevision(nodeid));
 	}
 
-	public byte[] content() {
-		return content(TIP);
+	public void workingCopy(ByteChannel sink) throws IOException, CancelledException {
+		throw HgRepository.notImplemented();
 	}
 	
-	/*XXX not sure applyFilters is the best way to do, perhaps, callers shall add filters themselves?*/
-	public void content(int revision, ByteChannel sink, boolean applyFilters) throws HgDataStreamException, IOException, CancelledException {
-		byte[] content = content(revision);
-		final CancelSupport cancelSupport = CancelSupport.Factory.get(sink);
-		final ProgressSupport progressSupport = ProgressSupport.Factory.get(sink);
-		ByteBuffer buf = ByteBuffer.allocate(512);
-		int left = content.length;
-		progressSupport.start(left);
-		int offset = 0;
-		cancelSupport.checkCancelled();
-		ByteChannel _sink = applyFilters ? new FilterByteChannel(sink, getRepo().getFiltersFromRepoToWorkingDir(getPath())) : sink;
-		do {
-			buf.put(content, offset, Math.min(left, buf.remaining()));
-			buf.flip();
-			cancelSupport.checkCancelled();
-			// XXX I may not rely on returned number of bytes but track change in buf position instead.
-			int consumed = _sink.write(buf);
-			buf.compact();
-			offset += consumed;
-			left -= consumed;
-			progressSupport.worked(consumed);
-		} while (left > 0);
-		progressSupport.done(); // XXX shall specify whether #done() is invoked always or only if completed successfully.
+//	public void content(int revision, ByteChannel sink, boolean applyFilters) throws HgDataStreamException, IOException, CancelledException {
+//		byte[] content = content(revision);
+//		final CancelSupport cancelSupport = CancelSupport.Factory.get(sink);
+//		final ProgressSupport progressSupport = ProgressSupport.Factory.get(sink);
+//		ByteBuffer buf = ByteBuffer.allocate(512);
+//		int left = content.length;
+//		progressSupport.start(left);
+//		int offset = 0;
+//		cancelSupport.checkCancelled();
+//		ByteChannel _sink = applyFilters ? new FilterByteChannel(sink, getRepo().getFiltersFromRepoToWorkingDir(getPath())) : sink;
+//		do {
+//			buf.put(content, offset, Math.min(left, buf.remaining()));
+//			buf.flip();
+//			cancelSupport.checkCancelled();
+//			// XXX I may not rely on returned number of bytes but track change in buf position instead.
+//			int consumed = _sink.write(buf);
+//			buf.compact();
+//			offset += consumed;
+//			left -= consumed;
+//			progressSupport.worked(consumed);
+//		} while (left > 0);
+//		progressSupport.done(); // XXX shall specify whether #done() is invoked always or only if completed successfully.
+//	}
+	
+	/*XXX not sure distinct method contentWithFilters() is the best way to do, perhaps, callers shall add filters themselves?*/
+	public void contentWithFilters(int revision, ByteChannel sink) throws HgDataStreamException, IOException, CancelledException {
+		content(revision, new FilterByteChannel(sink, getRepo().getFiltersFromRepoToWorkingDir(getPath())));
 	}
 
 	// for data files need to check heading of the file content for possible metadata
 	// @see http://mercurial.selenic.com/wiki/FileFormats#data.2BAC8-
-	@Override
-	public byte[] content(int revision) {
+	public void content(int revision, ByteChannel sink) throws HgDataStreamException, IOException, CancelledException {
 		if (revision == TIP) {
 			revision = getLastRevision();
 		}
-		if (wrongLocalRevision(revision) || revision == BAD_REVISION || revision == WORKING_COPY) {
+		if (revision == WORKING_COPY) {
+			workingCopy(sink);
+			return;
+		}
+		if (wrongLocalRevision(revision) || revision == BAD_REVISION) {
 			throw new IllegalArgumentException(String.valueOf(revision));
 		}
-		byte[] data = super.content(revision);
+		if (sink == null) {
+			throw new IllegalArgumentException();
+		}
 		if (metadata == null) {
 			metadata = new Metadata();
 		}
+		ContentPipe insp;
 		if (metadata.none(revision)) {
-			// although not very reasonable when data is byte array, this check might
-			// get handy when there's a stream/channel to avoid useless reads and rewinds.
-			return data;
+			insp = new ContentPipe(sink, 0);
+		} else if (metadata.known(revision)) {
+			insp = new ContentPipe(sink, metadata.dataOffset(revision));
+		} else {
+			// do not know if there's metadata
+			insp = new MetadataContentPipe(sink, metadata);
 		}
-		int toSkip = 0;
-		if (!metadata.known(revision)) {
-			if (data.length < 4 || (data[0] != 1 && data[1] != 10)) {
-				metadata.recordNone(revision);
-				return data;
-			}
-			int lastEntryStart = 2;
-			int lastColon = -1;
-			ArrayList<MetadataEntry> _metadata = new ArrayList<MetadataEntry>();
-			String key = null, value = null;
-			for (int i = 2; i < data.length; i++) {
-				if (data[i] == (int) ':') {
-					key = new String(data, lastEntryStart, i - lastEntryStart);
-					lastColon = i;
-				} else if (data[i] == '\n') {
-					if (key == null || lastColon == -1 || i <= lastColon) {
-						throw new IllegalStateException(); // FIXME log instead and record null key in the metadata. Ex just to fail fast during dev
-					}
-					value = new String(data, lastColon + 1, i - lastColon - 1).trim();
-					_metadata.add(new MetadataEntry(key, value));
-					key = value = null;
-					lastColon = -1;
-					lastEntryStart = i+1;
-				} else if (data[i] == 1 && i + 1 < data.length && data[i+1] == 10) {
-					if (key != null && lastColon != -1 && i > lastColon) {
-						// just in case last entry didn't end with newline
-						value = new String(data, lastColon + 1, i - lastColon - 1);
-						_metadata.add(new MetadataEntry(key, value));
-					}
-					lastEntryStart = i+1;
-					break;
-				}
-			}
-			_metadata.trimToSize();
-			metadata.add(revision, lastEntryStart, _metadata);
-			toSkip = lastEntryStart;
-		} else {
-			toSkip = metadata.dataOffset(revision);
+		insp.checkCancelled();
+		super.content.iterate(revision, revision, true, insp);
+		try {
+			insp.checkFailed();
+		} catch (HgDataStreamException ex) {
+			throw ex;
+		} catch (HgException ex) {
+			// shall not happen, unless we changed ContentPipe or its subclass
+			throw new HgDataStreamException(ex.getClass().getName(), ex);
 		}
-		// XXX copy of an array may be memory-hostile, a wrapper with baseOffsetShift(lastEntryStart) would be more convenient
-		byte[] rv = new byte[data.length - toSkip];
-		System.arraycopy(data, toSkip, rv, 0, rv.length);
-		return rv;
 	}
-
+	
 	public void history(HgChangelog.Inspector inspector) {
 		history(0, getLastRevision(), inspector);
 	}
@@ -192,7 +171,7 @@
 		RevlogStream.Inspector insp = new RevlogStream.Inspector() {
 			int count = 0;
 			
-			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, byte[] data) {
+			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
 				commitRevisions[count++] = linkRevision;
 			}
 		};
@@ -210,10 +189,22 @@
 		return getRepo().getChangelog().getRevision(changelogRevision);
 	}
 
-	public boolean isCopy() {
+	public boolean isCopy() throws HgDataStreamException {
 		if (metadata == null || !metadata.checked(0)) {
 			// content() always initializes metadata.
-			content(0); // FIXME expensive way to find out metadata, distinct RevlogStream.Iterator would be better.
+			 // FIXME this is expensive way to find out metadata, distinct RevlogStream.Iterator would be better.
+			try {
+				content(0, new ByteChannel() { // No-op channel
+					public int write(ByteBuffer buffer) throws IOException {
+						// pretend we consumed whole buffer
+						int rv = buffer.remaining();
+						buffer.position(buffer.limit());
+						return rv;
+					}
+				});
+			} catch (Exception ex) {
+				throw new HgDataStreamException("Can't initialize metadata", ex);
+			}
 		}
 		if (!metadata.known(0)) {
 			return false;
@@ -221,14 +212,14 @@
 		return metadata.find(0, "copy") != null;
 	}
 
-	public Path getCopySourceName() {
+	public Path getCopySourceName() throws HgDataStreamException {
 		if (isCopy()) {
 			return Path.create(metadata.find(0, "copy"));
 		}
 		throw new UnsupportedOperationException(); // XXX REVISIT, think over if Exception is good (clients would check isCopy() anyway, perhaps null is sufficient?)
 	}
 	
-	public Nodeid getCopySourceRevision() {
+	public Nodeid getCopySourceRevision() throws HgDataStreamException {
 		if (isCopy()) {
 			return Nodeid.fromAscii(metadata.find(0, "copyrev")); // XXX reuse/cache Nodeid
 		}
@@ -317,4 +308,76 @@
 			return null;
 		}
 	}
+
+	private static class MetadataContentPipe extends ContentPipe {
+
+		private final Metadata metadata;
+
+		public MetadataContentPipe(ByteChannel sink, Metadata _metadata) {
+			super(sink, 0);
+			metadata = _metadata;
+		}
+
+		@Override
+		protected void prepare(int revisionNumber, DataAccess da) throws HgException, IOException {
+			long daLength = da.length();
+			if (daLength < 4 || da.readByte() != 1 || da.readByte() != 10) {
+				metadata.recordNone(revisionNumber);
+				da.reset();
+				return;
+			}
+			int lastEntryStart = 2;
+			int lastColon = -1;
+			ArrayList<MetadataEntry> _metadata = new ArrayList<MetadataEntry>();
+			// XXX in fact, need smth like ByteArrayBuilder, similar to StringBuilder,
+			// which can't be used here because we can't convert bytes to chars as we read them
+			// (there might be multi-byte encoding), and we need to collect all bytes before converting to string 
+			ByteArrayOutputStream bos = new ByteArrayOutputStream();
+			String key = null, value = null;
+			boolean byteOne = false;
+			for (int i = 2; i < daLength; i++) {
+				byte b = da.readByte();
+				if (b == '\n') {
+					if (byteOne) { // i.e. \n follows 1
+						lastEntryStart = i+1;
+						// XXX is it possible to have here incomplete key/value (i.e. if last pair didn't end with \n)
+						break;
+					}
+					if (key == null || lastColon == -1 || i <= lastColon) {
+						throw new IllegalStateException(); // FIXME log instead and record null key in the metadata. Ex just to fail fast during dev
+					}
+					value = new String(bos.toByteArray()).trim();
+					bos.reset();
+					_metadata.add(new MetadataEntry(key, value));
+					key = value = null;
+					lastColon = -1;
+					lastEntryStart = i+1;
+					continue;
+				} 
+				// byteOne has to be consumed up to this line, if not yet, consume it
+				if (byteOne) {
+					// insert 1 we've read on previous step into the byte builder
+					bos.write(1);
+					// fall-through to consume current byte
+					byteOne = false;
+				}
+				if (b == (int) ':') {
+					assert value == null;
+					key = new String(bos.toByteArray());
+					bos.reset();
+					lastColon = i;
+				} else if (b == 1) {
+					byteOne = true;
+				} else {
+					bos.write(b);
+				}
+			}
+			_metadata.trimToSize();
+			metadata.add(revisionNumber, lastEntryStart, _metadata);
+			if (da.isEmpty() || !byteOne) {
+				throw new HgDataStreamException(String.format("Metadata for revision %d is not closed properly", revisionNumber), null);
+			}
+			// da is in prepared state (i.e. we consumed all bytes up to metadata end).
+		}
+	}
 }
--- a/src/org/tmatesoft/hg/repo/HgManifest.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/repo/HgManifest.java	Wed Mar 09 05:22:17 2011 +0100
@@ -16,7 +16,11 @@
  */
 package org.tmatesoft.hg.repo;
 
+import java.io.IOException;
+
+import org.tmatesoft.hg.core.HgBadStateException;
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.DataAccess;
 import org.tmatesoft.hg.internal.RevlogStream;
 
 
@@ -36,38 +40,43 @@
 
 			private boolean gtg = true; // good to go
 
-			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, byte[] data) {
+			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess da) {
 				if (!gtg) {
 					return;
 				}
-				gtg = gtg && inspector.begin(revisionNumber, new Nodeid(nodeid, true));
-				int i;
-				String fname = null;
-				String flags = null;
-				Nodeid nid = null;
-				for (i = 0; gtg && i < actualLen; i++) {
-					int x = i;
-					for( ; data[i] != '\n' && i < actualLen; i++) {
-						if (fname == null && data[i] == 0) {
-							fname = new String(data, x, i - x);
-							x = i+1;
+				try {
+					gtg = gtg && inspector.begin(revisionNumber, new Nodeid(nodeid, true));
+					int i;
+					String fname = null;
+					String flags = null;
+					Nodeid nid = null;
+					byte[] data = da.byteArray();
+					for (i = 0; gtg && i < actualLen; i++) {
+						int x = i;
+						for( ; data[i] != '\n' && i < actualLen; i++) {
+							if (fname == null && data[i] == 0) {
+								fname = new String(data, x, i - x);
+								x = i+1;
+							}
 						}
+						if (i < actualLen) {
+							assert data[i] == '\n'; 
+							int nodeidLen = i - x < 40 ? i-x : 40;
+							nid = Nodeid.fromAscii(data, x, nodeidLen);
+							if (nodeidLen + x < i) {
+								// 'x' and 'l' for executable bits and symlinks?
+								// hg --debug manifest shows 644 for each regular file in my repo
+								flags = new String(data, x + nodeidLen, i-x-nodeidLen);
+							}
+							gtg = gtg && inspector.next(nid, fname, flags);
+						}
+						nid = null;
+						fname = flags = null;
 					}
-					if (i < actualLen) {
-						assert data[i] == '\n'; 
-						int nodeidLen = i - x < 40 ? i-x : 40;
-						nid = Nodeid.fromAscii(data, x, nodeidLen);
-						if (nodeidLen + x < i) {
-							// 'x' and 'l' for executable bits and symlinks?
-							// hg --debug manifest shows 644 for each regular file in my repo
-							flags = new String(data, x + nodeidLen, i-x-nodeidLen);
-						}
-						gtg = gtg && inspector.next(nid, fname, flags);
-					}
-					nid = null;
-					fname = flags = null;
+					gtg = gtg && inspector.end(revisionNumber);
+				} catch (IOException ex) {
+					throw new HgBadStateException(ex);
 				}
-				gtg = gtg && inspector.end(revisionNumber);
 			}
 		};
 		content.iterate(start, end, true, insp);
--- a/src/org/tmatesoft/hg/repo/HgStatusCollector.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/repo/HgStatusCollector.java	Wed Mar 09 05:22:17 2011 +0100
@@ -28,6 +28,7 @@
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+import org.tmatesoft.hg.core.HgDataStreamException;
 import org.tmatesoft.hg.core.Nodeid;
 import org.tmatesoft.hg.util.Path;
 import org.tmatesoft.hg.util.PathPool;
@@ -164,12 +165,18 @@
 					inspector.modified(pp.path(fname));
 				}
 			} else {
-				Path copyTarget = pp.path(fname);
-				Path copyOrigin = getOriginIfCopy(repo, copyTarget, r1Files, rev1);
-				if (copyOrigin != null) {
-					inspector.copied(pp.path(copyOrigin) /*pipe through pool, just in case*/, copyTarget);
-				} else {
-					inspector.added(copyTarget);
+				try {
+					Path copyTarget = pp.path(fname);
+					Path copyOrigin = getOriginIfCopy(repo, copyTarget, r1Files, rev1);
+					if (copyOrigin != null) {
+						inspector.copied(pp.path(copyOrigin) /*pipe through pool, just in case*/, copyTarget);
+					} else {
+						inspector.added(copyTarget);
+					}
+				} catch (HgDataStreamException ex) {
+					ex.printStackTrace();
+					// FIXME perhaps, shall record this exception to dedicated mediator and continue
+					// for a single file not to be irresolvable obstacle for a status operation
 				}
 			}
 		}
@@ -184,7 +191,7 @@
 		return rv;
 	}
 	
-	/*package-local*/static Path getOriginIfCopy(HgRepository hgRepo, Path fname, Collection<String> originals, int originalChangelogRevision) {
+	/*package-local*/static Path getOriginIfCopy(HgRepository hgRepo, Path fname, Collection<String> originals, int originalChangelogRevision) throws HgDataStreamException {
 		HgDataFile df = hgRepo.getFileNode(fname);
 		while (df.isCopy()) {
 			Path original = df.getCopySourceName();
--- a/src/org/tmatesoft/hg/repo/HgWorkingCopyStatusCollector.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/repo/HgWorkingCopyStatusCollector.java	Wed Mar 09 05:22:17 2011 +0100
@@ -30,10 +30,14 @@
 import java.util.Set;
 import java.util.TreeSet;
 
+import org.tmatesoft.hg.core.HgDataStreamException;
+import org.tmatesoft.hg.core.HgException;
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.ByteArrayChannel;
 import org.tmatesoft.hg.internal.FilterByteChannel;
 import org.tmatesoft.hg.repo.HgStatusCollector.ManifestRevisionInspector;
 import org.tmatesoft.hg.util.ByteChannel;
+import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.FileIterator;
 import org.tmatesoft.hg.util.Path;
 import org.tmatesoft.hg.util.PathPool;
@@ -176,7 +180,7 @@
 			} else {
 				// check actual content to avoid false modified files
 				HgDataFile df = repo.getFileNode(fname);
-				if (!areTheSame(f, df.content(), df.getPath())) {
+				if (!areTheSame(f, df, HgRepository.TIP)) {
 					inspector.modified(df.getPath());
 				}
 			}
@@ -204,10 +208,15 @@
 			// added: not known at the time of baseRevision, shall report
 			// merged: was not known, report as added?
 			if ((r = getDirstate().checkNormal(fname)) != null) {
-				Path origin = HgStatusCollector.getOriginIfCopy(repo, fname, baseRevNames, baseRevision);
-				if (origin != null) {
-					inspector.copied(getPathPool().path(origin), getPathPool().path(fname));
-					return;
+				try {
+					Path origin = HgStatusCollector.getOriginIfCopy(repo, fname, baseRevNames, baseRevision);
+					if (origin != null) {
+						inspector.copied(getPathPool().path(origin), getPathPool().path(fname));
+						return;
+					}
+				} catch (HgDataStreamException ex) {
+					ex.printStackTrace();
+					// FIXME report to a mediator, continue status collection
 				}
 			} else if ((r = getDirstate().checkAdded(fname)) != null) {
 				if (r.name2 != null && baseRevNames.contains(r.name2)) {
@@ -232,8 +241,7 @@
 					inspector.modified(getPathPool().path(fname));
 				} else {
 					// check actual content to see actual changes
-					// XXX consider adding HgDataDile.compare(File/byte[]/whatever) operation to optimize comparison
-					if (areTheSame(f, fileNode.content(nid1), fileNode.getPath())) {
+					if (areTheSame(f, fileNode, fileNode.getLocalRevision(nid1))) {
 						inspector.clean(getPathPool().path(fname));
 					} else {
 						inspector.modified(getPathPool().path(fname));
@@ -251,6 +259,24 @@
 		// The question is whether original Hg treats this case (same content, different parents and hence nodeids) as 'modified' or 'clean'
 	}
 
+	private boolean areTheSame(File f, HgDataFile dataFile, int localRevision) {
+		// XXX consider adding HgDataFile.compare(File/byte[]/whatever) operation to optimize comparison
+		ByteArrayChannel bac = new ByteArrayChannel();
+		boolean ioFailed = false;
+		try {
+			// need content with metadata striped off - although theoretically chances are metadata may be different,
+			// WC doesn't have it anyway 
+			dataFile.content(localRevision, bac);
+		} catch (CancelledException ex) {
+			// silently ignore - can't happen, ByteArrayChannel is not cancellable
+		} catch (IOException ex) {
+			ioFailed = true;
+		} catch (HgException ex) {
+			ioFailed = true;
+		}
+		return !ioFailed && areTheSame(f, bac.toArray(), dataFile.getPath());
+	}
+	
 	private boolean areTheSame(File f, final byte[] data, Path p) {
 		FileInputStream fis = null;
 		try {
--- a/src/org/tmatesoft/hg/repo/Revlog.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/repo/Revlog.java	Wed Mar 09 05:22:17 2011 +0100
@@ -19,6 +19,8 @@
 import static org.tmatesoft.hg.repo.HgRepository.BAD_REVISION;
 import static org.tmatesoft.hg.repo.HgRepository.TIP;
 
+import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -27,12 +29,24 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.tmatesoft.hg.core.HgBadStateException;
+import org.tmatesoft.hg.core.HgException;
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.DataAccess;
 import org.tmatesoft.hg.internal.RevlogStream;
+import org.tmatesoft.hg.util.ByteChannel;
+import org.tmatesoft.hg.util.CancelSupport;
+import org.tmatesoft.hg.util.CancelledException;
+import org.tmatesoft.hg.util.ProgressSupport;
 
 
 /**
- *
+ * Base class for all Mercurial entities that are serialized in a so called revlog format (changelog, manifest, data files).
+ * 
+ * Implementation note:
+ * Hides the actual revlog stream implementation and its access methods (i.e. RevlogStream.Inspector), iow shall not expose anything internal
+ * in public methods.
+ *   
  * @author Artem Tikhomirov
  * @author TMate Software Ltd.
  */
@@ -100,22 +114,21 @@
 	 * Access to revision data as is (decompressed, but otherwise unprocessed, i.e. not parsed for e.g. changeset or manifest entries) 
 	 * @param nodeid
 	 */
-	public byte[] content(Nodeid nodeid) {
-		return content(getLocalRevision(nodeid));
+	protected void rawContent(Nodeid nodeid, ByteChannel sink) throws HgException, IOException, CancelledException {
+		rawContent(getLocalRevision(nodeid), sink);
 	}
 	
 	/**
 	 * @param revision - repo-local index of this file change (not a changelog revision number!)
 	 */
-	public byte[] content(int revision) {
-		final byte[][] dataPtr = new byte[1][];
-		RevlogStream.Inspector insp = new RevlogStream.Inspector() {
-			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, byte[] data) {
-				dataPtr[0] = data;
-			}
-		};
+	protected void rawContent(int revision, ByteChannel sink) throws HgException, IOException, CancelledException {
+		if (sink == null) {
+			throw new IllegalArgumentException();
+		}
+		ContentPipe insp = new ContentPipe(sink, 0);
+		insp.checkCancelled();
 		content.iterate(revision, revision, true, insp);
-		return dataPtr[0];
+		insp.checkFailed();
 	}
 
 	/**
@@ -145,7 +158,7 @@
 			public int p2 = -1;
 			public byte[] nodeid;
 			
-			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, byte[] data) {
+			public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess da) {
 				p1 = parent1Revision;
 				p2 = parent2Revision;
 				this.nodeid = new byte[20];
@@ -203,7 +216,7 @@
 			RevlogStream.Inspector insp = new RevlogStream.Inspector() {
 				final Nodeid[] sequentialRevisionNodeids = new Nodeid[revisionCount];
 				int ix = 0;
-				public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, byte[] data) {
+				public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess da) {
 					if (ix != revisionNumber) {
 						// XXX temp code, just to make sure I understand what's going on here
 						throw new IllegalStateException();
@@ -267,4 +280,79 @@
 			return modified;
 		}
 	}
+
+	protected static class ContentPipe implements RevlogStream.Inspector, CancelSupport {
+		private final ByteChannel sink;
+		private final CancelSupport cancelSupport;
+		private Exception failure;
+		private final int offset;
+
+		/**
+		 * @param _sink - cannot be <code>null</code>
+		 * @param seekOffset - when positive, orders to pipe bytes to the sink starting from specified offset, not from the first byte available in DataAccess
+		 */
+		public ContentPipe(ByteChannel _sink, int seekOffset) {
+			assert _sink != null;
+			sink = _sink;
+			cancelSupport = CancelSupport.Factory.get(_sink);
+			offset = seekOffset;
+		}
+		
+		protected void prepare(int revisionNumber, DataAccess da) throws HgException, IOException {
+			if (offset > 0) { // save few useless reset/rewind operations
+				da.seek(offset);
+			}
+		}
+
+		public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess da) {
+			try {
+				prepare(revisionNumber, da); // XXX perhaps, prepare shall return DA (sliced, if needed)
+				final ProgressSupport progressSupport = ProgressSupport.Factory.get(sink);
+				ByteBuffer buf = ByteBuffer.allocate(512);
+				progressSupport.start(da.length());
+				while (!da.isEmpty()) {
+					cancelSupport.checkCancelled();
+					da.readBytes(buf);
+					buf.flip();
+					// XXX I may not rely on returned number of bytes but track change in buf position instead.
+					int consumed = sink.write(buf); 
+					// FIXME in fact, bad sink implementation (that consumes no bytes) would result in endless loop. Need to account for this 
+					buf.compact();
+					progressSupport.worked(consumed);
+				}
+				progressSupport.done(); // XXX shall specify whether #done() is invoked always or only if completed successfully.
+			} catch (IOException ex) {
+				recordFailure(ex);
+			} catch (CancelledException ex) {
+				recordFailure(ex);
+			} catch (HgException ex) {
+				recordFailure(ex);
+			}
+		}
+		
+		public void checkCancelled() throws CancelledException {
+			cancelSupport.checkCancelled();
+		}
+
+		protected void recordFailure(Exception ex) {
+			assert failure == null;
+			failure = ex;
+		}
+
+		public void checkFailed() throws HgException, IOException, CancelledException {
+			if (failure == null) {
+				return;
+			}
+			if (failure instanceof IOException) {
+				throw (IOException) failure;
+			}
+			if (failure instanceof CancelledException) {
+				throw (CancelledException) failure;
+			}
+			if (failure instanceof HgException) {
+				throw (HgException) failure;
+			}
+			throw new HgBadStateException(failure);
+		}
+	}
 }
--- a/src/org/tmatesoft/hg/util/ProgressSupport.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/src/org/tmatesoft/hg/util/ProgressSupport.java	Wed Mar 09 05:22:17 2011 +0100
@@ -24,7 +24,7 @@
  */
 public interface ProgressSupport {
 
-	public void start(int totalUnits);
+	public void start(long totalUnits);
 	public void worked(int units);
 	public void done();
 
@@ -45,7 +45,7 @@
 				}
 			}
 			return new ProgressSupport() {
-				public void start(int totalUnits) {
+				public void start(long totalUnits) {
 				}
 				public void worked(int units) {
 				}
--- a/test/org/tmatesoft/hg/test/StatusOutputParser.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/test/org/tmatesoft/hg/test/StatusOutputParser.java	Wed Mar 09 05:22:17 2011 +0100
@@ -64,42 +64,45 @@
 
 	public void parse(CharSequence seq) {
 		Matcher m = pattern.matcher(seq);
-		Path lastAdded = null;
+		Path lastEntry = null;
 		while (m.find()) {
-			String fname = m.group(2);
+			Path fname = pathHelper.path(m.group(2));
 			switch ((int) m.group(1).charAt(0)) {
 			case (int) 'M' : {
-				result.modified(pathHelper.path(fname));
+				result.modified(fname);
+				lastEntry = fname; // for files modified through merge there's also 'copy' source 
 				break;
 			}
 			case (int) 'A' : {
-				result.added(lastAdded = pathHelper.path(fname));
+				result.added(fname);
+				lastEntry = fname;
 				break;
 			}
 			case (int) 'R' : {
-				result.removed(pathHelper.path(fname));
+				result.removed(fname);
 				break;
 			}
 			case (int) '?' : {
-				result.unknown(pathHelper.path(fname));
+				result.unknown(fname);
 				break;
 			}
 			case (int) 'I' : {
-				result.ignored(pathHelper.path(fname));
+				result.ignored(fname);
 				break;
 			}
 			case (int) 'C' : {
-				result.clean(pathHelper.path(fname));
+				result.clean(fname);
 				break;
 			}
 			case (int) '!' : {
-				result.missing(pathHelper.path(fname));
+				result.missing(fname);
 				break;
 			}
 			case (int) ' ' : {
 				// last added is copy destination
 				// to get or to remove it - depends on what StatusCollector does in this case
-				result.copied(pathHelper.path(fname), lastAdded);
+				result.copied(fname, lastEntry);
+				lastEntry = null;
 				break;
 			}
 			}
--- a/test/org/tmatesoft/hg/test/TestByteChannel.java	Wed Mar 02 01:06:09 2011 +0100
+++ b/test/org/tmatesoft/hg/test/TestByteChannel.java	Wed Mar 09 05:22:17 2011 +0100
@@ -16,12 +16,13 @@
  */
 package org.tmatesoft.hg.test;
 
-import java.util.Arrays;
+import static org.junit.Assert.assertArrayEquals;
 
 import org.junit.Assert;
-import org.tmatesoft.hg.core.HgRepoFacade;
+import org.junit.Test;
 import org.tmatesoft.hg.internal.ByteArrayChannel;
 import org.tmatesoft.hg.repo.HgDataFile;
+import org.tmatesoft.hg.repo.HgRepository;
 
 /**
  *
@@ -30,27 +31,57 @@
  */
 public class TestByteChannel {
 
+	private HgRepository repo;
+
 	public static void main(String[] args) throws Exception {
-		HgRepoFacade rf = new HgRepoFacade();
-		rf.init();
-		HgDataFile file = rf.getRepository().getFileNode("src/org/tmatesoft/hg/internal/KeywordFilter.java");
-		for (int i = file.getLastRevision(); i >= 0; i--) {
-			System.out.print("Content for revision:" + i);
-			compareContent(file, i);
-			System.out.println(" OK");
-		}
+//		HgRepoFacade rf = new HgRepoFacade();
+//		rf.init();
+//		HgDataFile file = rf.getRepository().getFileNode("src/org/tmatesoft/hg/internal/KeywordFilter.java");
+//		for (int i = file.getLastRevision(); i >= 0; i--) {
+//			System.out.print("Content for revision:" + i);
+//			compareContent(file, i);
+//			System.out.println(" OK");
+//		}
 		//CatCommand cmd = rf.createCatCommand();
 	}
 
-	private static void compareContent(HgDataFile file, int rev) throws Exception {
-		byte[] oldAccess = file.content(rev);
+//	private static void compareContent(HgDataFile file, int rev) throws Exception {
+//		byte[] oldAccess = file.content(rev);
+//		ByteArrayChannel ch = new ByteArrayChannel();
+//		file.content(rev, ch);
+//		byte[] newAccess = ch.toArray();
+//		Assert.assertArrayEquals(oldAccess, newAccess);
+//		// don't trust anyone (even JUnit) 
+//		if (!Arrays.equals(oldAccess, newAccess)) {
+//			throw new RuntimeException("Failed:" + rev);
+//		}
+//	}
+
+	@Test
+	public void testContent() throws Exception {
+		repo = Configuration.get().find("log-1");
+		final byte[] expectedContent = new byte[] { 'a', ' ', 13, 10 };
 		ByteArrayChannel ch = new ByteArrayChannel();
-		file.content(rev, ch, false);
-		byte[] newAccess = ch.toArray();
-		Assert.assertArrayEquals(oldAccess, newAccess);
-		// don't trust anyone (even JUnit) 
-		if (!Arrays.equals(oldAccess, newAccess)) {
-			throw new RuntimeException("Failed:" + rev);
-		}
+		repo.getFileNode("dir/b").content(0, ch);
+		assertArrayEquals(expectedContent, ch.toArray());
+		repo.getFileNode("d").content(HgRepository.TIP, ch = new ByteArrayChannel() );
+		assertArrayEquals(expectedContent, ch.toArray());
+	}
+
+	@Test
+	public void testStripMetadata() throws Exception {
+		repo = Configuration.get().find("log-1");
+		ByteArrayChannel ch = new ByteArrayChannel();
+		HgDataFile dir_b = repo.getFileNode("dir/b");
+		Assert.assertTrue(dir_b.isCopy());
+		Assert.assertEquals("b", dir_b.getCopySourceName().toString());
+		Assert.assertEquals("e44751cdc2d14f1eb0146aa64f0895608ad15917", dir_b.getCopySourceRevision().toString());
+		dir_b.content(0, ch);
+		// assert rawContent has 1 10 ... 1 10
+		assertArrayEquals("a \r\n".getBytes(), ch.toArray());
+		//
+		// try once again to make sure metadata records/extracts correct offsets
+		dir_b.content(0, ch = new ByteArrayChannel());
+		assertArrayEquals("a \r\n".getBytes(), ch.toArray());
 	}
 }