comparison src/org/tmatesoft/hg/repo/HgDataFile.java @ 157:d5268ca7715b

Merged branch wrap-data-access into default for resource-friendly data access. Updated API to promote that friendliness to clients (channels, not byte[]). More exceptions
author Artem Tikhomirov <tikhomirov.artem@gmail.com>
date Wed, 09 Mar 2011 05:22:17 +0100
parents src/com/tmate/hgkit/ll/HgDataFile.java@9429c7bd1920 src/com/tmate/hgkit/ll/HgDataFile.java@1a7a9a20e1f9
children b413b16d10a5
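
The description above refers to switching clients from byte[] results to ByteChannel sinks. A minimal usage sketch follows, assuming an HgDataFile instance obtained elsewhere; the ByteArrayChannel and readRevision names are illustrative only and are not part of this changeset — the sketch merely exercises the content(int, ByteChannel) and contentWithFilters(int, ByteChannel) signatures visible in the diff below.

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.tmatesoft.hg.core.HgDataStreamException;
    import org.tmatesoft.hg.repo.HgDataFile;
    import org.tmatesoft.hg.util.ByteChannel;
    import org.tmatesoft.hg.util.CancelledException;

    // Illustrative sink: collects everything the revlog pushes into the channel.
    class ByteArrayChannel implements ByteChannel {
        private final ByteArrayOutputStream out = new ByteArrayOutputStream();

        public int write(ByteBuffer buffer) throws IOException {
            int consumed = buffer.remaining();
            while (buffer.hasRemaining()) {
                out.write(buffer.get());
            }
            return consumed; // report how many bytes were taken from the buffer
        }

        public byte[] toArray() {
            return out.toByteArray();
        }
    }

    // Illustrative caller: pulls one revision through the channel instead of
    // receiving a byte[] from the library.
    class ReadExample {
        static byte[] readRevision(HgDataFile df, int revision)
                throws HgDataStreamException, IOException, CancelledException {
            ByteArrayChannel sink = new ByteArrayChannel();
            df.content(revision, sink); // or df.contentWithFilters(revision, sink)
            return sink.toArray();
        }
    }
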
--- 156:643ddec3be36
+++ 157:d5268ca7715b
@@ -16,28 +16,27 @@
  */
 package org.tmatesoft.hg.repo;
 
 import static org.tmatesoft.hg.repo.HgInternals.wrongLocalRevision;
 import static org.tmatesoft.hg.repo.HgRepository.*;
-import static org.tmatesoft.hg.repo.HgRepository.TIP;
-import static org.tmatesoft.hg.repo.HgRepository.WORKING_COPY;
-
+
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.TreeMap;
 
 import org.tmatesoft.hg.core.HgDataStreamException;
+import org.tmatesoft.hg.core.HgException;
 import org.tmatesoft.hg.core.Nodeid;
+import org.tmatesoft.hg.internal.DataAccess;
 import org.tmatesoft.hg.internal.FilterByteChannel;
 import org.tmatesoft.hg.internal.RevlogStream;
 import org.tmatesoft.hg.util.ByteChannel;
-import org.tmatesoft.hg.util.CancelSupport;
 import org.tmatesoft.hg.util.CancelledException;
 import org.tmatesoft.hg.util.Path;
-import org.tmatesoft.hg.util.ProgressSupport;
 
 
 
 /**
  * ? name:HgFileNode?
@@ -69,110 +68,90 @@
         return content != null; // XXX need better impl
     }
 
     // human-readable (i.e. "COPYING", not "store/data/_c_o_p_y_i_n_g.i")
     public Path getPath() {
-        return path; // hgRepo.backresolve(this) -> name?
+        return path; // hgRepo.backresolve(this) -> name? In this case, what about hashed long names?
     }
 
     public int length(Nodeid nodeid) {
         return content.dataLength(getLocalRevision(nodeid));
     }
 
-    public byte[] content() {
-        return content(TIP);
+    public void workingCopy(ByteChannel sink) throws IOException, CancelledException {
+        throw HgRepository.notImplemented();
     }
 
-    /*XXX not sure applyFilters is the best way to do, perhaps, callers shall add filters themselves?*/
-    public void content(int revision, ByteChannel sink, boolean applyFilters) throws HgDataStreamException, IOException, CancelledException {
-        byte[] content = content(revision);
-        final CancelSupport cancelSupport = CancelSupport.Factory.get(sink);
-        final ProgressSupport progressSupport = ProgressSupport.Factory.get(sink);
-        ByteBuffer buf = ByteBuffer.allocate(512);
-        int left = content.length;
-        progressSupport.start(left);
-        int offset = 0;
-        cancelSupport.checkCancelled();
-        ByteChannel _sink = applyFilters ? new FilterByteChannel(sink, getRepo().getFiltersFromRepoToWorkingDir(getPath())) : sink;
-        do {
-            buf.put(content, offset, Math.min(left, buf.remaining()));
-            buf.flip();
-            cancelSupport.checkCancelled();
-            // XXX I may not rely on returned number of bytes but track change in buf position instead.
-            int consumed = _sink.write(buf);
-            buf.compact();
-            offset += consumed;
-            left -= consumed;
-            progressSupport.worked(consumed);
-        } while (left > 0);
-        progressSupport.done(); // XXX shall specify whether #done() is invoked always or only if completed successfully.
+//    public void content(int revision, ByteChannel sink, boolean applyFilters) throws HgDataStreamException, IOException, CancelledException {
+//        byte[] content = content(revision);
+//        final CancelSupport cancelSupport = CancelSupport.Factory.get(sink);
+//        final ProgressSupport progressSupport = ProgressSupport.Factory.get(sink);
+//        ByteBuffer buf = ByteBuffer.allocate(512);
+//        int left = content.length;
+//        progressSupport.start(left);
+//        int offset = 0;
+//        cancelSupport.checkCancelled();
+//        ByteChannel _sink = applyFilters ? new FilterByteChannel(sink, getRepo().getFiltersFromRepoToWorkingDir(getPath())) : sink;
+//        do {
+//            buf.put(content, offset, Math.min(left, buf.remaining()));
+//            buf.flip();
+//            cancelSupport.checkCancelled();
+//            // XXX I may not rely on returned number of bytes but track change in buf position instead.
+//            int consumed = _sink.write(buf);
+//            buf.compact();
+//            offset += consumed;
+//            left -= consumed;
+//            progressSupport.worked(consumed);
+//        } while (left > 0);
+//        progressSupport.done(); // XXX shall specify whether #done() is invoked always or only if completed successfully.
+//    }
+
+    /*XXX not sure distinct method contentWithFilters() is the best way to do, perhaps, callers shall add filters themselves?*/
+    public void contentWithFilters(int revision, ByteChannel sink) throws HgDataStreamException, IOException, CancelledException {
+        content(revision, new FilterByteChannel(sink, getRepo().getFiltersFromRepoToWorkingDir(getPath())));
     }
 
     // for data files need to check heading of the file content for possible metadata
     // @see http://mercurial.selenic.com/wiki/FileFormats#data.2BAC8-
-    @Override
-    public byte[] content(int revision) {
+    public void content(int revision, ByteChannel sink) throws HgDataStreamException, IOException, CancelledException {
         if (revision == TIP) {
             revision = getLastRevision();
         }
-        if (wrongLocalRevision(revision) || revision == BAD_REVISION || revision == WORKING_COPY) {
+        if (revision == WORKING_COPY) {
+            workingCopy(sink);
+            return;
+        }
+        if (wrongLocalRevision(revision) || revision == BAD_REVISION) {
            throw new IllegalArgumentException(String.valueOf(revision));
         }
-        byte[] data = super.content(revision);
+        if (sink == null) {
+            throw new IllegalArgumentException();
+        }
         if (metadata == null) {
             metadata = new Metadata();
         }
+        ContentPipe insp;
         if (metadata.none(revision)) {
-            // although not very reasonable when data is byte array, this check might
-            // get handy when there's a stream/channel to avoid useless reads and rewinds.
-            return data;
-        }
-        int toSkip = 0;
-        if (!metadata.known(revision)) {
-            if (data.length < 4 || (data[0] != 1 && data[1] != 10)) {
-                metadata.recordNone(revision);
-                return data;
-            }
-            int lastEntryStart = 2;
-            int lastColon = -1;
-            ArrayList<MetadataEntry> _metadata = new ArrayList<MetadataEntry>();
-            String key = null, value = null;
-            for (int i = 2; i < data.length; i++) {
-                if (data[i] == (int) ':') {
-                    key = new String(data, lastEntryStart, i - lastEntryStart);
-                    lastColon = i;
-                } else if (data[i] == '\n') {
-                    if (key == null || lastColon == -1 || i <= lastColon) {
-                        throw new IllegalStateException(); // FIXME log instead and record null key in the metadata. Ex just to fail fast during dev
-                    }
-                    value = new String(data, lastColon + 1, i - lastColon - 1).trim();
-                    _metadata.add(new MetadataEntry(key, value));
-                    key = value = null;
-                    lastColon = -1;
-                    lastEntryStart = i+1;
-                } else if (data[i] == 1 && i + 1 < data.length && data[i+1] == 10) {
-                    if (key != null && lastColon != -1 && i > lastColon) {
-                        // just in case last entry didn't end with newline
-                        value = new String(data, lastColon + 1, i - lastColon - 1);
-                        _metadata.add(new MetadataEntry(key, value));
-                    }
-                    lastEntryStart = i+1;
-                    break;
-                }
-            }
-            _metadata.trimToSize();
-            metadata.add(revision, lastEntryStart, _metadata);
-            toSkip = lastEntryStart;
+            insp = new ContentPipe(sink, 0);
+        } else if (metadata.known(revision)) {
+            insp = new ContentPipe(sink, metadata.dataOffset(revision));
         } else {
-            toSkip = metadata.dataOffset(revision);
-        }
-        // XXX copy of an array may be memory-hostile, a wrapper with baseOffsetShift(lastEntryStart) would be more convenient
-        byte[] rv = new byte[data.length - toSkip];
-        System.arraycopy(data, toSkip, rv, 0, rv.length);
-        return rv;
-    }
-
+            // do not know if there's metadata
+            insp = new MetadataContentPipe(sink, metadata);
+        }
+        insp.checkCancelled();
+        super.content.iterate(revision, revision, true, insp);
+        try {
+            insp.checkFailed();
+        } catch (HgDataStreamException ex) {
+            throw ex;
+        } catch (HgException ex) {
+            // shall not happen, unless we changed ContentPipe or its subclass
+            throw new HgDataStreamException(ex.getClass().getName(), ex);
+        }
+    }
+
     public void history(HgChangelog.Inspector inspector) {
         history(0, getLastRevision(), inspector);
     }
 
     public void history(int start, int end, HgChangelog.Inspector inspector) {
@@ -190,11 +169,11 @@
         }
         final int[] commitRevisions = new int[end - start + 1];
         RevlogStream.Inspector insp = new RevlogStream.Inspector() {
             int count = 0;
 
-            public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, byte[] data) {
+            public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess data) {
                 commitRevisions[count++] = linkRevision;
             }
         };
         content.iterate(start, end, false, insp);
         getRepo().getChangelog().range(inspector, commitRevisions);
@@ -208,29 +187,41 @@
     public Nodeid getChangesetRevision(Nodeid nid) {
         int changelogRevision = getChangesetLocalRevision(getLocalRevision(nid));
         return getRepo().getChangelog().getRevision(changelogRevision);
     }
 
-    public boolean isCopy() {
+    public boolean isCopy() throws HgDataStreamException {
         if (metadata == null || !metadata.checked(0)) {
             // content() always initializes metadata.
-            content(0); // FIXME expensive way to find out metadata, distinct RevlogStream.Iterator would be better.
+            // FIXME this is expensive way to find out metadata, distinct RevlogStream.Iterator would be better.
+            try {
+                content(0, new ByteChannel() { // No-op channel
+                    public int write(ByteBuffer buffer) throws IOException {
+                        // pretend we consumed whole buffer
+                        int rv = buffer.remaining();
+                        buffer.position(buffer.limit());
+                        return rv;
+                    }
+                });
+            } catch (Exception ex) {
+                throw new HgDataStreamException("Can't initialize metadata", ex);
+            }
         }
         if (!metadata.known(0)) {
             return false;
         }
         return metadata.find(0, "copy") != null;
     }
 
-    public Path getCopySourceName() {
+    public Path getCopySourceName() throws HgDataStreamException {
         if (isCopy()) {
             return Path.create(metadata.find(0, "copy"));
         }
         throw new UnsupportedOperationException(); // XXX REVISIT, think over if Exception is good (clients would check isCopy() anyway, perhaps null is sufficient?)
     }
 
-    public Nodeid getCopySourceRevision() {
+    public Nodeid getCopySourceRevision() throws HgDataStreamException {
         if (isCopy()) {
             return Nodeid.fromAscii(metadata.find(0, "copyrev")); // XXX reuse/cache Nodeid
         }
         throw new UnsupportedOperationException();
     }
@@ -315,6 +306,78 @@
                 }
             }
             return null;
         }
     }
+
+    private static class MetadataContentPipe extends ContentPipe {
+
+        private final Metadata metadata;
+
+        public MetadataContentPipe(ByteChannel sink, Metadata _metadata) {
+            super(sink, 0);
+            metadata = _metadata;
+        }
+
+        @Override
+        protected void prepare(int revisionNumber, DataAccess da) throws HgException, IOException {
+            long daLength = da.length();
+            if (daLength < 4 || da.readByte() != 1 || da.readByte() != 10) {
+                metadata.recordNone(revisionNumber);
+                da.reset();
+                return;
+            }
+            int lastEntryStart = 2;
+            int lastColon = -1;
+            ArrayList<MetadataEntry> _metadata = new ArrayList<MetadataEntry>();
+            // XXX in fact, need smth like ByteArrayBuilder, similar to StringBuilder,
+            // which can't be used here because we can't convert bytes to chars as we read them
+            // (there might be multi-byte encoding), and we need to collect all bytes before converting to string
+            ByteArrayOutputStream bos = new ByteArrayOutputStream();
+            String key = null, value = null;
+            boolean byteOne = false;
+            for (int i = 2; i < daLength; i++) {
+                byte b = da.readByte();
+                if (b == '\n') {
+                    if (byteOne) { // i.e. \n follows 1
+                        lastEntryStart = i+1;
+                        // XXX is it possible to have here incomplete key/value (i.e. if last pair didn't end with \n)
+                        break;
+                    }
+                    if (key == null || lastColon == -1 || i <= lastColon) {
+                        throw new IllegalStateException(); // FIXME log instead and record null key in the metadata. Ex just to fail fast during dev
+                    }
+                    value = new String(bos.toByteArray()).trim();
+                    bos.reset();
+                    _metadata.add(new MetadataEntry(key, value));
+                    key = value = null;
+                    lastColon = -1;
+                    lastEntryStart = i+1;
+                    continue;
+                }
+                // byteOne has to be consumed up to this line, if not jet, consume it
+                if (byteOne) {
+                    // insert 1 we've read on previous step into the byte builder
+                    bos.write(1);
+                    // fall-through to consume current byte
+                    byteOne = false;
+                }
+                if (b == (int) ':') {
+                    assert value == null;
+                    key = new String(bos.toByteArray());
+                    bos.reset();
+                    lastColon = i;
+                } else if (b == 1) {
+                    byteOne = true;
+                } else {
+                    bos.write(b);
+                }
+            }
+            _metadata.trimToSize();
+            metadata.add(revisionNumber, lastEntryStart, _metadata);
+            if (da.isEmpty() || !byteOne) {
+                throw new HgDataStreamException(String.format("Metadata for revision %d is not closed properly", revisionNumber), null);
+            }
+            // da is in prepared state (i.e. we consumed all bytes up to metadata end).
+        }
+    }
 }
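
Since isCopy(), getCopySourceName() and getCopySourceRevision() now declare HgDataStreamException (the copy metadata may have to be read lazily from revision 0, as the diff above shows), callers have to handle it. A minimal sketch, assuming an HgDataFile obtained elsewhere; the CopyOriginExample and originOf names are hypothetical and not part of this changeset:

    import org.tmatesoft.hg.core.HgDataStreamException;
    import org.tmatesoft.hg.repo.HgDataFile;
    import org.tmatesoft.hg.util.Path;

    class CopyOriginExample {
        // Hypothetical helper: report where the file was copied/renamed from,
        // falling back to its own path when it is not a copy.
        static Path originOf(HgDataFile df) throws HgDataStreamException {
            if (df.isCopy()) {                  // may trigger lazy metadata parsing of revision 0
                return df.getCopySourceName();  // value of the "copy" metadata entry
            }
            return df.getPath();
        }
    }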