comparison src/org/tmatesoft/hg/repo/HgWorkingCopyStatusCollector.java @ 117:6c0be854d149
Enable filters for status operation (ToRepo case)
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
|---|---|
| date | Fri, 04 Feb 2011 02:12:30 +0100 |
| parents | a3a2e5deb320 |
| children | b19f0ac5ee62 |
| 116:5d13dcaaff39 (old) | 117:6c0be854d149 (new) |
|---|---|
| 14 * the terms of a license other than GNU General Public License | 14 * the terms of a license other than GNU General Public License |
| 15 * contact TMate Software at support@hg4j.com | 15 * contact TMate Software at support@hg4j.com |
| 16 */ | 16 */ |
| 17 package org.tmatesoft.hg.repo; | 17 package org.tmatesoft.hg.repo; |
| 18 | 18 |
| | 19 import static java.lang.Math.min; |
| 19 import static org.tmatesoft.hg.repo.HgRepository.BAD_REVISION; | 20 import static org.tmatesoft.hg.repo.HgRepository.BAD_REVISION; |
| 20 import static org.tmatesoft.hg.repo.HgRepository.TIP; | 21 import static org.tmatesoft.hg.repo.HgRepository.TIP; |
| 21 | 22 |
| 22 import java.io.BufferedInputStream; | 23 import java.io.BufferedInputStream; |
| 23 import java.io.File; | 24 import java.io.File; |
| 24 import java.io.FileInputStream; | 25 import java.io.FileInputStream; |
| 25 import java.io.IOException; | 26 import java.io.IOException; |
| | 27 import java.nio.ByteBuffer; |
| | 28 import java.nio.channels.FileChannel; |
| 26 import java.util.Collections; | 29 import java.util.Collections; |
| 27 import java.util.Set; | 30 import java.util.Set; |
| 28 import java.util.TreeSet; | 31 import java.util.TreeSet; |
| 29 | 32 |
| 30 import org.tmatesoft.hg.core.Nodeid; | 33 import org.tmatesoft.hg.core.Nodeid; |
| | 34 import org.tmatesoft.hg.core.Path; |
| | 35 import org.tmatesoft.hg.internal.ByteArrayChannel; |
| | 36 import org.tmatesoft.hg.internal.FilterByteChannel; |
| 31 import org.tmatesoft.hg.repo.HgStatusCollector.ManifestRevisionInspector; | 37 import org.tmatesoft.hg.repo.HgStatusCollector.ManifestRevisionInspector; |
| | 38 import org.tmatesoft.hg.util.ByteChannel; |
| 32 import org.tmatesoft.hg.util.FileWalker; | 39 import org.tmatesoft.hg.util.FileWalker; |
| 33 import org.tmatesoft.hg.util.PathPool; | 40 import org.tmatesoft.hg.util.PathPool; |
| 34 import org.tmatesoft.hg.util.PathRewrite; | 41 import org.tmatesoft.hg.util.PathRewrite; |
| 35 | 42 |
| 36 /** | 43 /** |
| 222 if (r.size /* XXX File.length() ?! */ != lengthAtRevision || flags != todoGenerateFlags(fname /*java.io.File*/)) { | 229 if (r.size /* XXX File.length() ?! */ != lengthAtRevision || flags != todoGenerateFlags(fname /*java.io.File*/)) { |
| 223 inspector.modified(getPathPool().path(fname)); | 230 inspector.modified(getPathPool().path(fname)); |
| 224 } else { | 231 } else { |
| 225 // check actual content to see actual changes | 232 // check actual content to see actual changes |
| 226 // XXX consider adding HgDataDile.compare(File/byte[]/whatever) operation to optimize comparison | 233 // XXX consider adding HgDataDile.compare(File/byte[]/whatever) operation to optimize comparison |
| 227 if (areTheSame(f, fileNode.content(nid1))) { | 234 if (areTheSame(f, fileNode.content(nid1), fileNode.getPath())) { |
| 228 inspector.clean(getPathPool().path(fname)); | 235 inspector.clean(getPathPool().path(fname)); |
| 229 } else { | 236 } else { |
| 230 inspector.modified(getPathPool().path(fname)); | 237 inspector.modified(getPathPool().path(fname)); |
| 231 } | 238 } |
| 232 } | 239 } |
| 240 // changeset nodeid + hash(actual content) => entry (Nodeid) in the next Manifest | 247 // changeset nodeid + hash(actual content) => entry (Nodeid) in the next Manifest |
| 241 // then it's sufficient to check parents from dirstate, and if they do not match parents from file's baseRevision (non matching parents means different nodeids). | 248 // then it's sufficient to check parents from dirstate, and if they do not match parents from file's baseRevision (non matching parents means different nodeids). |
| 242 // The question is whether original Hg treats this case (same content, different parents and hence nodeids) as 'modified' or 'clean' | 249 // The question is whether original Hg treats this case (same content, different parents and hence nodeids) as 'modified' or 'clean' |
| 243 } | 250 } |
| 244 | 251 |
| | 252 private boolean areTheSame(File f, final byte[] data, Path p) { |
| | 253 FileInputStream fis = null; |
| | 254 try { |
| | 255 try { |
| | 256 fis = new FileInputStream(f); |
| | 257 FileChannel fc = fis.getChannel(); |
| | 258 ByteBuffer fb = ByteBuffer.allocate(min(data.length, 8192)); |
| | 259 final boolean[] checkValue = new boolean[] { true }; |
| | 260 ByteChannel check = new ByteChannel() { |
| | 261 int x = 0; |
| | 262 public int write(ByteBuffer buffer) throws Exception { |
| | 263 for (int i = buffer.remaining(); i > 0; i--, x++) { |
| | 264 if (data[x] != buffer.get()) { |
| | 265 checkValue[0] = false; |
| | 266 break; |
| | 267 } |
| | 268 } |
| | 269 buffer.position(buffer.limit()); // mark as read |
| | 270 return buffer.limit(); |
| | 271 } |
| | 272 }; |
| | 273 FilterByteChannel filters = new FilterByteChannel(check, repo.getFiltersFromWorkingDirToRepo(p)); |
| | 274 while (fc.read(fb) != -1 && checkValue[0]) { |
| | 275 fb.flip(); |
| | 276 filters.write(fb); |
| | 277 fb.compact(); |
| | 278 } |
| | 279 return checkValue[0]; |
| | 280 } catch (IOException ex) { |
| | 281 if (fis != null) { |
| | 282 fis.close(); |
| | 283 } |
| | 284 ex.printStackTrace(); // log warn |
| | 285 } |
| | 286 } catch (/*TODO typed*/Exception ex) { |
| | 287 ex.printStackTrace(); |
| | 288 } |
| | 289 return false; |
| | 290 } |
| | 291 |
| 245 private static String todoGenerateFlags(String fname) { | 292 private static String todoGenerateFlags(String fname) { |
| 246 // FIXME implement | 293 // FIXME implement |
| 247 return null; | 294 return null; |
| 248 } | 295 } |
| 249 private static boolean areTheSame(File f, byte[] data) { | |
| 250 try { | |
| 251 BufferedInputStream is = new BufferedInputStream(new FileInputStream(f)); | |
| 252 int i = 0; | |
| 253 while (i < data.length && data[i] == is.read()) { | |
| 254 i++; // increment only for successful match, otherwise won't tell last byte in data was the same as read from the stream | |
| 255 } | |
| 256 return i == data.length && is.read() == -1; // although data length is expected to be the same (see caller), check that we reached EOF, no more data left. | |
| 257 } catch (IOException ex) { | |
| 258 ex.printStackTrace(); // log warn | |
| 259 } | |
| 260 return false; | |
| 261 } | |
| 262 | 296 |
| 263 } | 297 } |
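
The new `areTheSame` above no longer compares raw file bytes against the revision content; it streams the working-copy file through a `FilterByteChannel` built from `repo.getFiltersFromWorkingDirToRepo(p)` and compares the filtered bytes instead. Below is a minimal, self-contained sketch of that idea, assuming a toy CRLF-to-LF normalizer as a stand-in for the real filter chain; the names `FilteredCompareSketch`, `sameAfterFilters`, and `normalizeEol` are illustrative only, not hg4j API.

```java
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

// Sketch only: a toy "working dir -> repo" filter (CRLF -> LF) stands in
// for hg4j's FilterByteChannel chain; none of these names are hg4j API.
public class FilteredCompareSketch {

    // Toy ToRepo filter: drop a CR that is immediately followed by LF.
    // (Naive: a CR at a chunk boundary is not handled; real filters would
    // keep state across successive write() calls.)
    static ByteBuffer normalizeEol(ByteBuffer in) {
        ByteBuffer out = ByteBuffer.allocate(in.remaining());
        while (in.hasRemaining()) {
            byte b = in.get();
            if (b == '\r' && in.hasRemaining() && in.get(in.position()) == '\n') {
                continue; // skip CR, the following LF is copied on the next pass
            }
            out.put(b);
        }
        out.flip();
        return out;
    }

    // Stream the file in chunks, filter each chunk, and compare the filtered
    // bytes against the expected repository content.
    static boolean sameAfterFilters(File f, byte[] repoContent) throws IOException {
        try (FileInputStream fis = new FileInputStream(f);
             FileChannel fc = fis.getChannel()) {
            ByteBuffer fb = ByteBuffer.allocate(Math.max(1, Math.min(repoContent.length, 8192)));
            int matched = 0; // bytes of repoContent confirmed so far
            while (fc.read(fb) != -1) {
                fb.flip();
                ByteBuffer filtered = normalizeEol(fb);
                while (filtered.hasRemaining()) {
                    if (matched == repoContent.length || filtered.get() != repoContent[matched]) {
                        return false; // extra byte or mismatch
                    }
                    matched++;
                }
                fb.clear();
            }
            return matched == repoContent.length; // reject prefix-only matches
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] repoContent = "line one\nline two\n".getBytes(StandardCharsets.US_ASCII);
        File wc = File.createTempFile("wc-copy", ".txt");
        wc.deleteOnExit();
        Files.write(wc.toPath(), "line one\r\nline two\r\n".getBytes(StandardCharsets.US_ASCII));
        System.out.println(sameAfterFilters(wc, repoContent)); // true: CRLF normalized to LF
    }
}
```

Two design points in the sketch, not claims about the committed code: it rejects the case where the filtered file is only a prefix of the repository content (the old, now-deleted `areTheSame` did this via its final `is.read() == -1` check), and it closes the stream on the success path as well as on error.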
