Mercurial > hg4j
comparison src/org/tmatesoft/hg/core/HgCommitCommand.java @ 618:7c0d2ce340b8
Refactor approach how content finds its way down to a commit revision
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
|---|---|
| date | Thu, 16 May 2013 19:46:13 +0200 |
| parents | 65c01508f002 |
| children | 5afc7eedb3dd |
comparison
equal
deleted
inserted
replaced
| 617:65c01508f002 | 618:7c0d2ce340b8 |
|---|---|
| 17 package org.tmatesoft.hg.core; | 17 package org.tmatesoft.hg.core; |
| 18 | 18 |
| 19 import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION; | 19 import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION; |
| 20 | 20 |
| 21 import java.io.IOException; | 21 import java.io.IOException; |
| 22 import java.nio.ByteBuffer; | |
| 23 import java.util.ArrayList; | |
| 24 | 22 |
| 25 import org.tmatesoft.hg.internal.ByteArrayChannel; | |
| 26 import org.tmatesoft.hg.internal.COWTransaction; | 23 import org.tmatesoft.hg.internal.COWTransaction; |
| 27 import org.tmatesoft.hg.internal.CommitFacility; | 24 import org.tmatesoft.hg.internal.CommitFacility; |
| 28 import org.tmatesoft.hg.internal.CompleteRepoLock; | 25 import org.tmatesoft.hg.internal.CompleteRepoLock; |
| 29 import org.tmatesoft.hg.internal.Experimental; | 26 import org.tmatesoft.hg.internal.Experimental; |
| 30 import org.tmatesoft.hg.internal.FileContentSupplier; | 27 import org.tmatesoft.hg.internal.FileContentSupplier; |
| 31 import org.tmatesoft.hg.internal.Internals; | 28 import org.tmatesoft.hg.internal.Internals; |
| 32 import org.tmatesoft.hg.internal.Transaction; | 29 import org.tmatesoft.hg.internal.Transaction; |
| 30 import org.tmatesoft.hg.internal.WorkingCopyContent; | |
| 33 import org.tmatesoft.hg.repo.HgChangelog; | 31 import org.tmatesoft.hg.repo.HgChangelog; |
| 34 import org.tmatesoft.hg.repo.HgDataFile; | 32 import org.tmatesoft.hg.repo.HgDataFile; |
| 35 import org.tmatesoft.hg.repo.HgInternals; | 33 import org.tmatesoft.hg.repo.HgInternals; |
| 36 import org.tmatesoft.hg.repo.HgRepository; | 34 import org.tmatesoft.hg.repo.HgRepository; |
| 37 import org.tmatesoft.hg.repo.HgRuntimeException; | 35 import org.tmatesoft.hg.repo.HgRuntimeException; |
| 114 CommitFacility cf = new CommitFacility(Internals.getInstance(repo), parentRevs[0], parentRevs[1]); | 112 CommitFacility cf = new CommitFacility(Internals.getInstance(repo), parentRevs[0], parentRevs[1]); |
| 115 for (Path m : status.getModified()) { | 113 for (Path m : status.getModified()) { |
| 116 HgDataFile df = repo.getFileNode(m); | 114 HgDataFile df = repo.getFileNode(m); |
| 117 cf.add(df, new WorkingCopyContent(df)); | 115 cf.add(df, new WorkingCopyContent(df)); |
| 118 } | 116 } |
| 119 ArrayList<FileContentSupplier> toClear = new ArrayList<FileContentSupplier>(); | |
| 120 for (Path a : status.getAdded()) { | 117 for (Path a : status.getAdded()) { |
| 121 HgDataFile df = repo.getFileNode(a); // TODO need smth explicit, like repo.createNewFileNode(Path) here | 118 HgDataFile df = repo.getFileNode(a); // TODO need smth explicit, like repo.createNewFileNode(Path) here |
| 122 // XXX might be an interesting exercise not to demand a content supplier, but instead return a "DataRequester" | 119 // XXX might be an interesting exercise not to demand a content supplier, but instead return a "DataRequester" |
| 123 // object, that would indicate interest in data, and this code would "push" it to requester, so that any exception | 120 // object, that would indicate interest in data, and this code would "push" it to requester, so that any exception |
| 124 // is handled here, right away, and won't need to travel supplier and CommitFacility. (although try/catch inside | 121 // is handled here, right away, and won't need to travel supplier and CommitFacility. (although try/catch inside |
| 125 // supplier.read (with empty throws declaration) | 122 // supplier.read (with empty throws declaration) |
| 126 FileContentSupplier fcs = new FileContentSupplier(repo, a); | 123 cf.add(df, new FileContentSupplier(repo, a)); |
| 127 cf.add(df, fcs); | |
| 128 toClear.add(fcs); | |
| 129 } | 124 } |
| 130 for (Path r : status.getRemoved()) { | 125 for (Path r : status.getRemoved()) { |
| 131 HgDataFile df = repo.getFileNode(r); | 126 HgDataFile df = repo.getFileNode(r); |
| 132 cf.forget(df); | 127 cf.forget(df); |
| 133 } | 128 } |
| 142 tr.rollback(); | 137 tr.rollback(); |
| 143 throw ex; | 138 throw ex; |
| 144 } catch (HgException ex) { | 139 } catch (HgException ex) { |
| 145 tr.rollback(); | 140 tr.rollback(); |
| 146 throw ex; | 141 throw ex; |
| 147 } | |
| 148 // TODO toClear list is awful | |
| 149 for (FileContentSupplier fcs : toClear) { | |
| 150 fcs.done(); | |
| 151 } | 142 } |
| 152 return new Outcome(Kind.Success, "Commit ok"); | 143 return new Outcome(Kind.Success, "Commit ok"); |
| 153 } catch (HgRuntimeException ex) { | 144 } catch (HgRuntimeException ex) { |
| 154 throw new HgLibraryFailureException(ex); | 145 throw new HgLibraryFailureException(ex); |
| 155 } finally { | 146 } finally { |
| 180 Pair<Nodeid, Nodeid> pn = repo.getWorkingCopyParents(); | 171 Pair<Nodeid, Nodeid> pn = repo.getWorkingCopyParents(); |
| 181 HgChangelog clog = repo.getChangelog(); | 172 HgChangelog clog = repo.getChangelog(); |
| 182 parents[0] = pn.first().isNull() ? NO_REVISION : clog.getRevisionIndex(pn.first()); | 173 parents[0] = pn.first().isNull() ? NO_REVISION : clog.getRevisionIndex(pn.first()); |
| 183 parents[1] = pn.second().isNull() ? NO_REVISION : clog.getRevisionIndex(pn.second()); | 174 parents[1] = pn.second().isNull() ? NO_REVISION : clog.getRevisionIndex(pn.second()); |
| 184 } | 175 } |
| 185 | |
| 186 private static class WorkingCopyContent implements CommitFacility.ByteDataSupplier { | |
| 187 private final HgDataFile file; | |
| 188 private ByteBuffer fileContent; | |
| 189 | |
| 190 public WorkingCopyContent(HgDataFile dataFile) { | |
| 191 file = dataFile; | |
| 192 if (!dataFile.exists()) { | |
| 193 throw new IllegalArgumentException(); | |
| 194 } | |
| 195 } | |
| 196 | |
| 197 public int read(ByteBuffer dst) { | |
| 198 if (fileContent == null) { | |
| 199 try { | |
| 200 ByteArrayChannel sink = new ByteArrayChannel(); | |
| 201 // TODO desperately need partial read here | |
| 202 file.workingCopy(sink); | |
| 203 fileContent = ByteBuffer.wrap(sink.toArray()); | |
| 204 } catch (CancelledException ex) { | |
| 205 // ByteArrayChannel doesn't cancel, never happens | |
| 206 assert false; | |
| 207 } | |
| 208 } | |
| 209 if (fileContent.remaining() == 0) { | |
| 210 return -1; | |
| 211 } | |
| 212 int dstCap = dst.remaining(); | |
| 213 if (fileContent.remaining() > dstCap) { | |
| 214 // save actual limit, and pretend we've got exactly desired amount of bytes | |
| 215 final int lim = fileContent.limit(); | |
| 216 fileContent.limit(dstCap); | |
| 217 dst.put(fileContent); | |
| 218 fileContent.limit(lim); | |
| 219 } else { | |
| 220 dst.put(fileContent); | |
| 221 } | |
| 222 return dstCap - dst.remaining(); | |
| 223 } | |
| 224 } | |
| 225 } | 176 } |
