hg4j: comparison of src/org/tmatesoft/hg/internal/CommitFacility.java @ 618:7c0d2ce340b8
Refactor how content finds its way down to a commit revision
author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
---|---|
date | Thu, 16 May 2013 19:46:13 +0200 |
parents | 65c01508f002 |
children | 868b2ffdcd5c |
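
The changeset swaps the pull-style CommitFacility.ByteDataSupplier (which commit() had to drain into a ByteArrayChannel before handing a byte[] to RevlogStreamWriter) for DataSerializer.DataSource, which is passed down and serialized by the revlog writer itself. A minimal sketch of the two shapes follows; the DataSource method names are an assumption for illustration, since this page only shows the type being imported and handed around.

```java
import java.nio.ByteBuffer;

// Sketch only; not the library's exact declarations.
class ContentHandoffSketch {

    // Removed by this changeset: a pull-style supplier that CommitFacility had to
    // drain into a ByteArrayChannel before handing a byte[] to RevlogStreamWriter.
    interface ByteDataSupplier {
        int read(ByteBuffer buf); // -1 once the content is exhausted
    }

    // Roughly what takes its place: a push-style source the revlog writer asks to
    // serialize itself. Method names are an assumption for illustration; the diff
    // only shows DataSerializer.DataSource being imported and passed down.
    interface DataSource {
        void serialize(DataSerializerSketch out);
        int serializeLength(); // lets the writer size the revlog entry up front
    }

    // Stand-in for org.tmatesoft.hg.internal.DataSerializer (hypothetical shape).
    interface DataSerializerSketch {
        void write(byte[] data, int offset, int length);
    }
}
```
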
617:65c01508f002 | 618:7c0d2ce340b8 |
---|---|
23 import static org.tmatesoft.hg.util.LogFacility.Severity.Error; | 23 import static org.tmatesoft.hg.util.LogFacility.Severity.Error; |
24 | 24 |
25 import java.io.File; | 25 import java.io.File; |
26 import java.io.FileOutputStream; | 26 import java.io.FileOutputStream; |
27 import java.io.IOException; | 27 import java.io.IOException; |
28 import java.nio.ByteBuffer; | |
29 import java.util.ArrayList; | 28 import java.util.ArrayList; |
30 import java.util.HashMap; | 29 import java.util.HashMap; |
31 import java.util.LinkedHashMap; | 30 import java.util.LinkedHashMap; |
32 import java.util.Map; | 31 import java.util.Map; |
33 import java.util.Set; | 32 import java.util.Set; |
36 | 35 |
37 import org.tmatesoft.hg.core.HgCommitCommand; | 36 import org.tmatesoft.hg.core.HgCommitCommand; |
38 import org.tmatesoft.hg.core.HgIOException; | 37 import org.tmatesoft.hg.core.HgIOException; |
39 import org.tmatesoft.hg.core.HgRepositoryLockException; | 38 import org.tmatesoft.hg.core.HgRepositoryLockException; |
40 import org.tmatesoft.hg.core.Nodeid; | 39 import org.tmatesoft.hg.core.Nodeid; |
| 40 import org.tmatesoft.hg.internal.DataSerializer.DataSource; |
41 import org.tmatesoft.hg.repo.HgChangelog; | 41 import org.tmatesoft.hg.repo.HgChangelog; |
42 import org.tmatesoft.hg.repo.HgDataFile; | 42 import org.tmatesoft.hg.repo.HgDataFile; |
43 import org.tmatesoft.hg.util.Pair; | 43 import org.tmatesoft.hg.util.Pair; |
44 import org.tmatesoft.hg.util.Path; | 44 import org.tmatesoft.hg.util.Path; |
45 | 45 |
51 * @author TMate Software Ltd. | 51 * @author TMate Software Ltd. |
52 */ | 52 */ |
53 public final class CommitFacility { | 53 public final class CommitFacility { |
54 private final Internals repo; | 54 private final Internals repo; |
55 private final int p1Commit, p2Commit; | 55 private final int p1Commit, p2Commit; |
56 private Map<Path, Pair<HgDataFile, ByteDataSupplier>> files = new LinkedHashMap<Path, Pair<HgDataFile, ByteDataSupplier>>(); | 56 private Map<Path, Pair<HgDataFile, DataSource>> files = new LinkedHashMap<Path, Pair<HgDataFile, DataSource>>(); |
57 private Set<Path> removals = new TreeSet<Path>(); | 57 private Set<Path> removals = new TreeSet<Path>(); |
58 private String branch, user; | 58 private String branch, user; |
59 | 59 |
60 public CommitFacility(Internals hgRepo, int parentCommit) { | 60 public CommitFacility(Internals hgRepo, int parentCommit) { |
61 this(hgRepo, parentCommit, NO_REVISION); | 61 this(hgRepo, parentCommit, NO_REVISION); |
72 | 72 |
73 public boolean isMerge() { | 73 public boolean isMerge() { |
74 return p1Commit != NO_REVISION && p2Commit != NO_REVISION; | 74 return p1Commit != NO_REVISION && p2Commit != NO_REVISION; |
75 } | 75 } |
76 | 76 |
77 public void add(HgDataFile dataFile, ByteDataSupplier content) { | 77 public void add(HgDataFile dataFile, DataSource content) { |
78 if (content == null) { | 78 if (content == null) { |
79 throw new IllegalArgumentException(); | 79 throw new IllegalArgumentException(); |
80 } | 80 } |
81 removals.remove(dataFile.getPath()); | 81 removals.remove(dataFile.getPath()); |
82 files.put(dataFile.getPath(), new Pair<HgDataFile, ByteDataSupplier>(dataFile, content)); | 82 files.put(dataFile.getPath(), new Pair<HgDataFile, DataSource>(dataFile, content)); |
83 } | 83 } |
84 | 84 |
85 public void forget(HgDataFile dataFile) { | 85 public void forget(HgDataFile dataFile) { |
86 files.remove(dataFile.getPath()); | 86 files.remove(dataFile.getPath()); |
87 removals.add(dataFile.getPath()); | 87 removals.add(dataFile.getPath()); |
136 } | 136 } |
137 // | 137 // |
138 // Register new/changed | 138 // Register new/changed |
139 LinkedHashMap<Path, RevlogStream> newlyAddedFiles = new LinkedHashMap<Path, RevlogStream>(); | 139 LinkedHashMap<Path, RevlogStream> newlyAddedFiles = new LinkedHashMap<Path, RevlogStream>(); |
140 ArrayList<Path> touchInDirstate = new ArrayList<Path>(); | 140 ArrayList<Path> touchInDirstate = new ArrayList<Path>(); |
141 for (Pair<HgDataFile, ByteDataSupplier> e : files.values()) { | 141 for (Pair<HgDataFile, DataSource> e : files.values()) { |
142 HgDataFile df = e.first(); | 142 HgDataFile df = e.first(); |
| 143 DataSource bds = e.second(); |
143 Pair<Integer, Integer> fp = fileParents.get(df.getPath()); | 144 Pair<Integer, Integer> fp = fileParents.get(df.getPath()); |
144 if (fp == null) { | 145 if (fp == null) { |
145 // NEW FILE | 146 // NEW FILE |
146 fp = new Pair<Integer, Integer>(NO_REVISION, NO_REVISION); | 147 fp = new Pair<Integer, Integer>(NO_REVISION, NO_REVISION); |
147 } | |
148 ByteDataSupplier bds = e.second(); | |
149 // FIXME quickfix, instead, pass ByteDataSupplier directly to RevlogStreamWriter | |
150 ByteBuffer bb = ByteBuffer.allocate(2048); | |
151 ByteArrayChannel bac = new ByteArrayChannel(); | |
152 while (bds.read(bb) != -1) { | |
153 bb.flip(); | |
154 bac.write(bb); | |
155 bb.clear(); | |
156 } | 148 } |
157 RevlogStream contentStream; | 149 RevlogStream contentStream; |
158 if (df.exists()) { | 150 if (df.exists()) { |
159 contentStream = repo.getImplAccess().getStream(df); | 151 contentStream = repo.getImplAccess().getStream(df); |
160 } else { | 152 } else { |
163 // FIXME df doesn't get df.content updated, and clients | 155 // FIXME df doesn't get df.content updated, and clients |
164 // that would attempt to access newly added file after commit would fail | 156 // that would attempt to access newly added file after commit would fail |
165 // (despite the fact the file is in there) | 157 // (despite the fact the file is in there) |
166 } | 158 } |
167 RevlogStreamWriter fileWriter = new RevlogStreamWriter(repo, contentStream, transaction); | 159 RevlogStreamWriter fileWriter = new RevlogStreamWriter(repo, contentStream, transaction); |
168 Nodeid fileRev = fileWriter.addRevision(bac.toArray(), clogRevisionIndex, fp.first(), fp.second()); | 160 Nodeid fileRev = fileWriter.addRevision(bds, clogRevisionIndex, fp.first(), fp.second()); |
169 newManifestRevision.put(df.getPath(), fileRev); | 161 newManifestRevision.put(df.getPath(), fileRev); |
170 touchInDirstate.add(df.getPath()); | 162 touchInDirstate.add(df.getPath()); |
171 } | 163 } |
172 // | 164 // |
173 // Manifest | 165 // Manifest |
174 final ManifestEntryBuilder manifestBuilder = new ManifestEntryBuilder(); | 166 final ManifestEntryBuilder manifestBuilder = new ManifestEntryBuilder(repo.buildFileNameEncodingHelper()); |
175 for (Map.Entry<Path, Nodeid> me : newManifestRevision.entrySet()) { | 167 for (Map.Entry<Path, Nodeid> me : newManifestRevision.entrySet()) { |
176 manifestBuilder.add(me.getKey().toString(), me.getValue()); | 168 manifestBuilder.add(me.getKey().toString(), me.getValue()); |
177 } | 169 } |
178 RevlogStreamWriter manifestWriter = new RevlogStreamWriter(repo, repo.getImplAccess().getManifestStream(), transaction); | 170 RevlogStreamWriter manifestWriter = new RevlogStreamWriter(repo, repo.getImplAccess().getManifestStream(), transaction); |
179 Nodeid manifestRev = manifestWriter.addRevision(manifestBuilder.build(), clogRevisionIndex, manifestParents.first(), manifestParents.second()); | 171 Nodeid manifestRev = manifestWriter.addRevision(manifestBuilder, clogRevisionIndex, manifestParents.first(), manifestParents.second()); |
180 // | 172 // |
181 // Changelog | 173 // Changelog |
182 final ChangelogEntryBuilder changelogBuilder = new ChangelogEntryBuilder(); | 174 final ChangelogEntryBuilder changelogBuilder = new ChangelogEntryBuilder(); |
183 changelogBuilder.setModified(files.keySet()); | 175 changelogBuilder.setModified(files.keySet()); |
184 changelogBuilder.branch(branch == null ? DEFAULT_BRANCH_NAME : branch); | 176 changelogBuilder.branch(branch == null ? DEFAULT_BRANCH_NAME : branch); |
185 changelogBuilder.user(String.valueOf(user)); | 177 changelogBuilder.user(String.valueOf(user)); |
186 byte[] clogContent = changelogBuilder.build(manifestRev, message); | 178 changelogBuilder.manifest(manifestRev).comment(message); |
187 RevlogStreamWriter changelogWriter = new RevlogStreamWriter(repo, repo.getImplAccess().getChangelogStream(), transaction); | 179 RevlogStreamWriter changelogWriter = new RevlogStreamWriter(repo, repo.getImplAccess().getChangelogStream(), transaction); |
188 Nodeid changesetRev = changelogWriter.addRevision(clogContent, clogRevisionIndex, p1Commit, p2Commit); | 180 Nodeid changesetRev = changelogWriter.addRevision(changelogBuilder, clogRevisionIndex, p1Commit, p2Commit); |
189 // TODO move fncache update to an external facility, along with dirstate and bookmark update | 181 // TODO move fncache update to an external facility, along with dirstate and bookmark update |
190 if (!newlyAddedFiles.isEmpty() && repo.fncacheInUse()) { | 182 if (!newlyAddedFiles.isEmpty() && repo.fncacheInUse()) { |
191 FNCacheFile fncache = new FNCacheFile(repo); | 183 FNCacheFile fncache = new FNCacheFile(repo); |
192 for (Path p : newlyAddedFiles.keySet()) { | 184 for (Path p : newlyAddedFiles.keySet()) { |
193 fncache.addIndex(p); | 185 fncache.addIndex(p); |
263 return NO_REVISION; | 255 return NO_REVISION; |
264 } | 256 } |
265 return repo.getManifest().getRevisionIndex(manifestRev); | 257 return repo.getManifest().getRevisionIndex(manifestRev); |
266 } | 258 } |
267 */ | 259 */ |
268 | |
269 // unlike DataAccess (which provides structured access), this one | |
270 // deals with a sequence of bytes, when there's no need in structure of the data | |
271 // FIXME java.nio.ReadableByteChannel or ByteStream/ByteSequence(read, length, reset) | |
272 // SHALL be inline with util.ByteChannel, reading bytes from HgDataFile, preferably DataAccess#readBytes(BB) to match API, | |
273 // and a wrap for ByteVector | |
274 public interface ByteDataSupplier { // TODO look if can resolve DataAccess in HgCloneCommand visibility issue | |
275 // FIXME needs lifecycle, e.g. for supplier that reads from WC | |
276 int read(ByteBuffer buf); | |
277 } | |
278 | |
279 public interface ByteDataConsumer { | |
280 void write(ByteBuffer buf); | |
281 } | |
282 } | 260 } |
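
The block removed at the bottom of the diff (old lines 268-281) also drops ByteDataConsumer and the FIXME notes about supplier lifecycle; after the refactor a caller only needs to provide a DataSource. Building on the hypothetical interface shape sketched above, a byte[]-backed source for use with CommitFacility.add(HgDataFile, DataSource) might look like this (illustrative only; whether hg4j ships such a helper is not shown in this diff):

```java
// Illustrative only: a trivial byte[]-backed source in the spirit of the new API.
// DataSource and DataSerializerSketch refer to the hypothetical shapes sketched above.
class ByteArraySource implements ContentHandoffSketch.DataSource {
    private final byte[] data;

    ByteArraySource(byte[] data) {
        this.data = data;
    }

    public void serialize(ContentHandoffSketch.DataSerializerSketch out) {
        out.write(data, 0, data.length); // push the whole content to the writer
    }

    public int serializeLength() {
        return data.length;
    }
}

// Usage, roughly (add(HgDataFile, DataSource) is the real signature from this diff):
//   facility.add(dataFile, new ByteArraySource(newContent));
```
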