comparison src/org/tmatesoft/hg/repo/CommitFacility.java @ 559:6ca3d0c5b4bc
Commit: tests and fixes for defects discovered
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
|---|---|
| date | Mon, 25 Feb 2013 19:48:20 +0100 |
| parents | 67d4b0f73984 |
| children | ca56a36c2eea |
| 558:154718ae23ed (previous) | 559:6ca3d0c5b4bc (this changeset) |
|---|---|
| 22 import java.nio.ByteBuffer; | 22 import java.nio.ByteBuffer; |
| 23 import java.util.ArrayList; | 23 import java.util.ArrayList; |
| 24 import java.util.HashMap; | 24 import java.util.HashMap; |
| 25 import java.util.LinkedHashMap; | 25 import java.util.LinkedHashMap; |
| 26 import java.util.Map; | 26 import java.util.Map; |
| | 27 import java.util.Set; |
| 27 import java.util.TreeMap; | 28 import java.util.TreeMap; |
| | 29 import java.util.TreeSet; |
| 28 | 30 |
| 29 import org.tmatesoft.hg.core.HgRepositoryLockException; | 31 import org.tmatesoft.hg.core.HgRepositoryLockException; |
| 30 import org.tmatesoft.hg.core.Nodeid; | 32 import org.tmatesoft.hg.core.Nodeid; |
| 31 import org.tmatesoft.hg.internal.ByteArrayChannel; | 33 import org.tmatesoft.hg.internal.ByteArrayChannel; |
| 32 import org.tmatesoft.hg.internal.ChangelogEntryBuilder; | 34 import org.tmatesoft.hg.internal.ChangelogEntryBuilder; |
| 46 * | 48 * |
| 47 * @author Artem Tikhomirov | 49 * @author Artem Tikhomirov |
| 48 * @author TMate Software Ltd. | 50 * @author TMate Software Ltd. |
| 49 */ | 51 */ |
| 50 @Experimental(reason="Work in progress") | 52 @Experimental(reason="Work in progress") |
| 51 public class CommitFacility { | 53 public final class CommitFacility { |
| 52 private final HgRepository repo; | 54 private final HgRepository repo; |
| 53 private final int p1Commit, p2Commit; | 55 private final int p1Commit, p2Commit; |
| 54 private Map<Path, Pair<HgDataFile, ByteDataSupplier>> files = new LinkedHashMap<Path, Pair<HgDataFile, ByteDataSupplier>>(); | 56 private Map<Path, Pair<HgDataFile, ByteDataSupplier>> files = new LinkedHashMap<Path, Pair<HgDataFile, ByteDataSupplier>>(); |
| 55 | 57 private Set<Path> removals = new TreeSet<Path>(); |
| | 58 private String branch; |
| 56 | 59 |
| 57 public CommitFacility(HgRepository hgRepo, int parentCommit) { | 60 public CommitFacility(HgRepository hgRepo, int parentCommit) { |
| 58 this(hgRepo, parentCommit, NO_REVISION); | 61 this(hgRepo, parentCommit, NO_REVISION); |
| 59 } | 62 } |
| 60 | 63 |
| 70 public boolean isMerge() { | 73 public boolean isMerge() { |
| 71 return p1Commit != NO_REVISION && p2Commit != NO_REVISION; | 74 return p1Commit != NO_REVISION && p2Commit != NO_REVISION; |
| 72 } | 75 } |
| 73 | 76 |
| 74 public void add(HgDataFile dataFile, ByteDataSupplier content) { | 77 public void add(HgDataFile dataFile, ByteDataSupplier content) { |
| | 78 if (content == null) { |
| | 79 throw new IllegalArgumentException(); |
| | 80 } |
| | 81 removals.remove(dataFile.getPath()); |
| 75 files.put(dataFile.getPath(), new Pair<HgDataFile, ByteDataSupplier>(dataFile, content)); | 82 files.put(dataFile.getPath(), new Pair<HgDataFile, ByteDataSupplier>(dataFile, content)); |
| | 83 } |
| | 84 |
| | 85 public void forget(HgDataFile dataFile) { |
| | 86 files.remove(dataFile.getPath()); |
| | 87 removals.add(dataFile.getPath()); |
| | 88 } |
| | 89 |
| | 90 public void branch(String branchName) { |
| | 91 branch = branchName; |
| 76 } | 92 } |
| 77 | 93 |
| 78 public Nodeid commit(String message) throws HgRepositoryLockException { | 94 public Nodeid commit(String message) throws HgRepositoryLockException { |
| 79 | 95 |
| 80 final HgChangelog clog = repo.getChangelog(); | 96 final HgChangelog clog = repo.getChangelog(); |
| 85 repo.getManifest().walk(p1Commit, p1Commit, c1Manifest); | 101 repo.getManifest().walk(p1Commit, p1Commit, c1Manifest); |
| 86 } | 102 } |
| 87 if (p2Commit != NO_REVISION) { | 103 if (p2Commit != NO_REVISION) { |
| 88 repo.getManifest().walk(p2Commit, p2Commit, c2Manifest); | 104 repo.getManifest().walk(p2Commit, p2Commit, c2Manifest); |
| 89 } | 105 } |
| 90 FNCacheFile fncache = null; | |
| 91 if ((repo.getImplHelper().getRequiresFlags() & RequiresFile.FNCACHE) != 0) { | |
| 92 fncache = new FNCacheFile(repo.getImplHelper()); | |
| 93 try { | |
| 94 fncache.read(new Path.SimpleSource()); | |
| 95 } catch (IOException ex) { | |
| 96 // fncache may be restored using native client, so do not treat failure to read it as severe enough to stop | |
| 97 repo.getSessionContext().getLog().dump(getClass(), Severity.Error, ex, "Failed to read fncache, attempt commit nevertheless"); | |
| 98 } | |
| 99 } | |
| 100 // Pair<Integer, Integer> manifestParents = getManifestParents(); | 106 // Pair<Integer, Integer> manifestParents = getManifestParents(); |
| 101 Pair<Integer, Integer> manifestParents = new Pair<Integer, Integer>(c1Manifest.revisionIndex(), c2Manifest.revisionIndex()); | 107 Pair<Integer, Integer> manifestParents = new Pair<Integer, Integer>(c1Manifest.revisionIndex(), c2Manifest.revisionIndex()); |
| 102 TreeMap<Path, Nodeid> newManifestRevision = new TreeMap<Path, Nodeid>(); | 108 TreeMap<Path, Nodeid> newManifestRevision = new TreeMap<Path, Nodeid>(); |
| 103 HashMap<Path, Pair<Integer, Integer>> fileParents = new HashMap<Path, Pair<Integer,Integer>>(); | 109 HashMap<Path, Pair<Integer, Integer>> fileParents = new HashMap<Path, Pair<Integer,Integer>>(); |
| 104 for (Path f : c1Manifest.files()) { | 110 for (Path f : c1Manifest.files()) { |
| 105 HgDataFile df = repo.getFileNode(f); | 111 HgDataFile df = repo.getFileNode(f); |
| 106 Nodeid fileKnownRev = c1Manifest.nodeid(f); | 112 Nodeid fileKnownRev1 = c1Manifest.nodeid(f), fileKnownRev2; |
| 107 final int fileRevIndex1 = df.getRevisionIndex(fileKnownRev); | 113 final int fileRevIndex1 = df.getRevisionIndex(fileKnownRev1); |
| 108 final int fileRevIndex2; | 114 final int fileRevIndex2; |
| 109 if ((fileKnownRev = c2Manifest.nodeid(f)) != null) { | 115 if ((fileKnownRev2 = c2Manifest.nodeid(f)) != null) { |
| 110 // merged files | 116 // merged files |
| 111 fileRevIndex2 = df.getRevisionIndex(fileKnownRev); | 117 fileRevIndex2 = df.getRevisionIndex(fileKnownRev2); |
| 112 } else { | 118 } else { |
| 113 fileRevIndex2 = NO_REVISION; | 119 fileRevIndex2 = NO_REVISION; |
| 114 } | 120 } |
| 115 | 121 |
| 116 fileParents.put(f, new Pair<Integer, Integer>(fileRevIndex1, fileRevIndex2)); | 122 fileParents.put(f, new Pair<Integer, Integer>(fileRevIndex1, fileRevIndex2)); |
| 117 newManifestRevision.put(f, fileKnownRev); | 123 newManifestRevision.put(f, fileKnownRev1); |
| 118 } | 124 } |
| 119 // | 125 // |
| 120 // Files | 126 // Forget removed |
| | 127 for (Path p : removals) { |
| | 128 newManifestRevision.remove(p); |
| | 129 } |
| | 130 // |
| | 131 // Register new/changed |
| 121 ArrayList<Path> newlyAddedFiles = new ArrayList<Path>(); | 132 ArrayList<Path> newlyAddedFiles = new ArrayList<Path>(); |
| 122 for (Pair<HgDataFile, ByteDataSupplier> e : files.values()) { | 133 for (Pair<HgDataFile, ByteDataSupplier> e : files.values()) { |
| 123 HgDataFile df = e.first(); | 134 HgDataFile df = e.first(); |
| 124 Pair<Integer, Integer> fp = fileParents.get(df.getPath()); | 135 Pair<Integer, Integer> fp = fileParents.get(df.getPath()); |
| 125 if (fp == null) { | 136 if (fp == null) { |
| 159 Nodeid manifestRev = manifestWriter.addRevision(manifestBuilder.build(), clogRevisionIndex, manifestParents.first(), manifestParents.second()); | 170 Nodeid manifestRev = manifestWriter.addRevision(manifestBuilder.build(), clogRevisionIndex, manifestParents.first(), manifestParents.second()); |
| 160 // | 171 // |
| 161 // Changelog | 172 // Changelog |
| 162 final ChangelogEntryBuilder changelogBuilder = new ChangelogEntryBuilder(); | 173 final ChangelogEntryBuilder changelogBuilder = new ChangelogEntryBuilder(); |
| 163 changelogBuilder.setModified(files.keySet()); | 174 changelogBuilder.setModified(files.keySet()); |
| | 175 changelogBuilder.branch(branch == null ? HgRepository.DEFAULT_BRANCH_NAME : branch); |
| 164 byte[] clogContent = changelogBuilder.build(manifestRev, message); | 176 byte[] clogContent = changelogBuilder.build(manifestRev, message); |
| 165 RevlogStreamWriter changelogWriter = new RevlogStreamWriter(repo.getSessionContext(), clog.content); | 177 RevlogStreamWriter changelogWriter = new RevlogStreamWriter(repo.getSessionContext(), clog.content); |
| 166 Nodeid changesetRev = changelogWriter.addRevision(clogContent, clogRevisionIndex, p1Commit, p2Commit); | 178 Nodeid changesetRev = changelogWriter.addRevision(clogContent, clogRevisionIndex, p1Commit, p2Commit); |
| 167 if (!newlyAddedFiles.isEmpty() && fncache != null) { | 179 // FIXME move fncache update to an external facility, along with dirstate update |
| | 180 if (!newlyAddedFiles.isEmpty() && repo.getImplHelper().fncacheInUse()) { |
| | 181 FNCacheFile fncache = new FNCacheFile(repo.getImplHelper()); |
| 168 for (Path p : newlyAddedFiles) { | 182 for (Path p : newlyAddedFiles) { |
| 169 fncache.add(p); | 183 fncache.add(p); |
| 170 } | 184 } |
| 171 try { | 185 try { |
| 172 fncache.write(); | 186 fncache.write(); |
| 196 */ | 210 */ |
| 197 | 211 |
| 198 // unlike DataAccess (which provides structured access), this one | 212 // unlike DataAccess (which provides structured access), this one |
| 199 // deals with a sequence of bytes, when there's no need in structure of the data | 213 // deals with a sequence of bytes, when there's no need in structure of the data |
| 200 public interface ByteDataSupplier { // TODO look if can resolve DataAccess in HgCloneCommand visibility issue | 214 public interface ByteDataSupplier { // TODO look if can resolve DataAccess in HgCloneCommand visibility issue |
| | 215 // FIXME needs lifecycle, e.g. for supplier that reads from WC |
| 201 int read(ByteBuffer buf); | 216 int read(ByteBuffer buf); |
| 202 } | 217 } |
| 203 | 218 |
| 204 public interface ByteDataConsumer { | 219 public interface ByteDataConsumer { |
| 205 void write(ByteBuffer buf); | 220 void write(ByteBuffer buf); |
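
Taken as a whole, this changeset rounds `CommitFacility` out into a usable staging API: `add()` now rejects null content and cancels a pending removal of the same path, the new `forget()` schedules a file for removal from the next manifest, `branch()` targets a named branch (falling back to `DEFAULT_BRANCH_NAME`), the merge loop no longer clobbers the first parent's file revision while probing the second manifest (`fileKnownRev` split into `fileKnownRev1`/`fileKnownRev2`), and the fncache read-then-update dance shrinks to a write-only update after the changelog entry lands. A minimal usage sketch in Java follows; it is not taken from the project's tests. `HgLookup.detect()`, `getLastRevision()`, and `shortNotation()` are my best guesses at the surrounding hg4j API of this vintage, and the `ByteArraySupplier` helper with its end-of-data convention (returning -1) is an assumption — the `ByteDataSupplier` contract is still undocumented at this revision, as the new FIXME notes.

```java
import java.io.File;
import java.nio.ByteBuffer;

import org.tmatesoft.hg.core.HgLookup;
import org.tmatesoft.hg.core.Nodeid;
import org.tmatesoft.hg.repo.CommitFacility;
import org.tmatesoft.hg.repo.HgDataFile;
import org.tmatesoft.hg.repo.HgRepository;

public class CommitFacilityExample {

	// Hypothetical helper: feeds a fixed byte array into the supplied buffer.
	// The -1 end-of-data convention is an assumption, not a documented contract.
	static class ByteArraySupplier implements CommitFacility.ByteDataSupplier {
		private final byte[] data;
		private int pos = 0;

		ByteArraySupplier(byte[] data) {
			this.data = data;
		}

		public int read(ByteBuffer buf) {
			if (pos >= data.length) {
				return -1; // assumed signal that the content is exhausted
			}
			int count = Math.min(buf.remaining(), data.length - pos);
			buf.put(data, pos, count);
			pos += count;
			return count;
		}
	}

	public static void main(String[] args) throws Exception {
		HgRepository repo = new HgLookup().detect(new File(args[0]));
		// Non-merge commit on top of the last changeset: one parent, p2 == NO_REVISION
		int parent = repo.getChangelog().getLastRevision();
		CommitFacility cf = new CommitFacility(repo, parent);
		// Stage new content for one file and schedule another for removal
		cf.add(repo.getFileNode("a.txt"), new ByteArraySupplier("modified content".getBytes()));
		cf.forget(repo.getFileNode("obsolete.txt"));
		cf.branch("feature-x"); // omit to commit to the "default" branch
		Nodeid changeset = cf.commit("tests and fixes for defects discovered");
		System.out.println("new changeset: " + changeset.shortNotation());
	}
}
```

Note the interplay visible in the diff: `add()` removes the path from `removals` and `forget()` removes it from `files`, so whichever call comes last for a given path decides its fate in the commit.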
