/*
 * Copyright (c) 2013 TMate Software Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * For information on how to redistribute this software under
 * the terms of a license other than GNU General Public License
 * contact TMate Software at support@hg4j.com
 */
package org.tmatesoft.hg.internal;

import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;

import org.tmatesoft.hg.core.HgCommitCommand;
import org.tmatesoft.hg.core.HgIOException;
import org.tmatesoft.hg.core.HgRepositoryLockException;
import org.tmatesoft.hg.core.Nodeid;
import org.tmatesoft.hg.repo.HgChangelog;
import org.tmatesoft.hg.repo.HgDataFile;
import org.tmatesoft.hg.repo.HgRepository;
import org.tmatesoft.hg.util.Pair;
import org.tmatesoft.hg.util.Path;
import org.tmatesoft.hg.util.LogFacility.Severity;

/**
 * WORK IN PROGRESS
 * Name: CommitObject, FutureCommit or PendingCommit
 * Only public API now: {@link HgCommitCommand}.
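 * <p>
 * A rough usage sketch (illustrative only; how the {@code Internals} instance and the
 * {@link ByteDataSupplier} implementation are obtained is assumed here, not prescribed by this class):
 * <pre>
 *   CommitFacility cf = new CommitFacility(internals, parentCsetRevIndex);
 *   cf.branch("default");
 *   cf.user("joe");
 *   cf.add(hgRepo.getFileNode("file.txt"), contentSupplier);
 *   Nodeid newChangeset = cf.commit("Sample commit");
 * </pre>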
 *
 * @author Artem Tikhomirov
 * @author TMate Software Ltd.
 */
@Experimental(reason="Work in progress")
public final class CommitFacility {
    private final Internals repo;
    private final int p1Commit, p2Commit;
    private Map<Path, Pair<HgDataFile, ByteDataSupplier>> files = new LinkedHashMap<Path, Pair<HgDataFile, ByteDataSupplier>>();
    private Set<Path> removals = new TreeSet<Path>();
    private String branch, user;

    public CommitFacility(Internals hgRepo, int parentCommit) {
        this(hgRepo, parentCommit, NO_REVISION);
    }

    public CommitFacility(Internals hgRepo, int parent1Commit, int parent2Commit) {
        repo = hgRepo;
        p1Commit = parent1Commit;
        p2Commit = parent2Commit;
        if (parent1Commit != NO_REVISION && parent1Commit == parent2Commit) {
            throw new IllegalArgumentException("Merging same revision is dubious");
        }
    }

    public boolean isMerge() {
        return p1Commit != NO_REVISION && p2Commit != NO_REVISION;
    }

    public void add(HgDataFile dataFile, ByteDataSupplier content) {
        if (content == null) {
            throw new IllegalArgumentException();
        }
        removals.remove(dataFile.getPath());
        files.put(dataFile.getPath(), new Pair<HgDataFile, ByteDataSupplier>(dataFile, content));
    }

    public void forget(HgDataFile dataFile) {
        files.remove(dataFile.getPath());
        removals.add(dataFile.getPath());
    }

    public void branch(String branchName) {
        branch = branchName;
    }

    public void user(String userName) {
        user = userName;
    }

    public Nodeid commit(String message) throws HgIOException, HgRepositoryLockException {
        final HgChangelog clog = repo.getRepo().getChangelog();
        final int clogRevisionIndex = clog.getRevisionCount();
        ManifestRevision c1Manifest = new ManifestRevision(null, null);
        ManifestRevision c2Manifest = new ManifestRevision(null, null);
        final Nodeid p1Cset = p1Commit == NO_REVISION ? null : clog.getRevision(p1Commit);
        final Nodeid p2Cset = p2Commit == NO_REVISION ? null : clog.getRevision(p2Commit);
        if (p1Commit != NO_REVISION) {
            repo.getRepo().getManifest().walk(p1Commit, p1Commit, c1Manifest);
        }
        if (p2Commit != NO_REVISION) {
            repo.getRepo().getManifest().walk(p2Commit, p2Commit, c2Manifest);
        }
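        // Manifest parents are the parent changesets' manifest revisions; file parents come from
        // the first parent's manifest (and, for merges, from the second parent's as well), so that
        // the file revisions written below get proper parent links.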
//      Pair<Integer, Integer> manifestParents = getManifestParents();
        Pair<Integer, Integer> manifestParents = new Pair<Integer, Integer>(c1Manifest.revisionIndex(), c2Manifest.revisionIndex());
        TreeMap<Path, Nodeid> newManifestRevision = new TreeMap<Path, Nodeid>();
        HashMap<Path, Pair<Integer, Integer>> fileParents = new HashMap<Path, Pair<Integer, Integer>>();
        for (Path f : c1Manifest.files()) {
            HgDataFile df = repo.getRepo().getFileNode(f);
            Nodeid fileKnownRev1 = c1Manifest.nodeid(f), fileKnownRev2;
            final int fileRevIndex1 = df.getRevisionIndex(fileKnownRev1);
            final int fileRevIndex2;
            if ((fileKnownRev2 = c2Manifest.nodeid(f)) != null) {
                // merged files
                fileRevIndex2 = df.getRevisionIndex(fileKnownRev2);
            } else {
                fileRevIndex2 = NO_REVISION;
            }

            fileParents.put(f, new Pair<Integer, Integer>(fileRevIndex1, fileRevIndex2));
            newManifestRevision.put(f, fileKnownRev1);
        }
        //
        // Forget removed
        for (Path p : removals) {
            newManifestRevision.remove(p);
        }
        //
        // Register new/changed
        ArrayList<Path> newlyAddedFiles = new ArrayList<Path>();
        ArrayList<Path> touchInDirstate = new ArrayList<Path>();
        for (Pair<HgDataFile, ByteDataSupplier> e : files.values()) {
            HgDataFile df = e.first();
            Pair<Integer, Integer> fp = fileParents.get(df.getPath());
            if (fp == null) {
                // NEW FILE
                fp = new Pair<Integer, Integer>(NO_REVISION, NO_REVISION);
            }
            ByteDataSupplier bds = e.second();
            // FIXME quickfix, instead, pass ByteDataSupplier directly to RevlogStreamWriter
            ByteBuffer bb = ByteBuffer.allocate(2048);
            ByteArrayChannel bac = new ByteArrayChannel();
            while (bds.read(bb) != -1) {
                bb.flip();
                bac.write(bb);
                bb.clear();
            }
            RevlogStream contentStream;
            if (df.exists()) {
                contentStream = repo.getImplAccess().getStream(df);
            } else {
                contentStream = repo.createStoreFile(df.getPath());
                newlyAddedFiles.add(df.getPath());
                // FIXME df doesn't get df.content updated, and clients
                // that would attempt to access newly added file after commit would fail
                // (despite the fact the file is in there)
            }
            RevlogStreamWriter fileWriter = new RevlogStreamWriter(repo, contentStream);
            Nodeid fileRev = fileWriter.addRevision(bac.toArray(), clogRevisionIndex, fp.first(), fp.second());
            newManifestRevision.put(df.getPath(), fileRev);
            touchInDirstate.add(df.getPath());
        }
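        // clogRevisionIndex is the index the new changeset will receive once appended to the changelog
        // (the current changelog length); passing it to every addRevision call above and below ties
        // the file and manifest revisions to the changeset being created.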
        //
        // Manifest
        final ManifestEntryBuilder manifestBuilder = new ManifestEntryBuilder();
        for (Map.Entry<Path, Nodeid> me : newManifestRevision.entrySet()) {
            manifestBuilder.add(me.getKey().toString(), me.getValue());
        }
        RevlogStreamWriter manifestWriter = new RevlogStreamWriter(repo, repo.getImplAccess().getManifestStream());
        Nodeid manifestRev = manifestWriter.addRevision(manifestBuilder.build(), clogRevisionIndex, manifestParents.first(), manifestParents.second());
        //
        // Changelog
        final ChangelogEntryBuilder changelogBuilder = new ChangelogEntryBuilder();
        changelogBuilder.setModified(files.keySet());
        changelogBuilder.branch(branch == null ? HgRepository.DEFAULT_BRANCH_NAME : branch);
        changelogBuilder.user(String.valueOf(user));
        byte[] clogContent = changelogBuilder.build(manifestRev, message);
        RevlogStreamWriter changelogWriter = new RevlogStreamWriter(repo, repo.getImplAccess().getChangelogStream());
        Nodeid changesetRev = changelogWriter.addRevision(clogContent, clogRevisionIndex, p1Commit, p2Commit);
        // FIXME move fncache update to an external facility, along with dirstate and bookmark update
        if (!newlyAddedFiles.isEmpty() && repo.fncacheInUse()) {
            FNCacheFile fncache = new FNCacheFile(repo);
            for (Path p : newlyAddedFiles) {
                fncache.add(p);
            }
            try {
                fncache.write();
            } catch (IOException ex) {
                // see comment above for fncache.read()
                repo.getSessionContext().getLog().dump(getClass(), Severity.Error, ex, "Failed to write fncache, error ignored");
            }
        }
        // bring dirstate up to commit state
        final DirstateBuilder dirstateBuilder = new DirstateBuilder(repo);
        dirstateBuilder.fillFrom(new DirstateReader(repo, new Path.SimpleSource()));
        for (Path p : removals) {
            dirstateBuilder.recordRemoved(p);
        }
        for (Path p : touchInDirstate) {
            dirstateBuilder.recordUncertain(p);
        }
        dirstateBuilder.parents(changesetRev, Nodeid.NULL);
        dirstateBuilder.serialize();
        // update bookmarks
        if (p1Commit != NO_REVISION || p2Commit != NO_REVISION) {
            repo.getRepo().getBookmarks().updateActive(p1Cset, p2Cset, changesetRev);
        }
        return changesetRev;
    }
/*
    private Pair<Integer, Integer> getManifestParents() {
        return new Pair<Integer, Integer>(extractManifestRevisionIndex(p1Commit), extractManifestRevisionIndex(p2Commit));
    }

    private int extractManifestRevisionIndex(int clogRevIndex) {
        if (clogRevIndex == NO_REVISION) {
            return NO_REVISION;
        }
        RawChangeset commitObject = repo.getChangelog().range(clogRevIndex, clogRevIndex).get(0);
        Nodeid manifestRev = commitObject.manifest();
        if (manifestRev.isNull()) {
            return NO_REVISION;
        }
        return repo.getManifest().getRevisionIndex(manifestRev);
    }
*/

    // unlike DataAccess (which provides structured access), this one
    // deals with a plain sequence of bytes, when there's no need for structure in the data
    // FIXME java.nio.ReadableByteChannel or ByteStream/ByteSequence(read, length, reset)
    // SHALL be in line with util.ByteChannel, reading bytes from HgDataFile, preferably DataAccess#readBytes(BB) to match API,
    // and a wrap for ByteVector
    public interface ByteDataSupplier { // TODO look if can resolve DataAccess in HgCloneCommand visibility issue
        // FIXME needs lifecycle, e.g. for supplier that reads from WC
        int read(ByteBuffer buf);
    }
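
    /**
     * Illustrative sketch only, not part of the original API: a trivial {@link ByteDataSupplier}
     * backed by an in-memory byte array, e.g. to supply content to {@link #add(HgDataFile, ByteDataSupplier)}.
     * The class name and its presence here are assumptions made for the sake of the example.
     */
    public static class ByteArrayDataSupplier implements ByteDataSupplier {
        private final byte[] data;
        private int pos = 0;

        public ByteArrayDataSupplier(byte[] source) {
            data = source;
        }

        public int read(ByteBuffer buf) {
            if (pos >= data.length) {
                return -1; // no more data; matches the -1 end-of-content convention checked in commit()
            }
            int chunk = Math.min(buf.remaining(), data.length - pos);
            buf.put(data, pos, chunk);
            pos += chunk;
            return chunk;
        }
    }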

    public interface ByteDataConsumer {
        void write(ByteBuffer buf);
    }
}