/*
 * Copyright (c) 2013 TMate Software Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * For information on how to redistribute this software under
 * the terms of a license other than GNU General Public License
 * contact TMate Software at support@hg4j.com
 */
package org.tmatesoft.hg.repo;

import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;

import org.tmatesoft.hg.core.HgRepositoryLockException;
import org.tmatesoft.hg.core.Nodeid;
import org.tmatesoft.hg.internal.ByteArrayChannel;
import org.tmatesoft.hg.internal.ChangelogEntryBuilder;
import org.tmatesoft.hg.internal.Experimental;
import org.tmatesoft.hg.internal.FNCacheFile;
import org.tmatesoft.hg.internal.ManifestEntryBuilder;
import org.tmatesoft.hg.internal.ManifestRevision;
import org.tmatesoft.hg.internal.RequiresFile;
import org.tmatesoft.hg.internal.RevlogStream;
import org.tmatesoft.hg.internal.RevlogStreamWriter;
import org.tmatesoft.hg.util.LogFacility.Severity;
import org.tmatesoft.hg.util.Pair;
import org.tmatesoft.hg.util.Path;

/**
 * WORK IN PROGRESS
 *
 * @author Artem Tikhomirov
 * @author TMate Software Ltd.
 */
@Experimental(reason="Work in progress")
public class CommitFacility {
	private final HgRepository repo;
	private final int p1Commit, p2Commit;
	private Map<Path, Pair<HgDataFile, ByteDataSupplier>> files = new LinkedHashMap<Path, Pair<HgDataFile, ByteDataSupplier>>();

	public CommitFacility(HgRepository hgRepo, int parentCommit) {
		this(hgRepo, parentCommit, NO_REVISION);
	}

	public CommitFacility(HgRepository hgRepo, int parent1Commit, int parent2Commit) {
		repo = hgRepo;
		p1Commit = parent1Commit;
		p2Commit = parent2Commit;
		if (parent1Commit != NO_REVISION && parent1Commit == parent2Commit) {
			throw new IllegalArgumentException("Merging the same revision is dubious");
		}
	}

	public boolean isMerge() {
		return p1Commit != NO_REVISION && p2Commit != NO_REVISION;
	}

	public void add(HgDataFile dataFile, ByteDataSupplier content) {
		files.put(dataFile.getPath(), new Pair<HgDataFile, ByteDataSupplier>(dataFile, content));
	}

	public Nodeid commit(String message) throws HgRepositoryLockException {
		final HgChangelog clog = repo.getChangelog();
		final int clogRevisionIndex = clog.getRevisionCount();
		ManifestRevision c1Manifest = new ManifestRevision(null, null);
		ManifestRevision c2Manifest = new ManifestRevision(null, null);
		if (p1Commit != NO_REVISION) {
			repo.getManifest().walk(p1Commit, p1Commit, c1Manifest);
		}
		if (p2Commit != NO_REVISION) {
			repo.getManifest().walk(p2Commit, p2Commit, c2Manifest);
		}
		FNCacheFile fncache = null;
		if ((repo.getImplHelper().getRequiresFlags() & RequiresFile.FNCACHE) != 0) {
			fncache = new FNCacheFile(repo.getImplHelper());
			try {
				fncache.read(new Path.SimpleSource());
			} catch (IOException ex) {
				// fncache may be restored with the native client, so failure to read it is not severe enough to stop
				repo.getSessionContext().getLog().dump(getClass(), Severity.Error, ex, "Failed to read fncache, attempt commit nevertheless");
			}
		}
//		Pair<Integer, Integer> manifestParents = getManifestParents();
		Pair<Integer, Integer> manifestParents = new Pair<Integer, Integer>(c1Manifest.revisionIndex(), c2Manifest.revisionIndex());
		TreeMap<Path, Nodeid> newManifestRevision = new TreeMap<Path, Nodeid>();
		HashMap<Path, Pair<Integer, Integer>> fileParents = new HashMap<Path, Pair<Integer, Integer>>();
		for (Path f : c1Manifest.files()) {
			HgDataFile df = repo.getFileNode(f);
			// the file revision recorded in the first parent's manifest
			Nodeid fileKnownRev1 = c1Manifest.nodeid(f);
			// the same file in the second parent's manifest, if any (merge case)
			Nodeid fileKnownRev2 = c2Manifest.nodeid(f);
			final int fileRevIndex1 = df.getRevisionIndex(fileKnownRev1);
			final int fileRevIndex2 = fileKnownRev2 == null ? NO_REVISION : df.getRevisionIndex(fileKnownRev2);
			fileParents.put(f, new Pair<Integer, Integer>(fileRevIndex1, fileRevIndex2));
			// start from the first parent's revision; overwritten below if the file is modified in this commit
			newManifestRevision.put(f, fileKnownRev1);
		}
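		//
		// All revlog writes below use clogRevisionIndex as their link revision, tying
		// each new file and manifest revision to the changeset being created. The order
		// (files first, then manifest, then changelog) follows the data dependencies:
		// manifest entries reference the new file revisions, and the changelog entry
		// references the new manifest revision.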
		//
		// Files
		ArrayList<Path> newlyAddedFiles = new ArrayList<Path>();
		for (Pair<HgDataFile, ByteDataSupplier> e : files.values()) {
			HgDataFile df = e.first();
			Pair<Integer, Integer> fp = fileParents.get(df.getPath());
			if (fp == null) {
				// NEW FILE
				fp = new Pair<Integer, Integer>(NO_REVISION, NO_REVISION);
			}
			ByteDataSupplier bds = e.second();
			// FIXME quickfix; instead, pass ByteDataSupplier directly to RevlogStreamWriter
			ByteBuffer bb = ByteBuffer.allocate(2048);
			ByteArrayChannel bac = new ByteArrayChannel();
			while (bds.read(bb) != -1) {
				bb.flip();
				bac.write(bb);
				bb.clear();
			}
			RevlogStream contentStream;
			if (df.exists()) {
				contentStream = df.content;
			} else {
				contentStream = repo.createStoreFile(df.getPath());
				newlyAddedFiles.add(df.getPath());
				// FIXME df doesn't get df.content updated, and clients that attempt
				// to access the newly added file after the commit would fail
				// (despite the fact the file is in there)
			}
			RevlogStreamWriter fileWriter = new RevlogStreamWriter(repo.getSessionContext(), contentStream);
			Nodeid fileRev = fileWriter.addRevision(bac.toArray(), clogRevisionIndex, fp.first(), fp.second());
			newManifestRevision.put(df.getPath(), fileRev);
		}
		//
		// Manifest
		final ManifestEntryBuilder manifestBuilder = new ManifestEntryBuilder();
		for (Map.Entry<Path, Nodeid> me : newManifestRevision.entrySet()) {
			manifestBuilder.add(me.getKey().toString(), me.getValue());
		}
		RevlogStreamWriter manifestWriter = new RevlogStreamWriter(repo.getSessionContext(), repo.getManifest().content);
		Nodeid manifestRev = manifestWriter.addRevision(manifestBuilder.build(), clogRevisionIndex, manifestParents.first(), manifestParents.second());
		//
		// Changelog
		final ChangelogEntryBuilder changelogBuilder = new ChangelogEntryBuilder();
		changelogBuilder.setModified(files.keySet());
		byte[] clogContent = changelogBuilder.build(manifestRev, message);
		RevlogStreamWriter changelogWriter = new RevlogStreamWriter(repo.getSessionContext(), clog.content);
		Nodeid changesetRev = changelogWriter.addRevision(clogContent, clogRevisionIndex, p1Commit, p2Commit);
		if (fncache != null && !newlyAddedFiles.isEmpty()) {
			for (Path p : newlyAddedFiles) {
				fncache.add(p);
			}
			try {
				fncache.write();
			} catch (IOException ex) {
				// see the comment above for fncache.read()
				repo.getSessionContext().getLog().dump(getClass(), Severity.Error, ex, "Failed to write fncache, error ignored");
			}
		}
		return changesetRev;
	}
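	/*
	 * A minimal usage sketch, assuming a writable repository and a parent revision
	 * index obtained elsewhere ("hgRepo" and "parentRevIndex" are placeholders,
	 * not part of this class):
	 *
	 *   CommitFacility cf = new CommitFacility(hgRepo, parentRevIndex);
	 *   final ByteBuffer content = ByteBuffer.wrap("updated file text".getBytes());
	 *   cf.add(hgRepo.getFileNode("a.txt"), new ByteDataSupplier() {
	 *       public int read(ByteBuffer buf) {
	 *           if (!content.hasRemaining()) {
	 *               return -1; // exhausted, see the ByteDataSupplier contract below
	 *           }
	 *           int before = buf.remaining();
	 *           while (content.hasRemaining() && buf.hasRemaining()) {
	 *               buf.put(content.get());
	 *           }
	 *           return before - buf.remaining(); // number of bytes supplied
	 *       }
	 *   });
	 *   Nodeid committed = cf.commit("sample commit");
	 */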
/*
	private Pair<Integer, Integer> getManifestParents() {
		return new Pair<Integer, Integer>(extractManifestRevisionIndex(p1Commit), extractManifestRevisionIndex(p2Commit));
	}

	private int extractManifestRevisionIndex(int clogRevIndex) {
		if (clogRevIndex == NO_REVISION) {
			return NO_REVISION;
		}
		RawChangeset commitObject = repo.getChangelog().range(clogRevIndex, clogRevIndex).get(0);
		Nodeid manifestRev = commitObject.manifest();
		if (manifestRev.isNull()) {
			return NO_REVISION;
		}
		return repo.getManifest().getRevisionIndex(manifestRev);
	}
*/

	// Unlike DataAccess (which provides structured access), this one deals with
	// a plain sequence of bytes, for cases when there's no need for structure in the data.
	public interface ByteDataSupplier { // TODO see if this can resolve the DataAccess visibility issue in HgCloneCommand
		// Fills the buffer with the next chunk of content; returns the number of
		// bytes written, or -1 once the content is exhausted.
		int read(ByteBuffer buf);
	}

	public interface ByteDataConsumer {
		void write(ByteBuffer buf);
	}
}
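
// A self-contained sample ByteDataSupplier over an in-memory byte array; an
// illustrative sketch only ("ByteArrayDataSupplier" is not part of the library API).
class ByteArrayDataSupplier implements CommitFacility.ByteDataSupplier {
	private final ByteBuffer src;

	ByteArrayDataSupplier(byte[] data) {
		src = ByteBuffer.wrap(data);
	}

	public int read(ByteBuffer buf) {
		if (!src.hasRemaining()) {
			return -1; // content exhausted
		}
		// copy as much as the destination buffer can take in this round
		int count = Math.min(src.remaining(), buf.remaining());
		for (int i = 0; i < count; i++) {
			buf.put(src.get());
		}
		return count;
	}
}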