Mercurial > hg4j
comparison src/com/tmate/hgkit/console/Incoming.java @ 31:346b66add79d
Basic lookup for incoming changes
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
|---|---|
| date | Wed, 12 Jan 2011 00:30:55 +0100 |
| parents | (none) |
| children | 565ce0835674 |
comparison
equal
deleted
inserted
replaced
30:de7217a0aa4d | 31:346b66add79d |
---|---|
1 /* | |
2 * Copyright (c) 2011 Artem Tikhomirov | |
3 */ | |
4 package com.tmate.hgkit.console; | |
5 | |
6 import java.util.Collection; | |
7 import java.util.HashSet; | |
8 import java.util.LinkedHashSet; | |
9 import java.util.LinkedList; | |
10 import java.util.List; | |
11 | |
12 import com.tmate.hgkit.fs.RepositoryLookup; | |
13 import com.tmate.hgkit.ll.HgRepository; | |
14 import com.tmate.hgkit.ll.Nodeid; | |
15 import com.tmate.hgkit.ll.Revlog; | |
16 | |
17 /** | |
18 * | |
19 * @author artem | |
20 */ | |
21 public class Incoming { | |
22 | |
23 public static void main(String[] args) throws Exception { | |
24 RepositoryLookup repoLookup = new RepositoryLookup(); | |
25 RepositoryLookup.Options cmdLineOpts = RepositoryLookup.Options.parse(args); | |
26 HgRepository hgRepo = repoLookup.detect(cmdLineOpts); | |
27 if (hgRepo.isInvalid()) { | |
28 System.err.printf("Can't find repository in: %s\n", hgRepo.getLocation()); | |
29 return; | |
30 } | |
31 // in fact, all we need from changelog is set of all nodeids. However, since ParentWalker reuses same Nodeids, it's not too expensive | |
32 // to reuse it here, XXX although later this may need to be refactored | |
33 final Revlog.ParentWalker pw = hgRepo.getChangelog().new ParentWalker(); | |
34 pw.init(); | |
35 // | |
36 HashSet<Nodeid> base = new HashSet<Nodeid>(); | |
37 HashSet<Nodeid> unknownRemoteHeads = new HashSet<Nodeid>(); | |
38 // imagine empty repository - any nodeid from remote heads would be unknown | |
39 unknownRemoteHeads.add(Nodeid.fromAscii("382cfe9463db0484a14136e4b38407419525f0c0".getBytes(), 0, 40)); | |
40 // | |
41 LinkedList<RemoteBranch> remoteBranches = new LinkedList<RemoteBranch>(); | |
42 remoteBranches(unknownRemoteHeads, remoteBranches); | |
43 // | |
44 HashSet<Nodeid> visited = new HashSet<Nodeid>(); | |
45 HashSet<RemoteBranch> processed = new HashSet<RemoteBranch>(); | |
46 LinkedList<Nodeid[]> toScan = new LinkedList<Nodeid[]>(); | |
47 LinkedHashSet<Nodeid> toFetch = new LinkedHashSet<Nodeid>(); | |
48 // next one seems to track heads we've asked (or plan to ask) remote.branches for | |
49 HashSet<Nodeid> unknownHeads /*req*/ = new HashSet<Nodeid>(unknownRemoteHeads); | |
50 while (!remoteBranches.isEmpty()) { | |
51 LinkedList<Nodeid> toQueryRemote = new LinkedList<Nodeid>(); | |
52 while (!remoteBranches.isEmpty()) { | |
53 RemoteBranch next = remoteBranches.removeFirst(); | |
54 if (visited.contains(next.head) || processed.contains(next)) { | |
55 continue; | |
56 } | |
57 if (Nodeid.NULL.equals(next.head)) { | |
58 // it's discovery.py that expects next.head to be nullid here, I can't imagine how this may happen, hence this exception | |
59 throw new IllegalStateException("I wonder if null if may ever get here with remote branches"); | |
60 } else if (pw.knownNode(next.root)) { | |
61 // root of the remote change is known locally, analyze to find exact missing changesets | |
62 toScan.addLast(new Nodeid[] { next.head, next.root }); | |
63 processed.add(next); | |
64 } else { | |
65 if (!visited.contains(next.root) && !toFetch.contains(next.root)) { | |
66 // if parents are locally known, this is new branch (sequence of changes) (sequence sprang out of known parents) | |
67 if ((next.p1 == null || pw.knownNode(next.p1)) && (next.p2 == null || pw.knownNode(next.p2))) { | |
68 toFetch.add(next.root); | |
69 } | |
70 // XXX perhaps, may combine this parent processing below (I don't understand what this code is exactly about) | |
71 if (pw.knownNode(next.p1)) { | |
72 base.add(next.p1); | |
73 } | |
74 if (pw.knownNode(next.p2)) { | |
75 base.add(next.p2); | |
76 } | |
77 } | |
78 if (next.p1 != null && !pw.knownNode(next.p1) && !unknownHeads.contains(next.p1)) { | |
79 toQueryRemote.add(next.p1); | |
80 unknownHeads.add(next.p1); | |
81 } | |
82 if (next.p2 != null && !pw.knownNode(next.p2) && !unknownHeads.contains(next.p2)) { | |
83 toQueryRemote.add(next.p2); | |
84 unknownHeads.add(next.p2); | |
85 } | |
86 } | |
87 visited.add(next.head); | |
88 } | |
89 if (!toQueryRemote.isEmpty()) { | |
90 // discovery.py in fact does this in batches of 10 revisions a time. | |
91 // however, this slicing may be done in remoteBranches call instead (if needed) | |
92 remoteBranches(toQueryRemote, remoteBranches); | |
93 } | |
94 } | |
95 while (!toScan.isEmpty()) { | |
96 Nodeid[] head_root = toScan.removeFirst(); | |
97 List<Nodeid> nodesBetween = remoteBetween(head_root[0], head_root[1], new LinkedList<Nodeid>()); | |
98 nodesBetween.add(head_root[1]); | |
99 int x = 1; | |
100 Nodeid p = head_root[0]; | |
101 for (Nodeid i : nodesBetween) { | |
102 System.out.println("narrowing " + x + ":" + nodesBetween.size() + " " + i.shortNotation()); | |
103 if (pw.knownNode(i)) { | |
104 if (x <= 2) { | |
105 toFetch.add(p); | |
106 base.add(i); | |
107 } else { | |
108 // XXX original discovery.py collects new elements to scan separately | |
109 // likely to "batch" calls to server | |
110 System.out.println("narrowed branch search to " + p.shortNotation() + ":" + i.shortNotation()); | |
111 toScan.addLast(new Nodeid[] { p, i }); | |
112 } | |
113 break; | |
114 } | |
115 x = x << 1; | |
116 p = i; | |
117 } | |
118 } | |
119 for (Nodeid n : toFetch) { | |
120 if (pw.knownNode(n)) { | |
121 System.out.println("Erroneous to fetch:" + n); | |
122 } else { | |
123 System.out.println(n); | |
124 } | |
125 } | |
126 } | |
127 | |
128 static final class RemoteBranch { | |
129 public Nodeid head, root, p1, p2; | |
130 | |
131 @Override | |
132 public boolean equals(Object obj) { | |
133 if (this == obj) { | |
134 return true; | |
135 } | |
136 if (false == obj instanceof RemoteBranch) { | |
137 return false; | |
138 } | |
139 RemoteBranch o = (RemoteBranch) obj; | |
140 return head.equals(o.head) && root.equals(o.root) && (p1 == null && o.p1 == null || p1.equals(o.p1)) && (p2 == null && o.p2 == null || p2.equals(o.p2)); | |
141 } | |
142 } | |
143 | |
144 private static void remoteBranches(Collection<Nodeid> unknownRemoteHeads, List<RemoteBranch> remoteBranches) { | |
145 // discovery.findcommonincoming: | |
146 // unknown = remote.branches(remote.heads); | |
147 // sent: cmd=branches&roots=d6d2a630f4a6d670c90a5ca909150f2b426ec88f+ | |
148 // received: d6d2a630f4a6d670c90a5ca909150f2b426ec88f dbd663faec1f0175619cf7668bddc6350548b8d6 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 | |
149 // head, root, first parent, second parent | |
150 // | |
151 // TODO implement this with remote access | |
152 // | |
153 RemoteBranch rb = new RemoteBranch(); | |
154 rb.head = unknownRemoteHeads.iterator().next(); | |
155 rb.root = Nodeid.fromAscii("dbd663faec1f0175619cf7668bddc6350548b8d6".getBytes(), 0, 40); | |
156 remoteBranches.add(rb); | |
157 } | |
158 | |
159 private static List<Nodeid> remoteBetween(Nodeid nodeid1, Nodeid nodeid2, List<Nodeid> list) { | |
160 // sent: cmd=between&pairs=d6d2a630f4a6d670c90a5ca909150f2b426ec88f-dbd663faec1f0175619cf7668bddc6350548b8d6 | |
161 // received: a78c980749e3ccebb47138b547e9b644a22797a9 286d221f6c28cbfce25ea314e1f46a23b7f979d3 fc265ddeab262ff5c34b4cf4e2522d8d41f1f05b a3576694a4d1edaa681cab15b89d6b556b02aff4 | |
162 // 1st, 2nd, fourth and eights of total 8 changes between rev9 and rev0 | |
163 // | |
164 // | |
165 // a78c980749e3ccebb47138b547e9b644a22797a9 286d221f6c28cbfce25ea314e1f46a23b7f979d3 fc265ddeab262ff5c34b4cf4e2522d8d41f1f05b a3576694a4d1edaa681cab15b89d6b556b02aff4 | |
166 //d6d2a630f4a6d670c90a5ca909150f2b426ec88f a78c980749e3ccebb47138b547e9b644a22797a9 5abe5af181bd6a6d3e94c378376c901f0f80da50 08db726a0fb7914ac9d27ba26dc8bbf6385a0554 | |
167 | |
168 // TODO implement with remote access | |
169 String response = null; | |
170 if (nodeid1.equals(Nodeid.fromAscii("382cfe9463db0484a14136e4b38407419525f0c0".getBytes(), 0, 40)) && nodeid2.equals(Nodeid.fromAscii("dbd663faec1f0175619cf7668bddc6350548b8d6".getBytes(), 0, 40))) { | |
171 response = "d6d2a630f4a6d670c90a5ca909150f2b426ec88f a78c980749e3ccebb47138b547e9b644a22797a9 5abe5af181bd6a6d3e94c378376c901f0f80da50 08db726a0fb7914ac9d27ba26dc8bbf6385a0554"; | |
172 } else if (nodeid1.equals(Nodeid.fromAscii("a78c980749e3ccebb47138b547e9b644a22797a9".getBytes(), 0, 40)) && nodeid2.equals(Nodeid.fromAscii("5abe5af181bd6a6d3e94c378376c901f0f80da50".getBytes(), 0, 40))) { | |
173 response = "286d221f6c28cbfce25ea314e1f46a23b7f979d3"; | |
174 } | |
175 if (response == null) { | |
176 throw HgRepository.notImplemented(); | |
177 } | |
178 for (String s : response.split(" ")) { | |
179 list.add(Nodeid.fromAscii(s.getBytes(), 0, 40)); | |
180 } | |
181 return list; | |
182 } | |
183 | |
184 } |