hg4j: comparison cmdline/org/tmatesoft/hg/console/Incoming.java @ 181:cd3371670f0b
Refactor incoming and outgoing code to be shared with RepositoryComparator. Placeholders for in/out commands. Refactor common remote lookup code
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
| --- | --- |
| date | Tue, 12 Apr 2011 19:10:38 +0200 |
| parents | 62665d8f0686 |
| children | f26ffe04ced0 |
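Before the hunks themselves, it may help to see the net shape of the refactored Incoming flow. The sketch below is condensed from the new (right-hand) column of this comparison; the wrapper class and method names are hypothetical, exception handling is collapsed to `throws Exception`, and the code inherits the file's own WORK IN PROGRESS caveat.

```java
import java.util.List;

import org.tmatesoft.hg.internal.RepositoryComparator;
import org.tmatesoft.hg.internal.RepositoryComparator.BranchChain;
import org.tmatesoft.hg.repo.HgChangelog;
import org.tmatesoft.hg.repo.HgLookup;
import org.tmatesoft.hg.repo.HgRemoteRepository;
import org.tmatesoft.hg.repo.HgRepository;

public class IncomingSketch { // class and method name are hypothetical

    static void dumpMissingBranches(HgRepository hgRepo, String remoteKey) throws Exception {
        // Remote lookup is now shared code: detectRemote() resolves the named [paths]
        // entry (e.g. "svnkit") from the hgrc, replacing the inline ConfigFile parsing
        // that Incoming did before this changeset.
        HgRemoteRepository hgRemote = new HgLookup().detectRemote(remoteKey, hgRepo);
        if (hgRemote.isInvalid()) {
            System.err.printf("Remote repository %s is not valid", hgRemote.getLocation());
            return;
        }
        // ParentWalker indexes the local changelog so knownNode() checks stay cheap.
        final HgChangelog.ParentWalker pw = hgRepo.getChangelog().new ParentWalker();
        pw.init();
        // Branch discovery, formerly the private calculateMissingBranches() and
        // completeBranch() methods removed in this diff, is delegated to the shared
        // RepositoryComparator.
        RepositoryComparator repoCompare = new RepositoryComparator(pw, hgRemote);
        repoCompare.compare(null); // called with null, as in the refactored main() below
        List<BranchChain> missingBranches = repoCompare.calculateMissingBranches();
        for (BranchChain bc : missingBranches) {
            bc.dump(); // prints the [root, head] chain of each branch missing locally
        }
    }
}
```

The large removals further down (the private BranchChain class, calculateMissingBranches() and completeBranch()) are the logic that now lives behind RepositoryComparator, as the new import of RepositoryComparator.BranchChain indicates.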
| 180:42fe9a94b9d0 (before) | 181:cd3371670f0b (after) |
| --- | --- |
14 * the terms of a license other than GNU General Public License | 14 * the terms of a license other than GNU General Public License |
15 * contact TMate Software at support@hg4j.com | 15 * contact TMate Software at support@hg4j.com |
16 */ | 16 */ |
17 package org.tmatesoft.hg.console; | 17 package org.tmatesoft.hg.console; |
18 | 18 |
19 import static org.tmatesoft.hg.core.Nodeid.NULL; | |
20 | |
21 import java.io.File; | 19 import java.io.File; |
22 import java.net.URL; | 20 import java.net.URL; |
23 import java.util.ArrayList; | 21 import java.util.ArrayList; |
24 import java.util.Arrays; | 22 import java.util.Arrays; |
25 import java.util.Collections; | 23 import java.util.Collections; |
26 import java.util.Comparator; | 24 import java.util.Comparator; |
27 import java.util.HashMap; | |
28 import java.util.HashSet; | 25 import java.util.HashSet; |
29 import java.util.Iterator; | 26 import java.util.Iterator; |
30 import java.util.LinkedHashMap; | 27 import java.util.LinkedHashMap; |
31 import java.util.LinkedList; | 28 import java.util.LinkedList; |
32 import java.util.List; | 29 import java.util.List; |
33 import java.util.ListIterator; | 30 import java.util.ListIterator; |
34 import java.util.Map; | |
35 import java.util.Map.Entry; | 31 import java.util.Map.Entry; |
36 | 32 |
37 import org.tmatesoft.hg.core.HgBadStateException; | |
38 import org.tmatesoft.hg.core.HgException; | 33 import org.tmatesoft.hg.core.HgException; |
39 import org.tmatesoft.hg.core.Nodeid; | 34 import org.tmatesoft.hg.core.Nodeid; |
40 import org.tmatesoft.hg.internal.ConfigFile; | 35 import org.tmatesoft.hg.internal.ConfigFile; |
41 import org.tmatesoft.hg.internal.Internals; | 36 import org.tmatesoft.hg.internal.Internals; |
37 import org.tmatesoft.hg.internal.RepositoryComparator; | |
38 import org.tmatesoft.hg.internal.RepositoryComparator.BranchChain; | |
42 import org.tmatesoft.hg.repo.HgChangelog; | 39 import org.tmatesoft.hg.repo.HgChangelog; |
43 import org.tmatesoft.hg.repo.HgLookup; | 40 import org.tmatesoft.hg.repo.HgLookup; |
44 import org.tmatesoft.hg.repo.HgRemoteRepository; | 41 import org.tmatesoft.hg.repo.HgRemoteRepository; |
45 import org.tmatesoft.hg.repo.HgRemoteRepository.Range; | |
46 import org.tmatesoft.hg.repo.HgRemoteRepository.RemoteBranch; | |
47 import org.tmatesoft.hg.repo.HgRepository; | 42 import org.tmatesoft.hg.repo.HgRepository; |
48 | 43 |
49 | 44 |
50 /** | 45 /** |
51 * WORK IN PROGRESS, DO NOT USE | 46 * WORK IN PROGRESS, DO NOT USE |
65 HgRepository hgRepo = cmdLineOpts.findRepository(); | 60 HgRepository hgRepo = cmdLineOpts.findRepository(); |
66 if (hgRepo.isInvalid()) { | 61 if (hgRepo.isInvalid()) { |
67 System.err.printf("Can't find repository in: %s\n", hgRepo.getLocation()); | 62 System.err.printf("Can't find repository in: %s\n", hgRepo.getLocation()); |
68 return; | 63 return; |
69 } | 64 } |
70 String key = "svnkit"; | 65 HgRemoteRepository hgRemote = new HgLookup().detectRemote("svnkit", hgRepo); |
71 ConfigFile cfg = new Internals().newConfigFile(); | 66 if (hgRemote.isInvalid()) { |
72 cfg.addLocation(new File(System.getProperty("user.home"), ".hgrc")); | 67 System.err.printf("Remote repository %s is not valid", hgRemote.getLocation()); |
73 String server = cfg.getSection("paths").get(key); | 68 return; |
74 if (server == null) { | 69 } |
75 throw new HgException(String.format("Can't find server %s specification in the config", key)); | |
76 } | |
77 HgRemoteRepository hgRemote = new HgLookup().detect(new URL(server)); | |
78 // | 70 // |
79 // in fact, all we need from changelog is set of all nodeids. However, since ParentWalker reuses same Nodeids, it's not too expensive | 71 // in fact, all we need from changelog is set of all nodeids. However, since ParentWalker reuses same Nodeids, it's not too expensive |
80 // to reuse it here, XXX although later this may need to be refactored | 72 // to reuse it here, XXX although later this may need to be refactored |
81 final HgChangelog.ParentWalker pw = hgRepo.getChangelog().new ParentWalker(); | 73 final HgChangelog.ParentWalker pw = hgRepo.getChangelog().new ParentWalker(); |
82 pw.init(); | 74 pw.init(); |
83 // | 75 // |
84 List<BranchChain> missingBranches0 = calculateMissingBranches(pw, hgRemote); | 76 RepositoryComparator repoCompare = new RepositoryComparator(pw, hgRemote); |
77 repoCompare.compare(null); | |
78 List<BranchChain> missingBranches0 = repoCompare.calculateMissingBranches(); | |
85 for (BranchChain bc : missingBranches0) { | 79 for (BranchChain bc : missingBranches0) { |
86 bc.dump(); | 80 bc.dump(); |
87 | 81 |
88 List<Nodeid> missing = visitBranches(hgRemote, bc); | 82 List<Nodeid> missing = visitBranches(repoCompare, bc); |
89 // Collections.reverse(missing); // useful to test output, from newer to older | 83 // Collections.reverse(missing); // useful to test output, from newer to older |
90 for (Nodeid n : missing) { | 84 for (Nodeid n : missing) { |
91 if (pw.knownNode(n)) { | 85 if (pw.knownNode(n)) { |
92 System.out.println("Erroneous to fetch:" + n); | 86 System.out.println("Erroneous to fetch:" + n); |
93 } else { | 87 } else { |
97 System.out.println("Branch done"); | 91 System.out.println("Branch done"); |
98 } | 92 } |
99 | 93 |
100 } | 94 } |
101 | 95 |
102 private static class BranchChain { | |
103 // when we construct a chain, we know head which is missing locally, hence init it right away. | |
104 // as for root (branch unknown start), we might happen to have one locally, and need further digging to find out right branch start | |
105 public final Nodeid branchHead; | |
106 public Nodeid branchRoot; | |
107 // either of these can be null, or both. | |
108 // although RemoteBranch has either both parents null, or both non-null, when we construct a chain | |
109 // we might encounter that we locally know one of branch's parent, hence in the chain corresponding field will be blank. | |
110 public BranchChain p1; | |
111 public BranchChain p2; | |
112 | |
113 public BranchChain(Nodeid head) { | |
114 assert head != null; | |
115 branchHead = head; | |
116 } | |
117 public boolean isTerminal() { | |
118 return p1 == null || p2 == null; | |
119 } | |
120 | |
121 @Override | |
122 public String toString() { | |
123 return String.format("BranchChain [%s, %s]", branchRoot, branchHead); | |
124 } | |
125 void dump() { | |
126 System.out.println(toString()); | |
127 internalDump(" "); | |
128 } | |
129 void internalDump(String prefix) { | |
130 if (p1 != null) { | |
131 System.out.println(prefix + p1.toString()); | |
132 } | |
133 if (p2 != null) { | |
134 System.out.println(prefix + p2.toString()); | |
135 } | |
136 prefix += " "; | |
137 if (p1 != null) { | |
138 p1.internalDump(prefix); | |
139 } | |
140 if (p2 != null) { | |
141 p2.internalDump(prefix); | |
142 } | |
143 } | |
144 } | |
145 | 96 |
146 private static List<Nodeid> visitBranches(HgRemoteRepository hgRemote, BranchChain bc) throws HgException { | 97 private static List<Nodeid> visitBranches(RepositoryComparator repoCompare, BranchChain bc) throws HgException { |
147 if (bc == null) { | 98 if (bc == null) { |
148 return Collections.emptyList(); | 99 return Collections.emptyList(); |
149 } | 100 } |
150 List<Nodeid> mine = completeBranch(hgRemote, bc.branchRoot, bc.branchHead); | 101 List<Nodeid> mine = repoCompare.completeBranch(bc.branchRoot, bc.branchHead); |
151 if (bc.isTerminal()) { | 102 if (bc.isTerminal()) { |
152 return mine; | 103 return mine; |
153 } | 104 } |
154 List<Nodeid> parentBranch1 = visitBranches(hgRemote, bc.p1); | 105 List<Nodeid> parentBranch1 = visitBranches(repoCompare, bc.p1); |
155 List<Nodeid> parentBranch2 = visitBranches(hgRemote, bc.p2); | 106 List<Nodeid> parentBranch2 = visitBranches(repoCompare, bc.p2); |
156 // merge | 107 // merge |
157 LinkedList<Nodeid> merged = new LinkedList<Nodeid>(); | 108 LinkedList<Nodeid> merged = new LinkedList<Nodeid>(); |
158 ListIterator<Nodeid> i1 = parentBranch1.listIterator(), i2 = parentBranch2.listIterator(); | 109 ListIterator<Nodeid> i1 = parentBranch1.listIterator(), i2 = parentBranch2.listIterator(); |
159 while (i1.hasNext() && i2.hasNext()) { | 110 while (i1.hasNext() && i2.hasNext()) { |
160 Nodeid n1 = i1.next(); | 111 Nodeid n1 = i1.next(); |
180 ArrayList<Nodeid> rv = new ArrayList<Nodeid>(mine.size() + merged.size()); | 131 ArrayList<Nodeid> rv = new ArrayList<Nodeid>(mine.size() + merged.size()); |
181 rv.addAll(merged); | 132 rv.addAll(merged); |
182 rv.addAll(mine); | 133 rv.addAll(mine); |
183 return rv; | 134 return rv; |
184 } | 135 } |
185 | 136 |
186 // somewhat similar to Outgoing.findCommonWithRemote() | |
187 private static List<BranchChain> calculateMissingBranches(HgChangelog.ParentWalker pwLocal, HgRemoteRepository hgRemote) throws HgException { | |
188 List<Nodeid> remoteHeads = hgRemote.heads(); | |
189 LinkedList<Nodeid> common = new LinkedList<Nodeid>(); // these remotes are known in local | |
190 LinkedList<Nodeid> toQuery = new LinkedList<Nodeid>(); // these need further queries to find common | |
191 for (Nodeid rh : remoteHeads) { | |
192 if (pwLocal.knownNode(rh)) { | |
193 common.add(rh); | |
194 } else { | |
195 toQuery.add(rh); | |
196 } | |
197 } | |
198 if (toQuery.isEmpty()) { | |
199 return Collections.emptyList(); // no incoming changes | |
200 } | |
201 LinkedList<BranchChain> branches2load = new LinkedList<BranchChain>(); // return value | |
202 // detailed comments are in Outgoing.findCommonWithRemote | |
203 LinkedList<RemoteBranch> checkUp2Head = new LinkedList<RemoteBranch>(); | |
204 // records relation between branch head and its parent branch, if any | |
205 HashMap<Nodeid, BranchChain> head2chain = new HashMap<Nodeid, Incoming.BranchChain>(); | |
206 while (!toQuery.isEmpty()) { | |
207 List<RemoteBranch> remoteBranches = hgRemote.branches(toQuery); //head, root, first parent, second parent | |
208 toQuery.clear(); | |
209 while(!remoteBranches.isEmpty()) { | |
210 RemoteBranch rb = remoteBranches.remove(0); | |
211 BranchChain chainElement = head2chain.get(rb.head); | |
212 if (chainElement == null) { | |
213 chainElement = new BranchChain(rb.head); | |
214 // record this unknown branch to download later | |
215 branches2load.add(chainElement); | |
216 } | |
217 if (pwLocal.knownNode(rb.root)) { | |
218 // we known branch start, common head is somewhere in its descendants line | |
219 checkUp2Head.add(rb); | |
220 } else { | |
221 chainElement.branchRoot = rb.root; | |
222 // dig deeper in the history, if necessary | |
223 if (!NULL.equals(rb.p1) && !pwLocal.knownNode(rb.p1)) { | |
224 toQuery.add(rb.p1); | |
225 head2chain.put(rb.p1, chainElement.p1 = new BranchChain(rb.p1)); | |
226 } | |
227 if (!NULL.equals(rb.p2) && !pwLocal.knownNode(rb.p2)) { | |
228 toQuery.add(rb.p2); | |
229 head2chain.put(rb.p2, chainElement.p2 = new BranchChain(rb.p2)); | |
230 } | |
231 } | |
232 } | |
233 } | |
234 for (RemoteBranch rb : checkUp2Head) { | |
235 Nodeid h = rb.head; | |
236 Nodeid r = rb.root; | |
237 int watchdog = 1000; | |
238 BranchChain bc = head2chain.get(h); | |
239 assert bc != null; | |
240 // if we know branch root locally, there could be no parent branch chain elements. | |
241 assert bc.p1 == null; | |
242 assert bc.p2 == null; | |
243 do { | |
244 List<Nodeid> between = hgRemote.between(h, r); | |
245 if (between.isEmpty()) { | |
246 bc.branchRoot = r; | |
247 break; | |
248 } else { | |
249 Collections.reverse(between); | |
250 for (Nodeid n : between) { | |
251 if (pwLocal.knownNode(n)) { | |
252 r = n; | |
253 } else { | |
254 h = n; | |
255 break; | |
256 } | |
257 } | |
258 Nodeid lastInBetween = between.get(between.size() - 1); | |
259 if (r.equals(lastInBetween)) { | |
260 bc.branchRoot = r; | |
261 break; | |
262 } else if (h.equals(lastInBetween)) { // the only chance for current head pointer to point to the sequence tail | |
263 // is when r is second from the between list end (iow, head,1,[2],4,8...,root) | |
264 bc.branchRoot = r; | |
265 break; | |
266 } | |
267 } | |
268 } while(--watchdog > 0); | |
269 if (watchdog == 0) { | |
270 throw new HgBadStateException(String.format("Can't narrow down branch [%s, %s]", rb.head.shortNotation(), rb.root.shortNotation())); | |
271 } | |
272 } | |
273 return branches2load; | |
274 } | |
275 | |
276 /** | |
277 * @return list of nodeids from branchRoot to branchHead, inclusive. IOW, first element of the list is always root of the branch | |
278 */ | |
279 private static List<Nodeid> completeBranch(HgRemoteRepository hgRemote, final Nodeid branchRoot, final Nodeid branchHead) throws HgException { | |
280 class DataEntry { | |
281 public final Nodeid queryHead; | |
282 public final int headIndex; | |
283 public List<Nodeid> entries; | |
284 | |
285 public DataEntry(Nodeid head, int index, List<Nodeid> data) { | |
286 queryHead = head; | |
287 headIndex = index; | |
288 entries = data; | |
289 } | |
290 }; | |
291 | |
292 List<Nodeid> initial = hgRemote.between(branchHead, branchRoot); | |
293 Nodeid[] result = new Nodeid[1 + (1 << initial.size())]; | |
294 result[0] = branchHead; | |
295 int rootIndex = -1; // index in the result, where to place branche's root. | |
296 if (initial.isEmpty()) { | |
297 rootIndex = 1; | |
298 } else if (initial.size() == 1) { | |
299 rootIndex = 2; | |
300 } | |
301 LinkedList<DataEntry> datas = new LinkedList<DataEntry>(); | |
302 // DataEntry in datas has entries list filled with 'between' data, whereas | |
303 // DataEntry in toQuery keeps only nodeid and its index, with entries to be initialized before | |
304 // moving to datas. | |
305 LinkedList<DataEntry> toQuery = new LinkedList<DataEntry>(); | |
306 // | |
307 datas.add(new DataEntry(branchHead, 0, initial)); | |
308 int totalQueries = 1; | |
309 HashSet<Nodeid> queried = new HashSet<Nodeid>(); | |
310 while(!datas.isEmpty()) { | |
311 // keep record of those planned to be queried next time we call between() | |
312 // although may keep these in queried, if really don't want separate collection | |
313 HashSet<Nodeid> scheduled = new HashSet<Nodeid>(); | |
314 do { | |
315 DataEntry de = datas.removeFirst(); | |
316 // populate result with discovered elements between de.qiueryRoot and branch's head | |
317 for (int i = 1, j = 0; j < de.entries.size(); i = i << 1, j++) { | |
318 int idx = de.headIndex + i; | |
319 result[idx] = de.entries.get(j); | |
320 } | |
321 // form next query entries from new unknown elements | |
322 if (de.entries.size() > 1) { | |
323 /* when entries has only one element, it means de.queryRoot was at head-2 position, and thus | |
324 * no new information can be obtained. E.g. when it's 2, it might be case of [0..4] query with | |
325 * [1,2] result, and we need one more query to get element 3. | |
326 */ | |
327 for (int i =1, j = 0; j < de.entries.size(); i = i<<1, j++) { | |
328 int idx = de.headIndex + i; | |
329 Nodeid x = de.entries.get(j); | |
330 if (!queried.contains(x) && !scheduled.contains(x) && (rootIndex == -1 || rootIndex - de.headIndex > 1)) { | |
331 /*queries for elements right before head is senseless, but unless we know head's index, do it anyway*/ | |
332 toQuery.add(new DataEntry(x, idx, null)); | |
333 scheduled.add(x); | |
334 } | |
335 } | |
336 } | |
337 } while (!datas.isEmpty()); | |
338 if (!toQuery.isEmpty()) { | |
339 totalQueries++; | |
340 } | |
341 // for each query, create an between request range, keep record Range->DataEntry to know range's start index | |
342 LinkedList<HgRemoteRepository.Range> betweenBatch = new LinkedList<HgRemoteRepository.Range>(); | |
343 HashMap<HgRemoteRepository.Range, DataEntry> rangeToEntry = new HashMap<HgRemoteRepository.Range, DataEntry>(); | |
344 for (DataEntry de : toQuery) { | |
345 queried.add(de.queryHead); | |
346 HgRemoteRepository.Range r = new HgRemoteRepository.Range(branchRoot, de.queryHead); | |
347 betweenBatch.add(r); | |
348 rangeToEntry.put(r, de); | |
349 } | |
350 if (!betweenBatch.isEmpty()) { | |
351 Map<Range, List<Nodeid>> between = hgRemote.between(betweenBatch); | |
352 for (Entry<Range, List<Nodeid>> e : between.entrySet()) { | |
353 DataEntry de = rangeToEntry.get(e.getKey()); | |
354 assert de != null; | |
355 de.entries = e.getValue(); | |
356 if (rootIndex == -1 && de.entries.size() == 1) { | |
357 // returned sequence of length 1 means we used element from [head-2] as root | |
358 int numberOfElementsExcludingRootAndHead = de.headIndex + 1; | |
359 rootIndex = numberOfElementsExcludingRootAndHead + 1; | |
360 System.out.printf("On query %d found out exact number of missing elements: %d\n", totalQueries, numberOfElementsExcludingRootAndHead); | |
361 } | |
362 datas.add(de); // queue up to record result and construct further requests | |
363 } | |
364 betweenBatch.clear(); | |
365 rangeToEntry.clear(); | |
366 } | |
367 toQuery.clear(); | |
368 } | |
369 if (rootIndex == -1) { | |
370 throw new HgBadStateException("Shall not happen, provided between output is correct"); // FIXME | |
371 } | |
372 result[rootIndex] = branchRoot; | |
373 boolean resultOk = true; | |
374 LinkedList<Nodeid> fromRootToHead = new LinkedList<Nodeid>(); | |
375 for (int i = 0; i <= rootIndex; i++) { | |
376 Nodeid n = result[i]; | |
377 if (n == null) { | |
378 System.out.printf("ERROR: element %d wasn't found\n",i); | |
379 resultOk = false; | |
380 } | |
381 fromRootToHead.addFirst(n); // reverse order | |
382 } | |
383 System.out.println("Total queries:" + totalQueries); | |
384 if (!resultOk) { | |
385 throw new HgBadStateException("See console for details"); // FIXME | |
386 } | |
387 return fromRootToHead; | |
388 } | |
389 | 137 |
390 private static class SequenceConstructor { | 138 private static class SequenceConstructor { |
391 | 139 |
392 private int[] between(int root, int head) { | 140 private int[] between(int root, int head) { |
393 if (head <= (root+1)) { | 141 if (head <= (root+1)) { |