Mercurial > hg4j
comparison cmdline/org/tmatesoft/hg/console/Incoming.java @ 176:a8df7162ec75
Extracting complete branch using remote between call to detect incoming changes is done. Arguments reordered in remote repo to better match Hg server ideology, not my mental convenience
author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
---|---|
date | Sat, 02 Apr 2011 03:01:14 +0200 |
parents | b1de83ffa7f8 |
children | e10225daface |
comparison
equal
deleted
inserted
replaced
175:7653bdf82cf0 | 176:a8df7162ec75 |
---|---|
16 */ | 16 */ |
17 package org.tmatesoft.hg.console; | 17 package org.tmatesoft.hg.console; |
18 | 18 |
19 import static org.tmatesoft.hg.core.Nodeid.NULL; | 19 import static org.tmatesoft.hg.core.Nodeid.NULL; |
20 | 20 |
21 import java.io.File; | |
22 import java.net.URL; | |
21 import java.util.Arrays; | 23 import java.util.Arrays; |
22 import java.util.Collection; | |
23 import java.util.Collections; | 24 import java.util.Collections; |
24 import java.util.Comparator; | 25 import java.util.Comparator; |
25 import java.util.HashSet; | 26 import java.util.HashSet; |
26 import java.util.Iterator; | 27 import java.util.Iterator; |
27 import java.util.LinkedHashMap; | 28 import java.util.LinkedHashMap; |
28 import java.util.LinkedHashSet; | |
29 import java.util.LinkedList; | 29 import java.util.LinkedList; |
30 import java.util.List; | 30 import java.util.List; |
31 import java.util.Map.Entry; | 31 import java.util.Map.Entry; |
32 | 32 |
33 import org.tmatesoft.hg.core.HgBadStateException; | |
34 import org.tmatesoft.hg.core.HgException; | |
33 import org.tmatesoft.hg.core.Nodeid; | 35 import org.tmatesoft.hg.core.Nodeid; |
36 import org.tmatesoft.hg.internal.ConfigFile; | |
37 import org.tmatesoft.hg.internal.Internals; | |
34 import org.tmatesoft.hg.repo.HgChangelog; | 38 import org.tmatesoft.hg.repo.HgChangelog; |
39 import org.tmatesoft.hg.repo.HgLookup; | |
40 import org.tmatesoft.hg.repo.HgRemoteRepository; | |
35 import org.tmatesoft.hg.repo.HgRemoteRepository.RemoteBranch; | 41 import org.tmatesoft.hg.repo.HgRemoteRepository.RemoteBranch; |
36 import org.tmatesoft.hg.repo.HgRepository; | 42 import org.tmatesoft.hg.repo.HgRepository; |
37 | 43 |
38 | 44 |
39 /** | 45 /** |
44 * @author TMate Software Ltd. | 50 * @author TMate Software Ltd. |
45 */ | 51 */ |
46 public class Incoming { | 52 public class Incoming { |
47 | 53 |
48 public static void main(String[] args) throws Exception { | 54 public static void main(String[] args) throws Exception { |
49 if (Boolean.TRUE.booleanValue()) { | 55 if (Boolean.FALSE.booleanValue()) { |
50 new SequenceConstructor().test(); | 56 new SequenceConstructor().test(); |
51 return; | 57 return; |
52 } | 58 } |
53 Options cmdLineOpts = Options.parse(args); | 59 Options cmdLineOpts = Options.parse(args); |
54 HgRepository hgRepo = cmdLineOpts.findRepository(); | 60 HgRepository hgRepo = cmdLineOpts.findRepository(); |
55 if (hgRepo.isInvalid()) { | 61 if (hgRepo.isInvalid()) { |
56 System.err.printf("Can't find repository in: %s\n", hgRepo.getLocation()); | 62 System.err.printf("Can't find repository in: %s\n", hgRepo.getLocation()); |
57 return; | 63 return; |
58 } | 64 } |
65 String key = "svnkit"; | |
66 ConfigFile cfg = new Internals().newConfigFile(); | |
67 cfg.addLocation(new File(System.getProperty("user.home"), ".hgrc")); | |
68 String server = cfg.getSection("paths").get(key); | |
69 if (server == null) { | |
70 throw new HgException(String.format("Can't find server %s specification in the config", key)); | |
71 } | |
72 HgRemoteRepository hgRemote = new HgLookup().detect(new URL(server)); | |
73 // | |
59 // in fact, all we need from changelog is set of all nodeids. However, since ParentWalker reuses same Nodeids, it's not too expensive | 74 // in fact, all we need from changelog is set of all nodeids. However, since ParentWalker reuses same Nodeids, it's not too expensive |
60 // to reuse it here, XXX although later this may need to be refactored | 75 // to reuse it here, XXX although later this may need to be refactored |
61 final HgChangelog.ParentWalker pw = hgRepo.getChangelog().new ParentWalker(); | 76 final HgChangelog.ParentWalker pw = hgRepo.getChangelog().new ParentWalker(); |
62 pw.init(); | 77 pw.init(); |
63 // | 78 // |
64 HashSet<Nodeid> base = new HashSet<Nodeid>(); | 79 List<RemoteBranch> missingBranches = calculateMissingBranches(hgRepo, hgRemote); |
65 HashSet<Nodeid> unknownRemoteHeads = new HashSet<Nodeid>(); | 80 LinkedList<Nodeid> missing = new LinkedList<Nodeid>(); |
66 // imagine empty repository - any nodeid from remote heads would be unknown | 81 for (RemoteBranch rb : missingBranches) { |
67 unknownRemoteHeads.add(Nodeid.fromAscii("382cfe9463db0484a14136e4b38407419525f0c0".getBytes(), 0, 40)); | 82 List<Nodeid> completeBranch = completeBranch(hgRemote, rb); |
68 // | 83 // FIXME ensure topologically sorted result |
69 LinkedList<RemoteBranch> remoteBranches = new LinkedList<RemoteBranch>(); | 84 missing.addAll(completeBranch); |
70 remoteBranches(unknownRemoteHeads, remoteBranches); | 85 } |
71 // | 86 // Collections.reverse(missing); // useful to test output, from newer to older |
72 HashSet<Nodeid> visited = new HashSet<Nodeid>(); | 87 for (Nodeid n : missing) { |
73 HashSet<RemoteBranch> processed = new HashSet<RemoteBranch>(); | |
74 LinkedList<Nodeid[]> toScan = new LinkedList<Nodeid[]>(); | |
75 LinkedHashSet<Nodeid> toFetch = new LinkedHashSet<Nodeid>(); | |
76 // next one seems to track heads we've asked (or plan to ask) remote.branches for | |
77 HashSet<Nodeid> unknownHeads /*req*/ = new HashSet<Nodeid>(unknownRemoteHeads); | |
78 while (!remoteBranches.isEmpty()) { | |
79 LinkedList<Nodeid> toQueryRemote = new LinkedList<Nodeid>(); | |
80 while (!remoteBranches.isEmpty()) { | |
81 RemoteBranch next = remoteBranches.removeFirst(); | |
82 if (visited.contains(next.head) || processed.contains(next)) { | |
83 continue; | |
84 } | |
85 if (Nodeid.NULL.equals(next.head)) { | |
86 // it's discovery.py that expects next.head to be nullid here, I can't imagine how this may happen, hence this exception | |
87 throw new IllegalStateException("I wonder if null if may ever get here with remote branches"); | |
88 } else if (pw.knownNode(next.root)) { | |
89 // root of the remote change is known locally, analyze to find exact missing changesets | |
90 toScan.addLast(new Nodeid[] { next.head, next.root }); | |
91 processed.add(next); | |
92 } else { | |
93 if (!visited.contains(next.root) && !toFetch.contains(next.root)) { | |
94 // if parents are locally known, this is a new branch (a sequence of changes sprung from known parents) | |
95 if ((next.p1 == null || pw.knownNode(next.p1)) && (next.p2 == null || pw.knownNode(next.p2))) { | |
96 toFetch.add(next.root); | |
97 } | |
98 // XXX perhaps, may combine this parent processing below (I don't understand what this code is exactly about) | |
99 if (pw.knownNode(next.p1)) { | |
100 base.add(next.p1); | |
101 } | |
102 if (pw.knownNode(next.p2)) { | |
103 base.add(next.p2); | |
104 } | |
105 } | |
106 if (next.p1 != null && !pw.knownNode(next.p1) && !unknownHeads.contains(next.p1)) { | |
107 toQueryRemote.add(next.p1); | |
108 unknownHeads.add(next.p1); | |
109 } | |
110 if (next.p2 != null && !pw.knownNode(next.p2) && !unknownHeads.contains(next.p2)) { | |
111 toQueryRemote.add(next.p2); | |
112 unknownHeads.add(next.p2); | |
113 } | |
114 } | |
115 visited.add(next.head); | |
116 } | |
117 if (!toQueryRemote.isEmpty()) { | |
118 // discovery.py in fact does this in batches of 10 revisions a time. | |
119 // however, this slicing may be done in remoteBranches call instead (if needed) | |
120 remoteBranches(toQueryRemote, remoteBranches); | |
121 } | |
122 } | |
123 while (!toScan.isEmpty()) { | |
124 Nodeid[] head_root = toScan.removeFirst(); | |
125 List<Nodeid> nodesBetween = remoteBetween(head_root[0], head_root[1], new LinkedList<Nodeid>()); | |
126 nodesBetween.add(head_root[1]); | |
127 int x = 1; | |
128 Nodeid p = head_root[0]; | |
129 for (Nodeid i : nodesBetween) { | |
130 System.out.println("narrowing " + x + ":" + nodesBetween.size() + " " + i.shortNotation()); | |
131 if (pw.knownNode(i)) { | |
132 if (x <= 2) { | |
133 toFetch.add(p); | |
134 base.add(i); | |
135 } else { | |
136 // XXX original discovery.py collects new elements to scan separately | |
137 // likely to "batch" calls to server | |
138 System.out.println("narrowed branch search to " + p.shortNotation() + ":" + i.shortNotation()); | |
139 toScan.addLast(new Nodeid[] { p, i }); | |
140 } | |
141 break; | |
142 } | |
143 x = x << 1; | |
144 p = i; | |
145 } | |
146 } | |
147 for (Nodeid n : toFetch) { | |
148 if (pw.knownNode(n)) { | 88 if (pw.knownNode(n)) { |
149 System.out.println("Erroneous to fetch:" + n); | 89 System.out.println("Erroneous to fetch:" + n); |
150 } else { | 90 } else { |
151 System.out.println(n); | 91 System.out.println(n); |
152 } | 92 } |
153 } | 93 } |
154 | |
155 } | 94 } |
156 | 95 |
157 | 96 private static List<RemoteBranch> calculateMissingBranches(HgRepository hgRepo, HgRemoteRepository hgRemote) { |
158 private static void remoteBranches(Collection<Nodeid> unknownRemoteHeads, List<RemoteBranch> remoteBranches) { | 97 // FIXME implement |
159 // | 98 // |
160 // TODO implement this with remote access | 99 // sample 0..52 |
100 Nodeid head = Nodeid.fromAscii("30bd389788464287cee22ccff54c330a4b715de5"); | |
101 Nodeid root = Nodeid.fromAscii("dbd663faec1f0175619cf7668bddc6350548b8d6"); | |
102 Nodeid p1 = NULL, p2 = NULL; | |
103 RemoteBranch fake = new RemoteBranch(head, root, p1, p2); | |
104 return Collections.singletonList(fake); | |
105 } | |
106 | |
107 // RemoteBranch not necessarily a 'true' remote branch. I use this structure to keep root, head, and root's parents, any | |
108 // of them can be known locally, parent might be only one (when I split 'true' RemoteBranch in between because of locally known node | |
109 private static List<Nodeid> completeBranch(HgRemoteRepository hgRemote, RemoteBranch rb) throws HgException { | |
110 class DataEntry { | |
111 public final Nodeid queryHead; | |
112 public final int headIndex; | |
113 public List<Nodeid> entries; | |
114 | |
115 public DataEntry(Nodeid head, int index, List<Nodeid> data) { | |
116 queryHead = head; | |
117 headIndex = index; | |
118 entries = data; | |
119 } | |
120 }; | |
121 | |
122 List<Nodeid> initial = hgRemote.between(rb.head, rb.root); | |
123 Nodeid[] result = new Nodeid[1 << initial.size()]; | |
124 result[0] = rb.head; | |
125 int rootIndex = -1; // index in the result, where to place branch's root. | |
126 LinkedList<DataEntry> datas = new LinkedList<DataEntry>(); | |
127 // DataEntry in datas has entries list filled with 'between' data, whereas | |
128 // DataEntry in toQuery keeps only nodeid and its index, with entries to be initialized before | |
129 // moving to datas. | |
130 LinkedList<DataEntry> toQuery = new LinkedList<DataEntry>(); | |
161 // | 131 // |
162 RemoteBranch rb = new RemoteBranch(unknownRemoteHeads.iterator().next(), Nodeid.fromAscii("dbd663faec1f0175619cf7668bddc6350548b8d6"), NULL, NULL); | 132 datas.add(new DataEntry(rb.head, 0, initial)); |
163 remoteBranches.add(rb); | 133 int totalQueries = 1; |
164 } | 134 HashSet<Nodeid> queried = new HashSet<Nodeid>(); |
165 | 135 while(!datas.isEmpty()) { |
166 private static List<Nodeid> remoteBetween(Nodeid nodeid1, Nodeid nodeid2, List<Nodeid> list) { | 136 do { |
167 // sent: cmd=between&pairs=d6d2a630f4a6d670c90a5ca909150f2b426ec88f-dbd663faec1f0175619cf7668bddc6350548b8d6 | 137 DataEntry de = datas.removeFirst(); |
168 // received: a78c980749e3ccebb47138b547e9b644a22797a9 286d221f6c28cbfce25ea314e1f46a23b7f979d3 fc265ddeab262ff5c34b4cf4e2522d8d41f1f05b a3576694a4d1edaa681cab15b89d6b556b02aff4 | 138 // populate result with discovered elements between de.queryHead and branch's head |
169 // 1st, 2nd, fourth and eights of total 8 changes between rev9 and rev0 | 139 for (int i = 1, j = 0; j < de.entries.size(); i = i << 1, j++) { |
170 // | 140 int idx = de.headIndex + i; |
171 // | 141 result[idx] = de.entries.get(j); |
172 // a78c980749e3ccebb47138b547e9b644a22797a9 286d221f6c28cbfce25ea314e1f46a23b7f979d3 fc265ddeab262ff5c34b4cf4e2522d8d41f1f05b a3576694a4d1edaa681cab15b89d6b556b02aff4 | 142 } |
173 //d6d2a630f4a6d670c90a5ca909150f2b426ec88f a78c980749e3ccebb47138b547e9b644a22797a9 5abe5af181bd6a6d3e94c378376c901f0f80da50 08db726a0fb7914ac9d27ba26dc8bbf6385a0554 | 143 // form next query entries from new unknown elements |
174 | 144 if (de.entries.size() > 1) { |
175 // TODO implement with remote access | 145 /* when entries has only one element, it means de.queryRoot was at head-2 position, and thus |
176 String response = null; | 146 * no new information can be obtained. E.g. when it's 2, it might be case of [0..4] query with |
177 if (nodeid1.equals(Nodeid.fromAscii("382cfe9463db0484a14136e4b38407419525f0c0".getBytes(), 0, 40)) && nodeid2.equals(Nodeid.fromAscii("dbd663faec1f0175619cf7668bddc6350548b8d6".getBytes(), 0, 40))) { | 147 * [1,2] result, and we need one more query to get element 3. |
178 response = "d6d2a630f4a6d670c90a5ca909150f2b426ec88f a78c980749e3ccebb47138b547e9b644a22797a9 5abe5af181bd6a6d3e94c378376c901f0f80da50 08db726a0fb7914ac9d27ba26dc8bbf6385a0554"; | 148 */ |
179 } else if (nodeid1.equals(Nodeid.fromAscii("a78c980749e3ccebb47138b547e9b644a22797a9".getBytes(), 0, 40)) && nodeid2.equals(Nodeid.fromAscii("5abe5af181bd6a6d3e94c378376c901f0f80da50".getBytes(), 0, 40))) { | 149 for (int i =1, j = 0; j < de.entries.size(); i = i<<1, j++) { |
180 response = "286d221f6c28cbfce25ea314e1f46a23b7f979d3"; | 150 int idx = de.headIndex + i; |
181 } | 151 Nodeid x = de.entries.get(j); |
182 if (response == null) { | 152 if (!queried.contains(x) && (rootIndex == -1 || rootIndex - de.headIndex > 1)) { |
183 throw HgRepository.notImplemented(); | 153 /*queries for elements right before head is senseless, but unless we know head's index, do it anyway*/ |
184 } | 154 toQuery.add(new DataEntry(x, idx, null)); |
185 for (String s : response.split(" ")) { | 155 } |
186 list.add(Nodeid.fromAscii(s.getBytes(), 0, 40)); | 156 } |
187 } | 157 } |
188 return list; | 158 } while (!datas.isEmpty()); |
159 if (!toQuery.isEmpty()) { | |
160 totalQueries++; | |
161 } | |
162 for (DataEntry de : toQuery) { | |
163 if (!queried.contains(de.queryHead)) { | |
164 queried.add(de.queryHead); | |
165 List<Nodeid> between = hgRemote.between(de.queryHead, rb.root); | |
166 if (rootIndex == -1 && between.size() == 1) { | |
167 // returned sequence of length 1 means we used element from [head-2] as root | |
168 int numberOfElementsExcludingRootAndHead = de.headIndex + 1; | |
169 rootIndex = numberOfElementsExcludingRootAndHead + 1; | |
170 System.out.printf("On query %d found out exact number of missing elements: %d\n", totalQueries, numberOfElementsExcludingRootAndHead); | |
171 } | |
172 de.entries = between; | |
173 datas.add(de); // queue up to record result and construct further requests | |
174 } | |
175 } | |
176 toQuery.clear(); | |
177 } | |
178 if (rootIndex == -1) { | |
179 throw new HgBadStateException("Shall not happen, provided between output is correct"); // FIXME | |
180 } | |
181 result[rootIndex] = rb.root; | |
182 boolean resultOk = true; | |
183 LinkedList<Nodeid> fromRootToHead = new LinkedList<Nodeid>(); | |
184 for (int i = 0; i <= rootIndex; i++) { | |
185 Nodeid n = result[i]; | |
186 if (n == null) { | |
187 System.out.printf("ERROR: element %d wasn't found\n",i); | |
188 resultOk = false; | |
189 } | |
190 fromRootToHead.addFirst(n); // reverse order | |
191 } | |
192 if (!resultOk) { | |
193 throw new HgBadStateException("See console for details"); // FIXME | |
194 } | |
195 return fromRootToHead; | |
189 } | 196 } |
190 | 197 |
191 private static class SequenceConstructor { | 198 private static class SequenceConstructor { |
192 | 199 |
193 private int[] between(int root, int head) { | 200 private int[] between(int root, int head) { |
204 System.out.println(Arrays.toString(rv)); | 211 System.out.println(Arrays.toString(rv)); |
205 return rv; | 212 return rv; |
206 } | 213 } |
207 | 214 |
208 public void test() { | 215 public void test() { |
209 int root = 0, head = 1000; | 216 int root = 0, head = 126; |
210 int[] data = between(root, head); // max number of elements to recover is 2**(1+data.length)-1, need room for | 217 int[] data = between(root, head); // max number of elements to recover is 2**data.length-1, when head is exactly |
211 // as many elements, hence 2**(data.length+1). In worst case, when there are only 2**data.length + 1 missing elements, | 218 // 2**data.length element of the branch. |
212 // almost half of the finalSequence would be empty | 219 // In such case, total number of elements in the branch (including head and root, would be 2**data.length+1 |
213 int[] finalSequence = new int[1 << (data.length+1) >>> 5]; // div 32 - total bits to integers | 220 int[] finalSequence = new int[1 + (1 << data.length >>> 5)]; // div 32 - total bits to integers, +1 for possible modulus |
214 int exactNumberOfElements = -1; // exact number of meaningful bits in finalSequence | 221 int exactNumberOfElements = -1; // exact number of meaningful bits in finalSequence |
215 LinkedHashMap<Integer, int[]> datas = new LinkedHashMap<Integer, int[]>(); | 222 LinkedHashMap<Integer, int[]> datas = new LinkedHashMap<Integer, int[]>(); |
216 datas.put(root, data); | 223 datas.put(root, data); |
217 int totalQueries = 1; | 224 int totalQueries = 1; |
218 HashSet<Integer> queried = new HashSet<Integer>(); | 225 HashSet<Integer> queried = new HashSet<Integer>(); |
239 match &= true; | 246 match &= true; |
240 } | 247 } |
241 } | 248 } |
242 System.out.println(match ? "Match, on query:" + totalQueries : "Didn't match"); | 249 System.out.println(match ? "Match, on query:" + totalQueries : "Didn't match"); |
243 } | 250 } |
244 if (data.length > 2) { | 251 if (data.length > 1) { |
252 /*queries for elements next to head is senseless, hence data.length check above and head-x below*/ | |
245 for (int x : data) { | 253 for (int x : data) { |
246 if (!queried.contains(x) && head - x > 1) { /*queries for neighboring elements is senseless*/ | 254 if (!queried.contains(x) && head - x > 1) { |
247 toQuery.add(new int[] {x, head}); | 255 toQuery.add(new int[] {x, head}); |
248 } | 256 } |
249 } | 257 } |
250 } | 258 } |
251 } while (!datas.isEmpty()) ; | 259 } while (!datas.isEmpty()) ; |