Mercurial > hg4j
comparison cmdline/org/tmatesoft/hg/console/Incoming.java @ 177:e10225daface
Use POST for long 'between' queries. Batch 'between' queries (pass multiple pairs to the server) to minimize the number of round-trips
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
|---|---|
| date | Sat, 02 Apr 2011 23:05:28 +0200 |
| parents | a8df7162ec75 |
| children | 62665d8f0686 |
comparison
equal
deleted
inserted
replaced
| 176:a8df7162ec75 | 177:e10225daface |
|---|---|
| 21 import java.io.File; | 21 import java.io.File; |
| 22 import java.net.URL; | 22 import java.net.URL; |
| 23 import java.util.Arrays; | 23 import java.util.Arrays; |
| 24 import java.util.Collections; | 24 import java.util.Collections; |
| 25 import java.util.Comparator; | 25 import java.util.Comparator; |
| 26 import java.util.HashMap; | |
| 26 import java.util.HashSet; | 27 import java.util.HashSet; |
| 27 import java.util.Iterator; | 28 import java.util.Iterator; |
| 28 import java.util.LinkedHashMap; | 29 import java.util.LinkedHashMap; |
| 29 import java.util.LinkedList; | 30 import java.util.LinkedList; |
| 30 import java.util.List; | 31 import java.util.List; |
| 32 import java.util.Map; | |
| 31 import java.util.Map.Entry; | 33 import java.util.Map.Entry; |
| 32 | 34 |
| 33 import org.tmatesoft.hg.core.HgBadStateException; | 35 import org.tmatesoft.hg.core.HgBadStateException; |
| 34 import org.tmatesoft.hg.core.HgException; | 36 import org.tmatesoft.hg.core.HgException; |
| 35 import org.tmatesoft.hg.core.Nodeid; | 37 import org.tmatesoft.hg.core.Nodeid; |
| 36 import org.tmatesoft.hg.internal.ConfigFile; | 38 import org.tmatesoft.hg.internal.ConfigFile; |
| 37 import org.tmatesoft.hg.internal.Internals; | 39 import org.tmatesoft.hg.internal.Internals; |
| 38 import org.tmatesoft.hg.repo.HgChangelog; | 40 import org.tmatesoft.hg.repo.HgChangelog; |
| 39 import org.tmatesoft.hg.repo.HgLookup; | 41 import org.tmatesoft.hg.repo.HgLookup; |
| 40 import org.tmatesoft.hg.repo.HgRemoteRepository; | 42 import org.tmatesoft.hg.repo.HgRemoteRepository; |
| 43 import org.tmatesoft.hg.repo.HgRemoteRepository.Range; | |
| 41 import org.tmatesoft.hg.repo.HgRemoteRepository.RemoteBranch; | 44 import org.tmatesoft.hg.repo.HgRemoteRepository.RemoteBranch; |
| 42 import org.tmatesoft.hg.repo.HgRepository; | 45 import org.tmatesoft.hg.repo.HgRepository; |
| 43 | 46 |
| 44 | 47 |
| 45 /** | 48 /** |
| 131 // | 134 // |
| 132 datas.add(new DataEntry(rb.head, 0, initial)); | 135 datas.add(new DataEntry(rb.head, 0, initial)); |
| 133 int totalQueries = 1; | 136 int totalQueries = 1; |
| 134 HashSet<Nodeid> queried = new HashSet<Nodeid>(); | 137 HashSet<Nodeid> queried = new HashSet<Nodeid>(); |
| 135 while(!datas.isEmpty()) { | 138 while(!datas.isEmpty()) { |
| 139 // keep a record of those planned to be queried the next time we call between() | |
| 140 // although these could be kept in 'queried' if a separate collection is really not wanted | |
| 141 HashSet<Nodeid> scheduled = new HashSet<Nodeid>(); | |
| 136 do { | 142 do { |
| 137 DataEntry de = datas.removeFirst(); | 143 DataEntry de = datas.removeFirst(); |
| 138 // populate result with discovered elements between de.queryRoot and branch's head | 144 // populate result with discovered elements between de.queryRoot and branch's head |
| 139 for (int i = 1, j = 0; j < de.entries.size(); i = i << 1, j++) { | 145 for (int i = 1, j = 0; j < de.entries.size(); i = i << 1, j++) { |
| 140 int idx = de.headIndex + i; | 146 int idx = de.headIndex + i; |
| 147 * [1,2] result, and we need one more query to get element 3. | 153 * [1,2] result, and we need one more query to get element 3. |
| 148 */ | 154 */ |
| 149 for (int i =1, j = 0; j < de.entries.size(); i = i<<1, j++) { | 155 for (int i =1, j = 0; j < de.entries.size(); i = i<<1, j++) { |
| 150 int idx = de.headIndex + i; | 156 int idx = de.headIndex + i; |
| 151 Nodeid x = de.entries.get(j); | 157 Nodeid x = de.entries.get(j); |
| 152 if (!queried.contains(x) && (rootIndex == -1 || rootIndex - de.headIndex > 1)) { | 158 if (!queried.contains(x) && !scheduled.contains(x) && (rootIndex == -1 || rootIndex - de.headIndex > 1)) { |
| 153 /*queries for elements right before head is senseless, but unless we know head's index, do it anyway*/ | 159 /*queries for elements right before head is senseless, but unless we know head's index, do it anyway*/ |
| 154 toQuery.add(new DataEntry(x, idx, null)); | 160 toQuery.add(new DataEntry(x, idx, null)); |
| 161 scheduled.add(x); | |
| 155 } | 162 } |
| 156 } | 163 } |
| 157 } | 164 } |
| 158 } while (!datas.isEmpty()); | 165 } while (!datas.isEmpty()); |
| 159 if (!toQuery.isEmpty()) { | 166 if (!toQuery.isEmpty()) { |
| 160 totalQueries++; | 167 totalQueries++; |
| 161 } | 168 } |
| 169 // for each query, create a 'between' request range; keep a Range->DataEntry record to know the range's start index | |
| 170 LinkedList<HgRemoteRepository.Range> betweenBatch = new LinkedList<HgRemoteRepository.Range>(); | |
| 171 HashMap<HgRemoteRepository.Range, DataEntry> rangeToEntry = new HashMap<HgRemoteRepository.Range, DataEntry>(); | |
| 162 for (DataEntry de : toQuery) { | 172 for (DataEntry de : toQuery) { |
| 163 if (!queried.contains(de.queryHead)) { | 173 queried.add(de.queryHead); |
| 164 queried.add(de.queryHead); | 174 HgRemoteRepository.Range r = new HgRemoteRepository.Range(rb.root, de.queryHead); |
| 165 List<Nodeid> between = hgRemote.between(de.queryHead, rb.root); | 175 betweenBatch.add(r); |
| 166 if (rootIndex == -1 && between.size() == 1) { | 176 rangeToEntry.put(r, de); |
| 177 } | |
| 178 if (!betweenBatch.isEmpty()) { | |
| 179 Map<Range, List<Nodeid>> between = hgRemote.between(betweenBatch); | |
| 180 for (Entry<Range, List<Nodeid>> e : between.entrySet()) { | |
| 181 DataEntry de = rangeToEntry.get(e.getKey()); | |
| 182 assert de != null; | |
| 183 de.entries = e.getValue(); | |
| 184 if (rootIndex == -1 && de.entries.size() == 1) { | |
| 167 // returned sequence of length 1 means we used element from [head-2] as root | 185 // returned sequence of length 1 means we used element from [head-2] as root |
| 168 int numberOfElementsExcludingRootAndHead = de.headIndex + 1; | 186 int numberOfElementsExcludingRootAndHead = de.headIndex + 1; |
| 169 rootIndex = numberOfElementsExcludingRootAndHead + 1; | 187 rootIndex = numberOfElementsExcludingRootAndHead + 1; |
| 170 System.out.printf("On query %d found out exact number of missing elements: %d\n", totalQueries, numberOfElementsExcludingRootAndHead); | 188 System.out.printf("On query %d found out exact number of missing elements: %d\n", totalQueries, numberOfElementsExcludingRootAndHead); |
| 171 } | 189 } |
| 172 de.entries = between; | |
| 173 datas.add(de); // queue up to record result and construct further requests | 190 datas.add(de); // queue up to record result and construct further requests |
| 174 } | 191 } |
| 192 betweenBatch.clear(); | |
| 193 rangeToEntry.clear(); | |
| 175 } | 194 } |
| 176 toQuery.clear(); | 195 toQuery.clear(); |
| 177 } | 196 } |
| 178 if (rootIndex == -1) { | 197 if (rootIndex == -1) { |
| 179 throw new HgBadStateException("Shall not happen, provided between output is correct"); // FIXME | 198 throw new HgBadStateException("Shall not happen, provided between output is correct"); // FIXME |
| 187 System.out.printf("ERROR: element %d wasn't found\n",i); | 206 System.out.printf("ERROR: element %d wasn't found\n",i); |
| 188 resultOk = false; | 207 resultOk = false; |
| 189 } | 208 } |
| 190 fromRootToHead.addFirst(n); // reverse order | 209 fromRootToHead.addFirst(n); // reverse order |
| 191 } | 210 } |
| 211 System.out.println("Total queries:" + totalQueries); | |
| 192 if (!resultOk) { | 212 if (!resultOk) { |
| 193 throw new HgBadStateException("See console for details"); // FIXME | 213 throw new HgBadStateException("See console for details"); // FIXME |
| 194 } | 214 } |
| 195 return fromRootToHead; | 215 return fromRootToHead; |
| 196 } | 216 } |
