/src/libtorrent/src/request_blocks.cpp
Line | Count | Source |
1 | | /* |
2 | | |
3 | | Copyright (c) 2014-2021, Arvid Norberg |
4 | | Copyright (c) 2016, 2019-2020, Alden Torres |
5 | | Copyright (c) 2021, Denis Kuzmenok |
6 | | All rights reserved. |
7 | | |
8 | | Redistribution and use in source and binary forms, with or without |
9 | | modification, are permitted provided that the following conditions |
10 | | are met: |
11 | | |
12 | | * Redistributions of source code must retain the above copyright |
13 | | notice, this list of conditions and the following disclaimer. |
14 | | * Redistributions in binary form must reproduce the above copyright |
15 | | notice, this list of conditions and the following disclaimer in |
16 | | the documentation and/or other materials provided with the distribution. |
17 | | * Neither the name of the author nor the names of its |
18 | | contributors may be used to endorse or promote products derived |
19 | | from this software without specific prior written permission. |
20 | | |
21 | | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
22 | | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
23 | | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
24 | | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
25 | | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
26 | | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
27 | | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
28 | | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
29 | | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
30 | | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
31 | | POSSIBILITY OF SUCH DAMAGE. |
32 | | |
33 | | */ |
34 | | |
35 | | #include "libtorrent/bitfield.hpp" |
36 | | #include "libtorrent/peer_connection.hpp" |
37 | | #include "libtorrent/torrent.hpp" |
38 | | #include "libtorrent/aux_/socket_type.hpp" |
39 | | #include "libtorrent/peer_info.hpp" // for peer_info flags |
40 | | #include "libtorrent/request_blocks.hpp" |
41 | | #include "libtorrent/aux_/alert_manager.hpp" |
42 | | #include "libtorrent/aux_/has_block.hpp" |
43 | | |
44 | | #include <vector> |
45 | | |
46 | | namespace libtorrent { |
47 | | |
48 | | int source_rank(peer_source_flags_t const source_bitmask) |
49 | 0 | { |
50 | 0 | int ret = 0; |
51 | 0 | if (source_bitmask & peer_info::tracker) ret |= 1 << 5; |
52 | 0 | if (source_bitmask & peer_info::lsd) ret |= 1 << 4; |
53 | 0 | if (source_bitmask & peer_info::dht) ret |= 1 << 3; |
54 | 0 | if (source_bitmask & peer_info::pex) ret |= 1 << 2; |
55 | 0 | return ret; |
56 | 0 | } |
57 | | |
58 | | // the case where ignore_peer is motivated is if two peers |
59 | | // have only one piece that we don't have, and it's the |
60 | | // same piece for both peers. Then they might get into an |
61 | | // infinite loop, fighting to request the same blocks. |
62 | | // returns false if the function is aborted by an early-exit |
63 | | // condition. |
// Fill this peer's request pipeline by asking the torrent's piece picker
// for interesting blocks and queuing them on the connection.
// t: the torrent to pick blocks from (metadata must be valid)
// c: the peer connection the requests are queued on
// Returns false when aborted by an early-exit condition (seeding, upload
// mode, no metadata, graceful pause, or a full request queue); returns
// true once picking has actually been attempted.
bool request_a_block(torrent& t, peer_connection& c)
{
	// early-exit ladder: any of these states means we must not issue
	// new block requests on this connection right now
	if (t.is_seed()) return false;
	if (c.no_download()) return false;
	if (t.upload_mode()) return false;
	if (c.is_disconnecting()) return false;

	// don't request pieces before we have the metadata
	if (!t.valid_metadata()) return false;

	// don't request pieces before the peer is properly
	// initialized after we have the metadata
	if (!t.are_files_checked()) return false;

	// we don't want to request more blocks while trying to gracefully pause
	if (t.graceful_pause()) return false;

	// NOTE(review): only non-bittorrent connection types are allowed to
	// have a null peer_info_struct here — presumably web seed / url peer
	// connections; confirm against peer_connection
	TORRENT_ASSERT(c.peer_info_struct() != nullptr
		|| c.type() != connection_type::bittorrent);

	bool const time_critical_mode = t.num_time_critical_pieces() > 0;

	// in time critical mode, only have 1 outstanding request at a time
	// via normal requests
	int const desired_queue_size = c.desired_queue_size();

	// how many more block requests we may put in flight for this peer:
	// the target queue depth minus what is already queued or outstanding
	int num_requests = desired_queue_size
		- int(c.download_queue().size())
		- int(c.request_queue().size());

#ifndef TORRENT_DISABLE_LOGGING
	if (c.should_log(peer_log_alert::info))
	{
		c.peer_log(peer_log_alert::info, "PIECE_PICKER"
			, "dlq: %d rqq: %d target: %d req: %d endgame: %d"
			, int(c.download_queue().size()), int(c.request_queue().size())
			, desired_queue_size, num_requests, c.endgame());
	}
#endif
	TORRENT_ASSERT(desired_queue_size > 0);
	// if our request queue is already full, we
	// don't have to make any new requests yet
	if (num_requests <= 0) return false;

	// make sure the piece picker exists before we take a reference to it
	t.need_picker();

	piece_picker& p = t.picker();
	std::vector<piece_block> interesting_pieces;
	interesting_pieces.reserve(100);

	int prefer_contiguous_blocks = c.prefer_contiguous_blocks();

	if (prefer_contiguous_blocks == 0
		&& !time_critical_mode
		&& t.settings().get_int(settings_pack::whole_pieces_threshold) > 0)
	{
		// if our download rate lets us download a whole piece in
		// "whole_pieces_threshold" seconds, we prefer to pick an entire piece.
		// If we can download multiple whole pieces, we prefer to download that
		// many contiguous pieces.

		// download_rate times the whole piece threshold (seconds) gives the
		// number of bytes downloaded in one window of that threshold, divided
		// by the piece size give us the number of (whole) pieces downloaded
		// in the window.
		// the 8 MiB cap bounds the estimate against very fast peers
		int const contiguous_pieces =
			std::min(c.statistics().download_payload_rate()
				* t.settings().get_int(settings_pack::whole_pieces_threshold)
				, 8 * 1024 * 1024)
			/ t.torrent_file().piece_length();

		int const blocks_per_piece = t.torrent_file().piece_length() / t.block_size();

		prefer_contiguous_blocks = contiguous_pieces * blocks_per_piece;
	}

	// if we prefer whole pieces, the piece picker will pick at least
	// the number of blocks we want, but it will try to make the picked
	// blocks be from whole pieces, possibly by returning more blocks
	// than we requested.

	aux::session_interface& ses = t.session();

	std::vector<pending_block> const& dq = c.download_queue();
	std::vector<pending_block> const& rq = c.request_queue();

	std::vector<piece_index_t> const& suggested = c.suggested_pieces();
	// bits points at the effective availability bitfield we pick from;
	// it may be redirected to fast_mask below while we're choked
	auto const* bits = &c.get_bitfield();
	typed_bitfield<piece_index_t> fast_mask;

	if (c.has_peer_choked())
	{
		// if we are choked we can only pick pieces from the
		// allowed fast set. The allowed fast set is sorted
		// in ascending priority order

		// build a bitmask with only the allowed pieces in it
		fast_mask.resize(c.get_bitfield().size(), false);
		for (auto const& i : c.allowed_fast())
		{
			// only count allowed-fast pieces the peer actually has
			if ((*bits)[i]) fast_mask.set_bit(i);
		}
		bits = &fast_mask;
	}

	// picks the interesting pieces from this peer
	// the integer is the number of pieces that
	// should be guaranteed to be available for download
	// (if num_requests is too big, too many pieces are
	// picked and cpu-time is wasted)
	// the last argument is if we should prefer whole pieces
	// for this peer. If we're downloading one piece in 20 seconds
	// then use this mode.
	picker_flags_t const flags = p.pick_pieces(*bits, interesting_pieces
		, num_requests, prefer_contiguous_blocks, c.peer_info_struct()
		, c.picker_options(), suggested, t.num_peers()
		, ses.stats_counters());

#ifndef TORRENT_DISABLE_LOGGING
	if (t.alerts().should_post<picker_log_alert>()
		&& !interesting_pieces.empty())
	{
		t.alerts().emplace_alert<picker_log_alert>(t.get_handle(), c.remote()
			, c.pid(), flags, interesting_pieces);
	}
	c.peer_log(peer_log_alert::info, "PIECE_PICKER"
		, "prefer_contiguous: %d picked: %d"
		, prefer_contiguous_blocks, int(interesting_pieces.size()));
#else
	TORRENT_UNUSED(flags);
#endif

	// if the number of pieces we have + the number of pieces
	// we're requesting from is less than the number of pieces
	// in the torrent, there are still some unrequested pieces
	// and we're not strictly speaking in end-game mode yet
	// also, if we already have at least one outstanding
	// request, we shouldn't pick any busy pieces either
	// in time critical mode, it's OK to request busy blocks
	bool const dont_pick_busy_blocks
		= ((ses.settings().get_bool(settings_pack::strict_end_game_mode)
			&& p.get_download_queue_size() < p.num_want_left())
		|| dq.size() + rq.size() > 0);

	// this is filled with an interesting piece
	// that some other peer is currently downloading
	piece_block busy_block = piece_block::invalid;

	for (piece_block const& pb : interesting_pieces)
	{
		// when not in contiguous mode, stop as soon as the budget runs out
		if (prefer_contiguous_blocks == 0 && num_requests <= 0) break;

		// number of other peers currently requesting this block
		int num_block_requests = p.num_peers(pb);
		if (num_block_requests > 0)
		{
			// have we picked enough pieces?
			if (num_requests <= 0) break;

			// this block is busy. This means all the following blocks
			// in the interesting_pieces list are busy as well, we might
			// as well just exit the loop
			if (dont_pick_busy_blocks) break;

			TORRENT_ASSERT(p.num_peers(pb) > 0);
			// remember one busy block as an end-game fallback (used below)
			busy_block = pb;
			continue;
		}

		TORRENT_ASSERT(p.num_peers(pb) == 0);

		// don't request pieces we already have in our request queue
		// This happens when pieces time out or the peer sends us
		// pieces we didn't request. Those aren't marked in the
		// piece picker, but we still keep track of them in the
		// download queue
		if (std::find_if(dq.begin(), dq.end(), aux::has_block(pb)) != dq.end()
			|| std::find_if(rq.begin(), rq.end(), aux::has_block(pb)) != rq.end())
		{
#if TORRENT_USE_ASSERTS
			auto const j = std::find_if(dq.begin(), dq.end(), aux::has_block(pb));
			if (j != dq.end()) TORRENT_ASSERT(j->timed_out || j->not_wanted);
#endif
#ifndef TORRENT_DISABLE_LOGGING
			c.peer_log(peer_log_alert::info, "PIECE_PICKER"
				, "not_picking: %d,%d already in queue"
				, static_cast<int>(pb.piece_index), pb.block_index);
#endif
			continue;
		}

		// ok, we found a piece that's not being downloaded
		// by somebody else. request it from this peer
		// and return
		// add_request may refuse the block; if so, just try the next one
		if (!c.add_request(pb, {})) continue;
		TORRENT_ASSERT(p.num_peers(pb) == 1);
		TORRENT_ASSERT(p.is_requested(pb));
		num_requests--;
	}

	// we have picked as many blocks as we should
	// we're done!
	if (num_requests <= 0)
	{
		// since we could pick as many blocks as we
		// requested without having to resort to picking
		// busy ones, we're not in end-game mode
		c.set_endgame(false);
		return true;
	}

	// we did not pick as many pieces as we wanted, because
	// there aren't enough. This means we're in end-game mode
	// as long as we have at least one request outstanding,
	// we shouldn't pick another piece
	// if we are attempting to download 'allowed' pieces
	// and can't find any, that doesn't count as end-game
	if (!c.has_peer_choked())
		c.set_endgame(true);

	// if we don't have any potential busy blocks to request
	// or if we already have outstanding requests, don't
	// pick a busy piece
	if (busy_block == piece_block::invalid
		|| dq.size() + rq.size() > 0)
	{
		return true;
	}

#if TORRENT_USE_ASSERTS
	// sanity: the busy piece must be fully accounted for in the picker
	piece_picker::downloading_piece st;
	p.piece_info(busy_block.piece_index, st);
	TORRENT_ASSERT(st.requested + st.finished + st.writing
		== p.blocks_in_piece(busy_block.piece_index));
#endif
	TORRENT_ASSERT(p.is_requested(busy_block));
	TORRENT_ASSERT(!p.is_downloaded(busy_block));
	TORRENT_ASSERT(!p.is_finished(busy_block));
	TORRENT_ASSERT(p.num_peers(busy_block) > 0);

	// end-game: duplicate-request the busy block from this peer too,
	// flagged as 'busy' so it can be accounted for separately
	c.add_request(busy_block, peer_connection::busy);
	return true;
}
306 | | |
307 | | } |