Line | Count | Source |
1 | | /* -*- Mode: C; indent-tabs-mode:t ; c-basic-offset:8 -*- */ |
2 | | /* |
3 | | * I/O functions for libusb |
4 | | * Copyright © 2007-2009 Daniel Drake <dsd@gentoo.org> |
5 | | * Copyright © 2001 Johannes Erdfelt <johannes@erdfelt.com> |
6 | | * Copyright © 2019-2022 Nathan Hjelm <hjelmn@cs.unm.edu> |
7 | | * Copyright © 2019-2022 Google LLC. All rights reserved. |
8 | | * |
9 | | * This library is free software; you can redistribute it and/or |
10 | | * modify it under the terms of the GNU Lesser General Public |
11 | | * License as published by the Free Software Foundation; either |
12 | | * version 2.1 of the License, or (at your option) any later version. |
13 | | * |
14 | | * This library is distributed in the hope that it will be useful, |
15 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
17 | | * Lesser General Public License for more details. |
18 | | * |
19 | | * You should have received a copy of the GNU Lesser General Public |
20 | | * License along with this library; if not, write to the Free Software |
21 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
22 | | */ |
23 | | |
24 | | #include "libusbi.h" |
25 | | |
26 | | /** |
27 | | * \page libusb_io Synchronous and asynchronous device I/O |
28 | | * |
29 | | * \section io_intro Introduction |
30 | | * |
31 | | * If you're using libusb in your application, you probably want to |
32 | | * perform I/O with devices - you want to perform USB data transfers. |
33 | | * |
34 | | * libusb offers two separate interfaces for device I/O. This page aims to |
35 | | * introduce the two in order to help you decide which one is more suitable |
36 | | * for your application. You can also choose to use both interfaces in your |
37 | | * application by considering each transfer on a case-by-case basis. |
38 | | * |
39 | | * Once you have read through the following discussion, you should consult the |
40 | | * detailed API documentation pages for the details: |
41 | | * - \ref libusb_syncio |
42 | | * - \ref libusb_asyncio |
43 | | * |
44 | | * \section theory Transfers at a logical level |
45 | | * |
46 | | * At a logical level, USB transfers typically happen in two parts. For |
47 | | * example, when reading data from an endpoint: |
48 | | * -# A request for data is sent to the device |
49 | | * -# Some time later, the incoming data is received by the host |
50 | | * |
51 | | * or when writing data to an endpoint: |
52 | | * |
53 | | * -# The data is sent to the device |
54 | | * -# Some time later, the host receives acknowledgement from the device that |
55 | | * the data has been transferred. |
56 | | * |
57 | | * There may be an indefinite delay between the two steps. Consider a |
58 | | * fictional USB input device with a button that the user can press. In order |
59 | | * to determine when the button is pressed, you would likely submit a request |
60 | | * to read data on a bulk or interrupt endpoint and wait for data to arrive. |
61 | | * Data will arrive when the button is pressed by the user, which is |
62 | | * potentially hours later. |
63 | | * |
64 | | * libusb offers both a synchronous and an asynchronous interface for performing |
65 | | * USB transfers. The main difference is that the synchronous interface |
66 | | * combines both steps indicated above into a single function call, whereas |
67 | | * the asynchronous interface separates them. |
68 | | * |
69 | | * \section sync The synchronous interface |
70 | | * |
71 | | * The synchronous I/O interface allows you to perform a USB transfer with |
72 | | * a single function call. When the function call returns, the transfer has |
73 | | * completed and you can parse the results. |
74 | | * |
75 | | * If you have used libusb-0.1 before, this I/O style will seem familiar to |
76 | | * you. libusb-0.1 only offered a synchronous interface. |
77 | | * |
78 | | * In our input device example, to read button presses you might write code |
79 | | * in the following style: |
80 | | \code |
81 | | unsigned char data[4]; |
82 | | int actual_length; |
83 | | int r = libusb_bulk_transfer(dev_handle, LIBUSB_ENDPOINT_IN, data, sizeof(data), &actual_length, 0); |
84 | | if (r == 0 && actual_length == sizeof(data)) { |
85 | | // results of the transaction can now be found in the data buffer |
86 | | // parse them here and report button press |
87 | | } else { |
88 | | error(); |
89 | | } |
90 | | \endcode |
91 | | * |
92 | | * The main advantage of this model is simplicity: you did everything with |
93 | | * a single simple function call. |
94 | | * |
95 | | * However, this interface has its limitations. Your application will sleep |
96 | | * inside libusb_bulk_transfer() until the transaction has completed. If it |
97 | | * takes the user 3 hours to press the button, your application will be |
98 | | * sleeping for that long. Execution will be tied up inside the library - |
99 | | * the entire thread will be useless for that duration. |
100 | | * |
101 | | * Another issue is that by tying up the thread with that single transaction |
102 | | * there is no possibility of performing I/O with multiple endpoints and/or |
103 | | * multiple devices simultaneously, unless you resort to creating one thread |
104 | | * per transaction. |
105 | | * |
106 | | * Additionally, there is no opportunity to cancel the transfer after the |
107 | | * request has been submitted. |
108 | | * |
109 | | * For details on how to use the synchronous API, see the |
110 | | * \ref libusb_syncio "synchronous I/O API documentation" pages. |
111 | | * |
112 | | * \section async The asynchronous interface |
113 | | * |
114 | | * Asynchronous I/O is the most significant new feature in libusb-1.0. |
115 | | * Although it is a more complex interface, it solves all the issues detailed |
116 | | * above. |
117 | | * |
118 | | * Instead of providing functions that block until the I/O has completed, |
119 | | * libusb's asynchronous interface presents non-blocking functions which |
120 | | * begin a transfer and then return immediately. Your application passes a |
121 | | * callback function pointer to this non-blocking function, which libusb will |
122 | | * call with the results of the transaction when it has completed. |
123 | | * |
124 | | * Transfers which have been submitted through the non-blocking functions |
125 | | * can be cancelled with a separate function call. |
126 | | * |
127 | | * The non-blocking nature of this interface allows you to be simultaneously |
128 | | * performing I/O to multiple endpoints on multiple devices, without having |
129 | | * to use threads. |
130 | | * |
131 | | * This added flexibility does come with some complications though: |
132 | | * - In the interest of being a lightweight library, libusb does not create |
133 | | * threads and can only operate when your application is calling into it. Your |
134 | | * application must call into libusb from its main loop when events are ready |
135 | | * to be handled, or you must use some other scheme to allow libusb to |
136 | | * undertake whatever work needs to be done. |
137 | | * - libusb also needs to be called into at certain fixed points in time in |
138 | | * order to accurately handle transfer timeouts. |
139 | | * - Memory handling becomes more complex. You cannot use stack memory unless |
140 | | * the function with that stack is guaranteed not to return until the transfer |
141 | | * callback has finished executing. |
142 | | * - You generally lose some linearity from your code flow because submitting |
143 | | * the transfer request is done in a separate function from where the transfer |
144 | | * results are handled. This becomes particularly obvious when you want to |
145 | | * submit a second transfer based on the results of an earlier transfer. |
146 | | * |
147 | | * Internally, libusb's synchronous interface is expressed in terms of function |
148 | | * calls to the asynchronous interface. |
149 | | * |
150 | | * For details on how to use the asynchronous API, see the |
151 | | * \ref libusb_asyncio "asynchronous I/O API" documentation pages. |
152 | | */ |
153 | | |
154 | | |
155 | | /** |
156 | | * \page libusb_packetoverflow Packets and overflows |
157 | | * |
158 | | * \section packets Packet abstraction |
159 | | * |
160 | | * The USB specifications describe how data is transmitted in packets, with |
161 | | * constraints on packet size defined by endpoint descriptors. The host must |
162 | | * not send data payloads larger than the endpoint's maximum packet size. |
163 | | * |
164 | | * libusb and the underlying OS abstract out the packet concept, allowing you |
165 | | * to request transfers of any size. Internally, the request will be divided |
166 | | * up into correctly-sized packets. You do not have to be concerned with |
167 | | * packet sizes, but there is one exception when considering overflows. |
168 | | * |
169 | | * \section overflow Bulk/interrupt transfer overflows |
170 | | * |
171 | | * When requesting data on a bulk endpoint, libusb requires you to supply a |
172 | | * buffer and the maximum number of bytes of data that libusb can put in that |
173 | | * buffer. However, the size of the buffer is not communicated to the device - |
174 | | * the device is just asked to send any amount of data. |
175 | | * |
176 | | * There is no problem if the device sends an amount of data that is less than |
177 | | * or equal to the buffer size. libusb reports this condition to you through |
178 | | * the \ref libusb_transfer::actual_length "libusb_transfer.actual_length" |
179 | | * field. |
180 | | * |
181 | | * Problems may occur if the device attempts to send more data than can fit in |
182 | | * the buffer. libusb reports LIBUSB_TRANSFER_OVERFLOW for this condition but |
183 | | * other behaviour is largely undefined: actual_length may or may not be |
184 | | * accurate, the chunk of data that can fit in the buffer (before overflow) |
185 | | * may or may not have been transferred. |
186 | | * |
187 | | * Overflows are nasty, but can be avoided. Even though you were told to |
188 | | * ignore packets above, think about the lower level details: each transfer is |
189 | | * split into packets (typically small, with a maximum size of 512 bytes). |
190 | | * Overflows can only happen if the final packet in an incoming data transfer |
191 | | * is smaller than the actual packet that the device wants to transfer. |
192 | | * Therefore, you will never see an overflow if your transfer buffer size is a |
193 | | * multiple of the endpoint's packet size: the final packet will either |
194 | | * fill up completely or will be only partially filled. |
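 *
 * For example, a minimal sketch of this sizing rule (not part of the
 * original text; the bulk IN endpoint address 0x81 and the open
 * <tt>dev_handle</tt> are illustrative assumptions):
 *
\code
// query wMaxPacketSize for the (hypothetical) bulk IN endpoint 0x81
int mps = libusb_get_max_packet_size(libusb_get_device(dev_handle), 0x81);
if (mps > 0) {
	int len = 8 * mps; // a whole number of packets, so the final packet cannot overflow
	unsigned char *buf = malloc(len);
	int actual_length;
	int r = libusb_bulk_transfer(dev_handle, 0x81, buf, len, &actual_length, 5000);
	// r == 0: buf holds actual_length bytes; LIBUSB_TRANSFER_OVERFLOW cannot occur
	free(buf);
}
\endcode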
195 | | */ |
196 | | |
197 | | /** |
198 | | * @defgroup libusb_asyncio Asynchronous device I/O |
199 | | * |
200 | | * This page details libusb's asynchronous (non-blocking) API for USB device |
201 | | * I/O. This interface is very powerful but is also quite complex - you will |
202 | | * need to read this page carefully to understand the necessary considerations |
203 | | * and issues surrounding use of this interface. Simplistic applications |
204 | | * may wish to consider the \ref libusb_syncio "synchronous I/O API" instead. |
205 | | * |
206 | | * The asynchronous interface is built around the idea of separating transfer |
207 | | * submission and handling of transfer completion (the synchronous model |
208 | | * combines both of these into one). There may be a long delay between |
209 | | * submission and completion; however, the asynchronous submission function |
210 | | * is non-blocking, so it will return control to your application during that |
211 | | * potentially long delay. |
212 | | * |
213 | | * \section asyncabstraction Transfer abstraction |
214 | | * |
215 | | * For the asynchronous I/O, libusb implements the concept of a generic |
216 | | * transfer entity for all types of I/O (control, bulk, interrupt, |
217 | | * isochronous). The generic transfer object must be treated slightly |
218 | | * differently depending on which type of I/O you are performing with it. |
219 | | * |
220 | | * This is represented by the public libusb_transfer structure type. |
221 | | * |
222 | | * \section asynctrf Asynchronous transfers |
223 | | * |
224 | | * We can view asynchronous I/O as a 5 step process: |
225 | | * -# <b>Allocation</b>: allocate a libusb_transfer |
226 | | * -# <b>Filling</b>: populate the libusb_transfer instance with information |
227 | | * about the transfer you wish to perform |
228 | | * -# <b>Submission</b>: ask libusb to submit the transfer |
229 | | * -# <b>Completion handling</b>: examine transfer results in the |
230 | | * libusb_transfer structure |
231 | | * -# <b>Deallocation</b>: clean up resources |
232 | | * |
233 | | * |
234 | | * \subsection asyncalloc Allocation |
235 | | * |
236 | | * This step involves allocating memory for a USB transfer. This is the |
237 | | * generic transfer object mentioned above. At this stage, the transfer |
238 | | * is "blank" with no details about what type of I/O it will be used for. |
239 | | * |
240 | | * Allocation is done with the libusb_alloc_transfer() function. You must use |
241 | | * this function rather than allocating your own transfers. |
242 | | * |
243 | | * \subsection asyncfill Filling |
244 | | * |
245 | | * This step is where you take a previously allocated transfer and fill it |
246 | | * with information to determine the message type and direction, data buffer, |
247 | | * callback function, etc. |
248 | | * |
249 | | * You can either fill the required fields yourself or you can use the |
250 | | * helper functions: libusb_fill_control_transfer(), libusb_fill_bulk_transfer() |
251 | | * and libusb_fill_interrupt_transfer(). |
252 | | * |
253 | | * \subsection asyncsubmit Submission |
254 | | * |
255 | | * When you have allocated a transfer and filled it, you can submit it using |
256 | | * libusb_submit_transfer(). This function returns immediately but can be |
257 | | * regarded as firing off the I/O request in the background. |
258 | | * |
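 * Putting the first three steps together, a minimal sketch might look like
 * the following (illustrative only; <tt>dev_handle</tt>, the endpoint
 * address 0x81 and the callback <tt>my_cb</tt> are assumptions, not part of
 * the original text):
 *
\code
unsigned char *buf = malloc(64);
struct libusb_transfer *transfer = libusb_alloc_transfer(0); // allocation

// filling: a bulk IN transfer with a 5 second timeout
libusb_fill_bulk_transfer(transfer, dev_handle, 0x81, buf, 64, my_cb, NULL, 5000);

// submission: returns immediately; my_cb() is invoked later from an event handler
int r = libusb_submit_transfer(transfer);
if (r < 0) {
	libusb_free_transfer(transfer);
	free(buf);
}
\endcode
 *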
259 | | * \subsection asynccomplete Completion handling |
260 | | * |
261 | | * After a transfer has been submitted, one of four things can happen to it: |
262 | | * |
263 | | * - The transfer completes (i.e. some data was transferred) |
264 | | * - The transfer has a timeout and the timeout expires before all data is |
265 | | * transferred |
266 | | * - The transfer fails due to an error |
267 | | * - The transfer is cancelled |
268 | | * |
269 | | * Each of these will cause the user-specified transfer callback function to |
270 | | * be invoked. It is up to the callback function to determine which of the |
271 | | * above actually happened and to act accordingly. |
272 | | * |
273 | | * The user-specified callback is passed a pointer to the libusb_transfer |
274 | | * structure which was used to setup and submit the transfer. At completion |
275 | | * time, libusb has populated this structure with results of the transfer: |
276 | | * success or failure reason, number of bytes of data transferred, etc. See |
277 | | * the libusb_transfer structure documentation for more information. |
278 | | * |
279 | | * <b>Important Note</b>: The user-specified callback is called from an event |
280 | | * handling context. It is therefore important that no calls are made into |
281 | | * libusb that will attempt to perform any event handling. Examples of such |
282 | | * functions are any listed in the \ref libusb_syncio "synchronous API" and any of |
283 | | * the blocking functions that retrieve \ref libusb_desc "USB descriptors". |
284 | | * |
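 * A callback such as the <tt>my_cb()</tt> used in the earlier sketch might
 * look like this (again illustrative; <tt>process_data()</tt> is a
 * placeholder, not a libusb function):
 *
\code
static void my_cb(struct libusb_transfer *transfer)
{
	if (transfer->status == LIBUSB_TRANSFER_COMPLETED) {
		// transfer->buffer now holds transfer->actual_length valid bytes
		process_data(transfer->buffer, transfer->actual_length);
	} else {
		// timed out, cancelled, stalled, device disconnected, ...
		fprintf(stderr, "transfer failed: %d\n", transfer->status);
	}
	// note: do not call event-handling or synchronous libusb functions here
}
\endcode
 *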
285 | | * \subsection asyncdealloc Deallocation |
286 | | * |
287 | | * When a transfer has completed (i.e. the callback function has been invoked), |
288 | | * you are advised to free the transfer (unless you wish to resubmit it, see |
289 | | * below). Transfers are deallocated with libusb_free_transfer(). |
290 | | * |
291 | | * It is undefined behaviour to free a transfer which has not completed. |
292 | | * |
293 | | * \section asyncresubmit Resubmission |
294 | | * |
295 | | * You may be wondering why allocation, filling, and submission are all |
296 | | * separated above where they could reasonably be combined into a single |
297 | | * operation. |
298 | | * |
299 | | * The reason for separation is to allow you to resubmit transfers without |
300 | | * having to allocate new ones every time. This is especially useful for |
301 | | * common situations dealing with interrupt endpoints - you allocate one |
302 | | * transfer, fill and submit it, and when it returns with results you just |
303 | | * resubmit it for the next interrupt. |
304 | | * |
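 * For example (a sketch; <tt>parse_report()</tt> is an illustrative
 * placeholder), an interrupt callback that resubmits itself could look like:
 *
\code
static void irq_cb(struct libusb_transfer *transfer)
{
	if (transfer->status == LIBUSB_TRANSFER_COMPLETED)
		parse_report(transfer->buffer, transfer->actual_length);

	// reuse the same transfer object for the next interrupt
	if (libusb_submit_transfer(transfer) < 0)
		libusb_free_transfer(transfer);
}
\endcode
 *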
305 | | * \section asynccancel Cancellation |
306 | | * |
307 | | * Another advantage of using the asynchronous interface is that you have |
308 | | * the ability to cancel transfers which have not yet completed. This is |
309 | | * done by calling the libusb_cancel_transfer() function. |
310 | | * |
311 | | * libusb_cancel_transfer() is asynchronous/non-blocking in itself. When the |
312 | | * cancellation actually completes, the transfer's callback function will |
313 | | * be invoked, and the callback function should check the transfer status to |
314 | | * determine that it was cancelled. |
315 | | * |
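 * For example (sketch):
 *
\code
// request cancellation; this call returns before the cancellation completes
int r = libusb_cancel_transfer(transfer);
if (r == LIBUSB_ERROR_NOT_FOUND) {
	// the transfer is not in progress, already complete, or already cancelled
}
// once cancellation completes, the callback runs with
// transfer->status == LIBUSB_TRANSFER_CANCELLED
\endcode
 *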
316 | | * On macOS and iOS it is not possible to cancel a single transfer. In this |
317 | | * case cancelling one transfer on an endpoint will cause all transfers on |
318 | | * that endpoint to be cancelled. |
319 | | * |
320 | | * Freeing the transfer after it has been cancelled but before cancellation |
321 | | * has completed will result in undefined behaviour. |
322 | | * |
323 | | * \attention |
324 | | * When a transfer is cancelled, some of the data may have been transferred. |
325 | | * libusb will communicate this to you in the transfer callback. |
326 | | * <b>Do not assume that no data was transferred.</b> |
327 | | * |
328 | | * \section asyncpartial Partial data transfer resulting from cancellation |
329 | | * |
330 | | * As noted above, some of the data may have been transferred at the time a |
331 | | * transfer is cancelled. It is helpful to see how this is possible if you |
332 | | * consider a bulk transfer to an endpoint with a packet size of 64 bytes. |
333 | | * Supposing you submit a 512-byte transfer to this endpoint, the operating |
334 | | * system will divide this transfer up into 8 separate 64-byte frames that the |
335 | | * host controller will schedule for the device to transfer data. If this |
336 | | * transfer is cancelled while the device is transferring data, a subset of |
337 | | * these frames may be descheduled from the host controller before the device |
338 | | * has the opportunity to finish transferring data to the host. |
339 | | * |
340 | | * What your application should do with a partial data transfer is a policy |
341 | | * decision; there is no single answer that satisfies the needs of every |
342 | | * application. The data that was successfully transferred should be |
343 | | * considered entirely valid, but your application must decide what to do with |
344 | | * the remaining data that was not transferred. Some possible actions to take |
345 | | * are: |
346 | | * - Resubmit another transfer for the remaining data, possibly with a shorter |
347 | | * timeout |
348 | | * - Discard the partially transferred data and report an error |
349 | | * |
350 | | * \section asynctimeout Timeouts |
351 | | * |
352 | | * When a transfer times out, libusb internally notes this and attempts to |
353 | | * cancel the transfer. As noted in \ref asyncpartial "above", it is possible |
354 | | * that some of the data may actually have been transferred. Your application |
355 | | * should <b>always</b> check how much data was actually transferred once the |
356 | | * transfer completes and act accordingly. |
357 | | * |
358 | | * \section bulk_overflows Overflows on device-to-host bulk/interrupt endpoints |
359 | | * |
360 | | * If your device does not have predictable transfer sizes (or it misbehaves), |
361 | | * your application may submit a request for data on an IN endpoint which is |
362 | | * smaller than the data that the device wishes to send. In some circumstances |
363 | | * this will cause an overflow, which is a nasty condition to deal with. See |
364 | | * the \ref libusb_packetoverflow page for discussion. |
365 | | * |
366 | | * \section asyncctrl Considerations for control transfers |
367 | | * |
368 | | * The <tt>libusb_transfer</tt> structure is generic and hence does not |
369 | | * include specific fields for the control-specific setup packet structure. |
370 | | * |
371 | | * In order to perform a control transfer, you must place the 8-byte setup |
372 | | * packet at the start of the data buffer. To simplify this, you could |
373 | | * cast the buffer pointer to type struct libusb_control_setup, or you can |
374 | | * use the helper function libusb_fill_control_setup(). |
375 | | * |
376 | | * The wLength field placed in the setup packet must be the length of the |
377 | | * payload that follows the setup packet (or the expected maximum number of |
378 | | * bytes to receive). However, |
379 | | * the length field of the libusb_transfer object must be the length of |
380 | | * the data buffer - i.e. it should be wLength <em>plus</em> the size of |
381 | | * the setup packet (LIBUSB_CONTROL_SETUP_SIZE). |
382 | | * |
383 | | * If you use the helper functions, this is simplified for you (see the sketch after these steps): |
384 | | * -# Allocate a buffer of size LIBUSB_CONTROL_SETUP_SIZE plus the size of the |
385 | | * data you are sending/requesting. |
386 | | * -# Call libusb_fill_control_setup() on the data buffer, using the transfer |
387 | | * request size as the wLength value (i.e. do not include the extra space you |
388 | | * allocated for the control setup). |
389 | | * -# If this is a host-to-device transfer, place the data to be transferred |
390 | | * in the data buffer, starting at offset LIBUSB_CONTROL_SETUP_SIZE. |
391 | | * -# Call libusb_fill_control_transfer() to associate the data buffer with |
392 | | * the transfer (and to set the remaining details such as callback and timeout). |
393 | | * - Note that there is no parameter to set the length field of the transfer. |
394 | | * The length is automatically inferred from the wLength field of the setup |
395 | | * packet. |
396 | | * -# Submit the transfer. |
397 | | * |
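 * A compact sketch of these steps (illustrative; the vendor request values
 * and the <tt>ctrl_cb</tt> callback are assumptions, not part of the
 * original text):
 *
\code
uint16_t wLength = 4; // payload size, excluding the 8-byte setup packet
unsigned char *buf = malloc(LIBUSB_CONTROL_SETUP_SIZE + wLength);
struct libusb_transfer *transfer = libusb_alloc_transfer(0);

// device-to-host vendor request (bRequest 0x04, wValue 0x01, wIndex 0)
libusb_fill_control_setup(buf,
	LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_ENDPOINT_IN, 0x04, 0x01, 0, wLength);

// transfer->length is set from wLength plus LIBUSB_CONTROL_SETUP_SIZE
libusb_fill_control_transfer(transfer, dev_handle, buf, ctrl_cb, NULL, 1000);
libusb_submit_transfer(transfer);

// in ctrl_cb(): received data starts at libusb_control_transfer_get_data(transfer)
\endcode
 *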
398 | | * The multi-byte control setup fields (wValue, wIndex and wLength) must |
399 | | * be given in little-endian byte order (the endianness of the USB bus). |
400 | | * Endianness conversion is transparently handled by |
401 | | * libusb_fill_control_setup() which is documented to accept host-endian |
402 | | * values. |
403 | | * |
404 | | * Further considerations are needed when handling transfer completion in |
405 | | * your callback function: |
406 | | * - As you might expect, the setup packet will still be sitting at the start |
407 | | * of the data buffer. |
408 | | * - If this was a device-to-host transfer, the received data will be sitting |
409 | | * at offset LIBUSB_CONTROL_SETUP_SIZE into the buffer. |
410 | | * - The actual_length field of the transfer structure is relative to the |
411 | | * wLength of the setup packet, rather than the size of the data buffer. So, |
412 | | * if your wLength was 4 and your transfer's <tt>length</tt> was 12, then you |
413 | | * should expect an <tt>actual_length</tt> of 4 to indicate that the data was |
414 | | * transferred in its entirety. |
415 | | * |
416 | | * To simplify parsing of setup packets and obtaining the data from the |
417 | | * correct offset, you may wish to use the libusb_control_transfer_get_data() |
418 | | * and libusb_control_transfer_get_setup() functions within your transfer |
419 | | * callback. |
420 | | * |
421 | | * Even though control endpoints do not halt, a completed control transfer |
422 | | * may have a LIBUSB_TRANSFER_STALL status code. This indicates the control |
423 | | * request was not supported. |
424 | | * |
425 | | * \section asyncintr Considerations for interrupt transfers |
426 | | * |
427 | | * All interrupt transfers are performed using the polling interval presented |
428 | | * by the bInterval value of the endpoint descriptor. |
429 | | * |
430 | | * \section asynciso Considerations for isochronous transfers |
431 | | * |
432 | | * Isochronous transfers are more complicated than transfers to |
433 | | * non-isochronous endpoints. |
434 | | * |
435 | | * To perform I/O to an isochronous endpoint, allocate the transfer by calling |
436 | | * libusb_alloc_transfer() with an appropriate number of isochronous packets. |
437 | | * |
438 | | * During filling, set \ref libusb_transfer::type "type" to |
439 | | * \ref libusb_transfer_type::LIBUSB_TRANSFER_TYPE_ISOCHRONOUS |
440 | | * "LIBUSB_TRANSFER_TYPE_ISOCHRONOUS", and set |
441 | | * \ref libusb_transfer::num_iso_packets "num_iso_packets" to a value less than |
442 | | * or equal to the number of packets you requested during allocation. |
443 | | * libusb_alloc_transfer() does not set either of these fields for you, given |
444 | | * that you might not even use the transfer on an isochronous endpoint. |
445 | | * |
446 | | * Next, populate the length field for the first num_iso_packets entries in |
447 | | * the \ref libusb_transfer::iso_packet_desc "iso_packet_desc" array. Section |
448 | | * 5.6.3 of the USB 2.0 specification describes how the maximum isochronous |
449 | | * packet length is determined by the wMaxPacketSize field in the endpoint |
450 | | * descriptor. |
451 | | * Two functions can help you here: |
452 | | * |
453 | | * - libusb_get_max_iso_packet_size() is an easy way to determine the max |
454 | | * packet size for an isochronous endpoint. Note that the maximum packet |
455 | | * size is actually the maximum number of bytes that can be transmitted in |
456 | | * a single microframe, therefore this function multiplies the maximum number |
457 | | * of bytes per transaction by the number of transaction opportunities per |
458 | | * microframe. |
459 | | * - libusb_set_iso_packet_lengths() assigns the same length to all packets |
460 | | * within a transfer, which is usually what you want. |
461 | | * |
462 | | * For outgoing transfers, you'll obviously fill the buffer and populate the |
463 | | * packet descriptors in the hope that all the data gets transferred. For incoming |
464 | | * transfers, you must ensure the buffer has sufficient capacity for |
465 | | * the situation where all packets transfer the full amount of requested data. |
466 | | * |
467 | | * Completion handling requires some extra consideration. The |
468 | | * \ref libusb_transfer::actual_length "actual_length" field of the transfer |
469 | | * is meaningless and should not be examined; instead you must refer to the |
470 | | * \ref libusb_iso_packet_descriptor::actual_length "actual_length" field of |
471 | | * each individual packet. |
472 | | * |
473 | | * The \ref libusb_transfer::status "status" field of the transfer is also a |
474 | | * little misleading: |
475 | | * - If the packets were submitted and the isochronous data microframes |
476 | | * completed normally, status will have value |
477 | | * \ref libusb_transfer_status::LIBUSB_TRANSFER_COMPLETED |
478 | | * "LIBUSB_TRANSFER_COMPLETED". Note that bus errors and software-incurred |
479 | | * delays are not counted as transfer errors; the transfer.status field may |
480 | | * indicate COMPLETED even if some or all of the packets failed. Refer to |
481 | | * the \ref libusb_iso_packet_descriptor::status "status" field of each |
482 | | * individual packet to determine packet failures. |
483 | | * - The status field will have value |
484 | | * \ref libusb_transfer_status::LIBUSB_TRANSFER_ERROR |
485 | | * "LIBUSB_TRANSFER_ERROR" only when serious errors were encountered. |
486 | | * - Other transfer status codes occur with normal behaviour. |
487 | | * |
488 | | * The data for each packet will be found at an offset into the buffer that |
489 | | * can be calculated as if each prior packet completed in full. The |
490 | | * libusb_get_iso_packet_buffer() and libusb_get_iso_packet_buffer_simple() |
491 | | * functions may help you here. |
492 | | * |
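 * Putting this together, a rough sketch (illustrative; the endpoint address
 * 0x81, the packet count, <tt>start_iso_stream()</tt> and the
 * <tt>process()</tt> helper are assumptions, with error handling omitted)
 * might be:
 *
\code
static void iso_cb(struct libusb_transfer *transfer)
{
	// per-packet results; transfer->actual_length is not meaningful here
	for (int i = 0; i < transfer->num_iso_packets; i++) {
		struct libusb_iso_packet_descriptor *pkt = &transfer->iso_packet_desc[i];
		if (pkt->status == LIBUSB_TRANSFER_COMPLETED)
			process(libusb_get_iso_packet_buffer_simple(transfer, i),
				pkt->actual_length);
	}
}

void start_iso_stream(libusb_device_handle *dev_handle)
{
	int num_pkts = 8;
	int pkt_len = libusb_get_max_iso_packet_size(libusb_get_device(dev_handle), 0x81);
	unsigned char *buf = malloc(num_pkts * pkt_len);
	struct libusb_transfer *transfer = libusb_alloc_transfer(num_pkts);

	// the helper sets type, num_iso_packets and the other generic fields
	libusb_fill_iso_transfer(transfer, dev_handle, 0x81, buf, num_pkts * pkt_len,
		num_pkts, iso_cb, NULL, 1000);
	libusb_set_iso_packet_lengths(transfer, pkt_len);
	libusb_submit_transfer(transfer);
}
\endcode
 *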
493 | | * \section asynclimits Transfer length limitations |
494 | | * |
495 | | * Some operating systems may impose limits on the length of the transfer data |
496 | | * buffer or, in the case of isochronous transfers, the length of individual |
497 | | * isochronous packets. Such limits can be difficult for libusb to detect, so |
498 | | * in most cases the library will simply try to submit the transfer as set up |
499 | | * by you. If the transfer fails to submit because it is too large, |
500 | | * libusb_submit_transfer() will return |
501 | | * \ref libusb_error::LIBUSB_ERROR_INVALID_PARAM "LIBUSB_ERROR_INVALID_PARAM". |
502 | | * |
503 | | * The following are known limits for control transfer lengths. Note that this |
504 | | * length includes the 8-byte setup packet. |
505 | | * - Linux (4,096 bytes) |
506 | | * - Windows (4,096 bytes) |
507 | | * |
508 | | * \section asyncmem Memory caveats |
509 | | * |
510 | | * In most circumstances, it is not safe to use stack memory for transfer |
511 | | * buffers. This is because the function that fired off the asynchronous |
512 | | * transfer may return before libusb has finished using the buffer, and when |
513 | | * the function returns its stack gets destroyed. This is true for both |
514 | | * host-to-device and device-to-host transfers. |
515 | | * |
516 | | * The only case in which it is safe to use stack memory is where you can |
517 | | * guarantee that the function owning the stack space for the buffer does not |
518 | | * return until after the transfer's callback function has completed. In every |
519 | | * other case, you need to use heap memory instead. |
520 | | * |
521 | | * \section asyncflags Fine control |
522 | | * |
523 | | * Through using this asynchronous interface, you may find yourself repeating |
524 | | * a few simple operations many times. You can apply a bitwise OR of certain |
525 | | * flags to a transfer to simplify certain things (see the example after this list): |
526 | | * - \ref libusb_transfer_flags::LIBUSB_TRANSFER_SHORT_NOT_OK |
527 | | * "LIBUSB_TRANSFER_SHORT_NOT_OK" results in transfers which transferred |
528 | | * less than the requested amount of data being marked with status |
529 | | * \ref libusb_transfer_status::LIBUSB_TRANSFER_ERROR "LIBUSB_TRANSFER_ERROR" |
530 | | * (they would normally be regarded as COMPLETED) |
531 | | * - \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_BUFFER |
532 | | * "LIBUSB_TRANSFER_FREE_BUFFER" allows you to ask libusb to free the transfer |
533 | | * buffer when freeing the transfer. |
534 | | * - \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_TRANSFER |
535 | | * "LIBUSB_TRANSFER_FREE_TRANSFER" causes libusb to automatically free the |
536 | | * transfer after the transfer callback returns. |
537 | | * |
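 * For example (sketch), a fire-and-forget transfer could combine the flags
 * so that libusb frees both the buffer and the transfer once the callback
 * has returned, and flags short transfers as errors:
 *
\code
transfer->flags = LIBUSB_TRANSFER_SHORT_NOT_OK | LIBUSB_TRANSFER_FREE_BUFFER |
	LIBUSB_TRANSFER_FREE_TRANSFER;
libusb_submit_transfer(transfer);
\endcode
 *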
538 | | * \section asyncevent Event handling |
539 | | * |
540 | | * An asynchronous model requires that libusb perform work at various |
541 | | * points in time - namely processing the results of previously-submitted |
542 | | * transfers and invoking the user-supplied callback function. |
543 | | * |
544 | | * This gives rise to the libusb_handle_events() function which your |
545 | | * application must call into when libusb has work to do. This gives libusb |
546 | | * the opportunity to reap pending transfers, invoke callbacks, etc. |
547 | | * |
548 | | * \note |
549 | | * All event handling is performed by whichever thread calls the |
550 | | * libusb_handle_events() function. libusb does not invoke any callbacks |
551 | | * outside of this context. Consequently, any callbacks will be run on the |
552 | | * thread that calls the libusb_handle_events() function. |
553 | | * |
554 | | * When to call the libusb_handle_events() function depends on which model |
555 | | * your application decides to use. The two different approaches are: |
556 | | * |
557 | | * -# Repeatedly call libusb_handle_events() in blocking mode from a dedicated |
558 | | * thread. |
559 | | * -# Integrate libusb with your application's main event loop. libusb |
560 | | * exposes a set of file descriptors which allow you to do this. |
561 | | * |
562 | | * The first approach has the big advantage that it will also work on Windows |
563 | | * where libusb's poll API for select()/poll() integration is not available. So |
564 | | * if you want to support Windows and use the async API, you must use this |
565 | | * approach, see the \ref eventthread "Using an event handling thread" section |
566 | | * below for details. |
567 | | * |
568 | | * If you prefer a single threaded approach with a single central event loop, |
569 | | * see the \ref libusb_poll "polling and timing" section for how to integrate libusb |
570 | | * into your application's main event loop. |
571 | | * |
572 | | * \section eventthread Using an event handling thread |
573 | | * |
574 | | * Let's begin by stating the obvious: if you're going to use a separate |
575 | | * thread for libusb event handling, your callback functions MUST be |
576 | | * thread-safe. |
577 | | * |
578 | | * Other than that, doing event handling from a separate thread is mostly |
579 | | * simple. You can use an event thread function as follows: |
580 | | \code |
581 | | void *event_thread_func(void *ctx) |
582 | | { |
583 | | while (event_thread_run) |
584 | | libusb_handle_events(ctx); |
585 | | |
586 | | return NULL; |
587 | | } |
588 | | \endcode |
589 | | * |
590 | | * There is one caveat though, stopping this thread requires setting the |
591 | | * event_thread_run variable to 0, and after that libusb_handle_events() needs |
592 | | * to return control to event_thread_func. But unless some event happens, |
593 | | * libusb_handle_events() will not return. |
594 | | * |
595 | | * There are two different ways of dealing with this, depending on whether your |
596 | | * application uses libusb's \ref libusb_hotplug "hotplug" support or not. |
597 | | * |
598 | | * Applications which do not use hotplug support should not start the event |
599 | | * thread until after their first call to libusb_open(), and should stop the |
600 | | * thread when closing the last open device as follows: |
601 | | \code |
602 | | void my_close_handle(libusb_device_handle *dev_handle) |
603 | | { |
604 | | if (open_devs == 1) |
605 | | event_thread_run = 0; |
606 | | |
607 | | libusb_close(dev_handle); // This wakes up libusb_handle_events() |
608 | | |
609 | | if (open_devs == 1) |
610 | | pthread_join(event_thread, NULL); |
611 | | |
612 | | open_devs--; |
613 | | } |
614 | | \endcode |
615 | | * |
616 | | * Applications using hotplug support should start the thread at program init, |
617 | | * after having successfully called libusb_hotplug_register_callback(), and |
618 | | * should stop the thread at program exit as follows: |
619 | | \code |
620 | | void my_libusb_exit(void) |
621 | | { |
622 | | event_thread_run = 0; |
623 | | libusb_hotplug_deregister_callback(ctx, hotplug_cb_handle); // This wakes up libusb_handle_events() |
624 | | pthread_join(event_thread, NULL); |
625 | | libusb_exit(ctx); |
626 | | } |
627 | | \endcode |
628 | | */ |
629 | | |
630 | | /** |
631 | | * @defgroup libusb_poll Polling and timing |
632 | | * |
633 | | * This page documents libusb's functions for polling events and timing. |
634 | | * These functions are only necessary for users of the |
635 | | * \ref libusb_asyncio "asynchronous API". If you are only using the simpler |
636 | | * \ref libusb_syncio "synchronous API" then you do not need to ever call these |
637 | | * functions. |
638 | | * |
639 | | * The justification for the functionality described here has already been |
640 | | * discussed in the \ref asyncevent "event handling" section of the |
641 | | * asynchronous API documentation. In summary, libusb does not create internal |
642 | | * threads for event processing and hence relies on your application calling |
643 | | * into libusb at certain points in time so that pending events can be handled. |
644 | | * |
645 | | * Your main loop is probably already calling poll() or select() or a |
646 | | * variant on a set of file descriptors for other event sources (e.g. keyboard |
647 | | * button presses, mouse movements, network sockets, etc). You then add |
648 | | * libusb's file descriptors to your poll()/select() calls, and when activity |
649 | | * is detected on such descriptors you know it is time to call |
650 | | * libusb_handle_events(). |
651 | | * |
652 | | * There is one final event handling complication. libusb supports |
653 | | * asynchronous transfers which time out after a specified time period. |
654 | | * |
655 | | * On some platforms a timerfd is used, so the timeout handling is just another |
656 | | * fd, on other platforms this requires that libusb is called into at or after |
657 | | * the timeout to handle it. So, in addition to considering libusb's file |
658 | | * descriptors in your main event loop, you must also consider that libusb |
659 | | * sometimes needs to be called into at fixed points in time even when there |
660 | | * is no file descriptor activity, see \ref polltime details. |
661 | | * |
662 | | * In order to know precisely when libusb needs to be called into, libusb |
663 | | * offers you a set of pollable file descriptors and information about when |
664 | | * the next timeout expires. |
665 | | * |
666 | | * If you are using the asynchronous I/O API, you must take one of the two |
667 | | * following options, otherwise your I/O will not complete. |
668 | | * |
669 | | * \section pollsimple The simple option |
670 | | * |
671 | | * If your application revolves solely around libusb and does not need to |
672 | | * handle other event sources, you can have a program structure as follows: |
673 | | \code |
674 | | // initialize libusb |
675 | | // find and open device |
676 | | // maybe fire off some initial async I/O |
677 | | |
678 | | while (user_has_not_requested_exit) |
679 | | libusb_handle_events(ctx); |
680 | | |
681 | | // clean up and exit |
682 | | \endcode |
683 | | * |
684 | | * With such a simple main loop, you do not have to worry about managing |
685 | | * sets of file descriptors or handling timeouts. libusb_handle_events() will |
686 | | * handle those details internally. |
687 | | * |
688 | | * \section libusb_pollmain The more advanced option |
689 | | * |
690 | | * \note This functionality is currently only available on Unix-like platforms. |
691 | | * On Windows, libusb_get_pollfds() simply returns NULL. Applications which |
692 | | * want to support Windows are advised to use an \ref eventthread |
693 | | * "event handling thread" instead. |
694 | | * |
695 | | * In more advanced applications, you will already have a main loop which |
696 | | * is monitoring other event sources: network sockets, X11 events, mouse |
697 | | * movements, etc. Through exposing a set of file descriptors, libusb is |
698 | | * designed to cleanly integrate into such main loops. |
699 | | * |
700 | | * In addition to polling file descriptors for the other event sources, you |
701 | | * take a set of file descriptors from libusb and monitor those too. When you |
702 | | * detect activity on libusb's file descriptors, you call |
703 | | * libusb_handle_events_timeout() in non-blocking mode. |
704 | | * |
705 | | * What's more, libusb may also need to handle events at specific moments in |
706 | | * time. No file descriptor activity is generated at these times, so your |
707 | | * own application needs to be continually aware of when the next one of these |
708 | | * moments occurs (through calling libusb_get_next_timeout()), and then it |
709 | | * needs to call libusb_handle_events_timeout() in non-blocking mode when |
710 | | * these moments occur. This means that you need to adjust your |
711 | | * poll()/select() timeout accordingly. |
712 | | * |
713 | | * libusb provides you with a set of file descriptors to poll and expects you |
714 | | * to poll all of them, treating them as a single entity. The meaning of each |
715 | | * file descriptor in the set is an internal implementation detail, |
716 | | * platform-dependent and may vary from release to release. Don't try to |
717 | | * interpret the meaning of the file descriptors, just do as libusb indicates, |
718 | | * polling all of them at once. |
719 | | * |
720 | | * In pseudo-code, you want something that looks like: |
721 | | \code |
722 | | // initialise libusb |
723 | | |
724 | | libusb_get_pollfds(ctx) |
725 | | while (user has not requested application exit) { |
726 | | libusb_get_next_timeout(ctx); |
727 | | poll(on libusb file descriptors plus any other event sources of interest, |
728 | | using a timeout no larger than the value libusb just suggested) |
729 | | if (poll() indicated activity on libusb file descriptors) |
730 | | libusb_handle_events_timeout(ctx, &zero_tv); |
731 | | if (time has elapsed to or beyond the libusb timeout) |
732 | | libusb_handle_events_timeout(ctx, &zero_tv); |
733 | | // handle events from other sources here |
734 | | } |
735 | | |
736 | | // clean up and exit |
737 | | \endcode |
738 | | * |
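 * As a more concrete C sketch of the above pseudo-code (simplified, with
 * error handling omitted; <tt>ctx</tt> and <tt>do_exit</tt> are assumptions,
 * and libusb_free_pollfds() requires libusb 1.0.20 or newer):
 *
\code
struct timeval zero_tv = { 0, 0 };

while (!do_exit) {
	// fetch the current descriptor set each iteration for simplicity
	const struct libusb_pollfd **fds = libusb_get_pollfds(ctx);
	if (fds == NULL)
		break; // not available on this platform (see the note above)

	struct pollfd pollfds[16];
	nfds_t nfds = 0;
	for (int i = 0; fds[i] != NULL && nfds < 16; i++) {
		pollfds[nfds].fd = fds[i]->fd;
		pollfds[nfds].events = fds[i]->events;
		nfds++;
	}
	libusb_free_pollfds(fds);

	// cap the poll timeout by libusb's next internal timeout, if any
	struct timeval tv;
	int timeout_ms = -1;
	if (libusb_get_next_timeout(ctx, &tv) == 1)
		timeout_ms = (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);

	int r = poll(pollfds, nfds, timeout_ms);
	if (r > 0)
		libusb_handle_events_timeout(ctx, &zero_tv); // fd activity, non-blocking
	else if (r == 0 && timeout_ms >= 0)
		libusb_handle_events_timeout(ctx, &zero_tv); // libusb's timeout expired
	// handle events from other sources here
}
\endcode
 *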
739 | | * \subsection polltime Notes on time-based events |
740 | | * |
741 | | * The above complication with having to track time and call into libusb at |
742 | | * specific moments is a bit of a headache. For maximum compatibility, you do |
743 | | * need to write your main loop as above, but you may decide that you can |
744 | | * restrict the supported platforms of your application and get away with |
745 | | * a more simplistic scheme. |
746 | | * |
747 | | * These time-based event complications are \b not required on the following |
748 | | * platforms: |
749 | | * - Darwin |
750 | | * - Linux, provided that the following version requirements are satisfied: |
751 | | * - Linux v2.6.27 or newer, compiled with timerfd support |
752 | | * - glibc v2.9 or newer |
753 | | * - libusb v1.0.5 or newer |
754 | | * |
755 | | * Under these configurations, libusb_get_next_timeout() will \em always return |
756 | | * 0, so your main loop can be simplified to: |
757 | | \code |
758 | | // initialise libusb |
759 | | |
760 | | libusb_get_pollfds(ctx) |
761 | | while (user has not requested application exit) { |
762 | | poll(on libusb file descriptors plus any other event sources of interest, |
763 | | using any timeout that you like) |
764 | | if (poll() indicated activity on libusb file descriptors) |
765 | | libusb_handle_events_timeout(ctx, &zero_tv); |
766 | | // handle events from other sources here |
767 | | } |
768 | | |
769 | | // clean up and exit |
770 | | \endcode |
771 | | * |
772 | | * Do remember that if you simplify your main loop to the above, you will |
773 | | * lose compatibility with some platforms (including legacy Linux platforms, |
774 | | * and <em>any future platforms supported by libusb which may have time-based |
775 | | * event requirements</em>). The resultant problems will likely appear as |
776 | | * strange bugs in your application. |
777 | | * |
778 | | * You can use the libusb_pollfds_handle_timeouts() function to do a runtime |
779 | | * check to see if it is safe to ignore the time-based event complications. |
780 | | * If your application has taken the shortcut of ignoring libusb's next timeout |
781 | | * in your main loop, then you are advised to check the return value of |
782 | | * libusb_pollfds_handle_timeouts() during application startup, and to abort |
783 | | * if the platform does suffer from these timing complications. |
784 | | * |
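 * For example (sketch):
 *
\code
if (!libusb_pollfds_handle_timeouts(ctx)) {
	// this platform needs manual timeout handling, which this application skips
	fprintf(stderr, "platform requires time-based event handling; aborting\n");
	abort();
}
\endcode
 *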
785 | | * \subsection fdsetchange Changes in the file descriptor set |
786 | | * |
787 | | * The set of file descriptors that libusb uses as event sources may change |
788 | | * during the life of your application. Rather than having to repeatedly |
789 | | * call libusb_get_pollfds(), you can set up notification functions for when |
790 | | * the file descriptor set changes using libusb_set_pollfd_notifiers(). |
791 | | * |
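 * A sketch of such notification callbacks (the names and the
 * application-side poll-set helpers are illustrative assumptions):
 *
\code
static void pollfd_added_cb(int fd, short events, void *user_data)
{
	// add fd, with the given poll events, to the application's poll set
	app_add_fd(fd, events);
}

static void pollfd_removed_cb(int fd, void *user_data)
{
	// remove fd from the application's poll set
	app_remove_fd(fd);
}

// register the notifiers (pass NULL callbacks later to remove them)
libusb_set_pollfd_notifiers(ctx, pollfd_added_cb, pollfd_removed_cb, NULL);
\endcode
 *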
792 | | * \subsection mtissues Multi-threaded considerations |
793 | | * |
794 | | * Unfortunately, the situation is complicated further when multiple threads |
795 | | * come into play. If two threads are monitoring the same file descriptors, |
796 | | * the fact that only one thread will be woken up when an event occurs causes |
797 | | * some headaches. |
798 | | * |
799 | | * The events lock, event waiters lock, and libusb_handle_events_locked() |
800 | | * entities are added to solve these problems. You do not need to be concerned |
801 | | * with these entities otherwise. |
802 | | * |
803 | | * See the extra documentation: \ref libusb_mtasync |
804 | | */ |
805 | | |
806 | | /** \page libusb_mtasync Multi-threaded applications and asynchronous I/O |
807 | | * |
808 | | * libusb is a thread-safe library, but extra considerations must be applied |
809 | | * to applications which interact with libusb from multiple threads. |
810 | | * |
811 | | * The underlying issue that must be addressed is that all libusb I/O |
812 | | * revolves around monitoring file descriptors through the poll()/select() |
813 | | * system calls. This is directly exposed at the |
814 | | * \ref libusb_asyncio "asynchronous interface" but it is important to note that the |
815 | | * \ref libusb_syncio "synchronous interface" is implemented on top of the |
816 | | * asynchronous interface, therefore the same considerations apply. |
817 | | * |
818 | | * The issue is that if two or more threads are concurrently calling poll() |
819 | | * or select() on libusb's file descriptors then only one of those threads |
820 | | * will be woken up when an event arrives. The others will be completely |
821 | | * oblivious that anything has happened. |
822 | | * |
823 | | * Consider the following pseudo-code, which submits an asynchronous transfer |
824 | | * then waits for its completion. This style is one way you could implement a |
825 | | * synchronous interface on top of the asynchronous interface (and libusb |
826 | | * does something similar, albeit more advanced due to the complications |
827 | | * explained on this page). |
828 | | * |
829 | | \code |
830 | | void cb(struct libusb_transfer *transfer) |
831 | | { |
832 | | int *completed = transfer->user_data; |
833 | | *completed = 1; |
834 | | } |
835 | | |
836 | | void myfunc() { |
837 | | struct libusb_transfer *transfer; |
838 | | unsigned char buffer[LIBUSB_CONTROL_SETUP_SIZE] __attribute__ ((aligned (2))); |
839 | | int completed = 0; |
840 | | |
841 | | transfer = libusb_alloc_transfer(0); |
842 | | libusb_fill_control_setup(buffer, |
843 | | LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_ENDPOINT_OUT, 0x04, 0x01, 0, 0); |
844 | | libusb_fill_control_transfer(transfer, dev, buffer, cb, &completed, 1000); |
845 | | libusb_submit_transfer(transfer); |
846 | | |
847 | | while (!completed) { |
848 | | poll(libusb file descriptors, 120*1000); |
849 | | if (poll indicates activity) |
850 | | libusb_handle_events_timeout(ctx, &zero_tv); |
851 | | } |
852 | | printf("completed!"); |
853 | | // other code here |
854 | | } |
855 | | \endcode |
856 | | * |
857 | | * Here we are <em>serializing</em> completion of an asynchronous event |
858 | | * against a condition - the condition being completion of a specific transfer. |
859 | | * The poll() loop has a long timeout to minimize CPU usage during situations |
860 | | * when nothing is happening (it could reasonably be unlimited). |
861 | | * |
862 | | * If this is the only thread that is polling libusb's file descriptors, there |
863 | | * is no problem: there is no danger that another thread will swallow up the |
864 | | * event that we are interested in. On the other hand, if there is another |
865 | | * thread polling the same descriptors, there is a chance that it will receive |
866 | | * the event that we were interested in. In this situation, <tt>myfunc()</tt> |
867 | | * will only realise that the transfer has completed on the next iteration of |
868 | | * the loop, <em>up to 120 seconds later.</em> Clearly a two-minute delay is |
869 | | * undesirable, and don't even think about using short timeouts to circumvent |
870 | | * this issue! |
871 | | * |
872 | | * The solution here is to ensure that no two threads are ever polling the |
873 | | * file descriptors at the same time. A naive implementation of this would |
874 | | * impact the capabilities of the library, so libusb offers the scheme |
875 | | * documented below to ensure no loss of functionality. |
876 | | * |
877 | | * Before we go any further, it is worth mentioning that all libusb-wrapped |
878 | | * event handling procedures fully adhere to the scheme documented below. |
879 | | * This includes libusb_handle_events() and its variants, and all the |
880 | | * synchronous I/O functions - libusb hides this headache from you. |
881 | | * |
882 | | * \section handle_events_mt Using libusb_handle_events() from multiple threads |
883 | | * |
884 | | * Even when only using libusb_handle_events() and synchronous I/O functions, |
885 | | * you can still have a race condition. You might be tempted to solve the |
886 | | * above with libusb_handle_events() like so: |
887 | | * |
888 | | \code |
889 | | libusb_submit_transfer(transfer); |
890 | | |
891 | | while (!completed) { |
892 | | libusb_handle_events(ctx); |
893 | | } |
894 | | printf("completed!"); |
895 | | \endcode |
896 | | * |
897 | | * This however has a race between the checking of completed and |
898 | | * libusb_handle_events() acquiring the events lock, so another thread |
899 | | * could have completed the transfer, resulting in this thread hanging |
900 | | * until either a timeout or another event occurs. See also commit |
901 | | * 6696512aade99bb15d6792af90ae329af270eba6 which fixes this in the |
902 | | * synchronous API implementation of libusb. |
903 | | * |
904 | | * Fixing this race requires checking the variable completed only after |
905 | | * taking the event lock, which defeats the concept of just calling |
906 | | * libusb_handle_events() without worrying about locking. This is why |
907 | | * libusb-1.0.9 introduced the new libusb_handle_events_timeout_completed() |
908 | | * and libusb_handle_events_completed() functions, which handle doing the |
909 | | * completion check for you after they have acquired the lock: |
910 | | * |
911 | | \code |
912 | | libusb_submit_transfer(transfer); |
913 | | |
914 | | while (!completed) { |
915 | | libusb_handle_events_completed(ctx, &completed); |
916 | | } |
917 | | printf("completed!"); |
918 | | \endcode |
919 | | * |
920 | | * This nicely fixes the race in our example. Note that if all you want to |
921 | | * do is submit a single transfer and wait for its completion, then using |
922 | | * one of the synchronous I/O functions is much easier. |
923 | | * |
924 | | * \note |
925 | | * The `completed` variable must be modified while holding the event lock, |
926 | | * otherwise a race condition can still exist. It is simplest to do so from |
927 | | * within the transfer callback as shown above. |
928 | | * |
929 | | * \section eventlock The events lock |
930 | | * |
931 | | * The problem arises when we consider the fact that libusb exposes file |
932 | | * descriptors to allow you to integrate asynchronous USB I/O into |
933 | | * existing main loops, effectively allowing you to do some work behind |
934 | | * libusb's back. If you do take libusb's file descriptors and pass them to |
935 | | * poll()/select() yourself, you need to be aware of the associated issues. |
936 | | * |
937 | | * The first concept to be introduced is the events lock. The events lock |
938 | | * is used to serialize threads that want to handle events, such that only |
939 | | * one thread is handling events at any one time. |
940 | | * |
941 | | * You must take the events lock before polling libusb file descriptors, |
942 | | * using libusb_lock_events(). You must release the lock as soon as you have |
943 | | * aborted your poll()/select() loop, using libusb_unlock_events(). |
944 | | * |
945 | | * \section threadwait Letting other threads do the work for you |
946 | | * |
947 | | * Although the events lock is a critical part of the solution, it is not |
948 | | * enough on its own. You might wonder if the following is sufficient... |
949 | | \code |
950 | | libusb_lock_events(ctx); |
951 | | while (!completed) { |
952 | | poll(libusb file descriptors, 120*1000); |
953 | | if (poll indicates activity) |
954 | | libusb_handle_events_timeout(ctx, &zero_tv); |
955 | | } |
956 | | libusb_unlock_events(ctx); |
957 | | \endcode |
958 | | * ...and the answer is that it is not. This is because the transfer in the |
959 | | * code shown above may take a long time (say 30 seconds) to complete, and |
960 | | * the lock is not released until the transfer is completed. |
961 | | * |
962 | | * Another thread with similar code that wants to do event handling may be |
963 | | * working with a transfer that completes after a few milliseconds. Despite |
964 | | * having such a quick completion time, the other thread cannot check the |
965 | | * status of its transfer until the code above has finished (30 seconds later) |
966 | | * due to contention on the lock. |
967 | | * |
968 | | * To solve this, libusb offers you a mechanism to determine when another |
969 | | * thread is handling events. It also offers a mechanism to block your thread |
970 | | * until the event handling thread has completed an event (and this mechanism |
971 | | * does not involve polling of file descriptors). |
972 | | * |
973 | | * After determining that another thread is currently handling events, you |
974 | | * obtain the <em>event waiters</em> lock using libusb_lock_event_waiters(). |
975 | | * You then re-check that some other thread is still handling events, and if |
976 | | * so, you call libusb_wait_for_event(). |
977 | | * |
978 | | * libusb_wait_for_event() puts your application to sleep until an event |
979 | | * occurs, or until a thread releases the events lock. When either of these |
980 | | * things happen, your thread is woken up, and should re-check the condition |
981 | | * it was waiting on. It should also re-check that another thread is handling |
982 | | * events, and if not, it should start handling events itself. |
983 | | * |
984 | | * This looks like the following, as pseudo-code: |
985 | | \code |
986 | | retry: |
987 | | if (libusb_try_lock_events(ctx) == 0) { |
988 | | // we obtained the event lock: do our own event handling |
989 | | while (!completed) { |
990 | | if (!libusb_event_handling_ok(ctx)) { |
991 | | libusb_unlock_events(ctx); |
992 | | goto retry; |
993 | | } |
994 | | poll(libusb file descriptors, 120*1000); |
995 | | if (poll indicates activity) |
996 | | libusb_handle_events_locked(ctx, 0); |
997 | | } |
998 | | libusb_unlock_events(ctx); |
999 | | } else { |
1000 | | // another thread is doing event handling. wait for it to signal us that |
1001 | | // an event has completed |
1002 | | libusb_lock_event_waiters(ctx); |
1003 | | |
1004 | | while (!completed) { |
1005 | | // now that we have the event waiters lock, double check that another |
1006 | | // thread is still handling events for us. (it may have ceased handling |
1007 | | // events in the time it took us to reach this point) |
1008 | | if (!libusb_event_handler_active(ctx)) { |
1009 | | // whoever was handling events is no longer doing so, try again |
1010 | | libusb_unlock_event_waiters(ctx); |
1011 | | goto retry; |
1012 | | } |
1013 | | |
1014 | | libusb_wait_for_event(ctx, NULL); |
1015 | | } |
1016 | | libusb_unlock_event_waiters(ctx); |
1017 | | } |
1018 | | printf("completed!\n"); |
1019 | | \endcode |
1020 | | * |
1021 | | * A naive look at the above code may suggest that this can only support |
1022 | | * one event waiter (hence a total of 2 competing threads, the other doing |
1023 | | * event handling), because the event waiter seems to have taken the event |
1024 | | * waiters lock while waiting for an event. However, the system does support |
1025 | | * multiple event waiters, because libusb_wait_for_event() actually drops |
1026 | | * the lock while waiting, and reacquires it before continuing. |
1027 | | * |
1028 | | * We have now implemented code which can dynamically handle situations where |
1029 | | * nobody is handling events (so we should do it ourselves), and it can also |
1030 | | * handle situations where another thread is doing event handling (so we can |
1031 | | * piggyback onto them). It is also equipped to handle a combination of |
1032 | | * the two, for example, another thread is doing event handling, but for |
1033 | | * whatever reason it stops doing so before our condition is met, so we take |
1034 | | * over the event handling. |
1035 | | * |
1036 | | * Four functions were introduced in the above pseudo-code. Their importance |
1037 | | * should be apparent from the code shown above. |
1038 | | * -# libusb_try_lock_events() is a non-blocking function which attempts |
1039 | | * to acquire the events lock but returns a failure code if it is contended. |
1040 | | * -# libusb_event_handling_ok() checks that libusb is still happy for your |
1041 | | * thread to be performing event handling. Sometimes, libusb needs to |
1042 | | * interrupt the event handler, and this is how you can check if you have |
1043 | | * been interrupted. If this function returns 0, the correct behaviour is |
1044 | | * for you to give up the event handling lock, and then to repeat the cycle. |
1045 | | * The following libusb_try_lock_events() will fail, so you will become an |
1046 | | * events waiter. For more information on this, read \ref fullstory below. |
1047 | | * -# libusb_handle_events_locked() is a variant of |
1048 | | * libusb_handle_events_timeout() that you can call while holding the |
1049 | | * events lock. libusb_handle_events_timeout() itself implements similar |
1050 | | * logic to the above, so be sure not to call it when you are |
1051 | | * "working behind libusb's back", as is the case here. |
1052 | | * -# libusb_event_handler_active() determines if someone is currently |
1053 | | * holding the events lock |
1054 | | * |
1055 | | * You might be wondering why there is no function to wake up all threads |
1056 | | * blocked on libusb_wait_for_event(). This is because libusb can do this |
1057 | | * internally: it will wake up all such threads when someone calls |
1058 | | * libusb_unlock_events() or when a transfer completes (at the point after its |
1059 | | * callback has returned). |
1060 | | * |
1061 | | * \subsection fullstory The full story |
1062 | | * |
1063 | | * The above explanation should be enough to get you going, but if you're |
1064 | | * really thinking through the issues then you may be left with some more |
1065 | | * questions regarding libusb's internals. If you're curious, read on, and if |
1066 | | * not, skip to the next section to avoid confusing yourself! |
1067 | | * |
1068 | | * The immediate question that may spring to mind is: what if one thread |
1069 | | * modifies the set of file descriptors that need to be polled while another |
1070 | | * thread is doing event handling? |
1071 | | * |
1072 | | * There are 2 situations in which this may happen. |
1073 | | * -# libusb_open() will add another file descriptor to the poll set, |
1074 | | * therefore it is desirable to interrupt the event handler so that it |
1075 | | * restarts, picking up the new descriptor. |
1076 | | * -# libusb_close() will remove a file descriptor from the poll set. There |
1077 | | * are all kinds of race conditions that could arise here, so it is |
1078 | | * important that nobody is doing event handling at this time. |
1079 | | * |
1080 | | * libusb handles these issues internally, so application developers do not |
1081 | | * have to stop their event handlers while opening/closing devices. Here's how |
1082 | | * it works, focusing on the libusb_close() situation first: |
1083 | | * |
1084 | | * -# During initialization, libusb opens an internal pipe, and it adds the read |
1085 | | * end of this pipe to the set of file descriptors to be polled. |
1086 | | * -# During libusb_close(), libusb writes some dummy data on this event pipe. |
1087 | | * This immediately interrupts the event handler. libusb also records |
1088 | | * internally that it is trying to interrupt event handlers for this |
1089 | | * high-priority event. |
1090 | | * -# At this point, some of the functions described above start behaving |
1091 | | * differently: |
1092 | | * - libusb_event_handling_ok() starts returning 0, indicating that it is NOT
1093 | | * OK for event handling to continue. |
1094 | | * - libusb_try_lock_events() starts returning 1, indicating that another |
1095 | | * thread holds the event handling lock, even if the lock is uncontended. |
1096 | | * - libusb_event_handler_active() starts returning 1, indicating that |
1097 | | * another thread is doing event handling, even if that is not true. |
1098 | | * -# The above changes in behaviour result in the event handler stopping and |
1099 | | * giving up the events lock very quickly, giving the high-priority |
1100 | | * libusb_close() operation a "free ride" to acquire the events lock. All |
1101 | | * threads that are competing to do event handling become event waiters. |
1102 | | * -# With the events lock held inside libusb_close(), libusb can safely remove |
1103 | | * a file descriptor from the poll set, safe in the knowledge that
1104 | | * nobody is polling those descriptors or trying to access the poll set. |
1105 | | * -# After obtaining the events lock, the close operation completes very |
1106 | | * quickly (usually a matter of milliseconds) and then immediately releases |
1107 | | * the events lock. |
1108 | | * -# At the same time, the behaviour of libusb_event_handling_ok() and friends |
1109 | | * reverts to the original, documented behaviour. |
1110 | | * -# The release of the events lock causes the threads that are waiting for |
1111 | | * events to be woken up and to start competing to become event handlers |
1112 | | * again. One of them will succeed; it will then re-obtain the list of poll |
1113 | | * descriptors, and USB I/O will then continue as normal. |
1114 | | * |
1115 | | * libusb_open() is similar, and is actually a simpler case. Upon a
1116 | | * call to libusb_open(): |
1117 | | * |
1118 | | * -# The device is opened and a file descriptor is added to the poll set. |
1119 | | * -# libusb sends some dummy data on the event pipe, and records that it |
1120 | | * is trying to modify the poll descriptor set. |
1121 | | * -# The event handler is interrupted, and the same behaviour change as for |
1122 | | * libusb_close() takes effect, causing all event handling threads to become |
1123 | | * event waiters. |
1124 | | * -# The libusb_open() implementation takes its free ride to the events lock. |
1125 | | * -# Happy that it has successfully paused the events handler, libusb_open() |
1126 | | * releases the events lock. |
1127 | | * -# The event waiter threads are all woken up and compete to become event |
1128 | | * handlers again. The one that succeeds will obtain the list of poll |
1129 | | * descriptors again, which will include the addition of the new device. |
1130 | | * |
1131 | | * \subsection concl Closing remarks |
1132 | | * |
1133 | | * The above may seem a little complicated, but hopefully I have made it clear |
1134 | | * why such complications are necessary. Also, do not forget that this only |
1135 | | * applies to applications that take libusb's file descriptors and integrate |
1136 | | * them into their own polling loops. |
1137 | | * |
1138 | | * You may decide that it is OK for your multi-threaded application to ignore |
1139 | | * some of the rules and locks detailed above, because you don't think that |
1140 | | * two threads can ever be polling the descriptors at the same time. If that |
1141 | | * is the case, then that's good news for you because you don't have to worry. |
1142 | | * But be careful here; remember that the synchronous I/O functions do event |
1143 | | * handling internally. If you have one thread doing event handling in a loop |
1144 | | * (without implementing the rules and locking semantics documented above) |
1145 | | * and another trying to send a synchronous USB transfer, you will end up with |
1146 | | * two threads monitoring the same descriptors, and the above-described |
1147 | | * undesirable behaviour occurring. The solution is for your polling thread to |
1148 | | * play by the rules; the synchronous I/O functions do so, and this will result |
1149 | | * in them getting along in perfect harmony. |
1150 | | * |
1151 | | * If you do have a dedicated thread doing event handling, it is perfectly |
1152 | | * legal for it to take the event handling lock for long periods of time. Any |
1153 | | * synchronous I/O functions you call from other threads will transparently |
1154 | | * fall back to the "event waiters" mechanism detailed above. The only |
1155 | | * consideration that your event handling thread must apply is the one related |
1156 | | * to libusb_event_handling_ok(): you must call this before every poll(), and |
1157 | | * give up the events lock if instructed. |
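 | | *
 | | * As an illustration, a dedicated event-handling thread that follows these
 | | * rules might look roughly like the sketch below. The exit_requested flag is
 | | * an assumed application-defined variable, not part of libusb, and error
 | | * handling is omitted:
 | | \code
 | | void *event_thread_main(void *arg)
 | | {
 | |     libusb_context *ctx = arg;
 | |     struct timeval tv = { 1, 0 };
 | |
 | |     libusb_lock_events(ctx);
 | |     while (!exit_requested) {
 | |         if (!libusb_event_handling_ok(ctx)) {
 | |             // give up the lock so libusb_open()/libusb_close() can take
 | |             // their "free ride", then take it back and carry on
 | |             libusb_unlock_events(ctx);
 | |             libusb_lock_events(ctx);
 | |             continue;
 | |         }
 | |         libusb_handle_events_locked(ctx, &tv);
 | |     }
 | |     libusb_unlock_events(ctx);
 | |     return NULL;
 | | }
 | | \endcode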
1158 | | */ |
1159 | | |
1160 | | int usbi_io_init(struct libusb_context *ctx) |
1161 | 0 | { |
1162 | 0 | int r; |
1163 | |
|
1164 | 0 | usbi_mutex_init(&ctx->flying_transfers_lock); |
1165 | 0 | usbi_mutex_init(&ctx->events_lock); |
1166 | 0 | usbi_mutex_init(&ctx->event_waiters_lock); |
1167 | 0 | usbi_cond_init(&ctx->event_waiters_cond); |
1168 | 0 | usbi_mutex_init(&ctx->event_data_lock); |
1169 | 0 | usbi_tls_key_create(&ctx->event_handling_key); |
1170 | 0 | list_init(&ctx->flying_transfers); |
1171 | 0 | list_init(&ctx->event_sources); |
1172 | 0 | list_init(&ctx->removed_event_sources); |
1173 | 0 | list_init(&ctx->hotplug_msgs); |
1174 | 0 | list_init(&ctx->completed_transfers); |
1175 | |
|
1176 | 0 | r = usbi_create_event(&ctx->event); |
1177 | 0 | if (r < 0) |
1178 | 0 | goto err; |
1179 | | |
1180 | 0 | r = usbi_add_event_source(ctx, USBI_EVENT_OS_HANDLE(&ctx->event), USBI_EVENT_POLL_EVENTS); |
1181 | 0 | if (r < 0) |
1182 | 0 | goto err_destroy_event; |
1183 | | |
1184 | 0 | #ifdef HAVE_OS_TIMER |
1185 | 0 | r = usbi_create_timer(&ctx->timer); |
1186 | 0 | if (r == 0) { |
1187 | 0 | usbi_dbg(ctx, "using timer for timeouts"); |
1188 | 0 | r = usbi_add_event_source(ctx, USBI_TIMER_OS_HANDLE(&ctx->timer), USBI_TIMER_POLL_EVENTS); |
1189 | 0 | if (r < 0) |
1190 | 0 | goto err_destroy_timer; |
1191 | 0 | } else { |
1192 | 0 | usbi_dbg(ctx, "timer not available for timeouts"); |
1193 | 0 | } |
1194 | 0 | #endif |
1195 | | |
1196 | 0 | return 0; |
1197 | | |
1198 | 0 | #ifdef HAVE_OS_TIMER |
1199 | 0 | err_destroy_timer: |
1200 | 0 | usbi_destroy_timer(&ctx->timer); |
1201 | 0 | usbi_remove_event_source(ctx, USBI_EVENT_OS_HANDLE(&ctx->event)); |
1202 | 0 | #endif |
1203 | 0 | err_destroy_event: |
1204 | 0 | usbi_destroy_event(&ctx->event); |
1205 | 0 | err: |
1206 | 0 | usbi_mutex_destroy(&ctx->flying_transfers_lock); |
1207 | 0 | usbi_mutex_destroy(&ctx->events_lock); |
1208 | 0 | usbi_mutex_destroy(&ctx->event_waiters_lock); |
1209 | 0 | usbi_cond_destroy(&ctx->event_waiters_cond); |
1210 | 0 | usbi_mutex_destroy(&ctx->event_data_lock); |
1211 | 0 | usbi_tls_key_delete(ctx->event_handling_key); |
1212 | 0 | return r; |
1213 | 0 | } |
1214 | | |
1215 | | static void cleanup_removed_event_sources(struct libusb_context *ctx) |
1216 | 0 | { |
1217 | 0 | struct usbi_event_source *ievent_source, *tmp; |
1218 | |
|
1219 | 0 | for_each_removed_event_source_safe(ctx, ievent_source, tmp) { |
1220 | 0 | list_del(&ievent_source->list); |
1221 | 0 | free(ievent_source); |
1222 | 0 | } |
1223 | 0 | } |
1224 | | |
1225 | | void usbi_io_exit(struct libusb_context *ctx) |
1226 | 0 | { |
1227 | 0 | #ifdef HAVE_OS_TIMER |
1228 | 0 | if (usbi_using_timer(ctx)) { |
1229 | 0 | usbi_remove_event_source(ctx, USBI_TIMER_OS_HANDLE(&ctx->timer)); |
1230 | 0 | usbi_destroy_timer(&ctx->timer); |
1231 | 0 | } |
1232 | 0 | #endif |
1233 | 0 | usbi_remove_event_source(ctx, USBI_EVENT_OS_HANDLE(&ctx->event)); |
1234 | 0 | usbi_destroy_event(&ctx->event); |
1235 | 0 | usbi_mutex_destroy(&ctx->flying_transfers_lock); |
1236 | 0 | usbi_mutex_destroy(&ctx->events_lock); |
1237 | 0 | usbi_mutex_destroy(&ctx->event_waiters_lock); |
1238 | 0 | usbi_cond_destroy(&ctx->event_waiters_cond); |
1239 | 0 | usbi_mutex_destroy(&ctx->event_data_lock); |
1240 | 0 | usbi_tls_key_delete(ctx->event_handling_key); |
1241 | 0 | cleanup_removed_event_sources(ctx); |
1242 | 0 | free(ctx->event_data); |
1243 | 0 | } |
1244 | | |
1245 | | static void calculate_timeout(struct usbi_transfer *itransfer) |
1246 | 0 | { |
1247 | 0 | struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); |
1248 | 0 | unsigned int timeout = transfer->timeout; |
1249 | |
|
1250 | 0 | if (!timeout) { |
1251 | 0 | TIMESPEC_CLEAR(&itransfer->timeout); |
1252 | 0 | return; |
1253 | 0 | } |
1254 | | |
1255 | 0 | usbi_get_monotonic_time(&itransfer->timeout); |
1256 | |
|
1257 | 0 | itransfer->timeout.tv_sec += timeout / 1000U; |
1258 | 0 | itransfer->timeout.tv_nsec += (timeout % 1000U) * 1000000L; |
1259 | 0 | if (itransfer->timeout.tv_nsec >= NSEC_PER_SEC) { |
1260 | 0 | ++itransfer->timeout.tv_sec; |
1261 | 0 | itransfer->timeout.tv_nsec -= NSEC_PER_SEC; |
1262 | 0 | } |
1263 | 0 | } |
1264 | | |
1265 | | /** \ingroup libusb_asyncio |
1266 | | * Allocate a libusb transfer with a specified number of isochronous packet |
1267 | | * descriptors. The returned transfer is pre-initialized for you. When the new |
1268 | | * transfer is no longer needed, it should be freed with |
1269 | | * libusb_free_transfer(). |
1270 | | * |
1271 | | * Transfers intended for non-isochronous endpoints (e.g. control, bulk, |
1272 | | * interrupt) should specify an iso_packets count of zero. |
1273 | | * |
1274 | | * For transfers intended for isochronous endpoints, specify an appropriate |
1275 | | * number of packet descriptors to be allocated as part of the transfer. |
1276 | | * The returned transfer is not specially initialized for isochronous I/O; |
1277 | | * you are still required to set the |
1278 | | * \ref libusb_transfer::num_iso_packets "num_iso_packets" and |
1279 | | * \ref libusb_transfer::type "type" fields accordingly. |
1280 | | * |
1281 | | * It is safe to allocate a transfer with some isochronous packets and then |
1282 | | * use it on a non-isochronous endpoint. If you do this, ensure that at the time
1283 | | * of submission, num_iso_packets is 0 and that type is set appropriately. |
1284 | | * |
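 | | * For example, a transfer intended for an isochronous IN endpoint might be
 | | * set up roughly as follows. This is only a sketch: dev_handle, the 0x81
 | | * endpoint address, the packet sizing and iso_callback are assumptions, and
 | | * error checking is omitted:
 | | \code
 | | struct libusb_transfer *xfer = libusb_alloc_transfer(8);
 | | unsigned char *buf = malloc(8 * 512);
 | |
 | | // the fill helper sets the num_iso_packets and type fields for you
 | | libusb_fill_iso_transfer(xfer, dev_handle, 0x81, buf, 8 * 512, 8,
 | |     iso_callback, NULL, 1000);
 | | libusb_set_iso_packet_lengths(xfer, 512);
 | | \endcode
 | | *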
1285 | | * \param iso_packets number of isochronous packet descriptors to allocate. Must be non-negative. |
1286 | | * \returns a newly allocated transfer, or NULL on error |
1287 | | */ |
1288 | | DEFAULT_VISIBILITY |
1289 | | struct libusb_transfer * LIBUSB_CALL libusb_alloc_transfer( |
1290 | | int iso_packets) |
1291 | 282 | { |
1292 | 282 | assert(iso_packets >= 0); |
1293 | 282 | if (iso_packets < 0) |
1294 | 0 | return NULL; |
1295 | | |
1296 | 282 | size_t priv_size = PTR_ALIGN(usbi_backend.transfer_priv_size); |
1297 | 282 | size_t usbi_transfer_size = PTR_ALIGN(sizeof(struct usbi_transfer)); |
1298 | 282 | size_t libusb_transfer_size = PTR_ALIGN(sizeof(struct libusb_transfer)); |
1299 | 282 | size_t iso_packets_size = sizeof(struct libusb_iso_packet_descriptor) * (size_t)iso_packets; |
1300 | 282 | size_t alloc_size = priv_size + usbi_transfer_size + libusb_transfer_size + iso_packets_size; |
1301 | 282 | unsigned char *ptr = calloc(1, alloc_size); |
1302 | 282 | if (!ptr) |
1303 | 0 | return NULL; |
1304 | | |
1305 | 282 | struct usbi_transfer *itransfer = (struct usbi_transfer *)(ptr + priv_size); |
1306 | 282 | itransfer->num_iso_packets = iso_packets; |
1307 | 282 | itransfer->priv = ptr; |
1308 | 282 | usbi_mutex_init(&itransfer->lock); |
1309 | 282 | struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); |
1310 | | |
1311 | 282 | return transfer; |
1312 | 282 | } |
1313 | | |
1314 | | /** \ingroup libusb_asyncio |
1315 | | * Free a transfer structure. This should be called for all transfers |
1316 | | * allocated with libusb_alloc_transfer(). |
1317 | | * |
1318 | | * If the \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_BUFFER |
1319 | | * "LIBUSB_TRANSFER_FREE_BUFFER" flag is set and the transfer buffer is |
1320 | | * non-NULL, this function will also free the transfer buffer using the |
1321 | | * standard system memory allocator (e.g. free()). |
1322 | | * |
1323 | | * It is legal to call this function with a NULL transfer. In this case, |
1324 | | * the function will simply return safely. |
1325 | | * |
1326 | | * It is not legal to free an active transfer (one which has been submitted |
1327 | | * and has not yet completed). |
1328 | | * |
1329 | | * \param transfer the transfer to free |
1330 | | */ |
1331 | | void API_EXPORTED libusb_free_transfer(struct libusb_transfer *transfer) |
1332 | 0 | { |
1333 | 0 | if (!transfer) |
1334 | 0 | return; |
1335 | | |
1336 | 0 | usbi_dbg(TRANSFER_CTX(transfer), "transfer %p", (void *) transfer); |
1337 | 0 | if (transfer->flags & LIBUSB_TRANSFER_FREE_BUFFER) |
1338 | 0 | free(transfer->buffer); |
1339 | |
|
1340 | 0 | struct usbi_transfer *itransfer = LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer); |
1341 | 0 | usbi_mutex_destroy(&itransfer->lock); |
1342 | 0 | if (itransfer->dev) |
1343 | 0 | libusb_unref_device(itransfer->dev); |
1344 | |
|
1345 | 0 | unsigned char *ptr = USBI_TRANSFER_TO_TRANSFER_PRIV(itransfer); |
1346 | 0 | assert(ptr == itransfer->priv); |
1347 | 0 | free(ptr); |
1348 | 0 | } |
1349 | | |
1350 | | /* iterates through the flying transfers, and rearms the timer based on the |
1351 | | * next upcoming timeout. |
1352 | | * NB: flying_transfers_lock must be held when calling this. |
1353 | | * returns 0 on success or a LIBUSB_ERROR code on failure. |
1354 | | */ |
1355 | | #ifdef HAVE_OS_TIMER |
1356 | | static int arm_timer_for_next_timeout(struct libusb_context *ctx) |
1357 | 0 | { |
1358 | 0 | struct usbi_transfer *itransfer; |
1359 | |
|
1360 | 0 | if (!usbi_using_timer(ctx)) |
1361 | 0 | return 0; |
1362 | | |
1363 | 0 | for_each_transfer(ctx, itransfer) { |
1364 | 0 | struct timespec *cur_ts = &itransfer->timeout; |
1365 | | |
1366 | | /* if we've reached transfers of infinite timeout, then we have no |
1367 | | * arming to do */ |
1368 | 0 | if (!TIMESPEC_IS_SET(cur_ts)) |
1369 | 0 | break; |
1370 | | |
1371 | | /* act on first transfer that has not already been handled */ |
1372 | 0 | if (!(itransfer->timeout_flags & (USBI_TRANSFER_TIMEOUT_HANDLED | USBI_TRANSFER_OS_HANDLES_TIMEOUT))) { |
1373 | 0 | struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); |
1374 | 0 | usbi_dbg(ctx, "next timeout originally %ums", transfer->timeout); |
1375 | 0 | return usbi_arm_timer(&ctx->timer, cur_ts); |
1376 | 0 | } |
1377 | 0 | } |
1378 | | |
1379 | 0 | usbi_dbg(ctx, "no timeouts, disarming timer"); |
1380 | 0 | return usbi_disarm_timer(&ctx->timer); |
1381 | 0 | } |
1382 | | #else |
1383 | | static inline int arm_timer_for_next_timeout(struct libusb_context *ctx) |
1384 | | { |
1385 | | UNUSED(ctx); |
1386 | | return 0; |
1387 | | } |
1388 | | #endif |
1389 | | |
1390 | | /* add a transfer to the (timeout-sorted) active transfers list. |
1391 | | * This function will return non-zero if it fails to update the timer,
1392 | | * in which case the transfer is *not* on the flying_transfers list. |
1393 | | * NB: flying_transfers_lock MUST be held when calling this. */ |
1394 | | static int add_to_flying_list(struct usbi_transfer *itransfer) |
1395 | 0 | { |
1396 | 0 | struct usbi_transfer *cur; |
1397 | 0 | struct timespec *timeout = &itransfer->timeout; |
1398 | 0 | struct libusb_context *ctx = ITRANSFER_CTX(itransfer); |
1399 | 0 | int r = 0; |
1400 | 0 | int first = 1; |
1401 | |
|
1402 | 0 | calculate_timeout(itransfer); |
1403 | | |
1404 | | /* if we have no other flying transfers, start the list with this one */ |
1405 | 0 | if (list_empty(&ctx->flying_transfers)) { |
1406 | 0 | list_add(&itransfer->list, &ctx->flying_transfers); |
1407 | 0 | goto out; |
1408 | 0 | } |
1409 | | |
1410 | | /* if we have infinite timeout, append to end of list */ |
1411 | 0 | if (!TIMESPEC_IS_SET(timeout)) { |
1412 | 0 | list_add_tail(&itransfer->list, &ctx->flying_transfers); |
1413 | | /* first is irrelevant in this case */ |
1414 | 0 | goto out; |
1415 | 0 | } |
1416 | | |
1417 | | /* otherwise, find appropriate place in list */ |
1418 | 0 | for_each_transfer(ctx, cur) { |
1419 | | /* find first timeout that occurs after the transfer in question */ |
1420 | 0 | struct timespec *cur_ts = &cur->timeout; |
1421 | |
|
1422 | 0 | if (!TIMESPEC_IS_SET(cur_ts) || TIMESPEC_CMP(cur_ts, timeout, >)) { |
1423 | 0 | list_add_tail(&itransfer->list, &cur->list); |
1424 | 0 | goto out; |
1425 | 0 | } |
1426 | 0 | first = 0; |
1427 | 0 | } |
1428 | | /* first is 0 at this stage (list not empty) */ |
1429 | | |
1430 | | /* otherwise we need to be inserted at the end */ |
1431 | 0 | list_add_tail(&itransfer->list, &ctx->flying_transfers); |
1432 | 0 | out: |
1433 | 0 | #ifdef HAVE_OS_TIMER |
1434 | 0 | if (first && usbi_using_timer(ctx) && TIMESPEC_IS_SET(timeout)) { |
1435 | | /* if this transfer has the lowest timeout of all active transfers, |
1436 | | * rearm the timer with this transfer's timeout */ |
1437 | 0 | struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); |
1438 | 0 | usbi_dbg(ctx, "arm timer for timeout in %ums (first in line)", |
1439 | 0 | transfer->timeout); |
1440 | 0 | r = usbi_arm_timer(&ctx->timer, timeout); |
1441 | 0 | } |
1442 | | #else |
1443 | | UNUSED(first); |
1444 | | #endif |
1445 | |
|
1446 | 0 | if (r) |
1447 | 0 | list_del(&itransfer->list); |
1448 | |
|
1449 | 0 | return r; |
1450 | 0 | } |
1451 | | |
1452 | | /* remove a transfer from the active transfers list. |
1453 | | * This function will *always* remove the transfer from the |
1454 | | * flying_transfers list. It will return a LIBUSB_ERROR code |
1455 | | * if it fails to update the timer for the next timeout. |
1456 | | * NB: flying_transfers_lock MUST be held when calling this. */ |
1457 | | static int remove_from_flying_list(struct usbi_transfer *itransfer) |
1458 | 0 | { |
1459 | 0 | struct libusb_context *ctx = ITRANSFER_CTX(itransfer); |
1460 | 0 | int rearm_timer; |
1461 | 0 | int r = 0; |
1462 | |
|
1463 | 0 | rearm_timer = (TIMESPEC_IS_SET(&itransfer->timeout) && |
1464 | 0 | list_first_entry(&ctx->flying_transfers, struct usbi_transfer, list) == itransfer); |
1465 | 0 | list_del(&itransfer->list); |
1466 | 0 | if (rearm_timer) |
1467 | 0 | r = arm_timer_for_next_timeout(ctx); |
1468 | |
|
1469 | 0 | return r; |
1470 | 0 | } |
1471 | | |
1472 | | /** \ingroup libusb_asyncio |
1473 | | * Submit a transfer. This function will fire off the USB transfer and then |
1474 | | * return immediately. |
1475 | | * |
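 | | * For example, an asynchronous bulk OUT transfer might be submitted roughly
 | | * as follows. This is only a sketch: dev_handle, endpoint 0x01, data,
 | | * data_len and on_complete are assumptions, not part of this API, and error
 | | * handling is abridged:
 | | \code
 | | static void LIBUSB_CALL on_complete(struct libusb_transfer *xfer)
 | | {
 | |     // runs from an event-handling context once the transfer finishes
 | | }
 | |
 | | struct libusb_transfer *xfer = libusb_alloc_transfer(0);
 | | libusb_fill_bulk_transfer(xfer, dev_handle, 0x01, data, data_len,
 | |     on_complete, NULL, 5000);
 | | if (libusb_submit_transfer(xfer) != LIBUSB_SUCCESS)
 | |     libusb_free_transfer(xfer);
 | | // the callback only fires while some thread is handling events,
 | | // e.g. via libusb_handle_events()
 | | \endcode
 | | *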
1476 | | * \param transfer the transfer to submit |
1477 | | * \returns 0 on success |
1478 | | * \returns \ref LIBUSB_ERROR_NO_DEVICE if the device has been disconnected |
1479 | | * \returns \ref LIBUSB_ERROR_BUSY if the transfer has already been submitted. |
1480 | | * \returns \ref LIBUSB_ERROR_NOT_SUPPORTED if the transfer flags are not supported |
1481 | | * by the operating system. |
1482 | | * \returns \ref LIBUSB_ERROR_INVALID_PARAM if the transfer size is larger than |
1483 | | * the operating system and/or hardware can support (see \ref asynclimits) |
1484 | | * \returns another LIBUSB_ERROR code on other failure |
1485 | | */ |
1486 | | int API_EXPORTED libusb_submit_transfer(struct libusb_transfer *transfer) |
1487 | 0 | { |
1488 | 0 | struct usbi_transfer *itransfer = |
1489 | 0 | LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer); |
1490 | 0 | struct libusb_context *ctx; |
1491 | 0 | int r; |
1492 | |
|
1493 | 0 | assert(transfer->dev_handle); |
1494 | 0 | if (itransfer->dev) |
1495 | 0 | libusb_unref_device(itransfer->dev); |
1496 | 0 | itransfer->dev = libusb_ref_device(transfer->dev_handle->dev); |
1497 | |
|
1498 | 0 | ctx = HANDLE_CTX(transfer->dev_handle); |
1499 | 0 | usbi_dbg(ctx, "transfer %p", (void *) transfer); |
1500 | | |
1501 | | /* |
1502 | | * Important note on locking, this function takes / releases locks |
1503 | | * in the following order: |
1504 | | * take flying_transfers_lock |
1505 | | * take itransfer->lock |
1506 | | * clear transfer |
1507 | | * add to flying_transfers list |
1508 | | * release flying_transfers_lock |
1509 | | * submit transfer |
1510 | | * release itransfer->lock |
1511 | | * if submit failed: |
1512 | | * take flying_transfers_lock |
1513 | | * remove from flying_transfers list |
1514 | | * release flying_transfers_lock |
1515 | | * |
1516 | | * Note that it takes locks in the order a-b and then releases them |
1517 | | * in the same order a-b. This is somewhat unusual but not wrong, |
1518 | | * release order is not important as long as *all* locks are released |
1519 | | * before re-acquiring any locks. |
1520 | | * |
1521 | | * This means that the ordering of first releasing itransfer->lock |
1522 | | * and then re-acquiring the flying_transfers_list on error is |
1523 | | * important and must not be changed! |
1524 | | * |
1525 | | * This is done this way because when we take both locks we must always |
1526 | | * take flying_transfers_lock first to avoid ab-ba style deadlocks with |
1527 | | * the timeout handling and usbi_handle_disconnect paths. |
1528 | | * |
1529 | | * And we cannot release itransfer->lock before the submission is |
1530 | | * complete otherwise timeout handling for transfers with short |
1531 | | * timeouts may run before submission. |
1532 | | */ |
1533 | 0 | usbi_mutex_lock(&ctx->flying_transfers_lock); |
1534 | 0 | usbi_mutex_lock(&itransfer->lock); |
1535 | 0 | if (itransfer->state_flags & USBI_TRANSFER_IN_FLIGHT) { |
1536 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
1537 | 0 | usbi_mutex_unlock(&itransfer->lock); |
1538 | 0 | return LIBUSB_ERROR_BUSY; |
1539 | 0 | } |
1540 | 0 | itransfer->transferred = 0; |
1541 | 0 | itransfer->state_flags = 0; |
1542 | 0 | itransfer->timeout_flags = 0; |
1543 | 0 | r = add_to_flying_list(itransfer); |
1544 | 0 | if (r) { |
1545 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
1546 | 0 | usbi_mutex_unlock(&itransfer->lock); |
1547 | 0 | return r; |
1548 | 0 | } |
1549 | | /* |
1550 | | * We must release the flying transfers lock here, because with |
1551 | | * some backends the submit_transfer method is synchronous. |
1552 | | */ |
1553 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
1554 | |
|
1555 | 0 | r = usbi_backend.submit_transfer(itransfer); |
1556 | 0 | if (r == LIBUSB_SUCCESS) { |
1557 | 0 | itransfer->state_flags |= USBI_TRANSFER_IN_FLIGHT; |
1558 | 0 | } |
1559 | 0 | usbi_mutex_unlock(&itransfer->lock); |
1560 | |
|
1561 | 0 | if (r != LIBUSB_SUCCESS) { |
1562 | 0 | usbi_mutex_lock(&ctx->flying_transfers_lock); |
1563 | 0 | remove_from_flying_list(itransfer); |
1564 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
1565 | 0 | } |
1566 | |
|
1567 | 0 | return r; |
1568 | 0 | } |
1569 | | |
1570 | | /** \ingroup libusb_asyncio |
1571 | | * Asynchronously cancel a previously submitted transfer. |
1572 | | * This function returns immediately, but this does not indicate cancellation |
1573 | | * is complete. Your callback function will be invoked at some later time |
1574 | | * with a transfer status of |
1575 | | * \ref libusb_transfer_status::LIBUSB_TRANSFER_CANCELLED |
1576 | | * "LIBUSB_TRANSFER_CANCELLED." |
1577 | | * |
1578 | | * This function behaves differently on Darwin-based systems (macOS and iOS): |
1579 | | * |
1580 | | * - Calling this function for one transfer will cause all transfers on the |
1581 | | * same endpoint to be cancelled. Your callback function will be invoked with |
1582 | | * a transfer status of |
1583 | | * \ref libusb_transfer_status::LIBUSB_TRANSFER_CANCELLED |
1584 | | * "LIBUSB_TRANSFER_CANCELLED" for each transfer that was cancelled. |
1585 | | *
1586 | | * - When built for macOS versions prior to 10.5, this function sends a |
1587 | | * \c ClearFeature(ENDPOINT_HALT) request for the transfer's endpoint. |
1588 | | * (Prior to libusb 1.0.27, this request was sent on all Darwin systems.) |
1589 | | * If the device does not handle this request correctly, the data toggle |
1590 | | * bits for the endpoint can be left out of sync between host and device, |
1591 | | * which can have unpredictable results when the next data is sent on |
1592 | | * the endpoint, including data being silently lost. A call to |
1593 | | * \ref libusb_clear_halt will not resolve this situation, since that |
1594 | | * function uses the same request. Therefore, if your program runs on |
1595 | | * macOS < 10.5 (or libusb < 1.0.27), and uses a device that does not |
1596 | | * correctly implement \c ClearFeature(ENDPOINT_HALT) requests, it may |
1597 | | * only be safe to cancel transfers when followed by a device reset using |
1598 | | * \ref libusb_reset_device. |
1599 | | * |
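 | | * Cancellation is typically paired with event handling so that the final
 | | * callback can be observed. A sketch follows; xfer, ctx and a callback that
 | | * sets the assumed completed flag are not shown:
 | | \code
 | | int rc = libusb_cancel_transfer(xfer);
 | | if (rc == 0 || rc == LIBUSB_ERROR_NOT_FOUND) {
 | |     // wait for the callback, which runs with LIBUSB_TRANSFER_CANCELLED
 | |     // (or another final status if it finished first) and sets "completed"
 | |     while (!completed)
 | |         libusb_handle_events(ctx);
 | | }
 | | \endcode
 | | *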
1600 | | * \param transfer the transfer to cancel |
1601 | | * \returns 0 on success |
1602 | | * \returns \ref LIBUSB_ERROR_NOT_FOUND if the transfer is not in progress, |
1603 | | * already complete, or already cancelled. |
1604 | | * \returns a LIBUSB_ERROR code on failure |
1605 | | */ |
1606 | | int API_EXPORTED libusb_cancel_transfer(struct libusb_transfer *transfer) |
1607 | 0 | { |
1608 | 0 | struct usbi_transfer *itransfer = |
1609 | 0 | LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer); |
1610 | 0 | struct libusb_context *ctx = ITRANSFER_CTX(itransfer); |
1611 | 0 | int r; |
1612 | |
|
1613 | 0 | usbi_dbg(ctx, "transfer %p", (void *) transfer ); |
1614 | 0 | usbi_mutex_lock(&itransfer->lock); |
1615 | 0 | if (!(itransfer->state_flags & USBI_TRANSFER_IN_FLIGHT) |
1616 | 0 | || (itransfer->state_flags & USBI_TRANSFER_CANCELLING)) { |
1617 | 0 | r = LIBUSB_ERROR_NOT_FOUND; |
1618 | 0 | goto out; |
1619 | 0 | } |
1620 | 0 | r = usbi_backend.cancel_transfer(itransfer); |
1621 | 0 | if (r < 0) { |
1622 | 0 | if (r != LIBUSB_ERROR_NOT_FOUND && |
1623 | 0 | r != LIBUSB_ERROR_NO_DEVICE) |
1624 | 0 | usbi_err(ctx, "cancel transfer failed error %d", r); |
1625 | 0 | else |
1626 | 0 | usbi_dbg(ctx, "cancel transfer failed error %d", r); |
1627 | |
|
1628 | 0 | if (r == LIBUSB_ERROR_NO_DEVICE) |
1629 | 0 | itransfer->state_flags |= USBI_TRANSFER_DEVICE_DISAPPEARED; |
1630 | 0 | } |
1631 | |
|
1632 | 0 | itransfer->state_flags |= USBI_TRANSFER_CANCELLING; |
1633 | |
|
1634 | 0 | out: |
1635 | 0 | usbi_mutex_unlock(&itransfer->lock); |
1636 | 0 | return r; |
1637 | 0 | } |
1638 | | |
1639 | | /** \ingroup libusb_asyncio |
1640 | | * Set a transfer's bulk stream id. Note users are advised to use
1641 | | * libusb_fill_bulk_stream_transfer() instead of calling this function |
1642 | | * directly. |
1643 | | * |
1644 | | * Since version 1.0.19, \ref LIBUSB_API_VERSION >= 0x01000103 |
1645 | | * |
1646 | | * \param transfer the transfer to set the stream id for |
1647 | | * \param stream_id the stream id to set |
1648 | | * \see libusb_alloc_streams() |
1649 | | */ |
1650 | | void API_EXPORTED libusb_transfer_set_stream_id( |
1651 | | struct libusb_transfer *transfer, uint32_t stream_id) |
1652 | 0 | { |
1653 | 0 | struct usbi_transfer *itransfer = |
1654 | 0 | LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer); |
1655 | |
|
1656 | 0 | itransfer->stream_id = stream_id; |
1657 | 0 | } |
1658 | | |
1659 | | /** \ingroup libusb_asyncio |
1660 | | * Get a transfer's bulk stream id.
1661 | | * |
1662 | | * Since version 1.0.19, \ref LIBUSB_API_VERSION >= 0x01000103 |
1663 | | * |
1664 | | * \param transfer the transfer to get the stream id for |
1665 | | * \returns the stream id for the transfer |
1666 | | */ |
1667 | | uint32_t API_EXPORTED libusb_transfer_get_stream_id( |
1668 | | struct libusb_transfer *transfer) |
1669 | 0 | { |
1670 | 0 | struct usbi_transfer *itransfer = |
1671 | 0 | LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer); |
1672 | |
|
1673 | 0 | return itransfer->stream_id; |
1674 | 0 | } |
1675 | | |
1676 | | /* Handle completion of a transfer (completion might be an error condition). |
1677 | | * This will invoke the user-supplied callback function, which may end up |
1678 | | * freeing the transfer. Therefore you cannot use the transfer structure |
1679 | | * after calling this function, and you should free all backend-specific |
1680 | | * data before calling it. |
1681 | | * Do not call this function with the usbi_transfer lock held. User-specified |
1682 | | * callback functions may attempt to directly resubmit the transfer, which |
1683 | | * will attempt to take the lock. */ |
1684 | | int usbi_handle_transfer_completion(struct usbi_transfer *itransfer, |
1685 | | enum libusb_transfer_status status) |
1686 | 0 | { |
1687 | 0 | struct libusb_transfer *transfer = |
1688 | 0 | USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); |
1689 | 0 | struct libusb_context *ctx = ITRANSFER_CTX(itransfer); |
1690 | 0 | uint8_t flags; |
1691 | 0 | int r; |
1692 | |
|
1693 | 0 | usbi_mutex_lock(&ctx->flying_transfers_lock); |
1694 | 0 | r = remove_from_flying_list(itransfer); |
1695 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
1696 | 0 | if (r < 0) |
1697 | 0 | usbi_err(ctx, "failed to set timer for next timeout"); |
1698 | |
|
1699 | 0 | usbi_mutex_lock(&itransfer->lock); |
1700 | 0 | itransfer->state_flags &= ~USBI_TRANSFER_IN_FLIGHT; |
1701 | 0 | usbi_mutex_unlock(&itransfer->lock); |
1702 | |
|
1703 | 0 | if (status == LIBUSB_TRANSFER_COMPLETED |
1704 | 0 | && transfer->flags & LIBUSB_TRANSFER_SHORT_NOT_OK) { |
1705 | 0 | int rqlen = transfer->length; |
1706 | 0 | if (transfer->type == LIBUSB_TRANSFER_TYPE_CONTROL) |
1707 | 0 | rqlen -= LIBUSB_CONTROL_SETUP_SIZE; |
1708 | 0 | if (rqlen != itransfer->transferred) { |
1709 | 0 | usbi_dbg(ctx, "interpreting short transfer as error"); |
1710 | 0 | status = LIBUSB_TRANSFER_ERROR; |
1711 | 0 | } |
1712 | 0 | } |
1713 | |
|
1714 | 0 | flags = transfer->flags; |
1715 | 0 | transfer->status = status; |
1716 | 0 | transfer->actual_length = itransfer->transferred; |
1717 | 0 | assert(transfer->actual_length >= 0); |
1718 | 0 | usbi_dbg(ctx, "transfer %p has callback %p", |
1719 | 0 | (void *) transfer, transfer->callback); |
1720 | 0 | if (transfer->callback) { |
1721 | 0 | libusb_lock_event_waiters (ctx); |
1722 | 0 | transfer->callback(transfer); |
1723 | 0 | libusb_unlock_event_waiters(ctx); |
1724 | 0 | } |
1725 | | /* transfer might have been freed by the above call, do not use from |
1726 | | * this point. */ |
1727 | 0 | if (flags & LIBUSB_TRANSFER_FREE_TRANSFER) |
1728 | 0 | libusb_free_transfer(transfer); |
1729 | 0 | return r; |
1730 | 0 | } |
1731 | | |
1732 | | /* Similar to usbi_handle_transfer_completion() but exclusively for transfers |
1733 | | * that were asynchronously cancelled. The same concerns w.r.t. freeing of |
1734 | | * transfers exist here. |
1735 | | * Do not call this function with the usbi_transfer lock held. User-specified |
1736 | | * callback functions may attempt to directly resubmit the transfer, which |
1737 | | * will attempt to take the lock. */ |
1738 | | int usbi_handle_transfer_cancellation(struct usbi_transfer *itransfer) |
1739 | 0 | { |
1740 | 0 | struct libusb_context *ctx = ITRANSFER_CTX(itransfer); |
1741 | 0 | uint8_t timed_out; |
1742 | |
|
1743 | 0 | usbi_mutex_lock(&ctx->flying_transfers_lock); |
1744 | 0 | timed_out = itransfer->timeout_flags & USBI_TRANSFER_TIMED_OUT; |
1745 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
1746 | | |
1747 | | /* if the URB was cancelled due to timeout, report timeout to the user */ |
1748 | 0 | if (timed_out) { |
1749 | 0 | usbi_dbg(ctx, "detected timeout cancellation"); |
1750 | 0 | return usbi_handle_transfer_completion(itransfer, LIBUSB_TRANSFER_TIMED_OUT); |
1751 | 0 | } |
1752 | | |
1753 | | /* otherwise it's a normal async cancel */
1754 | 0 | return usbi_handle_transfer_completion(itransfer, LIBUSB_TRANSFER_CANCELLED); |
1755 | 0 | } |
1756 | | |
1757 | | /* Add a completed transfer to the completed_transfers list of the |
1758 | | * context and signal the event. The backend's handle_transfer_completion() |
1759 | | * function will be called the next time an event handler runs. */ |
1760 | | void usbi_signal_transfer_completion(struct usbi_transfer *itransfer) |
1761 | 0 | { |
1762 | 0 | struct libusb_device *dev = itransfer->dev; |
1763 | |
|
1764 | 0 | if (dev) { |
1765 | 0 | struct libusb_context *ctx = DEVICE_CTX(dev); |
1766 | 0 | unsigned int event_flags; |
1767 | |
|
1768 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
1769 | 0 | event_flags = ctx->event_flags; |
1770 | 0 | ctx->event_flags |= USBI_EVENT_TRANSFER_COMPLETED; |
1771 | 0 | list_add_tail(&itransfer->completed_list, &ctx->completed_transfers); |
1772 | 0 | if (!event_flags) |
1773 | 0 | usbi_signal_event(&ctx->event); |
1774 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
1775 | 0 | } |
1776 | 0 | } |
1777 | | |
1778 | | /** \ingroup libusb_poll |
1779 | | * Attempt to acquire the event handling lock. This lock is used to ensure that |
1780 | | * only one thread is monitoring libusb event sources at any one time. |
1781 | | * |
1782 | | * You only need to use this lock if you are developing an application |
1783 | | * which calls poll() or select() on libusb's file descriptors directly. |
1784 | | * If you stick to libusb's event handling loop functions (e.g. |
1785 | | * libusb_handle_events()) then you do not need to be concerned with this |
1786 | | * locking. |
1787 | | * |
1788 | | * While holding this lock, you are trusted to actually be handling events. |
1789 | | * If you are no longer handling events, you must call libusb_unlock_events() |
1790 | | * as soon as possible. |
1791 | | * |
1792 | | * \param ctx the context to operate on, or NULL for the default context |
1793 | | * \returns 0 if the lock was obtained successfully |
1794 | | * \returns 1 if the lock was not obtained (i.e. another thread holds the lock) |
1795 | | * \ref libusb_mtasync |
1796 | | */ |
1797 | | int API_EXPORTED libusb_try_lock_events(libusb_context *ctx) |
1798 | 0 | { |
1799 | 0 | int r; |
1800 | 0 | unsigned int ru; |
1801 | |
|
1802 | 0 | ctx = usbi_get_context(ctx); |
1803 | | |
1804 | | /* is someone else waiting to close a device? if so, don't let this thread |
1805 | | * start event handling */ |
1806 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
1807 | 0 | ru = ctx->device_close; |
1808 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
1809 | 0 | if (ru) { |
1810 | 0 | usbi_dbg(ctx, "someone else is closing a device"); |
1811 | 0 | return 1; |
1812 | 0 | } |
1813 | | |
1814 | 0 | r = usbi_mutex_trylock(&ctx->events_lock); |
1815 | 0 | if (!r) |
1816 | 0 | return 1; |
1817 | | |
1818 | 0 | ctx->event_handler_active = 1; |
1819 | 0 | return 0; |
1820 | 0 | } |
1821 | | |
1822 | | /** \ingroup libusb_poll |
1823 | | * Acquire the event handling lock, blocking until successful acquisition if |
1824 | | * it is contended. This lock is used to ensure that only one thread is |
1825 | | * monitoring libusb event sources at any one time. |
1826 | | * |
1827 | | * You only need to use this lock if you are developing an application |
1828 | | * which calls poll() or select() on libusb's file descriptors directly. |
1829 | | * If you stick to libusb's event handling loop functions (e.g. |
1830 | | * libusb_handle_events()) then you do not need to be concerned with this |
1831 | | * locking. |
1832 | | * |
1833 | | * While holding this lock, you are trusted to actually be handling events. |
1834 | | * If you are no longer handling events, you must call libusb_unlock_events() |
1835 | | * as soon as possible. |
1836 | | * |
1837 | | * \param ctx the context to operate on, or NULL for the default context |
1838 | | * \ref libusb_mtasync |
1839 | | */ |
1840 | | void API_EXPORTED libusb_lock_events(libusb_context *ctx) |
1841 | 0 | { |
1842 | 0 | ctx = usbi_get_context(ctx); |
1843 | 0 | usbi_mutex_lock(&ctx->events_lock); |
1844 | 0 | ctx->event_handler_active = 1; |
1845 | 0 | } |
1846 | | |
1847 | | /** \ingroup libusb_poll |
1848 | | * Release the lock previously acquired with libusb_try_lock_events() or |
1849 | | * libusb_lock_events(). Releasing this lock will wake up any threads blocked |
1850 | | * on libusb_wait_for_event(). |
1851 | | * |
1852 | | * \param ctx the context to operate on, or NULL for the default context |
1853 | | * \ref libusb_mtasync |
1854 | | */ |
1855 | | void API_EXPORTED libusb_unlock_events(libusb_context *ctx) |
1856 | 0 | { |
1857 | 0 | ctx = usbi_get_context(ctx); |
1858 | 0 | ctx->event_handler_active = 0; |
1859 | 0 | usbi_mutex_unlock(&ctx->events_lock); |
1860 | | |
1861 | | /* FIXME: perhaps we should be a bit more efficient by not broadcasting |
1862 | | * the availability of the events lock when we are modifying pollfds |
1863 | | * (check ctx->device_close)? */ |
1864 | 0 | usbi_mutex_lock(&ctx->event_waiters_lock); |
1865 | 0 | usbi_cond_broadcast(&ctx->event_waiters_cond); |
1866 | 0 | usbi_mutex_unlock(&ctx->event_waiters_lock); |
1867 | 0 | } |
1868 | | |
1869 | | /** \ingroup libusb_poll |
1870 | | * Determine if it is still OK for this thread to be doing event handling. |
1871 | | * |
1872 | | * Sometimes, libusb needs to temporarily pause all event handlers, and this |
1873 | | * is the function you should use before polling file descriptors to see if |
1874 | | * this is the case. |
1875 | | * |
1876 | | * If this function instructs your thread to give up the events lock, you |
1877 | | * should just continue the usual logic that is documented in \ref libusb_mtasync. |
1878 | | * On the next iteration, your thread will fail to obtain the events lock, |
1879 | | * and will hence become an event waiter. |
1880 | | * |
1881 | | * This function should be called while the events lock is held: you don't |
1882 | | * need to worry about the results of this function if your thread is not |
1883 | | * the current event handler. |
1884 | | * |
1885 | | * \param ctx the context to operate on, or NULL for the default context |
1886 | | * \returns 1 if event handling can start or continue |
1887 | | * \returns 0 if this thread must give up the events lock |
1888 | | * \ref fullstory "Multi-threaded I/O: the full story" |
1889 | | */ |
1890 | | int API_EXPORTED libusb_event_handling_ok(libusb_context *ctx) |
1891 | 0 | { |
1892 | 0 | unsigned int r; |
1893 | |
|
1894 | 0 | ctx = usbi_get_context(ctx); |
1895 | | |
1896 | | /* is someone else waiting to close a device? if so, don't let this thread |
1897 | | * continue event handling */ |
1898 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
1899 | 0 | r = ctx->device_close; |
1900 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
1901 | 0 | if (r) { |
1902 | 0 | usbi_dbg(ctx, "someone else is closing a device"); |
1903 | 0 | return 0; |
1904 | 0 | } |
1905 | | |
1906 | 0 | return 1; |
1907 | 0 | } |
1908 | | |
1909 | | |
1910 | | /** \ingroup libusb_poll |
1911 | | * Determine if an active thread is handling events (i.e. if anyone is holding |
1912 | | * the event handling lock). |
1913 | | * |
1914 | | * \param ctx the context to operate on, or NULL for the default context |
1915 | | * \returns 1 if a thread is handling events |
1916 | | * \returns 0 if there are no threads currently handling events |
1917 | | * \ref libusb_mtasync |
1918 | | */ |
1919 | | int API_EXPORTED libusb_event_handler_active(libusb_context *ctx) |
1920 | 0 | { |
1921 | 0 | unsigned int r; |
1922 | |
|
1923 | 0 | ctx = usbi_get_context(ctx); |
1924 | | |
1925 | | /* is someone else waiting to close a device? if so, don't let this thread |
1926 | | * start event handling -- indicate that event handling is happening */ |
1927 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
1928 | 0 | r = ctx->device_close; |
1929 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
1930 | 0 | if (r) { |
1931 | 0 | usbi_dbg(ctx, "someone else is closing a device"); |
1932 | 0 | return 1; |
1933 | 0 | } |
1934 | | |
1935 | 0 | return ctx->event_handler_active; |
1936 | 0 | } |
1937 | | |
1938 | | /** \ingroup libusb_poll |
1939 | | * Interrupt any active thread that is handling events. This is mainly useful |
1940 | | * for interrupting a dedicated event handling thread when an application |
1941 | | * wishes to call libusb_exit(). |
1942 | | * |
1943 | | * Since version 1.0.21, \ref LIBUSB_API_VERSION >= 0x01000105 |
1944 | | * |
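 | | * For example, an application with a dedicated event thread might shut down
 | | * as sketched below; the run flag and event_thread are assumed application
 | | * state, not part of libusb:
 | | \code
 | | // dedicated event thread
 | | while (run)
 | |     libusb_handle_events(ctx);
 | |
 | | // main thread, at shutdown
 | | run = 0;
 | | libusb_interrupt_event_handler(ctx);
 | | pthread_join(event_thread, NULL);
 | | libusb_exit(ctx);
 | | \endcode
 | | *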
1945 | | * \param ctx the context to operate on, or NULL for the default context |
1946 | | * \ref libusb_mtasync |
1947 | | */ |
1948 | | void API_EXPORTED libusb_interrupt_event_handler(libusb_context *ctx) |
1949 | 0 | { |
1950 | 0 | unsigned int event_flags; |
1951 | |
|
1952 | 0 | usbi_dbg(ctx, " "); |
1953 | |
|
1954 | 0 | ctx = usbi_get_context(ctx); |
1955 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
1956 | |
|
1957 | 0 | event_flags = ctx->event_flags; |
1958 | 0 | ctx->event_flags |= USBI_EVENT_USER_INTERRUPT; |
1959 | 0 | if (!event_flags) |
1960 | 0 | usbi_signal_event(&ctx->event); |
1961 | |
|
1962 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
1963 | 0 | } |
1964 | | |
1965 | | /** \ingroup libusb_poll |
1966 | | * Acquire the event waiters lock. This lock is designed to be obtained under |
1967 | | * the situation where you want to be aware when events are completed, but |
1968 | | * some other thread is event handling so calling libusb_handle_events() is not |
1969 | | * allowed. |
1970 | | * |
1971 | | * You then obtain this lock, re-check that another thread is still handling |
1972 | | * events, then call libusb_wait_for_event(). |
1973 | | * |
1974 | | * You only need to use this lock if you are developing an application |
1975 | | * which calls poll() or select() on libusb's file descriptors directly, |
1976 | | * <b>and</b> may potentially be handling events from 2 threads simultaneously. |
1977 | | * If you stick to libusb's event handling loop functions (e.g. |
1978 | | * libusb_handle_events()) then you do not need to be concerned with this |
1979 | | * locking. |
1980 | | * |
1981 | | * \param ctx the context to operate on, or NULL for the default context |
1982 | | * \ref libusb_mtasync |
1983 | | */ |
1984 | | void API_EXPORTED libusb_lock_event_waiters(libusb_context *ctx) |
1985 | 0 | { |
1986 | 0 | ctx = usbi_get_context(ctx); |
1987 | 0 | usbi_mutex_lock(&ctx->event_waiters_lock); |
1988 | 0 | } |
1989 | | |
1990 | | /** \ingroup libusb_poll |
1991 | | * Release the event waiters lock. |
1992 | | * \param ctx the context to operate on, or NULL for the default context |
1993 | | * \ref libusb_mtasync |
1994 | | */ |
1995 | | void API_EXPORTED libusb_unlock_event_waiters(libusb_context *ctx) |
1996 | 0 | { |
1997 | 0 | ctx = usbi_get_context(ctx); |
1998 | 0 | usbi_mutex_unlock(&ctx->event_waiters_lock); |
1999 | 0 | } |
2000 | | |
2001 | | /** \ingroup libusb_poll |
2002 | | * Wait for another thread to signal completion of an event. Must be called |
2003 | | * with the event waiters lock held, see libusb_lock_event_waiters(). |
2004 | | * |
2005 | | * This function will block until any of the following conditions are met: |
2006 | | * -# The timeout expires |
2007 | | * -# A transfer completes |
2008 | | * -# A thread releases the event handling lock through libusb_unlock_events() |
2009 | | * |
2010 | | * Condition 1 is obvious. Condition 2 unblocks your thread <em>after</em> |
2011 | | * the callback for the transfer has completed. Condition 3 is important |
2012 | | * because it means that the thread that was previously handling events is no |
2013 | | * longer doing so, so if any events are to complete, another thread needs to |
2014 | | * step up and start event handling. |
2015 | | * |
2016 | | * This function releases the event waiters lock before putting your thread |
2017 | | * to sleep, and reacquires the lock as it is being woken up. |
2018 | | * |
2019 | | * \param ctx the context to operate on, or NULL for the default context |
2020 | | * \param tv maximum timeout for this blocking function. A NULL value |
2021 | | * indicates unlimited timeout. |
2022 | | * \returns 0 after a transfer completes or another thread stops event handling |
2023 | | * \returns 1 if the timeout expired |
2024 | | * \returns \ref LIBUSB_ERROR_INVALID_PARAM if timeval is invalid |
2025 | | * \ref libusb_mtasync |
2026 | | */ |
2027 | | int API_EXPORTED libusb_wait_for_event(libusb_context *ctx, struct timeval *tv) |
2028 | 0 | { |
2029 | 0 | int r; |
2030 | |
|
2031 | 0 | ctx = usbi_get_context(ctx); |
2032 | 0 | if (!tv) { |
2033 | 0 | usbi_cond_wait(&ctx->event_waiters_cond, &ctx->event_waiters_lock); |
2034 | 0 | return 0; |
2035 | 0 | } |
2036 | | |
2037 | 0 | if (!TIMEVAL_IS_VALID(tv)) |
2038 | 0 | return LIBUSB_ERROR_INVALID_PARAM; |
2039 | | |
2040 | 0 | r = usbi_cond_timedwait(&ctx->event_waiters_cond, |
2041 | 0 | &ctx->event_waiters_lock, tv); |
2042 | 0 | if (r < 0) |
2043 | 0 | return r == LIBUSB_ERROR_TIMEOUT; |
2044 | | |
2045 | 0 | return 0; |
2046 | 0 | } |
2047 | | |
2048 | | /* NB: flying_transfers_lock must be held when calling this */ |
2049 | | static void handle_timeout(struct usbi_transfer *itransfer) |
2050 | 0 | { |
2051 | 0 | struct libusb_transfer *transfer = |
2052 | 0 | USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); |
2053 | 0 | int r; |
2054 | |
|
2055 | 0 | itransfer->timeout_flags |= USBI_TRANSFER_TIMEOUT_HANDLED; |
2056 | 0 | r = libusb_cancel_transfer(transfer); |
2057 | 0 | if (r == LIBUSB_SUCCESS) |
2058 | 0 | itransfer->timeout_flags |= USBI_TRANSFER_TIMED_OUT; |
2059 | 0 | else |
2060 | 0 | usbi_warn(TRANSFER_CTX(transfer), |
2061 | 0 | "async cancel failed %d", r); |
2062 | 0 | } |
2063 | | |
2064 | | /* NB: flying_transfers_lock must be held when calling this */ |
2065 | | static void handle_timeouts_locked(struct libusb_context *ctx) |
2066 | 0 | { |
2067 | 0 | struct timespec systime; |
2068 | 0 | struct usbi_transfer *itransfer; |
2069 | |
|
2070 | 0 | if (list_empty(&ctx->flying_transfers)) |
2071 | 0 | return; |
2072 | | |
2073 | | /* get current time */ |
2074 | 0 | usbi_get_monotonic_time(&systime); |
2075 | | |
2076 | | /* iterate through flying transfers list, finding all transfers that |
2077 | | * have expired timeouts */ |
2078 | 0 | for_each_transfer(ctx, itransfer) { |
2079 | 0 | struct timespec *cur_ts = &itransfer->timeout; |
2080 | | |
2081 | | /* if we've reached transfers of infinite timeout, we're all done */ |
2082 | 0 | if (!TIMESPEC_IS_SET(cur_ts)) |
2083 | 0 | return; |
2084 | | |
2085 | | /* ignore timeouts we've already handled */ |
2086 | 0 | if (itransfer->timeout_flags & (USBI_TRANSFER_TIMEOUT_HANDLED | USBI_TRANSFER_OS_HANDLES_TIMEOUT)) |
2087 | 0 | continue; |
2088 | | |
2089 | | /* if transfer has non-expired timeout, nothing more to do */ |
2090 | 0 | if (TIMESPEC_CMP(cur_ts, &systime, >)) |
2091 | 0 | return; |
2092 | | |
2093 | | /* otherwise, we've got an expired timeout to handle */ |
2094 | 0 | handle_timeout(itransfer); |
2095 | 0 | } |
2096 | 0 | } |
2097 | | |
2098 | | static void handle_timeouts(struct libusb_context *ctx) |
2099 | 0 | { |
2100 | 0 | ctx = usbi_get_context(ctx); |
2101 | 0 | usbi_mutex_lock(&ctx->flying_transfers_lock); |
2102 | 0 | handle_timeouts_locked(ctx); |
2103 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
2104 | 0 | } |
2105 | | |
2106 | | static int handle_event_trigger(struct libusb_context *ctx) |
2107 | 0 | { |
2108 | 0 | struct list_head hotplug_msgs; |
2109 | 0 | int hotplug_event = 0; |
2110 | 0 | int r = 0; |
2111 | |
|
2112 | 0 | usbi_dbg(ctx, "event triggered"); |
2113 | |
|
2114 | 0 | list_init(&hotplug_msgs); |
2115 | | |
2116 | | /* take the event data lock while processing events */
2117 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
2118 | | |
2119 | | /* check if someone modified the event sources */ |
2120 | 0 | if (ctx->event_flags & USBI_EVENT_EVENT_SOURCES_MODIFIED) |
2121 | 0 | usbi_dbg(ctx, "someone updated the event sources"); |
2122 | |
|
2123 | 0 | if (ctx->event_flags & USBI_EVENT_USER_INTERRUPT) { |
2124 | 0 | usbi_dbg(ctx, "someone purposefully interrupted"); |
2125 | 0 | ctx->event_flags &= ~USBI_EVENT_USER_INTERRUPT; |
2126 | 0 | } |
2127 | |
|
2128 | 0 | if (ctx->event_flags & USBI_EVENT_HOTPLUG_CB_DEREGISTERED) { |
2129 | 0 | usbi_dbg(ctx, "someone unregistered a hotplug cb"); |
2130 | 0 | ctx->event_flags &= ~USBI_EVENT_HOTPLUG_CB_DEREGISTERED; |
2131 | 0 | hotplug_event = 1; |
2132 | 0 | } |
2133 | | |
2134 | | /* check if someone is closing a device */ |
2135 | 0 | if (ctx->event_flags & USBI_EVENT_DEVICE_CLOSE) |
2136 | 0 | usbi_dbg(ctx, "someone is closing a device"); |
2137 | | |
2138 | | /* check for any pending hotplug messages */ |
2139 | 0 | if (ctx->event_flags & USBI_EVENT_HOTPLUG_MSG_PENDING) { |
2140 | 0 | usbi_dbg(ctx, "hotplug message received"); |
2141 | 0 | ctx->event_flags &= ~USBI_EVENT_HOTPLUG_MSG_PENDING; |
2142 | 0 | hotplug_event = 1; |
2143 | 0 | assert(!list_empty(&ctx->hotplug_msgs)); |
2144 | 0 | list_cut(&hotplug_msgs, &ctx->hotplug_msgs); |
2145 | 0 | } |
2146 | | |
2147 | | /* complete any pending transfers */ |
2148 | 0 | if (ctx->event_flags & USBI_EVENT_TRANSFER_COMPLETED) { |
2149 | 0 | struct usbi_transfer *itransfer, *tmp; |
2150 | 0 | struct list_head completed_transfers; |
2151 | |
|
2152 | 0 | assert(!list_empty(&ctx->completed_transfers)); |
2153 | 0 | list_cut(&completed_transfers, &ctx->completed_transfers); |
2154 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
2155 | |
|
2156 | 0 | __for_each_completed_transfer_safe(&completed_transfers, itransfer, tmp) { |
2157 | 0 | list_del(&itransfer->completed_list); |
2158 | 0 | r = usbi_backend.handle_transfer_completion(itransfer); |
2159 | 0 | if (r) { |
2160 | 0 | usbi_err(ctx, "backend handle_transfer_completion failed with error %d", r); |
2161 | 0 | break; |
2162 | 0 | } |
2163 | 0 | } |
2164 | |
|
2165 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
2166 | 0 | if (!list_empty(&completed_transfers)) { |
2167 | | /* an error occurred, put the remaining transfers back on the list */ |
2168 | 0 | list_splice_front(&completed_transfers, &ctx->completed_transfers); |
2169 | 0 | } else if (list_empty(&ctx->completed_transfers)) { |
2170 | 0 | ctx->event_flags &= ~USBI_EVENT_TRANSFER_COMPLETED; |
2171 | 0 | } |
2172 | 0 | } |
2173 | | |
2174 | | /* if no further pending events, clear the event */ |
2175 | 0 | if (!ctx->event_flags) |
2176 | 0 | usbi_clear_event(&ctx->event); |
2177 | |
|
2178 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
2179 | | |
2180 | | /* process the hotplug events, if any */ |
2181 | 0 | if (hotplug_event) |
2182 | 0 | usbi_hotplug_process(ctx, &hotplug_msgs); |
2183 | |
|
2184 | 0 | return r; |
2185 | 0 | } |
2186 | | |
2187 | | #ifdef HAVE_OS_TIMER |
2188 | | static int handle_timer_trigger(struct libusb_context *ctx) |
2189 | 0 | { |
2190 | 0 | int r; |
2191 | |
|
2192 | 0 | usbi_mutex_lock(&ctx->flying_transfers_lock); |
2193 | | |
2194 | | /* process the timeout that just happened */ |
2195 | 0 | handle_timeouts_locked(ctx); |
2196 | | |
2197 | | /* arm for next timeout */ |
2198 | 0 | r = arm_timer_for_next_timeout(ctx); |
2199 | |
|
2200 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
2201 | |
|
2202 | 0 | return r; |
2203 | 0 | } |
2204 | | #endif |
2205 | | |
2206 | | /* do the actual event handling. assumes that no other thread is concurrently |
2207 | | * doing the same thing. */ |
2208 | | static int handle_events(struct libusb_context *ctx, struct timeval *tv) |
2209 | 0 | { |
2210 | 0 | struct usbi_reported_events reported_events; |
2211 | 0 | int r, timeout_ms; |
2212 | | |
2213 | | /* prevent attempts to recursively handle events (e.g. calling into |
2214 | | * libusb_handle_events() from within a hotplug or transfer callback) */ |
2215 | 0 | if (usbi_handling_events(ctx)) |
2216 | 0 | return LIBUSB_ERROR_BUSY; |
2217 | | |
2218 | | /* only reallocate the event source data when the list of event sources has |
2219 | | * been modified since the last handle_events(), otherwise reuse them to |
2220 | | * save the additional overhead */ |
2221 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
2222 | 0 | if (ctx->event_flags & USBI_EVENT_EVENT_SOURCES_MODIFIED) { |
2223 | 0 | usbi_dbg(ctx, "event sources modified, reallocating event data"); |
2224 | | |
2225 | | /* free anything removed since we last ran */ |
2226 | 0 | cleanup_removed_event_sources(ctx); |
2227 | |
|
2228 | 0 | r = usbi_alloc_event_data(ctx); |
2229 | 0 | if (r) { |
2230 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
2231 | 0 | return r; |
2232 | 0 | } |
2233 | | |
2234 | | /* reset the flag now that we have the updated list */ |
2235 | 0 | ctx->event_flags &= ~USBI_EVENT_EVENT_SOURCES_MODIFIED; |
2236 | | |
2237 | | /* if no further pending events, clear the event so that we do |
2238 | | * not immediately return from the wait function */ |
2239 | 0 | if (!ctx->event_flags) |
2240 | 0 | usbi_clear_event(&ctx->event); |
2241 | 0 | } |
2242 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
2243 | |
|
2244 | 0 | timeout_ms = (int)(tv->tv_sec * 1000) + (tv->tv_usec / 1000); |
2245 | | |
2246 | | /* round up to next millisecond */ |
2247 | 0 | if (tv->tv_usec % 1000) |
2248 | 0 | timeout_ms++; |
2249 | |
2250 | 0 | reported_events.event_bits = 0; |
2251 | |
2252 | 0 | usbi_start_event_handling(ctx); |
2253 | |
2254 | 0 | r = usbi_wait_for_events(ctx, &reported_events, timeout_ms); |
2255 | 0 | if (r != LIBUSB_SUCCESS) { |
2256 | 0 | if (r == LIBUSB_ERROR_TIMEOUT) { |
2257 | 0 | handle_timeouts(ctx); |
2258 | 0 | r = LIBUSB_SUCCESS; |
2259 | 0 | } |
2260 | 0 | goto done; |
2261 | 0 | } |
2262 | | |
2263 | 0 | if (reported_events.event_triggered) { |
2264 | 0 | r = handle_event_trigger(ctx); |
2265 | 0 | if (r) { |
2266 | | /* return error code */ |
2267 | 0 | goto done; |
2268 | 0 | } |
2269 | 0 | } |
2270 | | |
2271 | 0 | #ifdef HAVE_OS_TIMER |
2272 | 0 | if (reported_events.timer_triggered) { |
2273 | 0 | r = handle_timer_trigger(ctx); |
2274 | 0 | if (r) { |
2275 | | /* return error code */ |
2276 | 0 | goto done; |
2277 | 0 | } |
2278 | 0 | } |
2279 | 0 | #endif |
2280 | | |
2281 | 0 | if (!reported_events.num_ready) |
2282 | 0 | goto done; |
2283 | | |
2284 | 0 | r = usbi_backend.handle_events(ctx, reported_events.event_data, |
2285 | 0 | reported_events.event_data_count, reported_events.num_ready); |
2286 | 0 | if (r) |
2287 | 0 | usbi_err(ctx, "backend handle_events failed with error %d", r); |
2288 | |
2289 | 0 | done: |
2290 | 0 | usbi_end_event_handling(ctx); |
2291 | 0 | return r; |
2292 | 0 | } |
2293 | | |
2294 | | /* returns the smallest of: |
2295 | | * 1. timeout of next URB |
2296 | | * 2. user-supplied timeout |
2297 | | * returns 1 if there is an already-expired timeout, otherwise returns 0 |
2298 | | * and populates out |
2299 | | */ |
2300 | | static int get_next_timeout(libusb_context *ctx, struct timeval *tv, |
2301 | | struct timeval *out) |
2302 | 0 | { |
2303 | 0 | struct timeval timeout; |
2304 | 0 | int r = libusb_get_next_timeout(ctx, &timeout); |
2305 | 0 | if (r) { |
2306 | | /* timeout already expired? */ |
2307 | 0 | if (!timerisset(&timeout)) |
2308 | 0 | return 1; |
2309 | | |
2310 | | /* choose the smallest of next URB timeout or user specified timeout */ |
2311 | 0 | if (timercmp(&timeout, tv, <)) |
2312 | 0 | *out = timeout; |
2313 | 0 | else |
2314 | 0 | *out = *tv; |
2315 | 0 | } else { |
2316 | 0 | *out = *tv; |
2317 | 0 | } |
2318 | 0 | return 0; |
2319 | 0 | } |
2320 | | |
2321 | | /** \ingroup libusb_poll |
2322 | | * Handle any pending events. |
2323 | | * |
2324 | | * libusb determines "pending events" by checking if any timeouts have expired |
2325 | | * and by checking the set of file descriptors for activity. |
2326 | | * |
2327 | | * If a zero timeval is passed, this function will handle any already-pending |
2328 | | * events and then immediately return in non-blocking style. |
2329 | | * |
2330 | | * If a non-zero timeval is passed and no events are currently pending, this |
2331 | | * function will block waiting for events to handle up until the specified |
2332 | | * timeout. If an event arrives or a signal is raised, this function will |
2333 | | * return early. |
2334 | | * |
2335 | | * If the parameter completed is not NULL then <em>after obtaining the event |
2336 | | * handling lock</em> this function will return immediately if the integer |
2337 | | * pointed to is not 0. This allows for race-free waiting for the completion
2338 | | * of a specific transfer. |
2339 | | * |
2340 | | * \param ctx the context to operate on, or NULL for the default context |
2341 | | * \param tv the maximum time to block waiting for events, or an all zero |
2342 | | * timeval struct for non-blocking mode |
2343 | | * \param completed pointer to completion integer to check, or NULL |
2344 | | * \returns 0 on success |
2345 | | * \returns \ref LIBUSB_ERROR_INVALID_PARAM if timeval is invalid |
2346 | | * \returns another LIBUSB_ERROR code on other failure |
2347 | | * \ref libusb_mtasync |
2348 | | */ |
2349 | | int API_EXPORTED libusb_handle_events_timeout_completed(libusb_context *ctx, |
2350 | | struct timeval *tv, int *completed) |
2351 | 0 | { |
2352 | 0 | int r; |
2353 | 0 | struct timeval poll_timeout; |
2354 | |
2355 | 0 | if (!TIMEVAL_IS_VALID(tv)) |
2356 | 0 | return LIBUSB_ERROR_INVALID_PARAM; |
2357 | | |
2358 | 0 | ctx = usbi_get_context(ctx); |
2359 | 0 | r = get_next_timeout(ctx, tv, &poll_timeout); |
2360 | 0 | if (r) { |
2361 | | /* timeout already expired */ |
2362 | 0 | handle_timeouts(ctx); |
2363 | 0 | return 0; |
2364 | 0 | } |
2365 | | |
2366 | 0 | retry: |
2367 | 0 | if (libusb_try_lock_events(ctx) == 0) { |
2368 | 0 | if (completed == NULL || !*completed) { |
2369 | | /* we obtained the event lock: do our own event handling */ |
2370 | 0 | usbi_dbg(ctx, "doing our own event handling"); |
2371 | 0 | r = handle_events(ctx, &poll_timeout); |
2372 | 0 | } |
2373 | 0 | libusb_unlock_events(ctx); |
2374 | 0 | return r; |
2375 | 0 | } |
2376 | | |
2377 | | /* another thread is doing event handling. wait for thread events that |
2378 | | * notify event completion. */ |
2379 | 0 | libusb_lock_event_waiters(ctx); |
2380 | |
2381 | 0 | if (completed && *completed) |
2382 | 0 | goto already_done; |
2383 | | |
2384 | 0 | if (!libusb_event_handler_active(ctx)) { |
2385 | | /* we hit a race: whoever was event handling earlier finished in the |
2386 | | * time it took us to reach this point. try the cycle again. */ |
2387 | 0 | libusb_unlock_event_waiters(ctx); |
2388 | 0 | usbi_dbg(ctx, "event handler was active but went away, retrying"); |
2389 | 0 | goto retry; |
2390 | 0 | } |
2391 | | |
2392 | 0 | usbi_dbg(ctx, "another thread is doing event handling"); |
2393 | 0 | r = libusb_wait_for_event(ctx, &poll_timeout); |
2394 | |
2395 | 0 | already_done: |
2396 | 0 | libusb_unlock_event_waiters(ctx); |
2397 | |
2398 | 0 | if (r < 0) |
2399 | 0 | return r; |
2400 | 0 | else if (r == 1) |
2401 | 0 | handle_timeouts(ctx); |
2402 | 0 | return 0; |
2403 | 0 | } |
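/* Illustrative sketch (not part of this file): the typical way to use the
 * completed parameter is to have the transfer callback set an integer flag
 * and keep handling events until it becomes non-zero. The names my_cb and
 * my_transfer are hypothetical.
 *
 *     static void my_cb(struct libusb_transfer *transfer)
 *     {
 *         int *completed = transfer->user_data;
 *         *completed = 1;
 *     }
 *
 *     int completed = 0;
 *     struct timeval tv = { 1, 0 };    // wake up at least once per second
 *
 *     my_transfer->callback = my_cb;
 *     my_transfer->user_data = &completed;
 *     libusb_submit_transfer(my_transfer);
 *     while (!completed) {
 *         int rc = libusb_handle_events_timeout_completed(ctx, &tv, &completed);
 *         if (rc < 0 && rc != LIBUSB_ERROR_INTERRUPTED)
 *             break;    // a real program would cancel the transfer here
 *     }
 *
 * The same loop also works with libusb_handle_events_completed() if its
 * hard-coded 60 second wakeup interval is acceptable. */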
2404 | | |
2405 | | /** \ingroup libusb_poll |
2406 | | * Handle any pending events |
2407 | | * |
2408 | | * Like libusb_handle_events_timeout_completed(), but without the completed |
2409 | | * parameter; calling this function is equivalent to calling
2410 | | * libusb_handle_events_timeout_completed() with a NULL completed parameter. |
2411 | | * |
2412 | | * This function is kept primarily for backwards compatibility. |
2413 | | * All new code should call libusb_handle_events_completed() or |
2414 | | * libusb_handle_events_timeout_completed() to avoid race conditions. |
2415 | | * |
2416 | | * \param ctx the context to operate on, or NULL for the default context |
2417 | | * \param tv the maximum time to block waiting for events, or an all zero |
2418 | | * timeval struct for non-blocking mode |
2419 | | * \returns 0 on success, or a LIBUSB_ERROR code on failure |
2420 | | */ |
2421 | | int API_EXPORTED libusb_handle_events_timeout(libusb_context *ctx, |
2422 | | struct timeval *tv) |
2423 | 0 | { |
2424 | 0 | return libusb_handle_events_timeout_completed(ctx, tv, NULL); |
2425 | 0 | } |
2426 | | |
2427 | | /** \ingroup libusb_poll |
2428 | | * Handle any pending events in blocking mode. There is currently a timeout |
2429 | | * hard-coded at 60 seconds but we plan to make it unlimited in future. For |
2430 | | * finer control over whether this function is blocking or non-blocking, or |
2431 | | * for control over the timeout, use libusb_handle_events_timeout_completed() |
2432 | | * instead. |
2433 | | * |
2434 | | * This function is kept primarily for backwards compatibility. |
2435 | | * All new code should call libusb_handle_events_completed() or |
2436 | | * libusb_handle_events_timeout_completed() to avoid race conditions. |
2437 | | * |
2438 | | * \param ctx the context to operate on, or NULL for the default context |
2439 | | * \returns 0 on success, or a LIBUSB_ERROR code on failure |
2440 | | */ |
2441 | | int API_EXPORTED libusb_handle_events(libusb_context *ctx) |
2442 | 0 | { |
2443 | 0 | struct timeval tv; |
2444 | 0 | tv.tv_sec = 60; |
2445 | 0 | tv.tv_usec = 0; |
2446 | 0 | return libusb_handle_events_timeout_completed(ctx, &tv, NULL); |
2447 | 0 | } |
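/* Illustrative sketch (not part of this file): a dedicated event-handling
 * thread commonly just loops on libusb_handle_events() until the application
 * asks it to stop. event_thread_run is a hypothetical application flag.
 *
 *     static volatile int event_thread_run = 1;
 *
 *     static void *event_thread_func(void *arg)
 *     {
 *         libusb_context *ctx = arg;
 *         while (event_thread_run)
 *             libusb_handle_events(ctx);
 *         return NULL;
 *     }
 *
 * Because of the hard-coded 60 second timeout, the thread may take up to a
 * minute to notice that event_thread_run was cleared; a shorter timeout via
 * libusb_handle_events_timeout_completed() avoids that delay. */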
2448 | | |
2449 | | /** \ingroup libusb_poll |
2450 | | * Handle any pending events in blocking mode. |
2451 | | * |
2452 | | * Like libusb_handle_events(), with the addition of a completed parameter |
2453 | | * to allow for race-free waiting for the completion of a specific transfer.
2454 | | * |
2455 | | * See libusb_handle_events_timeout_completed() for details on the completed |
2456 | | * parameter. |
2457 | | * |
2458 | | * \param ctx the context to operate on, or NULL for the default context |
2459 | | * \param completed pointer to completion integer to check, or NULL |
2460 | | * \returns 0 on success, or a LIBUSB_ERROR code on failure |
2461 | | * \ref libusb_mtasync |
2462 | | */ |
2463 | | int API_EXPORTED libusb_handle_events_completed(libusb_context *ctx, |
2464 | | int *completed) |
2465 | 0 | { |
2466 | 0 | struct timeval tv; |
2467 | 0 | tv.tv_sec = 60; |
2468 | 0 | tv.tv_usec = 0; |
2469 | 0 | return libusb_handle_events_timeout_completed(ctx, &tv, completed); |
2470 | 0 | } |
2471 | | |
2472 | | /** \ingroup libusb_poll |
2473 | | * Handle any pending events by polling file descriptors, without checking if |
2474 | | * any other threads are already doing so. Must be called with the event lock |
2475 | | * held, see libusb_lock_events(). |
2476 | | * |
2477 | | * This function is designed to be called in the situation where you have
2478 | | * taken the event lock and are calling poll()/select() directly on libusb's
2479 | | * file descriptors (as opposed to using libusb_handle_events() or similar).
2480 | | * When you detect events on libusb's descriptors, you call this function
2481 | | * with a zero timeout value (while still holding the event lock).
2482 | | * |
2483 | | * \param ctx the context to operate on, or NULL for the default context |
2484 | | * \param tv the maximum time to block waiting for events, or zero for |
2485 | | * non-blocking mode |
2486 | | * \returns 0 on success |
2487 | | * \returns \ref LIBUSB_ERROR_INVALID_PARAM if timeval is invalid |
2488 | | * \returns another LIBUSB_ERROR code on other failure |
2489 | | * \ref libusb_mtasync |
2490 | | */ |
2491 | | int API_EXPORTED libusb_handle_events_locked(libusb_context *ctx, |
2492 | | struct timeval *tv) |
2493 | 0 | { |
2494 | 0 | int r; |
2495 | 0 | struct timeval poll_timeout; |
2496 | |
2497 | 0 | if (!TIMEVAL_IS_VALID(tv)) |
2498 | 0 | return LIBUSB_ERROR_INVALID_PARAM; |
2499 | | |
2500 | 0 | ctx = usbi_get_context(ctx); |
2501 | 0 | r = get_next_timeout(ctx, tv, &poll_timeout); |
2502 | 0 | if (r) { |
2503 | | /* timeout already expired */ |
2504 | 0 | handle_timeouts(ctx); |
2505 | 0 | return 0; |
2506 | 0 | } |
2507 | | |
2508 | 0 | return handle_events(ctx, &poll_timeout); |
2509 | 0 | } |
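/* Illustrative sketch (not part of this file): a main loop that polls
 * libusb's file descriptors itself takes the event lock, waits for activity
 * and then calls this function with a zero timeout. fds and nfds are assumed
 * to have been built from libusb_get_pollfds() (see that function below).
 *
 *     struct timeval zero_tv = { 0, 0 };
 *
 *     libusb_lock_events(ctx);
 *     while (libusb_event_handling_ok(ctx)) {
 *         if (poll(fds, nfds, -1) > 0)
 *             libusb_handle_events_locked(ctx, &zero_tv);
 *     }
 *     libusb_unlock_events(ctx);
 *
 * When libusb_event_handling_ok() returns 0, a real application would drop
 * back to libusb_handle_events() or retry the lock, as described in
 * \ref libusb_mtasync. */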
2510 | | |
2511 | | /** \ingroup libusb_poll |
2512 | | * Determines whether your application must apply special timing considerations |
2513 | | * when monitoring libusb's file descriptors. |
2514 | | * |
2515 | | * This function is only useful for applications which retrieve and poll |
2516 | | * libusb's file descriptors in their own main loop (\ref libusb_pollmain). |
2517 | | * |
2518 | | * Ordinarily, libusb's event handler needs to be called into at specific |
2519 | | * moments in time (in addition to times when there is activity on the file |
2520 | | * descriptor set). The usual approach is to use libusb_get_next_timeout() |
2521 | | * to learn about when the next timeout occurs, and to adjust your |
2522 | | * poll()/select() timeout accordingly so that you can make a call into the |
2523 | | * library at that time. |
2524 | | * |
2525 | | * Some platforms supported by libusb do not come with this baggage - any |
2526 | | * events relevant to timing will be represented by activity on the file |
2527 | | * descriptor set, and libusb_get_next_timeout() will always return 0. |
2528 | | * This function allows you to detect whether you are running on such a |
2529 | | * platform. |
2530 | | * |
2531 | | * Since v1.0.5. |
2532 | | * |
2533 | | * \param ctx the context to operate on, or NULL for the default context |
2534 | | * \returns 0 if you must call into libusb at times determined by |
2535 | | * libusb_get_next_timeout(), or 1 if all timeout events are handled internally |
2536 | | * or through regular activity on the file descriptors. |
2537 | | * \ref libusb_pollmain "Polling libusb file descriptors for event handling" |
2538 | | */ |
2539 | | int API_EXPORTED libusb_pollfds_handle_timeouts(libusb_context *ctx) |
2540 | 0 | { |
2541 | 0 | ctx = usbi_get_context(ctx); |
2542 | 0 | return usbi_using_timer(ctx); |
2543 | 0 | } |
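/* Illustrative sketch (not part of this file): applications with their own
 * poll() loop can check this once and skip the timeout bookkeeping entirely
 * on platforms where it is not needed.
 *
 *     int own_timeout_handling = !libusb_pollfds_handle_timeouts(ctx);
 *     // if own_timeout_handling is non-zero, consult
 *     // libusb_get_next_timeout() before every poll(), as sketched after
 *     // that function below
 */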
2544 | | |
2545 | | /** \ingroup libusb_poll |
2546 | | * Determine the next internal timeout that libusb needs to handle. You only |
2547 | | * need to use this function if you are calling poll() or select() or similar |
2548 | | * on libusb's file descriptors yourself - you do not need to use it if you |
2549 | | * are calling libusb_handle_events() or a variant directly. |
2550 | | * |
2551 | | * You should call this function in your main loop in order to determine how |
2552 | | * long to wait for select() or poll() to return results. libusb needs to be |
2553 | | * called into at this timeout, so you should use it as an upper bound on |
2554 | | * your select() or poll() call. |
2555 | | * |
2556 | | * When the timeout has expired, call into libusb_handle_events_timeout() |
2557 | | * (perhaps in non-blocking mode) so that libusb can handle the timeout. |
2558 | | * |
2559 | | * This function may return 1 (success) and an all-zero timeval. If this is |
2560 | | * the case, it indicates that libusb has a timeout that has already expired |
2561 | | * so you should call libusb_handle_events_timeout() or similar immediately. |
2562 | | * A return code of 0 indicates that there are no pending timeouts. |
2563 | | * |
2564 | | * On some platforms, this function will always return 0 (no pending
2565 | | * timeouts). See \ref polltime. |
2566 | | * |
2567 | | * \param ctx the context to operate on, or NULL for the default context |
2568 | | * \param tv output location for a relative time against the current |
2569 | | * clock at which libusb must be called into in order to process timeout events
2570 | | * \returns 0 if there are no pending timeouts, 1 if a timeout was returned, |
2571 | | * or \ref LIBUSB_ERROR_OTHER on failure |
2572 | | */ |
2573 | | int API_EXPORTED libusb_get_next_timeout(libusb_context *ctx, |
2574 | | struct timeval *tv) |
2575 | 0 | { |
2576 | 0 | struct usbi_transfer *itransfer; |
2577 | 0 | struct timespec systime; |
2578 | 0 | struct timespec next_timeout = { 0, 0 }; |
2579 | |
2580 | 0 | ctx = usbi_get_context(ctx); |
2581 | 0 | if (usbi_using_timer(ctx)) |
2582 | 0 | return 0; |
2583 | | |
2584 | 0 | usbi_mutex_lock(&ctx->flying_transfers_lock); |
2585 | 0 | if (list_empty(&ctx->flying_transfers)) { |
2586 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
2587 | 0 | usbi_dbg(ctx, "no URBs, no timeout!"); |
2588 | 0 | return 0; |
2589 | 0 | } |
2590 | | |
2591 | | /* find next transfer which hasn't already been processed as timed out */ |
2592 | 0 | for_each_transfer(ctx, itransfer) { |
2593 | 0 | if (itransfer->timeout_flags & (USBI_TRANSFER_TIMEOUT_HANDLED | USBI_TRANSFER_OS_HANDLES_TIMEOUT)) |
2594 | 0 | continue; |
2595 | | |
2596 | | /* if we've reached transfers of infinite timeout, we're done looking */ |
2597 | 0 | if (!TIMESPEC_IS_SET(&itransfer->timeout)) |
2598 | 0 | break; |
2599 | | |
2600 | 0 | next_timeout = itransfer->timeout; |
2601 | 0 | break; |
2602 | 0 | } |
2603 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
2604 | |
2605 | 0 | if (!TIMESPEC_IS_SET(&next_timeout)) { |
2606 | 0 | usbi_dbg(ctx, "no URB with timeout or all handled by OS; no timeout!"); |
2607 | 0 | return 0; |
2608 | 0 | } |
2609 | | |
2610 | 0 | usbi_get_monotonic_time(&systime); |
2611 | |
2612 | 0 | if (!TIMESPEC_CMP(&systime, &next_timeout, <)) { |
2613 | 0 | usbi_dbg(ctx, "first timeout already expired"); |
2614 | 0 | timerclear(tv); |
2615 | 0 | } else { |
2616 | 0 | TIMESPEC_SUB(&next_timeout, &systime, &next_timeout); |
2617 | 0 | TIMESPEC_TO_TIMEVAL(tv, &next_timeout); |
2618 | 0 | usbi_dbg(ctx, "next timeout in %ld.%06lds", (long)tv->tv_sec, (long)tv->tv_usec); |
2619 | 0 | } |
2620 | |
2621 | 0 | return 1; |
2622 | 0 | } |
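/* Illustrative sketch (not part of this file): deriving a poll() timeout from
 * the next libusb timeout. my_timeout_ms, fds and nfds are hypothetical
 * application state.
 *
 *     struct timeval tv;
 *     int timeout_ms = my_timeout_ms;
 *
 *     if (libusb_get_next_timeout(ctx, &tv) == 1) {
 *         int libusb_ms = (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
 *         if (libusb_ms < timeout_ms)
 *             timeout_ms = libusb_ms;    // libusb needs servicing sooner
 *     }
 *     if (poll(fds, nfds, timeout_ms) == 0) {
 *         // poll() timed out: let libusb process any expired transfer timeouts
 *         struct timeval zero_tv = { 0, 0 };
 *         libusb_handle_events_timeout(ctx, &zero_tv);
 *     }
 */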
2623 | | |
2624 | | /** \ingroup libusb_poll |
2625 | | * Register notification functions for file descriptor additions/removals. |
2626 | | * These functions will be invoked for every new or removed file descriptor |
2627 | | * that libusb uses as an event source. |
2628 | | * |
2629 | | * To remove notifiers, pass NULL values for the function pointers. |
2630 | | * |
2631 | | * Note that file descriptors may have been added even before you register |
2632 | | * these notifiers (e.g. at libusb_init_context() time). |
2633 | | * |
2634 | | * Additionally, note that the removal notifier may be called during |
2635 | | * libusb_exit() (e.g. when it is closing file descriptors that were opened |
2636 | | * and added to the poll set at libusb_init_context() time). If you don't want this, |
2637 | | * remove the notifiers immediately before calling libusb_exit(). |
2638 | | * |
2639 | | * \param ctx the context to operate on, or NULL for the default context |
2640 | | * \param added_cb pointer to function for addition notifications |
2641 | | * \param removed_cb pointer to function for removal notifications |
2642 | | * \param user_data User data to be passed back to callbacks (useful for |
2643 | | * passing context information) |
2644 | | */ |
2645 | | void API_EXPORTED libusb_set_pollfd_notifiers(libusb_context *ctx, |
2646 | | libusb_pollfd_added_cb added_cb, libusb_pollfd_removed_cb removed_cb, |
2647 | | void *user_data) |
2648 | 0 | { |
2649 | 0 | #if !defined(PLATFORM_WINDOWS) |
2650 | 0 | ctx = usbi_get_context(ctx); |
2651 | 0 | ctx->fd_added_cb = added_cb; |
2652 | 0 | ctx->fd_removed_cb = removed_cb; |
2653 | 0 | ctx->fd_cb_user_data = user_data; |
2654 | | #else |
2655 | | usbi_err(ctx, "external polling of libusb's internal event sources " \ |
2656 | | "is not yet supported on Windows"); |
2657 | | UNUSED(added_cb); |
2658 | | UNUSED(removed_cb); |
2659 | | UNUSED(user_data); |
2660 | | #endif |
2661 | 0 | } |
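/* Illustrative sketch (not part of this file): callbacks that mirror libusb's
 * file descriptors into an application epoll instance. struct my_app and its
 * epoll_fd member are hypothetical state passed through user_data.
 *
 *     static void fd_added(int fd, short events, void *user_data)
 *     {
 *         struct my_app *app = user_data;
 *         struct epoll_event ev = { 0 };
 *
 *         ev.events = (events & POLLIN ? EPOLLIN : 0) |
 *                     (events & POLLOUT ? EPOLLOUT : 0);
 *         ev.data.fd = fd;
 *         epoll_ctl(app->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
 *     }
 *
 *     static void fd_removed(int fd, void *user_data)
 *     {
 *         struct my_app *app = user_data;
 *         epoll_ctl(app->epoll_fd, EPOLL_CTL_DEL, fd, NULL);
 *     }
 *
 *     libusb_set_pollfd_notifiers(ctx, fd_added, fd_removed, app);
 *
 * Remember that descriptors may already exist before registration; walk
 * libusb_get_pollfds() once after this call to pick those up. */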
2662 | | |
2663 | | /* |
2664 | | * Interrupt the iteration of the event handling thread, so that it picks |
2665 | | * up the event source change. Callers of this function must hold the event_data_lock. |
2666 | | */ |
2667 | | static void usbi_event_source_notification(struct libusb_context *ctx) |
2668 | 0 | { |
2669 | 0 | unsigned int event_flags; |
2670 | | |
2671 | | /* Record that there is a new poll fd. |
2672 | | * Only signal an event if there are no prior pending events. */ |
2673 | 0 | event_flags = ctx->event_flags; |
2674 | 0 | ctx->event_flags |= USBI_EVENT_EVENT_SOURCES_MODIFIED; |
2675 | 0 | if (!event_flags) |
2676 | 0 | usbi_signal_event(&ctx->event); |
2677 | 0 | } |
2678 | | |
2679 | | /* Add an event source to the list of event sources to be monitored. |
2680 | | * poll_events should be specified as a bitmask of events passed to poll(), e.g. |
2681 | | * POLLIN and/or POLLOUT. */ |
2682 | | int usbi_add_event_source(struct libusb_context *ctx, usbi_os_handle_t os_handle, short poll_events) |
2683 | 0 | { |
2684 | 0 | struct usbi_event_source *ievent_source = malloc(sizeof(*ievent_source)); |
2685 | |
2686 | 0 | if (!ievent_source) |
2687 | 0 | return LIBUSB_ERROR_NO_MEM; |
2688 | | |
2689 | 0 | usbi_dbg(ctx, "add " USBI_OS_HANDLE_FORMAT_STRING " events %d", os_handle, poll_events); |
2690 | 0 | ievent_source->data.os_handle = os_handle; |
2691 | 0 | ievent_source->data.poll_events = poll_events; |
2692 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
2693 | 0 | list_add_tail(&ievent_source->list, &ctx->event_sources); |
2694 | 0 | usbi_event_source_notification(ctx); |
2695 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
2696 | |
2697 | 0 | #if !defined(PLATFORM_WINDOWS) |
2698 | 0 | if (ctx->fd_added_cb) |
2699 | 0 | ctx->fd_added_cb(os_handle, poll_events, ctx->fd_cb_user_data); |
2700 | 0 | #endif |
2701 | |
2702 | 0 | return 0; |
2703 | 0 | } |
2704 | | |
2705 | | /* Remove an event source from the list of event sources to be monitored. */ |
2706 | | void usbi_remove_event_source(struct libusb_context *ctx, usbi_os_handle_t os_handle) |
2707 | 0 | { |
2708 | 0 | struct usbi_event_source *ievent_source; |
2709 | 0 | int found = 0; |
2710 | |
2711 | 0 | usbi_dbg(ctx, "remove " USBI_OS_HANDLE_FORMAT_STRING, os_handle); |
2712 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
2713 | 0 | for_each_event_source(ctx, ievent_source) { |
2714 | 0 | if (ievent_source->data.os_handle == os_handle) { |
2715 | 0 | found = 1; |
2716 | 0 | break; |
2717 | 0 | } |
2718 | 0 | } |
2719 | |
2720 | 0 | if (!found) { |
2721 | 0 | usbi_dbg(ctx, "couldn't find " USBI_OS_HANDLE_FORMAT_STRING " to remove", os_handle); |
2722 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
2723 | 0 | return; |
2724 | 0 | } |
2725 | | |
2726 | 0 | list_del(&ievent_source->list); |
2727 | 0 | list_add_tail(&ievent_source->list, &ctx->removed_event_sources); |
2728 | 0 | usbi_event_source_notification(ctx); |
2729 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
2730 | |
2731 | 0 | #if !defined(PLATFORM_WINDOWS) |
2732 | 0 | if (ctx->fd_removed_cb) |
2733 | 0 | ctx->fd_removed_cb(os_handle, ctx->fd_cb_user_data); |
2734 | 0 | #endif |
2735 | 0 | } |
2736 | | |
2737 | | /** \ingroup libusb_poll |
2738 | | * Retrieve a list of file descriptors that should be polled by your main loop |
2739 | | * as libusb event sources. |
2740 | | * |
2741 | | * The returned list is NULL-terminated and should be freed with libusb_free_pollfds() |
2742 | | * when done. The actual list contents must not be touched. |
2743 | | * |
2744 | | * As file descriptors are a Unix-specific concept, this function is not |
2745 | | * available on Windows and will always return NULL. |
2746 | | * |
2747 | | * \param ctx the context to operate on, or NULL for the default context |
2748 | | * \returns a NULL-terminated list of libusb_pollfd structures |
2749 | | * \returns NULL on error |
2750 | | * \returns NULL on platforms where the functionality is not available |
2751 | | */ |
2752 | | DEFAULT_VISIBILITY |
2753 | | const struct libusb_pollfd ** LIBUSB_CALL libusb_get_pollfds( |
2754 | | libusb_context *ctx) |
2755 | 0 | { |
2756 | 0 | #if !defined(PLATFORM_WINDOWS) |
2757 | 0 | struct libusb_pollfd **ret = NULL; |
2758 | 0 | struct usbi_event_source *ievent_source; |
2759 | 0 | size_t i; |
2760 | |
2761 | 0 | static_assert(sizeof(struct usbi_event_source_data) == sizeof(struct libusb_pollfd), |
2762 | 0 | "mismatch between usbi_event_source_data and libusb_pollfd sizes"); |
2763 | |
2764 | 0 | ctx = usbi_get_context(ctx); |
2765 | |
2766 | 0 | usbi_mutex_lock(&ctx->event_data_lock); |
2767 | |
2768 | 0 | i = 0; |
2769 | 0 | for_each_event_source(ctx, ievent_source) |
2770 | 0 | i++; |
2771 | |
2772 | 0 | ret = calloc(i + 1, sizeof(struct libusb_pollfd *)); |
2773 | 0 | if (!ret) |
2774 | 0 | goto out; |
2775 | | |
2776 | 0 | i = 0; |
2777 | 0 | for_each_event_source(ctx, ievent_source) |
2778 | 0 | ret[i++] = (struct libusb_pollfd *)ievent_source; |
2779 | |
2780 | 0 | out: |
2781 | 0 | usbi_mutex_unlock(&ctx->event_data_lock); |
2782 | 0 | return (const struct libusb_pollfd **)ret; |
2783 | | #else |
2784 | | usbi_err(ctx, "external polling of libusb's internal event sources " \ |
2785 | | "is not yet supported on Windows"); |
2786 | | return NULL; |
2787 | | #endif |
2788 | 0 | } |
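/* Illustrative sketch (not part of this file): converting the returned list
 * into a pollfd array for poll(). Error handling is kept minimal on purpose.
 *
 *     const struct libusb_pollfd **usb_fds = libusb_get_pollfds(ctx);
 *     struct pollfd fds[32];
 *     nfds_t nfds = 0;
 *
 *     if (usb_fds) {
 *         for (int i = 0; usb_fds[i] != NULL && nfds < 32; i++) {
 *             fds[nfds].fd = usb_fds[i]->fd;
 *             fds[nfds].events = usb_fds[i]->events;
 *             fds[nfds].revents = 0;
 *             nfds++;
 *         }
 *         libusb_free_pollfds(usb_fds);
 *     }
 *
 * The copied values stay valid only until libusb adds or removes an event
 * source; use libusb_set_pollfd_notifiers() to be told when that happens. */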
2789 | | |
2790 | | /** \ingroup libusb_poll |
2791 | | * Free a list of libusb_pollfd structures. This should be called for all |
2792 | | * pollfd lists allocated with libusb_get_pollfds(). |
2793 | | * |
2794 | | * Since version 1.0.20, \ref LIBUSB_API_VERSION >= 0x01000104 |
2795 | | * |
2796 | | * It is legal to call this function with a NULL pollfd list. In this case, |
2797 | | * the function will simply do nothing. |
2798 | | * |
2799 | | * \param pollfds the list of libusb_pollfd structures to free |
2800 | | */ |
2801 | | void API_EXPORTED libusb_free_pollfds(const struct libusb_pollfd **pollfds) |
2802 | 0 | { |
2803 | 0 | #if !defined(PLATFORM_WINDOWS) |
2804 | 0 | free((void *)pollfds); |
2805 | | #else |
2806 | | UNUSED(pollfds); |
2807 | | #endif |
2808 | 0 | } |
2809 | | |
2810 | | /* Backends may call this from handle_events to report disconnection of a |
2811 | | * device. This function ensures transfers get cancelled appropriately. |
2812 | | * Callers of this function must hold the events_lock. |
2813 | | */ |
2814 | | void usbi_handle_disconnect(struct libusb_device_handle *dev_handle) |
2815 | 0 | { |
2816 | 0 | struct libusb_context *ctx = HANDLE_CTX(dev_handle); |
2817 | 0 | struct usbi_transfer *cur; |
2818 | 0 | struct usbi_transfer *to_cancel; |
2819 | |
2820 | 0 | usbi_dbg(ctx, "device %d.%d", |
2821 | 0 | dev_handle->dev->bus_number, dev_handle->dev->device_address); |
2822 | | |
2823 | | /* terminate all pending transfers with the LIBUSB_TRANSFER_NO_DEVICE |
2824 | | * status code. |
2825 | | * |
2826 | | * when we find a transfer for this device on the list, there are two |
2827 | | * possible scenarios: |
2828 | | * 1. the transfer is currently in-flight, in which case we terminate the |
2829 | | * transfer here |
2830 | | * 2. the transfer has been added to the flying transfer list by |
2831 | | * libusb_submit_transfer, has failed to submit and |
2832 | | * libusb_submit_transfer is waiting for us to release the |
2833 | | * flying_transfers_lock to remove it, so we ignore it |
2834 | | */ |
2835 | |
2836 | 0 | while (1) { |
2837 | 0 | to_cancel = NULL; |
2838 | 0 | usbi_mutex_lock(&ctx->flying_transfers_lock); |
2839 | 0 | for_each_transfer(ctx, cur) { |
2840 | 0 | struct libusb_transfer *cur_transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(cur); |
2841 | 0 | if (cur_transfer->dev_handle == dev_handle) { |
2842 | 0 | usbi_mutex_lock(&cur->lock); |
2843 | 0 | if (cur->state_flags & USBI_TRANSFER_IN_FLIGHT) |
2844 | 0 | to_cancel = cur; |
2845 | 0 | usbi_mutex_unlock(&cur->lock); |
2846 | |
2847 | 0 | if (to_cancel) |
2848 | 0 | break; |
2849 | 0 | } |
2850 | 0 | } |
2851 | 0 | usbi_mutex_unlock(&ctx->flying_transfers_lock); |
2852 | |
2853 | 0 | if (!to_cancel) |
2854 | 0 | break; |
2855 | | |
2856 | 0 | struct libusb_transfer *transfer_to_cancel = USBI_TRANSFER_TO_LIBUSB_TRANSFER(to_cancel); |
2857 | 0 | usbi_dbg(ctx, "cancelling transfer %p from disconnect", |
2858 | 0 | (void *) transfer_to_cancel); |
2859 | |
2860 | 0 | usbi_mutex_lock(&to_cancel->lock); |
2861 | 0 | usbi_backend.clear_transfer_priv(to_cancel); |
2862 | 0 | usbi_mutex_unlock(&to_cancel->lock); |
2863 | 0 | usbi_handle_transfer_completion(to_cancel, LIBUSB_TRANSFER_NO_DEVICE); |
2864 | 0 | } |
2865 | 0 | } |