/rust/registry/src/index.crates.io-1949cf8c6b5b557f/virtio-queue-0.16.0/src/queue.rs
Line | Count | Source |
1 | | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
2 | | // Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved. |
3 | | // Copyright © 2019 Intel Corporation. |
4 | | // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. |
5 | | // Use of this source code is governed by a BSD-style license that can be |
6 | | // found in the LICENSE-BSD-3-Clause file. |
7 | | // |
8 | | // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause |
9 | | |
10 | | use std::mem::size_of; |
11 | | use std::num::Wrapping; |
12 | | use std::ops::Deref; |
13 | | use std::sync::atomic::{fence, Ordering}; |
14 | | |
15 | | use vm_memory::{Address, Bytes, GuestAddress, GuestMemory}; |
16 | | |
17 | | use crate::defs::{ |
18 | | DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR, |
19 | | VIRTQ_AVAIL_ELEMENT_SIZE, VIRTQ_AVAIL_RING_HEADER_SIZE, VIRTQ_AVAIL_RING_META_SIZE, |
20 | | VIRTQ_USED_ELEMENT_SIZE, VIRTQ_USED_RING_HEADER_SIZE, VIRTQ_USED_RING_META_SIZE, |
21 | | }; |
22 | | use crate::desc::{split::VirtqUsedElem, RawDescriptor}; |
23 | | use crate::{error, DescriptorChain, Error, QueueGuard, QueueOwnedT, QueueState, QueueT}; |
24 | | use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY; |
25 | | |
26 | | /// The maximum queue size as defined in the Virtio Spec. |
27 | | pub const MAX_QUEUE_SIZE: u16 = 32768; |
28 | | |
29 | | /// Struct to maintain information and manipulate a virtio queue. |
30 | | /// |
31 | | /// # Example |
32 | | /// |
33 | | /// ```rust |
34 | | /// use virtio_queue::{Queue, QueueOwnedT, QueueT}; |
35 | | /// use vm_memory::{Bytes, GuestAddress, GuestAddressSpace, GuestMemoryMmap}; |
36 | | /// |
37 | | /// let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
38 | | /// let mut queue = Queue::new(1024).unwrap(); |
39 | | /// |
40 | | /// // First, the driver sets up the queue; this setup is done via writes on the bus (PCI, MMIO). |
41 | | /// queue.set_size(8); |
42 | | /// queue.set_desc_table_address(Some(0x1000), None); |
43 | | /// queue.set_avail_ring_address(Some(0x2000), None); |
44 | | /// queue.set_used_ring_address(Some(0x3000), None); |
45 | | /// queue.set_event_idx(true); |
46 | | /// queue.set_ready(true); |
47 | | /// // The user should check if the queue is valid before starting to use it. |
48 | | /// assert!(queue.is_valid(&m)); |
49 | | /// |
50 | | /// // Here the driver would add entries in the available ring and then update the `idx` field of |
51 | | /// // the available ring (address = 0x2000 + 2). |
52 | | /// m.write_obj(3, GuestAddress(0x2002)); |
53 | | /// |
54 | | /// loop { |
55 | | /// queue.disable_notification(&m).unwrap(); |
56 | | /// |
57 | | /// // Consume entries from the available ring. |
58 | | /// while let Some(chain) = queue.iter(&m).unwrap().next() { |
59 | | /// // Process the descriptor chain, and then add an entry in the used ring and optionally |
60 | | /// // notify the driver. |
61 | | /// queue.add_used(&m, chain.head_index(), 0x100).unwrap(); |
62 | | /// |
63 | | /// if queue.needs_notification(&m).unwrap() { |
64 | | /// // Here we would notify the driver it has new entries in the used ring to consume. |
65 | | /// } |
66 | | /// } |
67 | | /// if !queue.enable_notification(&m).unwrap() { |
68 | | /// break; |
69 | | /// } |
70 | | /// } |
71 | | /// |
72 | | /// // We can reset the queue at some point. |
73 | | /// queue.reset(); |
74 | | /// // The queue should not be ready after reset. |
75 | | /// assert!(!queue.ready()); |
76 | | /// ``` |
77 | | #[derive(Debug, Default, PartialEq, Eq)] |
78 | | pub struct Queue { |
79 | | /// The maximum size in elements offered by the device. |
80 | | max_size: u16, |
81 | | |
82 | | /// Tail position of the available ring. |
83 | | next_avail: Wrapping<u16>, |
84 | | |
85 | | /// Head position of the used ring. |
86 | | next_used: Wrapping<u16>, |
87 | | |
88 | | /// VIRTIO_F_RING_EVENT_IDX negotiated. |
89 | | event_idx_enabled: bool, |
90 | | |
91 | | /// The number of descriptor chains placed in the used ring via `add_used` |
92 | | /// since the last time `needs_notification` was called on the associated queue. |
93 | | num_added: Wrapping<u16>, |
94 | | |
95 | | /// The queue size in elements the driver selected. |
96 | | size: u16, |
97 | | |
98 | | /// Indicates if the queue is finished with configuration. |
99 | | ready: bool, |
100 | | |
101 | | /// Guest physical address of the descriptor table. |
102 | | desc_table: GuestAddress, |
103 | | |
104 | | /// Guest physical address of the available ring. |
105 | | avail_ring: GuestAddress, |
106 | | |
107 | | /// Guest physical address of the used ring. |
108 | | used_ring: GuestAddress, |
109 | | } |
110 | | |
111 | | impl Queue { |
112 | | /// Equivalent of [`QueueT::set_size`] returning an error in case of invalid size. |
113 | | /// |
114 | | /// This should not be directly used, as the preferred method is part of the [`QueueT`] |
115 | | /// interface. This is a convenience function for implementing save/restore capabilities. |
116 | 8.71k | pub fn try_set_size(&mut self, size: u16) -> Result<(), Error> { |
117 | 8.71k | if size > self.max_size() || size == 0 || (size & (size - 1)) != 0 { |
118 | 8.31k | return Err(Error::InvalidSize); |
119 | 402 | } |
120 | 402 | self.size = size; |
121 | 402 | Ok(()) |
122 | 8.71k | } |
123 | | |
124 | | /// Tries to set the descriptor table address. In case of an invalid value, the address is |
125 | | /// not updated. |
126 | | /// |
127 | | /// This should not be directly used, as the preferred method is |
128 | | /// [`QueueT::set_desc_table_address`]. This is a convenience function for implementing |
129 | | /// save/restore capabilities. |
130 | 8.71k | pub fn try_set_desc_table_address(&mut self, desc_table: GuestAddress) -> Result<(), Error> { |
131 | 8.71k | if desc_table.mask(0xf) != 0 { |
132 | 0 | return Err(Error::InvalidDescTableAlign); |
133 | 8.71k | } |
134 | 8.71k | self.desc_table = desc_table; |
135 | | |
136 | 8.71k | Ok(()) |
137 | 8.71k | } |
138 | | |
139 | | /// Tries to update the available ring address. In case of an invalid value, the address is |
140 | | /// not updated. |
141 | | /// |
142 | | /// This should not be directly used, as the preferred method is |
143 | | /// [`QueueT::set_avail_ring_address`]. This is a convenience function for implementing |
144 | | /// save/restore capabilities. |
145 | 8.71k | pub fn try_set_avail_ring_address(&mut self, avail_ring: GuestAddress) -> Result<(), Error> { |
146 | 8.71k | if avail_ring.mask(0x1) != 0 { |
147 | 0 | return Err(Error::InvalidAvailRingAlign); |
148 | 8.71k | } |
149 | 8.71k | self.avail_ring = avail_ring; |
150 | 8.71k | Ok(()) |
151 | 8.71k | } |
152 | | |
153 | | /// Tries to update the used ring address. In case of an invalid value, the address is not |
154 | | /// updated. |
155 | | /// |
156 | | /// This should not be directly used, as the preferred method is |
157 | | /// [`QueueT::set_used_ring_address`]. This is a convenience function for implementing |
158 | | /// save/restore capabilities. |
159 | 8.71k | pub fn try_set_used_ring_address(&mut self, used_ring: GuestAddress) -> Result<(), Error> { |
160 | 8.71k | if used_ring.mask(0x3) != 0 { |
161 | 0 | return Err(Error::InvalidUsedRingAlign); |
162 | 8.71k | } |
163 | 8.71k | self.used_ring = used_ring; |
164 | 8.71k | Ok(()) |
165 | 8.71k | } |
166 | | |
167 | | /// Returns the state of the `Queue`. |
168 | | /// |
169 | | /// This is useful for implementing save/restore capabilities. |
170 | | /// The state does not have support for serialization, but this can be |
171 | | /// added by VMMs locally through the use of a |
172 | | /// [remote type](https://serde.rs/remote-derive.html). |
173 | | /// |
174 | | /// Alternatively, a version aware and serializable/deserializable QueueState |
175 | | /// is available in the `virtio-queue-ser` crate. |
176 | 0 | pub fn state(&self) -> QueueState { |
177 | 0 | QueueState { |
178 | 0 | max_size: self.max_size, |
179 | 0 | next_avail: self.next_avail(), |
180 | 0 | next_used: self.next_used(), |
181 | 0 | event_idx_enabled: self.event_idx_enabled, |
182 | 0 | size: self.size, |
183 | 0 | ready: self.ready, |
184 | 0 | desc_table: self.desc_table(), |
185 | 0 | avail_ring: self.avail_ring(), |
186 | 0 | used_ring: self.used_ring(), |
187 | 0 | } |
188 | 0 | } |
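A rough sketch (not part of this file) of the save/restore flow this enables, pairing `state()` with the `try_set_*` helpers above; the `restore_queue` helper name is hypothetical and assumes the `QueueState` fields are accessible as populated in `state()`:

```rust
use virtio_queue::{Error, Queue, QueueState, QueueT};
use vm_memory::GuestAddress;

// Rebuild a Queue from a previously captured QueueState.
fn restore_queue(state: &QueueState) -> Result<Queue, Error> {
    let mut q = Queue::new(state.max_size)?;
    q.try_set_size(state.size)?;
    q.try_set_desc_table_address(GuestAddress(state.desc_table))?;
    q.try_set_avail_ring_address(GuestAddress(state.avail_ring))?;
    q.try_set_used_ring_address(GuestAddress(state.used_ring))?;
    q.set_next_avail(state.next_avail);
    q.set_next_used(state.next_used);
    q.set_event_idx(state.event_idx_enabled);
    q.set_ready(state.ready);
    Ok(q)
}
```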
189 | | |
190 | | // Helper method that writes `val` to the `avail_event` field of the used ring, using |
191 | | // the provided ordering. |
192 | 0 | fn set_avail_event<M: GuestMemory>( |
193 | 0 | &self, |
194 | 0 | mem: &M, |
195 | 0 | val: u16, |
196 | 0 | order: Ordering, |
197 | 0 | ) -> Result<(), Error> { |
198 | | // This cannot overflow a u64 since it is working with relatively small numbers compared |
199 | | // to u64::MAX. |
200 | 0 | let avail_event_offset = |
201 | 0 | VIRTQ_USED_RING_HEADER_SIZE + VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size); |
202 | 0 | let addr = self |
203 | 0 | .used_ring |
204 | 0 | .checked_add(avail_event_offset) |
205 | 0 | .ok_or(Error::AddressOverflow)?; |
206 | | |
207 | 0 | mem.store(u16::to_le(val), addr, order) |
208 | 0 | .map_err(Error::GuestMemory) |
209 | 0 | }
210 | | |
211 | | // Set the value of the `flags` field of the used ring, applying the specified ordering. |
212 | 53.9k | fn set_used_flags<M: GuestMemory>( |
213 | 53.9k | &mut self, |
214 | 53.9k | mem: &M, |
215 | 53.9k | val: u16, |
216 | 53.9k | order: Ordering, |
217 | 53.9k | ) -> Result<(), Error> { |
218 | 53.9k | mem.store(u16::to_le(val), self.used_ring, order) |
219 | 53.9k | .map_err(Error::GuestMemory) |
220 | 53.9k | }
221 | | |
222 | | // Write the appropriate values to enable or disable notifications from the driver. |
223 | | // |
224 | | // Every access in this method uses `Relaxed` ordering because a fence is added by the caller |
225 | | // when appropriate. |
226 | 53.9k | fn set_notification<M: GuestMemory>(&mut self, mem: &M, enable: bool) -> Result<(), Error> { |
227 | 53.9k | if enable { |
228 | 53.9k | if self.event_idx_enabled { |
229 | | // We call `set_avail_event` using the `next_avail` value, instead of reading |
230 | | // and using the current `avail_idx` to avoid missing notifications. More |
231 | | // details in `enable_notification`. |
232 | 0 | self.set_avail_event(mem, self.next_avail.0, Ordering::Relaxed) |
233 | | } else { |
234 | 53.9k | self.set_used_flags(mem, 0, Ordering::Relaxed) |
235 | | } |
236 | 0 | } else if !self.event_idx_enabled { |
237 | 0 | self.set_used_flags(mem, VRING_USED_F_NO_NOTIFY as u16, Ordering::Relaxed) |
238 | | } else { |
239 | | // Notifications are effectively disabled by default after triggering once when |
240 | | // `VIRTIO_F_EVENT_IDX` is negotiated, so we don't do anything in that case. |
241 | 0 | Ok(()) |
242 | | } |
243 | 53.9k | }
244 | | |
245 | | // Return the value present in the used_event field of the avail ring. |
246 | | // |
247 | | // If the VIRTIO_F_EVENT_IDX feature bit is not negotiated, the flags field in the available |
248 | | // ring offers a crude mechanism for the driver to inform the device that it doesn’t want |
249 | | // interrupts when buffers are used. Otherwise virtq_avail.used_event is a more performant |
250 | | // alternative where the driver specifies how far the device can progress before interrupting. |
251 | | // |
252 | | // Neither of these interrupt suppression methods are reliable, as they are not synchronized |
253 | | // with the device, but they serve as useful optimizations. So we only ensure access to the |
254 | | // virtq_avail.used_event is atomic, but do not need to synchronize with other memory accesses. |
255 | 0 | fn used_event<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> { |
256 | | // This cannot overflow a u64 since it is working with relatively small numbers compared |
257 | | // to u64::MAX. |
258 | 0 | let used_event_offset = |
259 | 0 | VIRTQ_AVAIL_RING_HEADER_SIZE + u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE; |
260 | 0 | let used_event_addr = self |
261 | 0 | .avail_ring |
262 | 0 | .checked_add(used_event_offset) |
263 | 0 | .ok_or(Error::AddressOverflow)?; |
264 | | |
265 | 0 | mem.load(used_event_addr, order) |
266 | 0 | .map(u16::from_le) |
267 | 0 | .map(Wrapping) |
268 | 0 | .map_err(Error::GuestMemory) |
269 | 0 | }
270 | | } |
271 | | |
272 | | impl<'a> QueueGuard<'a> for Queue { |
273 | | type G = &'a mut Self; |
274 | | } |
275 | | |
276 | | impl QueueT for Queue { |
277 | 8.71k | fn new(max_size: u16) -> Result<Self, Error> { |
278 | | // We need to check that the max size is a power of 2 because we're setting this as the |
279 | | // queue size, and the valid queue sizes are a power of 2 as per the specification. |
280 | 8.71k | if max_size == 0 || max_size > MAX_QUEUE_SIZE || (max_size & (max_size - 1)) != 0 { |
281 | 0 | return Err(Error::InvalidMaxSize); |
282 | 8.71k | } |
283 | 8.71k | Ok(Queue { |
284 | 8.71k | max_size, |
285 | 8.71k | size: max_size, |
286 | 8.71k | ready: false, |
287 | 8.71k | desc_table: GuestAddress(DEFAULT_DESC_TABLE_ADDR), |
288 | 8.71k | avail_ring: GuestAddress(DEFAULT_AVAIL_RING_ADDR), |
289 | 8.71k | used_ring: GuestAddress(DEFAULT_USED_RING_ADDR), |
290 | 8.71k | next_avail: Wrapping(0), |
291 | 8.71k | next_used: Wrapping(0), |
292 | 8.71k | event_idx_enabled: false, |
293 | 8.71k | num_added: Wrapping(0), |
294 | 8.71k | }) |
295 | 8.71k | } |
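The size validation in `new` (and in `try_set_size` above) relies on the usual bit trick for detecting powers of two; a minimal illustration, where the `is_valid_queue_size` helper is hypothetical:

```rust
// A non-zero value is a power of two exactly when it has a single bit set,
// in which case `size & (size - 1)` clears that bit and yields zero.
fn is_valid_queue_size(size: u16, max_size: u16) -> bool {
    size != 0 && size <= max_size && (size & (size - 1)) == 0
}

assert!(is_valid_queue_size(8, 16));
assert!(!is_valid_queue_size(11, 16)); // not a power of two
assert!(!is_valid_queue_size(32, 16)); // exceeds max_size
```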
296 | | |
297 | 0 | fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool { |
298 | 0 | let queue_size = self.size as u64; |
299 | 0 | let desc_table = self.desc_table; |
300 | | // The multiplication cannot overflow a u64 since we are multiplying a u16 with a |
301 | | // small number. |
302 | 0 | let desc_table_size = size_of::<RawDescriptor>() as u64 * queue_size; |
303 | 0 | let avail_ring = self.avail_ring; |
304 | | // The operations below cannot overflow a u64 since they're working with relatively small |
305 | | // numbers compared to u64::MAX. |
306 | 0 | let avail_ring_size = VIRTQ_AVAIL_RING_META_SIZE + VIRTQ_AVAIL_ELEMENT_SIZE * queue_size; |
307 | 0 | let used_ring = self.used_ring; |
308 | 0 | let used_ring_size = VIRTQ_USED_RING_META_SIZE + VIRTQ_USED_ELEMENT_SIZE * queue_size; |
309 | | |
310 | 0 | if !self.ready { |
311 | 0 | error!("attempt to use virtio queue that is not marked ready"); |
312 | 0 | false |
313 | 0 | } else if desc_table |
314 | 0 | .checked_add(desc_table_size) |
315 | 0 | .is_none_or(|v| !mem.address_in_range(v)) |
316 | | { |
317 | 0 | error!( |
318 | | "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}", |
319 | 0 | desc_table.raw_value(), |
320 | | desc_table_size |
321 | | ); |
322 | 0 | false |
323 | 0 | } else if avail_ring |
324 | 0 | .checked_add(avail_ring_size) |
325 | 0 | .is_none_or(|v| !mem.address_in_range(v)) |
326 | | { |
327 | 0 | error!( |
328 | | "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}", |
329 | 0 | avail_ring.raw_value(), |
330 | | avail_ring_size |
331 | | ); |
332 | 0 | false |
333 | 0 | } else if used_ring |
334 | 0 | .checked_add(used_ring_size) |
335 | 0 | .is_none_or(|v| !mem.address_in_range(v)) |
336 | | { |
337 | 0 | error!( |
338 | | "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}", |
339 | 0 | used_ring.raw_value(), |
340 | | used_ring_size |
341 | | ); |
342 | 0 | false |
343 | | } else { |
344 | 0 | true |
345 | | } |
346 | 0 | }
347 | | |
348 | 0 | fn reset(&mut self) { |
349 | 0 | self.ready = false; |
350 | 0 | self.size = self.max_size; |
351 | 0 | self.desc_table = GuestAddress(DEFAULT_DESC_TABLE_ADDR); |
352 | 0 | self.avail_ring = GuestAddress(DEFAULT_AVAIL_RING_ADDR); |
353 | 0 | self.used_ring = GuestAddress(DEFAULT_USED_RING_ADDR); |
354 | 0 | self.next_avail = Wrapping(0); |
355 | 0 | self.next_used = Wrapping(0); |
356 | 0 | self.num_added = Wrapping(0); |
357 | 0 | self.event_idx_enabled = false; |
358 | 0 | } |
359 | | |
360 | 0 | fn lock(&mut self) -> <Self as QueueGuard<'_>>::G { |
361 | 0 | self |
362 | 0 | } |
363 | | |
364 | 8.71k | fn max_size(&self) -> u16 { |
365 | 8.71k | self.max_size |
366 | 8.71k | } |
367 | | |
368 | 857 | fn size(&self) -> u16 { |
369 | 857 | self.size |
370 | 857 | } |
371 | | |
372 | 8.71k | fn set_size(&mut self, size: u16) { |
373 | 8.71k | if self.try_set_size(size).is_err() { |
374 | 8.31k | error!("virtio queue with invalid size: {}", size); |
375 | 402 | } |
376 | 8.71k | } |
377 | | |
378 | 0 | fn ready(&self) -> bool { |
379 | 0 | self.ready |
380 | 0 | } |
381 | | |
382 | 8.71k | fn set_ready(&mut self, ready: bool) { |
383 | 8.71k | self.ready = ready; |
384 | 8.71k | } |
385 | | |
386 | 0 | fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) { |
387 | 0 | let low = low.unwrap_or(self.desc_table.0 as u32) as u64; |
388 | 0 | let high = high.unwrap_or((self.desc_table.0 >> 32) as u32) as u64; |
389 | | |
390 | 0 | let desc_table = GuestAddress((high << 32) | low); |
391 | 0 | if self.try_set_desc_table_address(desc_table).is_err() { |
392 | 0 | error!("virtio queue descriptor table breaks alignment constraints"); |
393 | 0 | } |
394 | 0 | } |
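A quick illustration (not from this file) of how the two 32-bit register halves combine, and how passing `None` preserves the previously programmed half:

```rust
use virtio_queue::{Queue, QueueT};

let mut q = Queue::new(16).unwrap();
q.set_desc_table_address(Some(0x0000_2000), Some(0x0000_0001));
assert_eq!(q.desc_table(), 0x1_0000_2000);

// Updating only the low dword leaves the high dword untouched.
q.set_desc_table_address(Some(0x0000_3000), None);
assert_eq!(q.desc_table(), 0x1_0000_3000);
```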
395 | | |
396 | 0 | fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) { |
397 | 0 | let low = low.unwrap_or(self.avail_ring.0 as u32) as u64; |
398 | 0 | let high = high.unwrap_or((self.avail_ring.0 >> 32) as u32) as u64; |
399 | | |
400 | 0 | let avail_ring = GuestAddress((high << 32) | low); |
401 | 0 | if self.try_set_avail_ring_address(avail_ring).is_err() { |
402 | 0 | error!("virtio queue available ring breaks alignment constraints"); |
403 | 0 | } |
404 | 0 | } |
405 | | |
406 | 0 | fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) { |
407 | 0 | let low = low.unwrap_or(self.used_ring.0 as u32) as u64; |
408 | 0 | let high = high.unwrap_or((self.used_ring.0 >> 32) as u32) as u64; |
409 | | |
410 | 0 | let used_ring = GuestAddress((high << 32) | low); |
411 | 0 | if self.try_set_used_ring_address(used_ring).is_err() { |
412 | 0 | error!("virtio queue used ring breaks alignment constraints"); |
413 | 0 | } |
414 | 0 | } |
415 | | |
416 | 10.9k | fn set_event_idx(&mut self, enabled: bool) { |
417 | 10.9k | self.event_idx_enabled = enabled; |
418 | 10.9k | } |
419 | | |
420 | 225k | fn avail_idx<M>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> |
421 | 225k | where |
422 | 225k | M: GuestMemory + ?Sized, |
423 | | { |
424 | 225k | let addr = self |
425 | 225k | .avail_ring |
426 | 225k | .checked_add(2) |
427 | 225k | .ok_or(Error::AddressOverflow)?; |
428 | | |
429 | 225k | mem.load(addr, order) |
430 | 225k | .map(u16::from_le) |
431 | 225k | .map(Wrapping) |
432 | 225k | .map_err(Error::GuestMemory) |
433 | 225k | }
434 | | |
435 | 689 | fn used_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> { |
436 | 689 | let addr = self |
437 | 689 | .used_ring |
438 | 689 | .checked_add(2) |
439 | 689 | .ok_or(Error::AddressOverflow)?; |
440 | | |
441 | 689 | mem.load(addr, order) |
442 | 689 | .map(u16::from_le) |
443 | 689 | .map(Wrapping) |
444 | 689 | .map_err(Error::GuestMemory) |
445 | 689 | }
446 | | |
447 | 157k | fn add_used<M: GuestMemory>( |
448 | 157k | &mut self, |
449 | 157k | mem: &M, |
450 | 157k | head_index: u16, |
451 | 157k | len: u32, |
452 | 157k | ) -> Result<(), Error> { |
453 | 157k | if head_index >= self.size { |
454 | 166 | error!( |
455 | | "attempted to add out of bounds descriptor to used ring: {}", |
456 | | head_index |
457 | | ); |
458 | 166 | return Err(Error::InvalidDescriptorIndex); |
459 | 157k | } |
460 | | |
461 | 157k | let next_used_index = u64::from(self.next_used.0 % self.size); |
462 | | // This cannot overflow a u64 since it is working with relatively small numbers compared |
463 | | // to u64::MAX. |
464 | 157k | let offset = VIRTQ_USED_RING_HEADER_SIZE + next_used_index * VIRTQ_USED_ELEMENT_SIZE; |
465 | 157k | let addr = self |
466 | 157k | .used_ring |
467 | 157k | .checked_add(offset) |
468 | 157k | .ok_or(Error::AddressOverflow)?; |
469 | 157k | mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr) |
470 | 157k | .map_err(Error::GuestMemory)?; |
471 | | |
472 | 157k | self.next_used += Wrapping(1); |
473 | 157k | self.num_added += Wrapping(1); |
474 | | |
475 | 157k | mem.store( |
476 | 157k | u16::to_le(self.next_used.0), |
477 | 157k | self.used_ring |
478 | 157k | .checked_add(2) |
479 | 157k | .ok_or(Error::AddressOverflow)?, |
480 | 157k | Ordering::Release, |
481 | | ) |
482 | 157k | .map_err(Error::GuestMemory) |
483 | 157k | }
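A small numeric check of the used-ring offset computation in `add_used`; the 4-byte header and 8-byte element sizes below are the split-ring layout values from the virtio spec, spelled out here instead of using the crate constants:

```rust
// With a queue size of 8 and next_used == 10, the entry wraps to slot 10 % 8 == 2,
// so it lands at used_ring + 4 (flags + idx header) + 2 * 8 (used elements) == +20.
let queue_size: u16 = 8;
let next_used: u16 = 10;
let offset = 4u64 + u64::from(next_used % queue_size) * 8;
assert_eq!(offset, 20);
```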
484 | | |
485 | 53.9k | fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> { |
486 | 53.9k | self.set_notification(mem, true)?; |
487 | | // Ensures the following read is not reordered before any previous write operation. |
488 | 53.9k | fence(Ordering::SeqCst); |
489 | | |
490 | | // We double check here to avoid the situation where the available ring has been updated |
491 | | // just before we re-enabled notifications, and it's possible to miss one. We compare the |
492 | | // current `avail_idx` value to `self.next_avail` because it's where we stopped processing |
493 | | // entries. There are situations where we intentionally avoid processing everything in the |
494 | | // available ring (which will cause this method to return `true`), but in that case we'll |
495 | | // probably not re-enable notifications as we already know there are pending entries. |
496 | 53.9k | self.avail_idx(mem, Ordering::Relaxed) |
497 | 53.9k | .map(|idx| idx != self.next_avail) |
498 | 53.9k | }
499 | | |
500 | 0 | fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> { |
501 | 0 | self.set_notification(mem, false) |
502 | 0 | } |
503 | | |
504 | 2.11k | fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> { |
505 | 2.11k | let used_idx = self.next_used; |
506 | | |
507 | | // Complete all the writes in add_used() before reading the event. |
508 | 2.11k | fence(Ordering::SeqCst); |
509 | | |
510 | | // The VRING_AVAIL_F_NO_INTERRUPT flag isn't supported yet. |
511 | | |
512 | | // When the `EVENT_IDX` feature is negotiated, the driver writes into `used_event` |
513 | | // a value that's used by the device to determine whether a notification must |
514 | | // be submitted after adding a descriptor chain to the used ring. According to the |
515 | | // standard, the notification must be sent when `next_used == used_event + 1`, but |
516 | | // various device model implementations rely on an inequality instead, most likely |
517 | | // to also support use cases where a bunch of descriptor chains are added to the used |
518 | | // ring first, and only afterwards the `needs_notification` logic is called. For example, |
519 | | // the approach based on `num_added` below is taken from the Linux Kernel implementation |
520 | | // (i.e. https://elixir.bootlin.com/linux/v5.15.35/source/drivers/virtio/virtio_ring.c#L661) |
521 | | |
522 | | // The `old` variable below is used to determine the value of `next_used` from when |
523 | | // `needs_notification` was called last (each `needs_notification` call resets `num_added` |
524 | | // to zero, while each `add_used` call increments it by one). Then, the logic below |
525 | | // uses wrapped arithmetic to see whether `used_event` can be found between `old` and |
526 | | // `next_used` in the circular sequence space of the used ring. |
527 | 2.11k | if self.event_idx_enabled { |
528 | 0 | let used_event = self.used_event(mem, Ordering::Relaxed)?; |
529 | 0 | let old = used_idx - self.num_added; |
530 | 0 | self.num_added = Wrapping(0); |
531 | | |
532 | 0 | return Ok(used_idx - used_event - Wrapping(1) < used_idx - old); |
533 | 2.11k | } |
534 | | |
535 | 2.11k | Ok(true) |
536 | 2.11k | }
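A standalone sketch of the wrapping comparison used above, mirroring the kernel's `vring_need_event` helper; the `need_event` function and the index values are purely illustrative:

```rust
use std::num::Wrapping;

// Notify only if `used_event` lies in `[old, new)`, i.e. it was crossed by the
// entries added to the used ring since the last `needs_notification` call.
fn need_event(used_event: Wrapping<u16>, new: Wrapping<u16>, old: Wrapping<u16>) -> bool {
    new - used_event - Wrapping(1) < new - old
}

assert!(need_event(Wrapping(6), Wrapping(8), Wrapping(5))); // 6 was crossed between 5 and 8
assert!(!need_event(Wrapping(10), Wrapping(8), Wrapping(5))); // 10 not reached yet
```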
537 | | |
538 | 0 | fn next_avail(&self) -> u16 { |
539 | 0 | self.next_avail.0 |
540 | 0 | } |
541 | | |
542 | 8.71k | fn set_next_avail(&mut self, next_avail: u16) { |
543 | 8.71k | self.next_avail = Wrapping(next_avail); |
544 | 8.71k | } |
545 | | |
546 | 0 | fn next_used(&self) -> u16 { |
547 | 0 | self.next_used.0 |
548 | 0 | } |
549 | | |
550 | 8.71k | fn set_next_used(&mut self, next_used: u16) { |
551 | 8.71k | self.next_used = Wrapping(next_used); |
552 | 8.71k | } |
553 | | |
554 | 0 | fn desc_table(&self) -> u64 { |
555 | 0 | self.desc_table.0 |
556 | 0 | } |
557 | | |
558 | 0 | fn avail_ring(&self) -> u64 { |
559 | 0 | self.avail_ring.0 |
560 | 0 | } |
561 | | |
562 | 0 | fn used_ring(&self) -> u64 { |
563 | 0 | self.used_ring.0 |
564 | 0 | } |
565 | | |
566 | 0 | fn event_idx_enabled(&self) -> bool { |
567 | 0 | self.event_idx_enabled |
568 | 0 | } |
569 | | |
570 | 171k | fn pop_descriptor_chain<M>(&mut self, mem: M) -> Option<DescriptorChain<M>> |
571 | 171k | where |
572 | 171k | M: Clone + Deref, |
573 | 171k | M::Target: GuestMemory, |
574 | | { |
575 | | // Default, iter-based impl. Will be subsequently improved. |
576 | 171k | match self.iter(mem) { |
577 | 168k | Ok(mut iter) => iter.next(), |
578 | 2.48k | Err(e) => { |
579 | 2.48k | error!("Iterator error {}", e); |
580 | 2.48k | None |
581 | | } |
582 | | } |
583 | 171k | }
584 | | } |
585 | | |
586 | | impl QueueOwnedT for Queue { |
587 | 171k | fn iter<M>(&mut self, mem: M) -> Result<AvailIter<'_, M>, Error> |
588 | 171k | where |
589 | 171k | M: Deref, |
590 | 171k | M::Target: GuestMemory, |
591 | | { |
592 | | // We're checking here that a reset did not happen without re-initializing the queue. |
593 | | // TODO: In the future we might want to also check that the other parameters in the |
594 | | // queue are valid. |
595 | 171k | if !self.ready || self.avail_ring == GuestAddress(0) { |
596 | 0 | return Err(Error::QueueNotReady); |
597 | 171k | } |
598 | | |
599 | 171k | self.avail_idx(mem.deref(), Ordering::Acquire) |
600 | 171k | .map(move |idx| AvailIter::new(mem, idx, self))? |
601 | 171k | }
602 | | |
603 | 28 | fn go_to_previous_position(&mut self) { |
604 | 28 | self.next_avail -= Wrapping(1); |
605 | 28 | } |
606 | | } |
607 | | |
608 | | /// Consuming iterator over all available descriptor chain heads in the queue. |
609 | | /// |
610 | | /// # Example |
611 | | /// |
612 | | /// ```rust |
613 | | /// # use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE}; |
614 | | /// # use virtio_queue::mock::MockSplitQueue; |
615 | | /// use virtio_queue::{desc::{split::Descriptor as SplitDescriptor, RawDescriptor}, Queue, QueueOwnedT}; |
616 | | /// use vm_memory::{GuestAddress, GuestMemoryMmap}; |
617 | | /// |
618 | | /// # fn populate_queue(m: &GuestMemoryMmap) -> Queue { |
619 | | /// # let vq = MockSplitQueue::new(m, 16); |
620 | | /// # let mut q: Queue = vq.create_queue().unwrap(); |
621 | | /// # |
622 | | /// # // The chains are (0, 1), (2, 3, 4) and (5, 6). |
623 | | /// # let mut descs = Vec::new(); |
624 | | /// # for i in 0..7 { |
625 | | /// # let flags = match i { |
626 | | /// # 1 | 6 => 0, |
627 | | /// # 2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, |
628 | | /// # 4 => VRING_DESC_F_WRITE, |
629 | | /// # _ => VRING_DESC_F_NEXT, |
630 | | /// # }; |
631 | | /// # |
632 | | /// # descs.push(RawDescriptor::from(SplitDescriptor::new((0x1000 * (i + 1)) as u64, 0x1000, flags as u16, i + 1))); |
633 | | /// # } |
634 | | /// # |
635 | | /// # vq.add_desc_chains(&descs, 0).unwrap(); |
636 | | /// # q |
637 | | /// # } |
638 | | /// let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
639 | | /// // Populate the queue with descriptor chains and update the available ring accordingly. |
640 | | /// let mut queue = populate_queue(m); |
641 | | /// let mut i = queue.iter(m).unwrap(); |
642 | | /// |
643 | | /// { |
644 | | /// let mut c = i.next().unwrap(); |
645 | | /// let _first_head_index = c.head_index(); |
646 | | /// // We should have two descriptors in the first chain. |
647 | | /// let _desc1 = c.next().unwrap(); |
648 | | /// let _desc2 = c.next().unwrap(); |
649 | | /// } |
650 | | /// |
651 | | /// { |
652 | | /// let c = i.next().unwrap(); |
653 | | /// let _second_head_index = c.head_index(); |
654 | | /// |
655 | | /// let mut iter = c.writable(); |
656 | | /// // We should have two writable descriptors in the second chain. |
657 | | /// let _desc1 = iter.next().unwrap(); |
658 | | /// let _desc2 = iter.next().unwrap(); |
659 | | /// } |
660 | | /// |
661 | | /// { |
662 | | /// let c = i.next().unwrap(); |
663 | | /// let _third_head_index = c.head_index(); |
664 | | /// |
665 | | /// let mut iter = c.readable(); |
666 | | /// // We should have one readable descriptor in the third chain. |
667 | | /// let _desc1 = iter.next().unwrap(); |
668 | | /// } |
669 | | /// // Let's go back one position in the available ring. |
670 | | /// i.go_to_previous_position(); |
671 | | /// // We should be able to access again the third descriptor chain. |
672 | | /// let c = i.next().unwrap(); |
673 | | /// let _third_head_index = c.head_index(); |
674 | | /// ``` |
675 | | #[derive(Debug)] |
676 | | pub struct AvailIter<'b, M> { |
677 | | mem: M, |
678 | | desc_table: GuestAddress, |
679 | | avail_ring: GuestAddress, |
680 | | queue_size: u16, |
681 | | last_index: Wrapping<u16>, |
682 | | next_avail: &'b mut Wrapping<u16>, |
683 | | } |
684 | | |
685 | | impl<'b, M> AvailIter<'b, M> |
686 | | where |
687 | | M: Deref, |
688 | | M::Target: GuestMemory, |
689 | | { |
690 | | /// Create a new instance of `AvailIter`. |
691 | | /// |
692 | | /// # Arguments |
693 | | /// * `mem` - the `GuestMemory` object that can be used to access the queue buffers. |
694 | | /// * `idx` - the index of the available ring entry where the driver would put the next |
695 | | /// available descriptor chain. |
696 | | /// * `queue` - the `Queue` object from which the needed data to create the `AvailIter` can |
697 | | /// be retrieved. |
698 | 171k | pub(crate) fn new(mem: M, idx: Wrapping<u16>, queue: &'b mut Queue) -> Result<Self, Error> { |
699 | | // The number of descriptor chain heads to process should always |
700 | | // be smaller than or equal to the queue size, as the driver should |
701 | | // never ask the VMM to process an available ring entry more than |
702 | | // once. Checking and reporting such incorrect driver behavior |
703 | | // can prevent potential hanging and Denial-of-Service from |
704 | | // happening on the VMM side. |
705 | 171k | if (idx - queue.next_avail).0 > queue.size { |
706 | 2.48k | return Err(Error::InvalidAvailRingIndex); |
707 | 168k | } |
708 | | |
709 | 168k | Ok(AvailIter { |
710 | 168k | mem, |
711 | 168k | desc_table: queue.desc_table, |
712 | 168k | avail_ring: queue.avail_ring, |
713 | 168k | queue_size: queue.size, |
714 | 168k | last_index: idx, |
715 | 168k | next_avail: &mut queue.next_avail, |
716 | 168k | }) |
717 | 171k | }
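A short illustration of the wrap-aware distance check above: wrapping `u16` subtraction yields the number of pending entries even when the indices wrap around, and anything beyond the queue size indicates a bogus `avail_idx` published by the driver (the values here are arbitrary):

```rust
use std::num::Wrapping;

let queue_size: u16 = 16;
let next_avail = Wrapping(65_534u16);
let idx = Wrapping(2u16); // the driver advanced the available index across the wrap

assert_eq!((idx - next_avail).0, 4); // 4 pending chains, <= queue_size: accepted

let bogus_idx = next_avail + Wrapping(queue_size + 1);
assert!((bogus_idx - next_avail).0 > queue_size); // rejected with InvalidAvailRingIndex
```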
718 | | |
719 | | /// Goes back one position in the available descriptor chain offered by the driver. |
720 | | /// |
721 | | /// Rust does not support bidirectional iterators. This is the only way to revert the effect |
722 | | /// of an iterator increment on the queue. |
723 | | /// |
724 | | /// Note: this method assumes there's only one thread manipulating the queue, so it should only |
725 | | /// be invoked in a single-threaded context. |
726 | 0 | pub fn go_to_previous_position(&mut self) { |
727 | 0 | *self.next_avail -= Wrapping(1); |
728 | 0 | } |
729 | | } |
730 | | |
731 | | impl<M> Iterator for AvailIter<'_, M> |
732 | | where |
733 | | M: Clone + Deref, |
734 | | M::Target: GuestMemory, |
735 | | { |
736 | | type Item = DescriptorChain<M>; |
737 | | |
738 | 168k | fn next(&mut self) -> Option<Self::Item> { |
739 | 168k | if *self.next_avail == self.last_index { |
740 | 1.41k | return None; |
741 | 167k | } |
742 | | |
743 | | // These two operations cannot overflow a u64 since they're working with relatively small |
744 | | // numbers compared to u64::MAX. |
745 | 167k | let elem_off = |
746 | 167k | u64::from(self.next_avail.0.checked_rem(self.queue_size)?) * VIRTQ_AVAIL_ELEMENT_SIZE; |
747 | 167k | let offset = VIRTQ_AVAIL_RING_HEADER_SIZE + elem_off; |
748 | | |
749 | 167k | let addr = self.avail_ring.checked_add(offset)?; |
750 | 167k | let head_index: u16 = self |
751 | 167k | .mem |
752 | 167k | .load(addr, Ordering::Acquire) |
753 | 167k | .map(u16::from_le) |
754 | 167k | .map_err(|_| error!("Failed to read from memory {:x}", addr.raw_value())) |
755 | 167k | .ok()?; |
756 | | |
757 | 167k | *self.next_avail += Wrapping(1); |
758 | | |
759 | 167k | Some(DescriptorChain::new( |
760 | 167k | self.mem.clone(), |
761 | 167k | self.desc_table, |
762 | 167k | self.queue_size, |
763 | 167k | head_index, |
764 | 167k | )) |
765 | 168k | }
766 | | } |
767 | | |
768 | | #[cfg(any(test, feature = "test-utils"))] |
769 | | // It is convenient for tests to implement `PartialEq`, but it is not a |
770 | | // proper implementation as `GuestMemory` errors cannot implement `PartialEq`. |
771 | | impl PartialEq for Error { |
772 | | fn eq(&self, other: &Self) -> bool { |
773 | | format!("{}", &self) == format!("{}", other) |
774 | | } |
775 | | } |
776 | | |
777 | | #[cfg(test)] |
778 | | mod tests { |
779 | | use super::*; |
780 | | use crate::defs::{DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR}; |
781 | | use crate::desc::{split::Descriptor as SplitDescriptor, RawDescriptor}; |
782 | | use crate::mock::MockSplitQueue; |
783 | | use virtio_bindings::bindings::virtio_ring::{ |
784 | | VRING_DESC_F_NEXT, VRING_DESC_F_WRITE, VRING_USED_F_NO_NOTIFY, |
785 | | }; |
786 | | |
787 | | use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; |
788 | | |
789 | | #[test] |
790 | | fn test_queue_is_valid() { |
791 | | let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
792 | | let vq = MockSplitQueue::new(m, 16); |
793 | | let mut q: Queue = vq.create_queue().unwrap(); |
794 | | |
795 | | // q is currently valid |
796 | | assert!(q.is_valid(m)); |
797 | | |
798 | | // shouldn't be valid when not marked as ready |
799 | | q.set_ready(false); |
800 | | assert!(!q.ready()); |
801 | | assert!(!q.is_valid(m)); |
802 | | q.set_ready(true); |
803 | | |
804 | | // shouldn't be allowed to set a size > max_size |
805 | | q.set_size(q.max_size() << 1); |
806 | | assert_eq!(q.size, q.max_size()); |
807 | | |
808 | | // or set the size to 0 |
809 | | q.set_size(0); |
810 | | assert_eq!(q.size, q.max_size()); |
811 | | |
812 | | // or set a size which is not a power of 2 |
813 | | q.set_size(11); |
814 | | assert_eq!(q.size, q.max_size()); |
815 | | |
816 | | // but should be allowed to set a size if 0 < size <= max_size and size is a power of two |
817 | | q.set_size(4); |
818 | | assert_eq!(q.size, 4); |
819 | | q.size = q.max_size(); |
820 | | |
821 | | // shouldn't be allowed to set an address that breaks the alignment constraint |
822 | | q.set_desc_table_address(Some(0xf), None); |
823 | | assert_eq!(q.desc_table.0, vq.desc_table_addr().0); |
824 | | // should be allowed to set an aligned out of bounds address |
825 | | q.set_desc_table_address(Some(0xffff_fff0), None); |
826 | | assert_eq!(q.desc_table.0, 0xffff_fff0); |
827 | | // but shouldn't be valid |
828 | | assert!(!q.is_valid(m)); |
830 | | // but should be allowed to set a valid descriptor table address |
830 | | q.set_desc_table_address(Some(0x10), None); |
831 | | assert_eq!(q.desc_table.0, 0x10); |
832 | | assert!(q.is_valid(m)); |
833 | | let addr = vq.desc_table_addr().0; |
834 | | q.set_desc_table_address(Some(addr as u32), Some((addr >> 32) as u32)); |
835 | | |
836 | | // shouldn't be allowed to set an address that breaks the alignment constraint |
837 | | q.set_avail_ring_address(Some(0x1), None); |
838 | | assert_eq!(q.avail_ring.0, vq.avail_addr().0); |
839 | | // should be allowed to set an aligned out of bounds address |
840 | | q.set_avail_ring_address(Some(0xffff_fffe), None); |
841 | | assert_eq!(q.avail_ring.0, 0xffff_fffe); |
842 | | // but shouldn't be valid |
843 | | assert!(!q.is_valid(m)); |
844 | | // but should be allowed to set a valid available ring address |
845 | | q.set_avail_ring_address(Some(0x2), None); |
846 | | assert_eq!(q.avail_ring.0, 0x2); |
847 | | assert!(q.is_valid(m)); |
848 | | let addr = vq.avail_addr().0; |
849 | | q.set_avail_ring_address(Some(addr as u32), Some((addr >> 32) as u32)); |
850 | | |
851 | | // shouldn't be allowed to set an address that breaks the alignment constraint |
852 | | q.set_used_ring_address(Some(0x3), None); |
853 | | assert_eq!(q.used_ring.0, vq.used_addr().0); |
854 | | // should be allowed to set an aligned out of bounds address |
855 | | q.set_used_ring_address(Some(0xffff_fffc), None); |
856 | | assert_eq!(q.used_ring.0, 0xffff_fffc); |
857 | | // but shouldn't be valid |
858 | | assert!(!q.is_valid(m)); |
859 | | // but should be allowed to set a valid used ring address |
860 | | q.set_used_ring_address(Some(0x4), None); |
861 | | assert_eq!(q.used_ring.0, 0x4); |
862 | | let addr = vq.used_addr().0; |
863 | | q.set_used_ring_address(Some(addr as u32), Some((addr >> 32) as u32)); |
864 | | assert!(q.is_valid(m)); |
865 | | } |
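The validity rules this test exercises can be summarized as: the size must be non-zero, a power of two, and at most `max_size`, and the three ring addresses must respect the split-ring alignment constraints (16 bytes for the descriptor table, 2 for the available ring, 4 for the used ring) and fall inside guest memory. A minimal sketch of the size and alignment part in plain Rust (illustrative helpers only, not the crate's implementation; the guest-memory bounds check is omitted):

```rust
/// Illustrative only: mirrors the size and alignment rules the test above exercises.
fn size_is_acceptable(size: u16, max_size: u16) -> bool {
    size != 0 && size <= max_size && size.is_power_of_two()
}

fn is_aligned(addr: u64, align: u64) -> bool {
    addr & (align - 1) == 0
}

fn main() {
    assert!(!size_is_acceptable(0, 16));
    assert!(!size_is_acceptable(11, 16)); // not a power of two
    assert!(!size_is_acceptable(32, 16)); // larger than max_size
    assert!(size_is_acceptable(4, 16));

    // Split-ring alignment constraints: descriptor table 16 bytes,
    // available ring 2 bytes, used ring 4 bytes.
    assert!(!is_aligned(0xf, 16));
    assert!(is_aligned(0xffff_fff0, 16)); // aligned, even if out of bounds
    assert!(!is_aligned(0x1, 2));
    assert!(!is_aligned(0x3, 4));
}
```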
866 | | |
867 | | #[test] |
868 | | fn test_add_used() { |
869 | | let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
870 | | let vq = MockSplitQueue::new(mem, 16); |
871 | | let mut q: Queue = vq.create_queue().unwrap(); |
872 | | |
873 | | assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(0)); |
874 | | assert_eq!(u16::from_le(vq.used().idx().load()), 0); |
875 | | |
876 | | // index too large |
877 | | assert!(q.add_used(mem, 16, 0x1000).is_err()); |
878 | | assert_eq!(u16::from_le(vq.used().idx().load()), 0); |
879 | | |
880 | | // should be ok |
881 | | q.add_used(mem, 1, 0x1000).unwrap(); |
882 | | assert_eq!(q.next_used, Wrapping(1)); |
883 | | assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(1)); |
884 | | assert_eq!(u16::from_le(vq.used().idx().load()), 1); |
885 | | |
886 | | let x = vq.used().ring().ref_at(0).unwrap().load(); |
887 | | assert_eq!(x.id(), 1); |
888 | | assert_eq!(x.len(), 0x1000); |
889 | | } |
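For a split queue, `add_used` effectively writes an 8-byte `virtq_used_elem` (a 32-bit id plus a 32-bit len) into slot `next_used % size` of the used ring, which starts 4 bytes past the ring's base (after the 16-bit `flags` and `idx` fields), and then publishes the new index. A rough sketch of the offset arithmetic, assuming the split-ring layout from the virtio spec (not the crate's code; the base address is illustrative):

```rust
// Sketch of where a used element lands in guest memory for a split queue.
// The used ring starts with a 2-byte `flags` field and a 2-byte `idx` field,
// followed by 8-byte `virtq_used_elem` entries.
fn used_elem_offset(used_ring_base: u64, next_used: u16, queue_size: u16) -> u64 {
    let header = 4; // flags (2 bytes) + idx (2 bytes)
    let slot = u64::from(next_used % queue_size);
    used_ring_base + header + slot * 8
}

fn main() {
    // For a 16-entry queue based at 0x3000, the element written by the
    // `add_used(mem, 1, 0x1000)` call above (next_used was still 0) goes into
    // slot 0, i.e. right after the 4-byte header.
    assert_eq!(used_elem_offset(0x3000, 0, 16), 0x3004);
    // The next element (next_used == 1) would land 8 bytes further.
    assert_eq!(used_elem_offset(0x3000, 1, 16), 0x300c);
}
```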
890 | | |
891 | | #[test] |
892 | | fn test_reset_queue() { |
893 | | let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
894 | | let vq = MockSplitQueue::new(m, 16); |
895 | | let mut q: Queue = vq.create_queue().unwrap(); |
896 | | |
897 | | q.set_size(8); |
898 | | // The address set by `MockSplitQueue` for the descriptor table is DEFAULT_DESC_TABLE_ADDR, |
899 | | // so let's change it for testing the reset. |
900 | | q.set_desc_table_address(Some(0x5000), None); |
901 | | // Same for `event_idx_enabled`, `next_avail`, `next_used` and `num_added`. |
902 | | q.set_event_idx(true); |
903 | | q.set_next_avail(2); |
904 | | q.set_next_used(4); |
905 | | q.num_added = Wrapping(15); |
906 | | assert_eq!(q.size, 8); |
907 | | // `create_queue` also marks the queue as ready. |
908 | | assert!(q.ready); |
909 | | assert_ne!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR)); |
910 | | assert_ne!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR)); |
911 | | assert_ne!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR)); |
912 | | assert_ne!(q.next_avail, Wrapping(0)); |
913 | | assert_ne!(q.next_used, Wrapping(0)); |
914 | | assert_ne!(q.num_added, Wrapping(0)); |
915 | | assert!(q.event_idx_enabled); |
916 | | |
917 | | q.reset(); |
918 | | assert_eq!(q.size, 16); |
919 | | assert!(!q.ready); |
920 | | assert_eq!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR)); |
921 | | assert_eq!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR)); |
922 | | assert_eq!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR)); |
923 | | assert_eq!(q.next_avail, Wrapping(0)); |
924 | | assert_eq!(q.next_used, Wrapping(0)); |
925 | | assert_eq!(q.num_added, Wrapping(0)); |
926 | | assert!(!q.event_idx_enabled); |
927 | | } |
928 | | |
929 | | #[test] |
930 | | fn test_needs_notification() { |
931 | | let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
932 | | let qsize = 16; |
933 | | let vq = MockSplitQueue::new(mem, qsize); |
934 | | let mut q: Queue = vq.create_queue().unwrap(); |
935 | | let avail_addr = vq.avail_addr(); |
936 | | |
937 | | // It should always return true when EVENT_IDX isn't enabled. |
938 | | for i in 0..qsize { |
939 | | q.next_used = Wrapping(i); |
940 | | assert!(q.needs_notification(mem).unwrap()); |
941 | | } |
942 | | |
943 | | mem.write_obj::<u16>( |
944 | | u16::to_le(4), |
945 | | avail_addr.unchecked_add(4 + qsize as u64 * 2), |
946 | | ) |
947 | | .unwrap(); |
948 | | q.set_event_idx(true); |
949 | | |
950 | | // Incrementing up to this value causes a `u16` to wrap back to 0. |
951 | | let wrap = u32::from(u16::MAX) + 1; |
952 | | |
953 | | for i in 0..wrap + 12 { |
954 | | q.next_used = Wrapping(i as u16); |
955 | | // Let's test wrapping around the maximum index value as well. |
956 | | // `num_added` needs to be at least `1` to represent the fact that new descriptor |
957 | | // chains have been added to the used ring since the last time `needs_notification` |
958 | | // returned. |
959 | | q.num_added = Wrapping(1); |
960 | | let expected = i == 5 || i == (5 + wrap); |
961 | | assert_eq!((q.needs_notification(mem).unwrap(), i), (expected, i)); |
962 | | } |
963 | | |
964 | | mem.write_obj::<u16>( |
965 | | u16::to_le(8), |
966 | | avail_addr.unchecked_add(4 + qsize as u64 * 2), |
967 | | ) |
968 | | .unwrap(); |
969 | | |
970 | | // Returns `false` because the current `used_event` value is behind both `next_used` and |
971 | | // the value of `next_used` at the time when `needs_notification` last returned (which is |
972 | | // computed based on `num_added`, as described in the comments for `needs_notification`). |
973 | | assert!(!q.needs_notification(mem).unwrap()); |
974 | | |
975 | | mem.write_obj::<u16>( |
976 | | u16::to_le(15), |
977 | | avail_addr.unchecked_add(4 + qsize as u64 * 2), |
978 | | ) |
979 | | .unwrap(); |
980 | | |
981 | | q.num_added = Wrapping(1); |
982 | | assert!(!q.needs_notification(mem).unwrap()); |
983 | | |
984 | | q.next_used = Wrapping(15); |
985 | | q.num_added = Wrapping(1); |
986 | | assert!(!q.needs_notification(mem).unwrap()); |
987 | | |
988 | | q.next_used = Wrapping(16); |
989 | | q.num_added = Wrapping(1); |
990 | | assert!(q.needs_notification(mem).unwrap()); |
991 | | |
992 | | // Calling `needs_notification` again immediately returns `false`. |
993 | | assert!(!q.needs_notification(mem).unwrap()); |
994 | | |
995 | | mem.write_obj::<u16>( |
996 | | u16::to_le(u16::MAX - 3), |
997 | | avail_addr.unchecked_add(4 + qsize as u64 * 2), |
998 | | ) |
999 | | .unwrap(); |
1000 | | q.next_used = Wrapping(u16::MAX - 2); |
1001 | | q.num_added = Wrapping(1); |
1002 | | // Returns `true` because, when looking at the circular sequence of indices of the used ring, |
1003 | | // the value we wrote in the `used_event` appears between the "old" value of `next_used` |
1004 | | // (i.e. `next_used` - `num_added`) and the current `next_used`, thus suggesting that we |
1005 | | // need to notify the driver. |
1006 | | assert!(q.needs_notification(mem).unwrap()); |
1007 | | } |
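With `EVENT_IDX` enabled, `needs_notification` follows the spec's `vring_need_event` rule: notify only when `used_event` falls inside the window of used indices published since the last check, with wrapping 16-bit arithmetic, where the "old" index is `next_used - num_added`. A hedged sketch of that comparison, checked against the expectations of the test above:

```rust
use std::num::Wrapping;

// Sketch of the EVENT_IDX check (the virtio spec's `vring_need_event`):
// notify iff `used_event` lies in the window of indices published since the
// last check, i.e. (new - used_event - 1) < (new - old), all wrapping u16.
fn need_event(used_event: Wrapping<u16>, new: Wrapping<u16>, old: Wrapping<u16>) -> bool {
    (new - used_event - Wrapping(1)) < (new - old)
}

fn main() {
    // used_event = 4 and num_added = 1 (so old = new - 1): only publishing
    // index 5 crosses `used_event`, matching the `i == 5` expectation above.
    assert!(need_event(Wrapping(4), Wrapping(5), Wrapping(4)));
    assert!(!need_event(Wrapping(4), Wrapping(6), Wrapping(5)));

    // The wrapping case from the end of the test: used_event = u16::MAX - 3,
    // old = u16::MAX - 3 and new = u16::MAX - 2, so the device must notify.
    let ue = Wrapping(u16::MAX - 3);
    assert!(need_event(ue, Wrapping(u16::MAX - 2), ue));
}
```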
1008 | | |
1009 | | #[test] |
1010 | | fn test_enable_disable_notification() { |
1011 | | let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
1012 | | let vq = MockSplitQueue::new(mem, 16); |
1013 | | |
1014 | | let mut q: Queue = vq.create_queue().unwrap(); |
1015 | | let used_addr = vq.used_addr(); |
1016 | | |
1017 | | assert!(!q.event_idx_enabled); |
1018 | | |
1019 | | q.enable_notification(mem).unwrap(); |
1020 | | let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap(); |
1021 | | assert_eq!(v, 0); |
1022 | | |
1023 | | q.disable_notification(mem).unwrap(); |
1024 | | let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap(); |
1025 | | assert_eq!(v, VRING_USED_F_NO_NOTIFY as u16); |
1026 | | |
1027 | | q.enable_notification(mem).unwrap(); |
1028 | | let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap(); |
1029 | | assert_eq!(v, 0); |
1030 | | |
1031 | | q.set_event_idx(true); |
1032 | | let avail_addr = vq.avail_addr(); |
1033 | | mem.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2)) |
1034 | | .unwrap(); |
1035 | | |
1036 | | assert!(q.enable_notification(mem).unwrap()); |
1037 | | q.next_avail = Wrapping(2); |
1038 | | assert!(!q.enable_notification(mem).unwrap()); |
1039 | | |
1040 | | mem.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2)) |
1041 | | .unwrap(); |
1042 | | |
1043 | | assert!(q.enable_notification(mem).unwrap()); |
1044 | | q.next_avail = Wrapping(8); |
1045 | | assert!(!q.enable_notification(mem).unwrap()); |
1046 | | } |
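Without `EVENT_IDX`, the device-side suppression mechanism is just the 16-bit `flags` word at offset 0 of the used ring: `disable_notification` sets `VRING_USED_F_NO_NOTIFY` there and `enable_notification` clears it, which is what the first half of the test reads back. A minimal sketch of those guest-memory writes, assuming vm-memory's mmap backend as used throughout these tests (the used ring base is an illustrative address):

```rust
use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};

fn main() {
    let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let used_ring = GuestAddress(0x3000); // illustrative used ring base

    // Suppress notifications: set VRING_USED_F_NO_NOTIFY in the used ring's
    // `flags` field, the same value the test reads back after
    // `disable_notification`.
    mem.write_obj::<u16>(u16::to_le(VRING_USED_F_NO_NOTIFY as u16), used_ring)
        .unwrap();
    assert_eq!(
        mem.read_obj::<u16>(used_ring).map(u16::from_le).unwrap(),
        VRING_USED_F_NO_NOTIFY as u16
    );

    // Re-enable notifications by clearing the flag again.
    mem.write_obj::<u16>(0, used_ring).unwrap();
    assert_eq!(mem.read_obj::<u16>(used_ring).map(u16::from_le).unwrap(), 0);
}
```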
1047 | | |
1048 | | #[test] |
1049 | | fn test_consume_chains_with_notif() { |
1050 | | let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
1051 | | let vq = MockSplitQueue::new(mem, 16); |
1052 | | |
1053 | | let mut q: Queue = vq.create_queue().unwrap(); |
1054 | | |
1055 | | // q is currently valid. |
1056 | | assert!(q.is_valid(mem)); |
1057 | | |
1058 | | // The chains are (0, 1), (2, 3, 4), (5, 6), (7, 8), (9, 10, 11, 12). |
1059 | | let mut descs = Vec::new(); |
1060 | | for i in 0..13 { |
1061 | | let flags = match i { |
1062 | | 1 | 4 | 6 | 8 | 12 => 0, |
1063 | | _ => VRING_DESC_F_NEXT, |
1064 | | }; |
1065 | | |
1066 | | descs.push(RawDescriptor::from(SplitDescriptor::new( |
1067 | | (0x1000 * (i + 1)) as u64, |
1068 | | 0x1000, |
1069 | | flags as u16, |
1070 | | i + 1, |
1071 | | ))); |
1072 | | } |
1073 | | |
1074 | | vq.add_desc_chains(&descs, 0).unwrap(); |
1075 | | // Update the available ring `idx` so that only the first two chains can be consumed for now. |
1076 | | // This enables us to consume chains in multiple iterations, as opposed to consuming |
1077 | | // all the driver-written chains at once. |
1078 | | vq.avail().idx().store(u16::to_le(2)); |
1079 | | // No descriptor chains are consumed at this point. |
1080 | | assert_eq!(q.next_avail(), 0); |
1081 | | |
1082 | | let mut i = 0; |
1083 | | |
1084 | | loop { |
1085 | | i += 1; |
1086 | | q.disable_notification(mem).unwrap(); |
1087 | | |
1088 | | while let Some(chain) = q.iter(mem).unwrap().next() { |
1089 | | // Process the descriptor chain, and then add entries to the |
1090 | | // used ring. |
1091 | | let head_index = chain.head_index(); |
1092 | | let mut desc_len = 0; |
1093 | | chain.for_each(|d| { |
1094 | | if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE { |
1095 | | desc_len += d.len(); |
1096 | | } |
1097 | | }); |
1098 | | q.add_used(mem, head_index, desc_len).unwrap(); |
1099 | | } |
1100 | | if !q.enable_notification(mem).unwrap() { |
1101 | | break; |
1102 | | } |
1103 | | } |
1104 | | // The chains should be consumed in a single loop iteration because there's nothing updating |
1105 | | // the `idx` field of the available ring in the meantime. |
1106 | | assert_eq!(i, 1); |
1107 | | // The next chain that can be consumed should have index 2. |
1108 | | assert_eq!(q.next_avail(), 2); |
1109 | | assert_eq!(q.next_used(), 2); |
1110 | | // Let the device know it can consume one more chain. |
1111 | | vq.avail().idx().store(u16::to_le(3)); |
1112 | | i = 0; |
1113 | | |
1114 | | loop { |
1115 | | i += 1; |
1116 | | q.disable_notification(mem).unwrap(); |
1117 | | |
1118 | | while let Some(chain) = q.iter(mem).unwrap().next() { |
1119 | | // Process the descriptor chain, and then add entries to the |
1120 | | // used ring. |
1121 | | let head_index = chain.head_index(); |
1122 | | let mut desc_len = 0; |
1123 | | chain.for_each(|d| { |
1124 | | if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE { |
1125 | | desc_len += d.len(); |
1126 | | } |
1127 | | }); |
1128 | | q.add_used(mem, head_index, desc_len).unwrap(); |
1129 | | } |
1130 | | |
1131 | | // For the simplicity of the test we update the `idx` value of the available ring here; |
1132 | | // ideally this would be done from a separate thread. |
1133 | | // Because of this update, the loop should be iterated again to consume the new |
1134 | | // available descriptor chains. |
1135 | | vq.avail().idx().store(u16::to_le(4)); |
1136 | | if !q.enable_notification(mem).unwrap() { |
1137 | | break; |
1138 | | } |
1139 | | } |
1140 | | assert_eq!(i, 2); |
1141 | | // The next chain that can be consumed should have index 4. |
1142 | | assert_eq!(q.next_avail(), 4); |
1143 | | assert_eq!(q.next_used(), 4); |
1144 | | |
1145 | | // Set an `idx` that is bigger than the number of entries added in the ring. |
1146 | | // This is an allowed scenario, but the indexes of the chain will have unexpected values. |
1147 | | vq.avail().idx().store(u16::to_le(7)); |
1148 | | loop { |
1149 | | q.disable_notification(mem).unwrap(); |
1150 | | |
1151 | | while let Some(chain) = q.iter(mem).unwrap().next() { |
1152 | | // Process the descriptor chain, and then add entries to the |
1153 | | // used ring. |
1154 | | let head_index = chain.head_index(); |
1155 | | let mut desc_len = 0; |
1156 | | chain.for_each(|d| { |
1157 | | if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE { |
1158 | | desc_len += d.len(); |
1159 | | } |
1160 | | }); |
1161 | | q.add_used(mem, head_index, desc_len).unwrap(); |
1162 | | } |
1163 | | if !q.enable_notification(mem).unwrap() { |
1164 | | break; |
1165 | | } |
1166 | | } |
1167 | | assert_eq!(q.next_avail(), 7); |
1168 | | assert_eq!(q.next_used(), 7); |
1169 | | } |
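The chains built in these tests rely on the split-ring descriptor layout: each table entry carries `addr`, `len`, `flags` and `next`, and a chain is formed by setting `VRING_DESC_F_NEXT` on every descriptor except the tail, with `next` holding the table index of the following descriptor. A minimal plain-Rust sketch of how the (2, 3, 4) chain above is encoded and walked (illustrative types, not the crate's):

```rust
const VRING_DESC_F_NEXT: u16 = 0x1;

// Plain-Rust mirror of the split-ring `virtq_desc` layout (illustrative only).
struct VirtqDesc {
    addr: u64,  // guest-physical address of the buffer
    len: u32,   // buffer length in bytes
    flags: u16, // NEXT / WRITE / INDIRECT
    next: u16,  // table index of the next descriptor in the chain
}

fn main() {
    // A 16-entry descriptor table with only the (2, 3, 4) chain filled in.
    let mut table: Vec<VirtqDesc> = (0..16)
        .map(|_| VirtqDesc { addr: 0, len: 0, flags: 0, next: 0 })
        .collect();
    table[2] = VirtqDesc { addr: 0x3000, len: 0x1000, flags: VRING_DESC_F_NEXT, next: 3 };
    table[3] = VirtqDesc { addr: 0x4000, len: 0x1000, flags: VRING_DESC_F_NEXT, next: 4 };
    table[4] = VirtqDesc { addr: 0x5000, len: 0x1000, flags: 0, next: 0 }; // tail

    // Walk the chain from its head index, as the device-side iterator does.
    let (mut idx, mut descs, mut bytes) = (2usize, 0u32, 0u64);
    loop {
        let d = &table[idx];
        descs += 1;
        bytes += u64::from(d.len);
        if d.flags & VRING_DESC_F_NEXT == 0 {
            break;
        }
        idx = usize::from(d.next);
    }
    assert_eq!(table[2].addr, 0x3000);
    assert_eq!((descs, bytes), (3, 0x3000));
}
```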
1170 | | |
1171 | | #[test] |
1172 | | fn test_invalid_avail_idx() { |
1173 | | // This is a negative test for the following MUST from the spec: `A driver MUST NOT |
1174 | | // decrement the available idx on a virtqueue (ie. there is no way to “unexpose” buffers).`. |
1175 | | // We validate that for this misconfiguration, the device does not panic. |
1176 | | let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
1177 | | let vq = MockSplitQueue::new(mem, 16); |
1178 | | |
1179 | | let mut q: Queue = vq.create_queue().unwrap(); |
1180 | | |
1181 | | // q is currently valid. |
1182 | | assert!(q.is_valid(mem)); |
1183 | | |
1184 | | // The chains are (0, 1), (2, 3, 4), (5, 6). |
1185 | | let mut descs = Vec::new(); |
1186 | | for i in 0..7 { |
1187 | | let flags = match i { |
1188 | | 1 | 4 | 6 => 0, |
1189 | | _ => VRING_DESC_F_NEXT, |
1190 | | }; |
1191 | | |
1192 | | descs.push(RawDescriptor::from(SplitDescriptor::new( |
1193 | | (0x1000 * (i + 1)) as u64, |
1194 | | 0x1000, |
1195 | | flags as u16, |
1196 | | i + 1, |
1197 | | ))); |
1198 | | } |
1199 | | |
1200 | | vq.add_desc_chains(&descs, 0).unwrap(); |
1201 | | // Let the device know it can consume chains with an index < 3. |
1202 | | vq.avail().idx().store(u16::to_le(3)); |
1203 | | // No descriptor chains are consumed at this point. |
1204 | | assert_eq!(q.next_avail(), 0); |
1205 | | assert_eq!(q.next_used(), 0); |
1206 | | |
1207 | | loop { |
1208 | | q.disable_notification(mem).unwrap(); |
1209 | | |
1210 | | while let Some(chain) = q.iter(mem).unwrap().next() { |
1211 | | // Process the descriptor chain, and then add entries to the |
1212 | | // used ring. |
1213 | | let head_index = chain.head_index(); |
1214 | | let mut desc_len = 0; |
1215 | | chain.for_each(|d| { |
1216 | | if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE { |
1217 | | desc_len += d.len(); |
1218 | | } |
1219 | | }); |
1220 | | q.add_used(mem, head_index, desc_len).unwrap(); |
1221 | | } |
1222 | | if !q.enable_notification(mem).unwrap() { |
1223 | | break; |
1224 | | } |
1225 | | } |
1226 | | // The next chain that can be consumed should have index 3. |
1227 | | assert_eq!(q.next_avail(), 3); |
1228 | | assert_eq!(q.avail_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3)); |
1229 | | assert_eq!(q.next_used(), 3); |
1230 | | assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3)); |
1231 | | assert!(q.lock().ready()); |
1232 | | |
1233 | | // Decrement `idx`, which is forbidden. We don't enforce this constraint, but we should |
1234 | | // test that we don't panic in case the driver decrements it. |
1235 | | vq.avail().idx().store(u16::to_le(1)); |
1236 | | // Invalid available ring index |
1237 | | assert!(q.iter(mem).is_err()); |
1238 | | } |
1239 | | |
1240 | | #[test] |
1241 | | fn test_iterator_and_avail_idx() { |
1242 | | // This test ensures constructing a descriptor chain iterator succeeds |
1243 | | // with valid available ring indexes, and produces an error with invalid |
1244 | | // indexes. |
1245 | | let queue_size = 2; |
1246 | | let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
1247 | | let vq = MockSplitQueue::new(mem, queue_size); |
1248 | | |
1249 | | let mut q: Queue = vq.create_queue().unwrap(); |
1250 | | |
1251 | | // q is currently valid. |
1252 | | assert!(q.is_valid(mem)); |
1253 | | |
1254 | | // Create descriptors to fill up the queue |
1255 | | let mut descs = Vec::new(); |
1256 | | for i in 0..queue_size { |
1257 | | descs.push(RawDescriptor::from(SplitDescriptor::new( |
1258 | | (0x1000 * (i + 1)) as u64, |
1259 | | 0x1000, |
1260 | | 0_u16, |
1261 | | i + 1, |
1262 | | ))); |
1263 | | } |
1264 | | vq.add_desc_chains(&descs, 0).unwrap(); |
1265 | | |
1266 | | // Set the `next_avail` index to `u16::MAX` to test the wrapping scenarios. |
1267 | | q.set_next_avail(u16::MAX); |
1268 | | |
1269 | | // When the number of chains exposed by the driver is equal to or less than the queue |
1270 | | // size, the available ring index is valid and an iterator is constructed successfully. |
1271 | | let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size); |
1272 | | vq.avail().idx().store(u16::to_le(avail_idx.0)); |
1273 | | assert!(q.iter(mem).is_ok()); |
1274 | | let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size - 1); |
1275 | | vq.avail().idx().store(u16::to_le(avail_idx.0)); |
1276 | | assert!(q.iter(mem).is_ok()); |
1277 | | |
1278 | | // When the number of chains exposed by the driver is larger than the queue size, the |
1279 | | // available ring index is invalid and constructing an iterator produces an error. |
1280 | | let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size + 1); |
1281 | | vq.avail().idx().store(u16::to_le(avail_idx.0)); |
1282 | | assert!(q.iter(mem).is_err()); |
1283 | | } |
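Both this test and `test_invalid_avail_idx` exercise the same sanity check: the number of chains the driver claims to have exposed, computed as `avail_idx - next_avail` with wrapping 16-bit arithmetic, must not exceed the queue size, otherwise iterator construction is refused. A hedged sketch of that check (illustrative helper, not the crate's code):

```rust
use std::num::Wrapping;

// Illustrative helper: the driver-exposed chain count, computed with wrapping
// u16 arithmetic, must not exceed the queue size.
fn pending_chains_ok(avail_idx: u16, next_avail: u16, queue_size: u16) -> bool {
    (Wrapping(avail_idx) - Wrapping(next_avail)).0 <= queue_size
}

fn main() {
    // next_avail = u16::MAX and a 2-entry queue, as in the test above.
    assert!(pending_chains_ok(u16::MAX.wrapping_add(2), u16::MAX, 2)); // 2 chains: ok
    assert!(pending_chains_ok(u16::MAX.wrapping_add(1), u16::MAX, 2)); // 1 chain: ok
    assert!(!pending_chains_ok(u16::MAX.wrapping_add(3), u16::MAX, 2)); // 3 chains: error

    // The decrement scenario from `test_invalid_avail_idx`: next_avail already
    // advanced to 3, but the driver stored 1, so the wrapped distance is huge.
    assert!(!pending_chains_ok(1, 3, 16));
}
```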
1284 | | |
1285 | | #[test] |
1286 | | fn test_descriptor_and_iterator() { |
1287 | | let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
1288 | | let vq = MockSplitQueue::new(m, 16); |
1289 | | |
1290 | | let mut q: Queue = vq.create_queue().unwrap(); |
1291 | | |
1292 | | // q is currently valid |
1293 | | assert!(q.is_valid(m)); |
1294 | | |
1295 | | // the chains are (0, 1), (2, 3, 4) and (5, 6) |
1296 | | let mut descs = Vec::new(); |
1297 | | for j in 0..7 { |
1298 | | let flags = match j { |
1299 | | 1 | 6 => 0, |
1300 | | 2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, |
1301 | | 4 => VRING_DESC_F_WRITE, |
1302 | | _ => VRING_DESC_F_NEXT, |
1303 | | }; |
1304 | | |
1305 | | descs.push(RawDescriptor::from(SplitDescriptor::new( |
1306 | | (0x1000 * (j + 1)) as u64, |
1307 | | 0x1000, |
1308 | | flags as u16, |
1309 | | j + 1, |
1310 | | ))); |
1311 | | } |
1312 | | |
1313 | | vq.add_desc_chains(&descs, 0).unwrap(); |
1314 | | |
1315 | | let mut i = q.iter(m).unwrap(); |
1316 | | |
1317 | | { |
1318 | | let c = i.next().unwrap(); |
1319 | | assert_eq!(c.head_index(), 0); |
1320 | | |
1321 | | let mut iter = c; |
1322 | | assert!(iter.next().is_some()); |
1323 | | assert!(iter.next().is_some()); |
1324 | | assert!(iter.next().is_none()); |
1325 | | assert!(iter.next().is_none()); |
1326 | | } |
1327 | | |
1328 | | { |
1329 | | let c = i.next().unwrap(); |
1330 | | assert_eq!(c.head_index(), 2); |
1331 | | |
1332 | | let mut iter = c.writable(); |
1333 | | assert!(iter.next().is_some()); |
1334 | | assert!(iter.next().is_some()); |
1335 | | assert!(iter.next().is_none()); |
1336 | | assert!(iter.next().is_none()); |
1337 | | } |
1338 | | |
1339 | | { |
1340 | | let c = i.next().unwrap(); |
1341 | | assert_eq!(c.head_index(), 5); |
1342 | | |
1343 | | let mut iter = c.readable(); |
1344 | | assert!(iter.next().is_some()); |
1345 | | assert!(iter.next().is_none()); |
1346 | | assert!(iter.next().is_none()); |
1347 | | } |
1348 | | } |
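The `writable()` and `readable()` chain iterators used above simply split a chain's descriptors by the `VRING_DESC_F_WRITE` flag: device-writable buffers versus driver-readable ones. A small plain-Rust sketch of that filtering, using the flag patterns of the (2, 3, 4) and (5, 6) chains from this test:

```rust
const VRING_DESC_F_NEXT: u16 = 0x1;
const VRING_DESC_F_WRITE: u16 = 0x2;

// Illustrative helper: count the device-writable descriptors in a chain.
fn count_writable(flags: &[u16]) -> usize {
    flags.iter().filter(|&&f| f & VRING_DESC_F_WRITE != 0).count()
}

fn main() {
    // Chain (2, 3, 4): descriptors 2 and 4 carry the WRITE flag, 3 does not,
    // which is why `writable()` yields exactly two items above.
    let chain = [
        VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
        VRING_DESC_F_NEXT,
        VRING_DESC_F_WRITE,
    ];
    assert_eq!(count_writable(&chain), 2);
    assert_eq!(chain.len() - count_writable(&chain), 1); // readable descriptors

    // Chain (5, 6): only the tail is readable, so `readable()` yields one item.
    let chain = [VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, 0];
    assert_eq!(chain.len() - count_writable(&chain), 1);
}
```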
1349 | | |
1350 | | #[test] |
1351 | | fn test_iterator() { |
1352 | | let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
1353 | | let vq = MockSplitQueue::new(m, 16); |
1354 | | |
1355 | | let mut q: Queue = vq.create_queue().unwrap(); |
1356 | | |
1357 | | q.size = q.max_size; |
1358 | | q.desc_table = vq.desc_table_addr(); |
1359 | | q.avail_ring = vq.avail_addr(); |
1360 | | q.used_ring = vq.used_addr(); |
1361 | | assert!(q.is_valid(m)); |
1362 | | |
1363 | | { |
1364 | | // constructing an iterator over a queue that is not ready should fail |
1365 | | q.ready = false; |
1366 | | assert!(q.iter(m).is_err()); |
1367 | | } |
1368 | | |
1369 | | q.ready = true; |
1370 | | |
1371 | | // now let's create two simple descriptor chains |
1372 | | // the chains are (0, 1) and (2, 3, 4) |
1373 | | { |
1374 | | let mut descs = Vec::new(); |
1375 | | for j in 0..5u16 { |
1376 | | let flags = match j { |
1377 | | 1 | 4 => 0, |
1378 | | _ => VRING_DESC_F_NEXT, |
1379 | | }; |
1380 | | |
1381 | | descs.push(RawDescriptor::from(SplitDescriptor::new( |
1382 | | (0x1000 * (j + 1)) as u64, |
1383 | | 0x1000, |
1384 | | flags as u16, |
1385 | | j + 1, |
1386 | | ))); |
1387 | | } |
1388 | | vq.add_desc_chains(&descs, 0).unwrap(); |
1389 | | |
1390 | | let mut i = q.iter(m).unwrap(); |
1391 | | |
1392 | | { |
1393 | | let mut c = i.next().unwrap(); |
1394 | | assert_eq!(c.head_index(), 0); |
1395 | | |
1396 | | c.next().unwrap(); |
1397 | | assert!(c.next().is_some()); |
1398 | | assert!(c.next().is_none()); |
1399 | | assert_eq!(c.head_index(), 0); |
1400 | | } |
1401 | | |
1402 | | { |
1403 | | let mut c = i.next().unwrap(); |
1404 | | assert_eq!(c.head_index(), 2); |
1405 | | |
1406 | | c.next().unwrap(); |
1407 | | c.next().unwrap(); |
1408 | | c.next().unwrap(); |
1409 | | assert!(c.next().is_none()); |
1410 | | assert_eq!(c.head_index(), 2); |
1411 | | } |
1412 | | |
1413 | | // also test go_to_previous_position() works as expected |
1414 | | { |
1415 | | assert!(i.next().is_none()); |
1416 | | i.go_to_previous_position(); |
1417 | | let mut c = q.iter(m).unwrap().next().unwrap(); |
1418 | | c.next().unwrap(); |
1419 | | c.next().unwrap(); |
1420 | | c.next().unwrap(); |
1421 | | assert!(c.next().is_none()); |
1422 | | } |
1423 | | } |
1424 | | |
1425 | | // Test that iterating some broken descriptor chain does not exceed |
1426 | | // 2^32 bytes in total (VIRTIO spec version 1.2, 2.7.5.2: |
1427 | | // Drivers MUST NOT add a descriptor chain longer than 2^32 bytes in |
1428 | | // total) |
1429 | | { |
1430 | | let descs = vec![ |
1431 | | RawDescriptor::from(SplitDescriptor::new( |
1432 | | 0x1000, |
1433 | | 0xffff_ffff, |
1434 | | VRING_DESC_F_NEXT as u16, |
1435 | | 1, |
1436 | | )), |
1437 | | RawDescriptor::from(SplitDescriptor::new(0x1000, 0x1234_5678, 0, 2)), |
1438 | | ]; |
1439 | | vq.add_desc_chains(&descs, 0).unwrap(); |
1440 | | let mut yielded_bytes_by_iteration = 0_u32; |
1441 | | for d in q.iter(m).unwrap().next().unwrap() { |
1442 | | yielded_bytes_by_iteration = yielded_bytes_by_iteration |
1443 | | .checked_add(d.len()) |
1444 | | .expect("iterator should not yield more than 2^32 bytes"); |
1445 | | } |
1446 | | } |
1447 | | |
1448 | | // Same as above, but test with a descriptor which is self-referential |
1449 | | { |
1450 | | let descs = vec![RawDescriptor::from(SplitDescriptor::new( |
1451 | | 0x1000, |
1452 | | 0xffff_ffff, |
1453 | | VRING_DESC_F_NEXT as u16, |
1454 | | 0, |
1455 | | ))]; |
1456 | | vq.add_desc_chains(&descs, 0).unwrap(); |
1457 | | let mut yielded_bytes_by_iteration = 0_u32; |
1458 | | for d in q.iter(m).unwrap().next().unwrap() { |
1459 | | yielded_bytes_by_iteration = yielded_bytes_by_iteration |
1460 | | .checked_add(d.len()) |
1461 | | .expect("iterator should not yield more than 2^32 bytes"); |
1462 | | } |
1463 | | } |
1464 | | } |
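A quick arithmetic check for the 2^32-byte rule exercised above: if the iterator yielded both descriptors of the broken chain, their combined length would overflow a `u32`, so a conforming iterator has to cut the chain short (hypothetical standalone check, not the crate's code):

```rust
fn main() {
    // The two descriptor lengths from the broken chain above.
    let total = 0xffff_ffff_u32.checked_add(0x1234_5678);
    assert!(total.is_none()); // would exceed 2^32 - 1 bytes
}
```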
1465 | | |
1466 | | #[test] |
1467 | | fn test_regression_iterator_division() { |
1468 | | // This is a regression test checking that the iterator does not try to divide |
1469 | | // by 0 when the queue size is 0. |
1470 | | let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); |
1471 | | let vq = MockSplitQueue::new(m, 1); |
1472 | | // This input was generated by the fuzzer, both for the queue state and the descriptor. |
1473 | | let descriptors: Vec<RawDescriptor> = vec![RawDescriptor::from(SplitDescriptor::new( |
1474 | | 14178673876262995140, |
1475 | | 3301229764, |
1476 | | 50372, |
1477 | | 50372, |
1478 | | ))]; |
1479 | | vq.build_desc_chain(&descriptors).unwrap(); |
1480 | | |
1481 | | let mut q = Queue { |
1482 | | max_size: 38, |
1483 | | next_avail: Wrapping(0), |
1484 | | next_used: Wrapping(0), |
1485 | | event_idx_enabled: false, |
1486 | | num_added: Wrapping(0), |
1487 | | size: 0, |
1488 | | ready: false, |
1489 | | desc_table: GuestAddress(12837708984796196), |
1490 | | avail_ring: GuestAddress(0), |
1491 | | used_ring: GuestAddress(9943947977301164032), |
1492 | | }; |
1493 | | |
1494 | | assert!(q.pop_descriptor_chain(m).is_none()); |
1495 | | } |
1496 | | |
1497 | | #[test] |
1498 | | fn test_setters_error_cases() { |
1499 | | assert_eq!(Queue::new(15).unwrap_err(), Error::InvalidMaxSize); |
1500 | | let mut q = Queue::new(16).unwrap(); |
1501 | | |
1502 | | let expected_val = q.desc_table.0; |
1503 | | assert_eq!( |
1504 | | q.try_set_desc_table_address(GuestAddress(0xf)).unwrap_err(), |
1505 | | Error::InvalidDescTableAlign |
1506 | | ); |
1507 | | assert_eq!(q.desc_table(), expected_val); |
1508 | | |
1509 | | let expected_val = q.avail_ring.0; |
1510 | | assert_eq!( |
1511 | | q.try_set_avail_ring_address(GuestAddress(0x1)).unwrap_err(), |
1512 | | Error::InvalidAvailRingAlign |
1513 | | ); |
1514 | | assert_eq!(q.avail_ring(), expected_val); |
1515 | | |
1516 | | let expected_val = q.used_ring.0; |
1517 | | assert_eq!( |
1518 | | q.try_set_used_ring_address(GuestAddress(0x3)).unwrap_err(), |
1519 | | Error::InvalidUsedRingAlign |
1520 | | ); |
1521 | | assert_eq!(q.used_ring(), expected_val); |
1522 | | |
1523 | | let expected_val = q.size; |
1524 | | assert_eq!(q.try_set_size(15).unwrap_err(), Error::InvalidSize); |
1525 | | assert_eq!(q.size(), expected_val) |
1526 | | } |
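In contrast to the `set_*` setters (which, as `test_queue_is_valid` shows, silently keep the previous value on bad input), the `try_set_*` variants report the specific error. A minimal sketch of calling them from outside the crate, assuming the methods are reachable exactly as in the test above:

```rust
use virtio_queue::{Error, Queue, QueueT};
use vm_memory::GuestAddress;

fn main() {
    let mut q = Queue::new(16).unwrap();

    // A descriptor table address must be 16-byte aligned; the error names the
    // violated constraint instead of the value being silently ignored.
    assert!(matches!(
        q.try_set_desc_table_address(GuestAddress(0xf)),
        Err(Error::InvalidDescTableAlign)
    ));

    // An aligned address is accepted and becomes visible through the getter.
    q.try_set_desc_table_address(GuestAddress(0x1000)).unwrap();
    assert_eq!(q.desc_table(), 0x1000);
}
```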
1527 | | |
1528 | | #[test] |
1529 | | // This is a regression test for a fuzzing finding. If the driver requests a reset of the |
1530 | | // device, but then does not re-initialize the queue, then a subsequent call to process |
1531 | | // a request should yield no descriptors to process. Before this fix we were processing |
1532 | | // descriptors that had been added to the queue earlier, and ended up processing 255 |
1533 | | // descriptors per chain. |
1534 | | fn test_regression_timeout_after_reset() { |
1535 | | // The input below was generated by libfuzzer and adapted for this test. |
1536 | | let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x10000)]).unwrap(); |
1537 | | let vq = MockSplitQueue::new(m, 1024); |
1538 | | |
1539 | | // This input below was generated by the fuzzer. |
1540 | | let descriptors: Vec<RawDescriptor> = vec![ |
1541 | | RawDescriptor::from(SplitDescriptor::new(21508325467, 0, 1, 4)), |
1542 | | RawDescriptor::from(SplitDescriptor::new(2097152, 4096, 3, 0)), |
1543 | | RawDescriptor::from(SplitDescriptor::new( |
1544 | | 18374686479672737792, |
1545 | | 4294967295, |
1546 | | 65535, |
1547 | | 29, |
1548 | | )), |
1549 | | RawDescriptor::from(SplitDescriptor::new(76842670169653248, 1114115, 0, 0)), |
1550 | | RawDescriptor::from(SplitDescriptor::new(16, 983040, 126, 3)), |
1551 | | RawDescriptor::from(SplitDescriptor::new(897648164864, 0, 0, 0)), |
1552 | | RawDescriptor::from(SplitDescriptor::new(111669149722, 0, 0, 0)), |
1553 | | ]; |
1554 | | vq.build_multiple_desc_chains(&descriptors).unwrap(); |
1555 | | |
1556 | | let mut q: Queue = vq.create_queue().unwrap(); |
1557 | | |
1558 | | // Setting the queue to ready should not allow consuming descriptors after reset. |
1559 | | q.reset(); |
1560 | | q.set_ready(true); |
1561 | | let mut counter = 0; |
1562 | | while let Some(mut desc_chain) = q.pop_descriptor_chain(m) { |
1563 | | // this empty loop is here to check that there are no side effects |
1564 | | // in terms of memory & execution time. |
1565 | | while desc_chain.next().is_some() { |
1566 | | counter += 1; |
1567 | | } |
1568 | | } |
1569 | | assert_eq!(counter, 0); |
1570 | | |
1571 | | // Setting the avail_addr to valid should not allow consuming descriptors after reset. |
1572 | | q.reset(); |
1573 | | q.set_avail_ring_address(Some(0x1000), None); |
1574 | | assert_eq!(q.avail_ring, GuestAddress(0x1000)); |
1575 | | counter = 0; |
1576 | | while let Some(mut desc_chain) = q.pop_descriptor_chain(m) { |
1577 | | // this empty loop is here to check that there are no side effects |
1578 | | // in terms of memory & execution time. |
1579 | | while desc_chain.next().is_some() { |
1580 | | counter += 1; |
1581 | | } |
1582 | | } |
1583 | | assert_eq!(counter, 0); |
1584 | | } |
1585 | | } |