Coverage Report

Created: 2024-05-21 06:19

/rust/registry/src/index.crates.io-6f17d22bba15001f/virtio-queue-0.12.0/src/queue.rs
Line | Count | Source
   1 |        | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
   2 |        | // Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
   3 |        | // Copyright © 2019 Intel Corporation.
   4 |        | // Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
   5 |        | // Use of this source code is governed by a BSD-style license that can be
   6 |        | // found in the LICENSE-BSD-3-Clause file.
   7 |        | //
   8 |        | // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
   9 |        |
  10 |        | use std::mem::size_of;
  11 |        | use std::num::Wrapping;
  12 |        | use std::ops::Deref;
  13 |        | use std::sync::atomic::{fence, Ordering};
  14 |        |
  15 |        | use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};
  16 |        |
  17 |        | use crate::defs::{
  18 |        |     DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR,
  19 |        |     VIRTQ_AVAIL_ELEMENT_SIZE, VIRTQ_AVAIL_RING_HEADER_SIZE, VIRTQ_AVAIL_RING_META_SIZE,
  20 |        |     VIRTQ_USED_ELEMENT_SIZE, VIRTQ_USED_RING_HEADER_SIZE, VIRTQ_USED_RING_META_SIZE,
  21 |        | };
  22 |        | use crate::{
  23 |        |     error, Descriptor, DescriptorChain, Error, QueueGuard, QueueOwnedT, QueueState, QueueT,
  24 |        |     VirtqUsedElem,
  25 |        | };
  26 |        | use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
  27 |        |
  28 |        | /// The maximum queue size as defined in the Virtio Spec.
  29 |        | pub const MAX_QUEUE_SIZE: u16 = 32768;
  30 |        |
  31 |        | /// Struct to maintain information and manipulate a virtio queue.
  32 |        | ///
  33 |        | /// # Example
  34 |        | ///
  35 |        | /// ```rust
  36 |        | /// use virtio_queue::{Queue, QueueOwnedT, QueueT};
  37 |        | /// use vm_memory::{Bytes, GuestAddress, GuestAddressSpace, GuestMemoryMmap};
  38 |        | ///
  39 |        | /// let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
  40 |        | /// let mut queue = Queue::new(1024).unwrap();
  41 |        | ///
  42 |        | /// // First, the driver sets up the queue; this setup is done via writes on the bus (PCI, MMIO).
  43 |        | /// queue.set_size(8);
  44 |        | /// queue.set_desc_table_address(Some(0x1000), None);
  45 |        | /// queue.set_avail_ring_address(Some(0x2000), None);
  46 |        | /// queue.set_used_ring_address(Some(0x3000), None);
  47 |        | /// queue.set_event_idx(true);
  48 |        | /// queue.set_ready(true);
  49 |        | /// // The user should check if the queue is valid before starting to use it.
  50 |        | /// assert!(queue.is_valid(&m));
  51 |        | ///
  52 |        | /// // Here the driver would add entries in the available ring and then update the `idx` field of
  53 |        | /// // the available ring (address = 0x2000 + 2).
  54 |        | /// m.write_obj(3, GuestAddress(0x2002));
  55 |        | ///
  56 |        | /// loop {
  57 |        | ///     queue.disable_notification(&m).unwrap();
  58 |        | ///
  59 |        | ///     // Consume entries from the available ring.
  60 |        | ///     while let Some(chain) = queue.iter(&m).unwrap().next() {
  61 |        | ///         // Process the descriptor chain, and then add an entry in the used ring and optionally
  62 |        | ///         // notify the driver.
  63 |        | ///         queue.add_used(&m, chain.head_index(), 0x100).unwrap();
  64 |        | ///
  65 |        | ///         if queue.needs_notification(&m).unwrap() {
  66 |        | ///             // Here we would notify the driver it has new entries in the used ring to consume.
  67 |        | ///         }
  68 |        | ///     }
  69 |        | ///     if !queue.enable_notification(&m).unwrap() {
  70 |        | ///         break;
  71 |        | ///     }
  72 |        | /// }
  73 |        | ///
  74 |        | /// // We can reset the queue at some point.
  75 |        | /// queue.reset();
  76 |        | /// // The queue should not be ready after reset.
  77 |        | /// assert!(!queue.ready());
  78 |        | /// ```
  79 |      0 | #[derive(Debug, Default, PartialEq, Eq)]
  80 |        | pub struct Queue {
  81 |        |     /// The maximum size in elements offered by the device.
  82 |        |     max_size: u16,
  83 |        |
  84 |        |     /// Tail position of the available ring.
  85 |        |     next_avail: Wrapping<u16>,
  86 |        |
  87 |        |     /// Head position of the used ring.
  88 |        |     next_used: Wrapping<u16>,
  89 |        |
  90 |        |     /// VIRTIO_F_RING_EVENT_IDX negotiated.
  91 |        |     event_idx_enabled: bool,
  92 |        |
  93 |        |     /// The number of descriptor chains placed in the used ring via `add_used`
  94 |        |     /// since the last time `needs_notification` was called on the associated queue.
  95 |        |     num_added: Wrapping<u16>,
  96 |        |
  97 |        |     /// The queue size in elements the driver selected.
  98 |        |     size: u16,
  99 |        |
 100 |        |     /// Indicates if the queue is finished with configuration.
 101 |        |     ready: bool,
 102 |        |
 103 |        |     /// Guest physical address of the descriptor table.
 104 |        |     desc_table: GuestAddress,
 105 |        |
 106 |        |     /// Guest physical address of the available ring.
 107 |        |     avail_ring: GuestAddress,
 108 |        |
 109 |        |     /// Guest physical address of the used ring.
 110 |        |     used_ring: GuestAddress,
 111 |        | }
 112 |        |
 113 |        | impl Queue {
 114 |        |     /// Equivalent of [`QueueT::set_size`] returning an error in case of invalid size.
 115 |        |     ///
 116 |        |     /// This should not be directly used, as the preferred method is part of the [`QueueT`]
 117 |        |     /// interface. This is a convenience function for implementing save/restore capabilities.
 118 |  8.59k |     pub fn try_set_size(&mut self, size: u16) -> Result<(), Error> {
 119 |  8.59k |         if size > self.max_size() || size == 0 || (size & (size - 1)) != 0 {
 120 |  8.16k |             return Err(Error::InvalidSize);
 121 |    434 |         }
 122 |    434 |         self.size = size;
 123 |    434 |         Ok(())
 124 |  8.59k |     }
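
The rejection test on line 119 uses the classic bit trick `size & (size - 1)`, which clears the lowest set bit of `size` and therefore yields zero exactly for powers of two. A small sketch of that predicate in isolation (the helper name is illustrative, not part of the crate):

```rust
// `n & (n - 1)` clears the lowest set bit, so the result is zero
// exactly when `n` has a single set bit, i.e. `n` is a power of two.
fn is_power_of_two(n: u16) -> bool {
    n != 0 && (n & (n - 1)) == 0
}

fn main() {
    assert!(is_power_of_two(256));
    assert!(!is_power_of_two(11)); // 0b1011 & 0b1010 == 0b1010 != 0
    assert!(!is_power_of_two(0)); // zero-sized queues are rejected too
}
```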
 125 |        |
 126 |        |     /// Tries to set the descriptor table address. In case of an invalid value, the address is
 127 |        |     /// not updated.
 128 |        |     ///
 129 |        |     /// This should not be directly used, as the preferred method is
 130 |        |     /// [`QueueT::set_desc_table_address`]. This is a convenience function for implementing
 131 |        |     /// save/restore capabilities.
 132 |  8.59k |     pub fn try_set_desc_table_address(&mut self, desc_table: GuestAddress) -> Result<(), Error> {
 133 |  8.59k |         if desc_table.mask(0xf) != 0 {
 134 |      0 |             return Err(Error::InvalidDescTableAlign);
 135 |  8.59k |         }
 136 |  8.59k |         self.desc_table = desc_table;
 137 |  8.59k |
 138 |  8.59k |         Ok(())
 139 |  8.59k |     }
 140 |        |
 141 |        |     /// Tries to update the available ring address. In case of an invalid value, the address is
 142 |        |     /// not updated.
 143 |        |     ///
 144 |        |     /// This should not be directly used, as the preferred method is
 145 |        |     /// [`QueueT::set_avail_ring_address`]. This is a convenience function for implementing
 146 |        |     /// save/restore capabilities.
 147 |  8.59k |     pub fn try_set_avail_ring_address(&mut self, avail_ring: GuestAddress) -> Result<(), Error> {
 148 |  8.59k |         if avail_ring.mask(0x1) != 0 {
 149 |      0 |             return Err(Error::InvalidAvailRingAlign);
 150 |  8.59k |         }
 151 |  8.59k |         self.avail_ring = avail_ring;
 152 |  8.59k |         Ok(())
 153 |  8.59k |     }
 154 |        |
 155 |        |     /// Tries to update the used ring address. In case of an invalid value, the address is not
 156 |        |     /// updated.
 157 |        |     ///
 158 |        |     /// This should not be directly used, as the preferred method is
 159 |        |     /// [`QueueT::set_used_ring_address`]. This is a convenience function for implementing
 160 |        |     /// save/restore capabilities.
 161 |  8.59k |     pub fn try_set_used_ring_address(&mut self, used_ring: GuestAddress) -> Result<(), Error> {
 162 |  8.59k |         if used_ring.mask(0x3) != 0 {
 163 |      0 |             return Err(Error::InvalidUsedRingAlign);
 164 |  8.59k |         }
 165 |  8.59k |         self.used_ring = used_ring;
 166 |  8.59k |         Ok(())
 167 |  8.59k |     }
 168 |        |
 169 |        |     /// Returns the state of the `Queue`.
 170 |        |     ///
 171 |        |     /// This is useful for implementing save/restore capabilities.
 172 |        |     /// The state does not have support for serialization, but this can be
 173 |        |     /// added by VMMs locally through the use of a
 174 |        |     /// [remote type](https://serde.rs/remote-derive.html).
 175 |        |     ///
 176 |        |     /// Alternatively, a version-aware and serializable/deserializable QueueState
 177 |        |     /// is available in the `virtio-queue-ser` crate.
 178 |      0 |     pub fn state(&self) -> QueueState {
 179 |      0 |         QueueState {
 180 |      0 |             max_size: self.max_size,
 181 |      0 |             next_avail: self.next_avail(),
 182 |      0 |             next_used: self.next_used(),
 183 |      0 |             event_idx_enabled: self.event_idx_enabled,
 184 |      0 |             size: self.size,
 185 |      0 |             ready: self.ready,
 186 |      0 |             desc_table: self.desc_table(),
 187 |      0 |             avail_ring: self.avail_ring(),
 188 |      0 |             used_ring: self.used_ring(),
 189 |      0 |         }
 190 |      0 |     }
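
The `state` getter pairs naturally with the fallible setters above for save/restore. A minimal restore sketch, assuming `QueueState`'s fields are publicly readable (as the construction above suggests) and using only methods visible in this file; note that `num_added` is not part of the captured state and restarts at zero:

```rust
use virtio_queue::{Queue, QueueState, QueueT};
use vm_memory::GuestAddress;

// Rebuild a Queue from a previously captured QueueState; error
// handling is reduced to `unwrap` for brevity.
fn restore(state: &QueueState) -> Queue {
    let mut q = Queue::new(state.max_size).unwrap();
    q.try_set_size(state.size).unwrap();
    q.try_set_desc_table_address(GuestAddress(state.desc_table)).unwrap();
    q.try_set_avail_ring_address(GuestAddress(state.avail_ring)).unwrap();
    q.try_set_used_ring_address(GuestAddress(state.used_ring)).unwrap();
    q.set_next_avail(state.next_avail);
    q.set_next_used(state.next_used);
    q.set_event_idx(state.event_idx_enabled);
    q.set_ready(state.ready);
    q
}
```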
 191 |        |
 192 |        |     // Helper method that writes `val` to the `avail_event` field of the used ring, using
 193 |        |     // the provided ordering.
 194 |      0 |     fn set_avail_event<M: GuestMemory>(
 195 |      0 |         &self,
 196 |      0 |         mem: &M,
 197 |      0 |         val: u16,
 198 |      0 |         order: Ordering,
 199 |      0 |     ) -> Result<(), Error> {
 200 |      0 |         // This cannot overflow a u64 since it is working with relatively small numbers compared
 201 |      0 |         // to u64::MAX.
 202 |      0 |         let avail_event_offset =
 203 |      0 |             VIRTQ_USED_RING_HEADER_SIZE + VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size);
 204 |      0 |         let addr = self
 205 |      0 |             .used_ring
 206 |      0 |             .checked_add(avail_event_offset)
 207 |      0 |             .ok_or(Error::AddressOverflow)?;
 208 |        |
 209 |      0 |         mem.store(u16::to_le(val), addr, order)
 210 |      0 |             .map_err(Error::GuestMemory)
 211 |      0 |     }
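
As a worked instance of the offset arithmetic in `set_avail_event`: `avail_event` sits immediately after the last used-ring element. The constant values below are assumptions based on the virtio 1.x split-ring layout (a 4-byte header holding `flags` and `idx`, 8-byte elements holding `id` and `len`), not values quoted from this crate:

```rust
const USED_RING_HEADER_SIZE: u64 = 4; // flags (u16) + idx (u16), assumed
const USED_ELEMENT_SIZE: u64 = 8; // id (u32) + len (u32), assumed

// Hypothetical mirror of the computation in `set_avail_event`.
fn avail_event_offset(queue_size: u16) -> u64 {
    USED_RING_HEADER_SIZE + USED_ELEMENT_SIZE * u64::from(queue_size)
}

fn main() {
    // For a 256-entry queue the field sits 4 + 8 * 256 = 2052 bytes
    // past the base address of the used ring.
    assert_eq!(avail_event_offset(256), 2052);
}
```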
 212 |        |
 213 |        |     // Set the value of the `flags` field of the used ring, applying the specified ordering.
 214 | 15.5k  |     fn set_used_flags<M: GuestMemory>(
 215 | 15.5k  |         &mut self,
 216 | 15.5k  |         mem: &M,
 217 | 15.5k  |         val: u16,
 218 | 15.5k  |         order: Ordering,
 219 | 15.5k  |     ) -> Result<(), Error> {
 220 | 15.5k  |         mem.store(u16::to_le(val), self.used_ring, order)
 221 | 15.5k  |             .map_err(Error::GuestMemory)
 222 | 15.5k  |     }
 223 |        |
 224 |        |     // Write the appropriate values to enable or disable notifications from the driver.
 225 |        |     //
 226 |        |     // Every access in this method uses `Relaxed` ordering because a fence is added by the caller
 227 |        |     // when appropriate.
 228 | 15.5k  |     fn set_notification<M: GuestMemory>(&mut self, mem: &M, enable: bool) -> Result<(), Error> {
 229 | 15.5k  |         if enable {
 230 | 15.5k  |             if self.event_idx_enabled {
 231 |        |                 // We call `set_avail_event` using the `next_avail` value, instead of reading
 232 |        |                 // and using the current `avail_idx` to avoid missing notifications. More
 233 |        |                 // details in `enable_notification`.
 234 |      0 |                 self.set_avail_event(mem, self.next_avail.0, Ordering::Relaxed)
 235 |        |             } else {
 236 | 15.5k  |                 self.set_used_flags(mem, 0, Ordering::Relaxed)
 237 |        |             }
 238 |      0 |         } else if !self.event_idx_enabled {
 239 |      0 |             self.set_used_flags(mem, VRING_USED_F_NO_NOTIFY as u16, Ordering::Relaxed)
 240 |        |         } else {
 241 |        |             // Notifications are effectively disabled by default after triggering once when
 242 |        |             // `VIRTIO_F_EVENT_IDX` is negotiated, so we don't do anything in that case.
 243 |      0 |             Ok(())
 244 |        |         }
 245 | 15.5k  |     }
 246 |        |
 247 |        |     // Return the value present in the used_event field of the avail ring.
 248 |        |     //
 249 |        |     // If the VIRTIO_F_EVENT_IDX feature bit is not negotiated, the flags field in the available
 250 |        |     // ring offers a crude mechanism for the driver to inform the device that it doesn’t want
 251 |        |     // interrupts when buffers are used. Otherwise virtq_avail.used_event is a more performant
 252 |        |     // alternative where the driver specifies how far the device can progress before interrupting.
 253 |        |     //
 254 |        |     // Neither of these interrupt suppression methods is reliable, as they are not synchronized
 255 |        |     // with the device, but they serve as useful optimizations. So we only ensure access to the
 256 |        |     // virtq_avail.used_event is atomic, but do not need to synchronize with other memory accesses.
 257 |      0 |     fn used_event<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
 258 |      0 |         // This cannot overflow a u64 since it is working with relatively small numbers compared
 259 |      0 |         // to u64::MAX.
 260 |      0 |         let used_event_offset =
 261 |      0 |             VIRTQ_AVAIL_RING_HEADER_SIZE + u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE;
 262 |      0 |         let used_event_addr = self
 263 |      0 |             .avail_ring
 264 |      0 |             .checked_add(used_event_offset)
 265 |      0 |             .ok_or(Error::AddressOverflow)?;
 266 |        |
 267 |      0 |         mem.load(used_event_addr, order)
 268 |      0 |             .map(u16::from_le)
 269 |      0 |             .map(Wrapping)
 270 |      0 |             .map_err(Error::GuestMemory)
 271 |      0 |     }
 272 |        | }
 273 |        |
 274 |        | impl<'a> QueueGuard<'a> for Queue {
 275 |        |     type G = &'a mut Self;
 276 |        | }
 277 |        |
 278 |        | impl QueueT for Queue {
 279 |  8.59k |     fn new(max_size: u16) -> Result<Self, Error> {
 280 |  8.59k |         // We need to check that the max size is a power of 2 because we're setting this as the
 281 |  8.59k |         // queue size, and the valid queue sizes are a power of 2 as per the specification.
 282 |  8.59k |         if max_size == 0 || max_size > MAX_QUEUE_SIZE || (max_size & (max_size - 1)) != 0 {
 283 |      0 |             return Err(Error::InvalidMaxSize);
 284 |  8.59k |         }
 285 |  8.59k |         Ok(Queue {
 286 |  8.59k |             max_size,
 287 |  8.59k |             size: max_size,
 288 |  8.59k |             ready: false,
 289 |  8.59k |             desc_table: GuestAddress(DEFAULT_DESC_TABLE_ADDR),
 290 |  8.59k |             avail_ring: GuestAddress(DEFAULT_AVAIL_RING_ADDR),
 291 |  8.59k |             used_ring: GuestAddress(DEFAULT_USED_RING_ADDR),
 292 |  8.59k |             next_avail: Wrapping(0),
 293 |  8.59k |             next_used: Wrapping(0),
 294 |  8.59k |             event_idx_enabled: false,
 295 |  8.59k |             num_added: Wrapping(0),
 296 |  8.59k |         })
 297 |  8.59k |     }
 298 |        |
 299 |      0 |     fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
 300 |      0 |         let queue_size = self.size as u64;
 301 |      0 |         let desc_table = self.desc_table;
 302 |      0 |         // The multiplication cannot overflow a u64 since we are multiplying a u16 with a
 303 |      0 |         // small number.
 304 |      0 |         let desc_table_size = size_of::<Descriptor>() as u64 * queue_size;
 305 |      0 |         let avail_ring = self.avail_ring;
 306 |      0 |         // The operations below cannot overflow a u64 since they're working with relatively small
 307 |      0 |         // numbers compared to u64::MAX.
 308 |      0 |         let avail_ring_size = VIRTQ_AVAIL_RING_META_SIZE + VIRTQ_AVAIL_ELEMENT_SIZE * queue_size;
 309 |      0 |         let used_ring = self.used_ring;
 310 |      0 |         let used_ring_size = VIRTQ_USED_RING_META_SIZE + VIRTQ_USED_ELEMENT_SIZE * queue_size;
 311 |      0 |
 312 |      0 |         if !self.ready {
 313 |      0 |             error!("attempt to use virtio queue that is not marked ready");
 314 |      0 |             false
 315 |      0 |         } else if desc_table
 316 |      0 |             .checked_add(desc_table_size)
 317 |      0 |             .map_or(true, |v| !mem.address_in_range(v))
 318 |        |         {
 319 |      0 |             error!(
 320 |      0 |                 "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
 321 |      0 |                 desc_table.raw_value(),
 322 |        |                 desc_table_size
 323 |        |             );
 324 |      0 |             false
 325 |      0 |         } else if avail_ring
 326 |      0 |             .checked_add(avail_ring_size)
 327 |      0 |             .map_or(true, |v| !mem.address_in_range(v))
 328 |        |         {
 329 |      0 |             error!(
 330 |      0 |                 "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
 331 |      0 |                 avail_ring.raw_value(),
 332 |        |                 avail_ring_size
 333 |        |             );
 334 |      0 |             false
 335 |      0 |         } else if used_ring
 336 |      0 |             .checked_add(used_ring_size)
 337 |      0 |             .map_or(true, |v| !mem.address_in_range(v))
 338 |        |         {
 339 |      0 |             error!(
 340 |      0 |                 "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
 341 |      0 |                 used_ring.raw_value(),
 342 |        |                 used_ring_size
 343 |        |             );
 344 |      0 |             false
 345 |        |         } else {
 346 |      0 |             true
 347 |        |         }
 348 |      0 |     }
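
The three bounds checks above are easy to trip deliberately. A minimal sketch, assuming a single 64 KiB guest region starting at address 0, so a 16-entry used ring (a bit over 128 bytes) placed at 0xfffc must spill past the end of memory and fail validation:

```rust
use virtio_queue::{Queue, QueueT};
use vm_memory::{GuestAddress, GuestMemoryMmap};

fn main() {
    let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let mut q = Queue::new(16).unwrap();
    q.set_desc_table_address(Some(0x1000), None);
    q.set_avail_ring_address(Some(0x2000), None);
    // 0xfffc satisfies the 4-byte alignment rule, but the ring itself
    // would run past the 64 KiB region, so the queue is not valid.
    q.set_used_ring_address(Some(0xfffc), None);
    q.set_ready(true);
    assert!(!q.is_valid(&m));
}
```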
 349 |        |
 350 |      0 |     fn reset(&mut self) {
 351 |      0 |         self.ready = false;
 352 |      0 |         self.size = self.max_size;
 353 |      0 |         self.desc_table = GuestAddress(DEFAULT_DESC_TABLE_ADDR);
 354 |      0 |         self.avail_ring = GuestAddress(DEFAULT_AVAIL_RING_ADDR);
 355 |      0 |         self.used_ring = GuestAddress(DEFAULT_USED_RING_ADDR);
 356 |      0 |         self.next_avail = Wrapping(0);
 357 |      0 |         self.next_used = Wrapping(0);
 358 |      0 |         self.num_added = Wrapping(0);
 359 |      0 |         self.event_idx_enabled = false;
 360 |      0 |     }
 361 |        |
 362 |      0 |     fn lock(&mut self) -> <Self as QueueGuard>::G {
 363 |      0 |         self
 364 |      0 |     }
 365 |        |
 366 |  8.59k |     fn max_size(&self) -> u16 {
 367 |  8.59k |         self.max_size
 368 |  8.59k |     }
 369 |        |
 370 |    752 |     fn size(&self) -> u16 {
 371 |    752 |         self.size
 372 |    752 |     }
 373 |        |
 374 |  8.59k |     fn set_size(&mut self, size: u16) {
 375 |  8.59k |         if self.try_set_size(size).is_err() {
 376 |  8.16k |             error!("virtio queue with invalid size: {}", size);
 377 |    434 |         }
 378 |  8.59k |     }
 379 |        |
 380 |      0 |     fn ready(&self) -> bool {
 381 |      0 |         self.ready
 382 |      0 |     }
 383 |        |
 384 |  8.59k |     fn set_ready(&mut self, ready: bool) {
 385 |  8.59k |         self.ready = ready;
 386 |  8.59k |     }
 387 |        |
 388 |      0 |     fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) {
 389 |      0 |         let low = low.unwrap_or(self.desc_table.0 as u32) as u64;
 390 |      0 |         let high = high.unwrap_or((self.desc_table.0 >> 32) as u32) as u64;
 391 |      0 |
 392 |      0 |         let desc_table = GuestAddress((high << 32) | low);
 393 |      0 |         if self.try_set_desc_table_address(desc_table).is_err() {
 394 |      0 |             error!("virtio queue descriptor table breaks alignment constraints");
 395 |      0 |         }
 396 |      0 |     }
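
A worked example of the low/high split used by `set_desc_table_address` and the two ring-address setters below; the helper simply mirrors the combining logic above:

```rust
// Combine optional 32-bit halves with the current 64-bit address,
// exactly as the setters do: an absent half keeps its current value.
fn combine(low: Option<u32>, high: Option<u32>, current: u64) -> u64 {
    let low = low.unwrap_or(current as u32) as u64;
    let high = high.unwrap_or((current >> 32) as u32) as u64;
    (high << 32) | low
}

fn main() {
    // Writing only the low half keeps the existing high half.
    assert_eq!(combine(Some(0x1000), None, 0x7_0000_0000), 0x7_0000_1000);
    // Writing both halves replaces the full 64-bit address.
    assert_eq!(combine(Some(0x2000), Some(0x1), 0), 0x1_0000_2000);
}
```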
 397 |        |
 398 |      0 |     fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
 399 |      0 |         let low = low.unwrap_or(self.avail_ring.0 as u32) as u64;
 400 |      0 |         let high = high.unwrap_or((self.avail_ring.0 >> 32) as u32) as u64;
 401 |      0 |
 402 |      0 |         let avail_ring = GuestAddress((high << 32) | low);
 403 |      0 |         if self.try_set_avail_ring_address(avail_ring).is_err() {
 404 |      0 |             error!("virtio queue available ring breaks alignment constraints");
 405 |      0 |         }
 406 |      0 |     }
 407 |        |
 408 |      0 |     fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
 409 |      0 |         let low = low.unwrap_or(self.used_ring.0 as u32) as u64;
 410 |      0 |         let high = high.unwrap_or((self.used_ring.0 >> 32) as u32) as u64;
 411 |      0 |
 412 |      0 |         let used_ring = GuestAddress((high << 32) | low);
 413 |      0 |         if self.try_set_used_ring_address(used_ring).is_err() {
 414 |      0 |             error!("virtio queue used ring breaks alignment constraints");
 415 |      0 |         }
 416 |      0 |     }
 417 |        |
 418 |  9.78k |     fn set_event_idx(&mut self, enabled: bool) {
 419 |  9.78k |         self.event_idx_enabled = enabled;
 420 |  9.78k |     }
 421 |        |
 422 |   201k |     fn avail_idx<M>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error>
 423 |   201k |     where
 424 |   201k |         M: GuestMemory + ?Sized,
 425 |   201k |     {
 426 |   201k |         let addr = self
 427 |   201k |             .avail_ring
 428 |   201k |             .checked_add(2)
 429 |   201k |             .ok_or(Error::AddressOverflow)?;
 430 |        |
 431 |   201k |         mem.load(addr, order)
 432 |   201k |             .map(u16::from_le)
 433 |   201k |             .map(Wrapping)
 434 |   201k |             .map_err(Error::GuestMemory)
 435 |   201k |     }
 436 |        |
 437 |    597 |     fn used_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
 438 |    597 |         let addr = self
 439 |    597 |             .used_ring
 440 |    597 |             .checked_add(2)
 441 |    597 |             .ok_or(Error::AddressOverflow)?;
 442 |        |
 443 |    597 |         mem.load(addr, order)
 444 |    597 |             .map(u16::from_le)
 445 |    597 |             .map(Wrapping)
 446 |    597 |             .map_err(Error::GuestMemory)
 447 |    597 |     }
 448 |        |
 449 |   166k |     fn add_used<M: GuestMemory>(
 450 |   166k |         &mut self,
 451 |   166k |         mem: &M,
 452 |   166k |         head_index: u16,
 453 |   166k |         len: u32,
 454 |   166k |     ) -> Result<(), Error> {
 455 |   166k |         if head_index >= self.size {
 456 |    148 |             error!(
 457 |      0 |                 "attempted to add out of bounds descriptor to used ring: {}",
 458 |        |                 head_index
 459 |        |             );
 460 |    148 |             return Err(Error::InvalidDescriptorIndex);
 461 |   166k |         }
 462 |   166k |
 463 |   166k |         let next_used_index = u64::from(self.next_used.0 % self.size);
 464 |   166k |         // This cannot overflow a u64 since it is working with relatively small numbers compared
 465 |   166k |         // to u64::MAX.
 466 |   166k |         let offset = VIRTQ_USED_RING_HEADER_SIZE + next_used_index * VIRTQ_USED_ELEMENT_SIZE;
 467 |   166k |         let addr = self
 468 |   166k |             .used_ring
 469 |   166k |             .checked_add(offset)
 470 |   166k |             .ok_or(Error::AddressOverflow)?;
 471 |   166k |         mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
 472 |   166k |             .map_err(Error::GuestMemory)?;
 473 |        |
 474 |   166k |         self.next_used += Wrapping(1);
 475 |   166k |         self.num_added += Wrapping(1);
 476 |   166k |
 477 |   166k |         mem.store(
 478 |   166k |             u16::to_le(self.next_used.0),
 479 |   166k |             self.used_ring
 480 |   166k |                 .checked_add(2)
 481 |   166k |                 .ok_or(Error::AddressOverflow)?,
 482 |   166k |             Ordering::Release,
 483 |   166k |         )
 484 |   166k |         .map_err(Error::GuestMemory)
 485 |   166k |     }
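
The slot selection above wraps via `next_used.0 % self.size`. A small sketch of the arithmetic, reusing the assumed split-ring constants from earlier (4-byte header, 8-byte elements):

```rust
const USED_RING_HEADER_SIZE: u64 = 4; // assumed, as above
const USED_ELEMENT_SIZE: u64 = 8; // assumed, as above

// Hypothetical mirror of the slot computation in `add_used`.
fn used_slot_offset(next_used: u16, queue_size: u16) -> u64 {
    let slot = u64::from(next_used % queue_size);
    USED_RING_HEADER_SIZE + slot * USED_ELEMENT_SIZE
}

fn main() {
    // With an 8-entry queue, the ninth completion (next_used == 8)
    // wraps back to slot 0, i.e. offset 4 from the used ring base.
    assert_eq!(used_slot_offset(8, 8), 4);
    assert_eq!(used_slot_offset(9, 8), 4 + 8);
}
```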
 486 |        |
 487 |        |     // TODO: Turn this into a doc comment/example.
 488 |        |     // With the current implementation, a common way of consuming entries from the available ring
 489 |        |     // while also leveraging notification suppression is to use a loop, for example:
 490 |        |     //
 491 |        |     // loop {
 492 |        |     //     // We have to explicitly disable notifications if `VIRTIO_F_EVENT_IDX` has not been
 493 |        |     //     // negotiated.
 494 |        |     //     self.disable_notification()?;
 495 |        |     //
 496 |        |     //     for chain in self.iter()? {
 497 |        |     //         // Do something with each chain ...
 498 |        |     //         // Let's assume we process all available chains here.
 499 |        |     //     }
 500 |        |     //
 501 |        |     //     // If `enable_notification` returns `true`, the driver has added more entries to the
 502 |        |     //     // available ring.
 503 |        |     //     if !self.enable_notification()? {
 504 |        |     //         break;
 505 |        |     //     }
 506 |        |     // }
 507 | 15.5k  |     fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
 508 | 15.5k  |         self.set_notification(mem, true)?;
 509 |        |         // Ensures the following read is not reordered before any previous write operation.
 510 | 15.5k  |         fence(Ordering::SeqCst);
 511 | 15.5k  |
 512 | 15.5k  |         // We double-check here to avoid the situation where the available ring has been updated
 513 | 15.5k  |         // just before we re-enabled notifications, and it's possible to miss one. We compare the
 514 | 15.5k  |         // current `avail_idx` value to `self.next_avail` because it's where we stopped processing
 515 | 15.5k  |         // entries. There are situations where we intentionally avoid processing everything in the
 516 | 15.5k  |         // available ring (which will cause this method to return `true`), but in that case we'll
 517 | 15.5k  |         // probably not re-enable notifications as we already know there are pending entries.
 518 | 15.5k  |         self.avail_idx(mem, Ordering::Relaxed)
 519 | 15.5k  |             .map(|idx| idx != self.next_avail)
 520 | 15.5k  |     }
 521 |        |
 522 |      0 |     fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
 523 |      0 |         self.set_notification(mem, false)
 524 |      0 |     }
 525 |        |
 526 |    350 |     fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
 527 |    350 |         let used_idx = self.next_used;
 528 |    350 |
 529 |    350 |         // Complete all the writes in add_used() before reading the event.
 530 |    350 |         fence(Ordering::SeqCst);
 531 |    350 |
 532 |    350 |         // The VRING_AVAIL_F_NO_INTERRUPT flag isn't supported yet.
 533 |    350 |
 534 |    350 |         // When the `EVENT_IDX` feature is negotiated, the driver writes into `used_event`
 535 |    350 |         // a value that's used by the device to determine whether a notification must
 536 |    350 |         // be submitted after adding a descriptor chain to the used ring. According to the
 537 |    350 |         // standard, the notification must be sent when `next_used == used_event + 1`, but
 538 |    350 |         // various device model implementations rely on an inequality instead, most likely
 539 |    350 |         // to also support use cases where a bunch of descriptor chains are added to the used
 540 |    350 |         // ring first, and only afterwards the `needs_notification` logic is called. For example,
 541 |    350 |         // the approach based on `num_added` below is taken from the Linux Kernel implementation
 542 |    350 |         // (i.e. https://elixir.bootlin.com/linux/v5.15.35/source/drivers/virtio/virtio_ring.c#L661)
 543 |    350 |
 544 |    350 |         // The `old` variable below is used to determine the value of `next_used` from when
 545 |    350 |         // `needs_notification` was called last (each `needs_notification` call resets `num_added`
 546 |    350 |         // to zero, while each `add_used` call increments it by one). Then, the logic below
 547 |    350 |         // uses wrapped arithmetic to see whether `used_event` can be found between `old` and
 548 |    350 |         // `next_used` in the circular sequence space of the used ring.
 549 |    350 |         if self.event_idx_enabled {
 550 |      0 |             let used_event = self.used_event(mem, Ordering::Relaxed)?;
 551 |      0 |             let old = used_idx - self.num_added;
 552 |      0 |             self.num_added = Wrapping(0);
 553 |      0 |
 554 |      0 |             return Ok(used_idx - used_event - Wrapping(1) < used_idx - old);
 555 |    350 |         }
 556 |    350 |
 557 |    350 |         Ok(true)
 558 |    350 |     }
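
The wrapped comparison on line 554 is easier to trust with concrete numbers. A sketch that reproduces the check with plain `Wrapping<u16>` values (the helper name is illustrative):

```rust
use std::num::Wrapping;

// True when `used_event` lies in the window (old, next_used] of the
// circular u16 sequence space, mirroring `needs_notification`.
fn crossed(next_used: Wrapping<u16>, used_event: Wrapping<u16>, old: Wrapping<u16>) -> bool {
    next_used - used_event - Wrapping(1) < next_used - old
}

fn main() {
    // We advanced from 5 to 8 and the driver asked to be woken at 6,
    // which we crossed: a notification is due.
    assert!(crossed(Wrapping(8), Wrapping(6), Wrapping(5)));
    // Wake-up point 9 has not been reached yet: stay quiet.
    assert!(!crossed(Wrapping(8), Wrapping(9), Wrapping(5)));
    // The same logic holds across u16 wrap-around.
    assert!(crossed(Wrapping(2), Wrapping(65535), Wrapping(65533)));
}
```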
 559 |        |
 560 |      0 |     fn next_avail(&self) -> u16 {
 561 |      0 |         self.next_avail.0
 562 |      0 |     }
 563 |        |
 564 |  8.59k |     fn set_next_avail(&mut self, next_avail: u16) {
 565 |  8.59k |         self.next_avail = Wrapping(next_avail);
 566 |  8.59k |     }
 567 |        |
 568 |      0 |     fn next_used(&self) -> u16 {
 569 |      0 |         self.next_used.0
 570 |      0 |     }
 571 |        |
 572 |  8.59k |     fn set_next_used(&mut self, next_used: u16) {
 573 |  8.59k |         self.next_used = Wrapping(next_used);
 574 |  8.59k |     }
 575 |        |
 576 |      0 |     fn desc_table(&self) -> u64 {
 577 |      0 |         self.desc_table.0
 578 |      0 |     }
 579 |        |
 580 |      0 |     fn avail_ring(&self) -> u64 {
 581 |      0 |         self.avail_ring.0
 582 |      0 |     }
 583 |        |
 584 |      0 |     fn used_ring(&self) -> u64 {
 585 |      0 |         self.used_ring.0
 586 |      0 |     }
 587 |        |
 588 |      0 |     fn event_idx_enabled(&self) -> bool {
 589 |      0 |         self.event_idx_enabled
 590 |      0 |     }
 591 |        |
 592 |   185k |     fn pop_descriptor_chain<M>(&mut self, mem: M) -> Option<DescriptorChain<M>>
 593 |   185k |     where
 594 |   185k |         M: Clone + Deref,
 595 |   185k |         M::Target: GuestMemory,
 596 |   185k |     {
 597 |   185k |         // Default, iter-based impl. Will be subsequently improved.
 598 |   185k |         match self.iter(mem) {
 599 |   184k |             Ok(mut iter) => iter.next(),
 600 |  1.35k |             Err(e) => {
 601 |  1.35k |                 error!("Iterator error {}", e);
 602 |  1.35k |                 None
 603 |        |             }
 604 |        |         }
 605 |   185k |     }
 606 |        | }
 607 |        |
 608 |        | impl QueueOwnedT for Queue {
 609 |   185k |     fn iter<M>(&mut self, mem: M) -> Result<AvailIter<'_, M>, Error>
 610 |   185k |     where
 611 |   185k |         M: Deref,
 612 |   185k |         M::Target: GuestMemory,
 613 |   185k |     {
 614 |   185k |         // We're checking here that a reset did not happen without re-initializing the queue.
 615 |   185k |         // TODO: In the future we might want to also check that the other parameters in the
 616 |   185k |         // queue are valid.
 617 |   185k |         if !self.ready || self.avail_ring == GuestAddress(0) {
 618 |      0 |             return Err(Error::QueueNotReady);
 619 |   185k |         }
 620 |   185k |
 621 |   185k |         self.avail_idx(mem.deref(), Ordering::Acquire)
 622 |   185k |             .map(move |idx| AvailIter::new(mem, idx, self))?
 623 |   185k |     }
 624 |        |
 625 |     69 |     fn go_to_previous_position(&mut self) {
 626 |     69 |         self.next_avail -= Wrapping(1);
 627 |     69 |     }
 628 |        | }
 629 |        |
 630 |        | /// Consuming iterator over all available descriptor chain heads in the queue.
 631 |        | ///
 632 |        | /// # Example
 633 |        | ///
 634 |        | /// ```rust
 635 |        | /// # use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE};
 636 |        | /// # use virtio_queue::mock::MockSplitQueue;
 637 |        | /// use virtio_queue::{Descriptor, Queue, QueueOwnedT};
 638 |        | /// use vm_memory::{GuestAddress, GuestMemoryMmap};
 639 |        | ///
 640 |        | /// # fn populate_queue(m: &GuestMemoryMmap) -> Queue {
 641 |        | /// #    let vq = MockSplitQueue::new(m, 16);
 642 |        | /// #    let mut q: Queue = vq.create_queue().unwrap();
 643 |        | /// #
 644 |        | /// #    // The chains are (0, 1), (2, 3, 4) and (5, 6).
 645 |        | /// #    let mut descs = Vec::new();
 646 |        | /// #    for i in 0..7 {
 647 |        | /// #        let flags = match i {
 648 |        | /// #            1 | 6 => 0,
 649 |        | /// #            2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
 650 |        | /// #            4 => VRING_DESC_F_WRITE,
 651 |        | /// #            _ => VRING_DESC_F_NEXT,
 652 |        | /// #        };
 653 |        | /// #
 654 |        | /// #        descs.push(Descriptor::new((0x1000 * (i + 1)) as u64, 0x1000, flags as u16, i + 1));
 655 |        | /// #    }
 656 |        | /// #
 657 |        | /// #    vq.add_desc_chains(&descs, 0).unwrap();
 658 |        | /// #    q
 659 |        | /// # }
 660 |        | /// let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
 661 |        | /// // Populate the queue with descriptor chains and update the available ring accordingly.
 662 |        | /// let mut queue = populate_queue(m);
 663 |        | /// let mut i = queue.iter(m).unwrap();
 664 |        | ///
 665 |        | /// {
 666 |        | ///     let mut c = i.next().unwrap();
 667 |        | ///     let _first_head_index = c.head_index();
 668 |        | ///     // We should have two descriptors in the first chain.
 669 |        | ///     let _desc1 = c.next().unwrap();
 670 |        | ///     let _desc2 = c.next().unwrap();
 671 |        | /// }
 672 |        | ///
 673 |        | /// {
 674 |        | ///     let c = i.next().unwrap();
 675 |        | ///     let _second_head_index = c.head_index();
 676 |        | ///
 677 |        | ///     let mut iter = c.writable();
 678 |        | ///     // We should have two writable descriptors in the second chain.
 679 |        | ///     let _desc1 = iter.next().unwrap();
 680 |        | ///     let _desc2 = iter.next().unwrap();
 681 |        | /// }
 682 |        | ///
 683 |        | /// {
 684 |        | ///     let c = i.next().unwrap();
 685 |        | ///     let _third_head_index = c.head_index();
 686 |        | ///
 687 |        | ///     let mut iter = c.readable();
 688 |        | ///     // We should have one readable descriptor in the third chain.
 689 |        | ///     let _desc1 = iter.next().unwrap();
 690 |        | /// }
 691 |        | /// // Let's go back one position in the available ring.
 692 |        | /// i.go_to_previous_position();
 693 |        | /// // We should be able to access again the third descriptor chain.
 694 |        | /// let c = i.next().unwrap();
 695 |        | /// let _third_head_index = c.head_index();
 696 |        | /// ```
 697 |      0 | #[derive(Debug)]
 698 |        | pub struct AvailIter<'b, M> {
 699 |        |     mem: M,
 700 |        |     desc_table: GuestAddress,
 701 |        |     avail_ring: GuestAddress,
 702 |        |     queue_size: u16,
 703 |        |     last_index: Wrapping<u16>,
 704 |        |     next_avail: &'b mut Wrapping<u16>,
 705 |        | }
 706 |        |
 707 |        | impl<'b, M> AvailIter<'b, M>
 708 |        | where
 709 |        |     M: Deref,
 710 |        |     M::Target: GuestMemory,
 711 |        | {
 712 |        |     /// Create a new instance of `AvailIter`.
 713 |        |     ///
 714 |        |     /// # Arguments
 715 |        |     /// * `mem` - the `GuestMemory` object that can be used to access the queue buffers.
 716 |        |     /// * `idx` - the index of the available ring entry where the driver would put the next
 717 |        |     ///           available descriptor chain.
 718 |        |     /// * `queue` - the `Queue` object from which the needed data to create the `AvailIter` can
 719 |        |     ///             be retrieved.
 720 |   185k |     pub(crate) fn new(mem: M, idx: Wrapping<u16>, queue: &'b mut Queue) -> Result<Self, Error> {
 721 |   185k |         // The number of descriptor chain heads to process should always
 722 |   185k |         // be smaller than or equal to the queue size, as the driver should
 723 |   185k |         // never ask the VMM to process an available ring entry more than
 724 |   185k |         // once. Checking and reporting such incorrect driver behavior
 725 |   185k |         // can prevent potential hanging and Denial-of-Service from
 726 |   185k |         // happening on the VMM side.
 727 |   185k |         if (idx - queue.next_avail).0 > queue.size {
 728 |  1.35k |             return Err(Error::InvalidAvailRingIndex);
 729 |   184k |         }
 730 |   184k |
 731 |   184k |         Ok(AvailIter {
 732 |   184k |             mem,
 733 |   184k |             desc_table: queue.desc_table,
 734 |   184k |             avail_ring: queue.avail_ring,
 735 |   184k |             queue_size: queue.size,
 736 |   184k |             last_index: idx,
 737 |   184k |             next_avail: &mut queue.next_avail,
 738 |   184k |         })
 739 |   185k |     }
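
The guard on line 727 measures how far the driver's published index runs ahead of `next_avail` using wrapping `u16` subtraction, which keeps working when the indices wrap past zero. A small sketch:

```rust
use std::num::Wrapping;

// Mirrors the sanity check in `AvailIter::new`: the number of pending
// entries may never exceed the queue size.
fn index_is_sane(idx: Wrapping<u16>, next_avail: Wrapping<u16>, queue_size: u16) -> bool {
    (idx - next_avail).0 <= queue_size
}

fn main() {
    // 3 pending entries in a 16-entry queue: fine.
    assert!(index_is_sane(Wrapping(3), Wrapping(0), 16));
    // idx may wrap past zero while next_avail has not; the wrapping
    // subtraction still yields the true distance (here, 8).
    assert!(index_is_sane(Wrapping(2), Wrapping(65530), 16));
    // A distance larger than the queue size means the driver misbehaved.
    assert!(!index_is_sane(Wrapping(100), Wrapping(0), 16));
}
```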
 740 |        |
 741 |        |     /// Goes back one position in the available descriptor chain offered by the driver.
 742 |        |     ///
 743 |        |     /// Rust's standard `Iterator` trait cannot step backwards, so this is the only way to revert
 744 |        |     /// the effect of an iterator increment on the queue.
 745 |        |     ///
 746 |        |     /// Note: this method assumes there's only one thread manipulating the queue, so it should only
 747 |        |     /// be invoked in single-threaded context.
 748 |      0 |     pub fn go_to_previous_position(&mut self) {
 749 |      0 |         *self.next_avail -= Wrapping(1);
 750 |      0 |     }
 751 |        | }
 752 |        |
 753 |        | impl<'b, M> Iterator for AvailIter<'b, M>
 754 |        | where
 755 |        |     M: Clone + Deref,
 756 |        |     M::Target: GuestMemory,
 757 |        | {
 758 |        |     type Item = DescriptorChain<M>;
 759 |        |
 760 |   184k |     fn next(&mut self) -> Option<Self::Item> {
 761 |   184k |         if *self.next_avail == self.last_index {
 762 |  1.30k |             return None;
 763 |   182k |         }
 764 |        |
 765 |        |         // These two operations cannot overflow a u64 since they're working with relatively small
 766 |        |         // numbers compared to u64::MAX.
 767 |   182k |         let elem_off =
 768 |   182k |             u64::from(self.next_avail.0.checked_rem(self.queue_size)?) * VIRTQ_AVAIL_ELEMENT_SIZE;
 769 |   182k |         let offset = VIRTQ_AVAIL_RING_HEADER_SIZE + elem_off;
 770 |        |
 771 |   182k |         let addr = self.avail_ring.checked_add(offset)?;
 772 |   182k |         let head_index: u16 = self
 773 |   182k |             .mem
 774 |   182k |             .load(addr, Ordering::Acquire)
 775 |   182k |             .map(u16::from_le)
 776 |   182k |             .map_err(|_| error!("Failed to read from memory {:x}", addr.raw_value()))
 777 |   182k |             .ok()?;
 778 |        |
 779 |   182k |         *self.next_avail += Wrapping(1);
 780 |   182k |
 781 |   182k |         Some(DescriptorChain::new(
 782 |   182k |             self.mem.clone(),
 783 |   182k |             self.desc_table,
 784 |   182k |             self.queue_size,
 785 |   182k |             head_index,
 786 |   182k |         ))
 787 |   184k |     }
788
}
789
790
#[cfg(any(test, feature = "test-utils"))]
791
// It is convenient for tests to implement `PartialEq`, but it is not a
792
// proper implementation as `GuestMemory` errors cannot implement `PartialEq`.
793
impl PartialEq for Error {
794
    fn eq(&self, other: &Self) -> bool {
795
        format!("{}", &self) == format!("{}", other)
796
    }
797
}
798
799
#[cfg(test)]
800
mod tests {
801
    use super::*;
802
    use crate::defs::{DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR};
803
    use crate::mock::MockSplitQueue;
804
    use crate::Descriptor;
805
    use virtio_bindings::bindings::virtio_ring::{
806
        VRING_DESC_F_NEXT, VRING_DESC_F_WRITE, VRING_USED_F_NO_NOTIFY,
807
    };
808
809
    use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
810
811
    #[test]
812
    fn test_queue_is_valid() {
813
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
814
        let vq = MockSplitQueue::new(m, 16);
815
        let mut q: Queue = vq.create_queue().unwrap();
816
817
        // q is currently valid
818
        assert!(q.is_valid(m));
819
820
        // shouldn't be valid when not marked as ready
821
        q.set_ready(false);
822
        assert!(!q.ready());
823
        assert!(!q.is_valid(m));
824
        q.set_ready(true);
825
826
        // shouldn't be allowed to set a size > max_size
827
        q.set_size(q.max_size() << 1);
828
        assert_eq!(q.size, q.max_size());
829
830
        // or set the size to 0
831
        q.set_size(0);
832
        assert_eq!(q.size, q.max_size());
833
834
        // or set a size which is not a power of 2
835
        q.set_size(11);
836
        assert_eq!(q.size, q.max_size());
837
838
        // but should be allowed to set a size if 0 < size <= max_size and size is a power of two
839
        q.set_size(4);
840
        assert_eq!(q.size, 4);
841
        q.size = q.max_size();
842
843
        // shouldn't be allowed to set an address that breaks the alignment constraint
844
        q.set_desc_table_address(Some(0xf), None);
845
        assert_eq!(q.desc_table.0, vq.desc_table_addr().0);
846
        // should be allowed to set an aligned out of bounds address
847
        q.set_desc_table_address(Some(0xffff_fff0), None);
848
        assert_eq!(q.desc_table.0, 0xffff_fff0);
849
        // but shouldn't be valid
850
        assert!(!q.is_valid(m));
851
        // but should be allowed to set a valid descriptor table address
852
        q.set_desc_table_address(Some(0x10), None);
853
        assert_eq!(q.desc_table.0, 0x10);
854
        assert!(q.is_valid(m));
855
        let addr = vq.desc_table_addr().0;
856
        q.set_desc_table_address(Some(addr as u32), Some((addr >> 32) as u32));
857
858
        // shouldn't be allowed to set an address that breaks the alignment constraint
859
        q.set_avail_ring_address(Some(0x1), None);
860
        assert_eq!(q.avail_ring.0, vq.avail_addr().0);
861
        // should be allowed to set an aligned out of bounds address
862
        q.set_avail_ring_address(Some(0xffff_fffe), None);
863
        assert_eq!(q.avail_ring.0, 0xffff_fffe);
864
        // but shouldn't be valid
865
        assert!(!q.is_valid(m));
866
        // but should be allowed to set a valid available ring address
867
        q.set_avail_ring_address(Some(0x2), None);
868
        assert_eq!(q.avail_ring.0, 0x2);
869
        assert!(q.is_valid(m));
870
        let addr = vq.avail_addr().0;
871
        q.set_avail_ring_address(Some(addr as u32), Some((addr >> 32) as u32));
872
873
        // shouldn't be allowed to set an address that breaks the alignment constraint
874
        q.set_used_ring_address(Some(0x3), None);
875
        assert_eq!(q.used_ring.0, vq.used_addr().0);
876
        // should be allowed to set an aligned out of bounds address
877
        q.set_used_ring_address(Some(0xffff_fffc), None);
878
        assert_eq!(q.used_ring.0, 0xffff_fffc);
879
        // but shouldn't be valid
880
        assert!(!q.is_valid(m));
881
        // but should be allowed to set a valid used ring address
882
        q.set_used_ring_address(Some(0x4), None);
883
        assert_eq!(q.used_ring.0, 0x4);
884
        let addr = vq.used_addr().0;
885
        q.set_used_ring_address(Some(addr as u32), Some((addr >> 32) as u32));
886
        assert!(q.is_valid(m));
887
    }
888
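The alignment checks exercised by `test_queue_is_valid` follow the virtio split-ring requirements: the descriptor table must be 16-byte aligned, the available ring 2-byte aligned, and the used ring 4-byte aligned. A minimal sketch of such a check (names are illustrative, not the crate's API):

```rust
// Split virtqueue alignment requirements: descriptor table 16 bytes,
// available ring 2 bytes, used ring 4 bytes.
const DESC_TABLE_ALIGN: u64 = 16;
const AVAIL_RING_ALIGN: u64 = 2;
const USED_RING_ALIGN: u64 = 4;

/// `align` must be a power of two for the mask trick to be correct.
fn is_aligned(addr: u64, align: u64) -> bool {
    addr & (align - 1) == 0
}

fn main() {
    assert!(!is_aligned(0xf, DESC_TABLE_ALIGN)); // rejected in the test above
    assert!(is_aligned(0x10, DESC_TABLE_ALIGN)); // accepted
    assert!(!is_aligned(0x1, AVAIL_RING_ALIGN)); // rejected
    assert!(!is_aligned(0x3, USED_RING_ALIGN)); // rejected
}
```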
889
    #[test]
890
    fn test_add_used() {
891
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
892
        let vq = MockSplitQueue::new(mem, 16);
893
        let mut q: Queue = vq.create_queue().unwrap();
894
895
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(0));
896
        assert_eq!(u16::from_le(vq.used().idx().load()), 0);
897
898
        // index too large
899
        assert!(q.add_used(mem, 16, 0x1000).is_err());
900
        assert_eq!(u16::from_le(vq.used().idx().load()), 0);
901
902
        // should be ok
903
        q.add_used(mem, 1, 0x1000).unwrap();
904
        assert_eq!(q.next_used, Wrapping(1));
905
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(1));
906
        assert_eq!(u16::from_le(vq.used().idx().load()), 1);
907
908
        let x = vq.used().ring().ref_at(0).unwrap().load();
909
        assert_eq!(x.id(), 1);
910
        assert_eq!(x.len(), 0x1000);
911
    }
912
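For context on `add_used` in the test above: a split-ring used element is an 8-byte `{ id: u32, len: u32 }` pair written past the 4-byte used-ring header, in a slot chosen by `next_used` modulo the queue size. A minimal sketch of that placement (the helper is hypothetical; the constant values follow the split-ring layout):

```rust
// Split used ring layout: u16 flags, u16 idx, then 8-byte { id: u32, len: u32 } elements.
const VIRTQ_USED_RING_HEADER_SIZE: u64 = 4;
const VIRTQ_USED_ELEMENT_SIZE: u64 = 8;

/// Byte offset, relative to the used ring base, where the next element lands.
fn used_elem_offset(next_used: u16, queue_size: u16) -> u64 {
    VIRTQ_USED_RING_HEADER_SIZE + u64::from(next_used % queue_size) * VIRTQ_USED_ELEMENT_SIZE
}

fn main() {
    // The first `add_used` in the test writes slot 0, right past the header.
    assert_eq!(used_elem_offset(0, 16), 4);
    // Slots wrap around: index 17 of a 16-entry ring reuses slot 1.
    assert_eq!(used_elem_offset(17, 16), 12);
}
```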
913
    #[test]
914
    fn test_reset_queue() {
915
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
916
        let vq = MockSplitQueue::new(m, 16);
917
        let mut q: Queue = vq.create_queue().unwrap();
918
919
        q.set_size(8);
920
        // The address set by `MockSplitQueue` for the descriptor table is DEFAULT_DESC_TABLE_ADDR,
921
        // so let's change it for testing the reset.
922
        q.set_desc_table_address(Some(0x5000), None);
923
        // Same for `event_idx_enabled`, `next_avail`, `next_used` and `num_added`.
924
        q.set_event_idx(true);
925
        q.set_next_avail(2);
926
        q.set_next_used(4);
927
        q.num_added = Wrapping(15);
928
        assert_eq!(q.size, 8);
929
        // `create_queue` also marks the queue as ready.
930
        assert!(q.ready);
931
        assert_ne!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
932
        assert_ne!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
933
        assert_ne!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
934
        assert_ne!(q.next_avail, Wrapping(0));
935
        assert_ne!(q.next_used, Wrapping(0));
936
        assert_ne!(q.num_added, Wrapping(0));
937
        assert!(q.event_idx_enabled);
938
939
        q.reset();
940
        assert_eq!(q.size, 16);
941
        assert!(!q.ready);
942
        assert_eq!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
943
        assert_eq!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
944
        assert_eq!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
945
        assert_eq!(q.next_avail, Wrapping(0));
946
        assert_eq!(q.next_used, Wrapping(0));
947
        assert_eq!(q.num_added, Wrapping(0));
948
        assert!(!q.event_idx_enabled);
949
    }
950
951
    #[test]
952
    fn test_needs_notification() {
953
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
954
        let qsize = 16;
955
        let vq = MockSplitQueue::new(mem, qsize);
956
        let mut q: Queue = vq.create_queue().unwrap();
957
        let avail_addr = vq.avail_addr();
958
959
        // It should always return true when EVENT_IDX isn't enabled.
960
        for i in 0..qsize {
961
            q.next_used = Wrapping(i);
962
            assert!(q.needs_notification(mem).unwrap());
963
        }
964
965
        mem.write_obj::<u16>(
966
            u16::to_le(4),
967
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
968
        )
969
        .unwrap();
970
        q.set_event_idx(true);
971
972
        // Incrementing up to this value causes a `u16` to wrap back to 0.
973
        let wrap = u32::from(u16::MAX) + 1;
974
975
        for i in 0..wrap + 12 {
976
            q.next_used = Wrapping(i as u16);
977
            // Let's test wrapping around the maximum index value as well.
978
            // `num_added` needs to be at least `1` to represent the fact that new descriptor
979
            // chains have been added to the used ring since the last time `needs_notification`
980
            // returned.
981
            q.num_added = Wrapping(1);
982
            let expected = i == 5 || i == (5 + wrap);
983
            assert_eq!((q.needs_notification(mem).unwrap(), i), (expected, i));
984
        }
985
986
        mem.write_obj::<u16>(
987
            u16::to_le(8),
988
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
989
        )
990
        .unwrap();
991
992
        // Returns `false` because the current `used_event` value is behind both `next_used` and
993
        // the value of `next_used` at the time when `needs_notification` last returned (which is
994
        // computed based on `num_added` as described in the comments for `needs_notification`).
995
        assert!(!q.needs_notification(mem).unwrap());
996
997
        mem.write_obj::<u16>(
998
            u16::to_le(15),
999
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
1000
        )
1001
        .unwrap();
1002
1003
        q.num_added = Wrapping(1);
1004
        assert!(!q.needs_notification(mem).unwrap());
1005
1006
        q.next_used = Wrapping(15);
1007
        q.num_added = Wrapping(1);
1008
        assert!(!q.needs_notification(mem).unwrap());
1009
1010
        q.next_used = Wrapping(16);
1011
        q.num_added = Wrapping(1);
1012
        assert!(q.needs_notification(mem).unwrap());
1013
1014
        // Calling `needs_notification` again immediately returns `false`.
1015
        assert!(!q.needs_notification(mem).unwrap());
1016
1017
        mem.write_obj::<u16>(
1018
            u16::to_le(u16::MAX - 3),
1019
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
1020
        )
1021
        .unwrap();
1022
        q.next_used = Wrapping(u16::MAX - 2);
1023
        q.num_added = Wrapping(1);
1024
        // Returns `true` because, when looking at circular sequence of indices of the used ring,
1025
        // the value we wrote in the `used_event` appears between the "old" value of `next_used`
1026
        // (i.e. `next_used` - `num_added`) and the current `next_used`, thus suggesting that we
1027
        // need to notify the driver.
1028
        assert!(q.needs_notification(mem).unwrap());
1029
    }
1030
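The expected values in `test_needs_notification` follow the EVENT_IDX rule from the virtio spec (the `vring_need_event` reference check): in wrapping `u16` arithmetic, a notification is due exactly when `used_event` falls between the previous `next_used` (here `next_used - num_added`) and the current one. A worked sketch of that check, using a standalone reimplementation:

```rust
use std::num::Wrapping;

/// Sketch of the spec's `vring_need_event` check in wrapping u16 arithmetic.
fn need_event(event_idx: Wrapping<u16>, new: Wrapping<u16>, old: Wrapping<u16>) -> bool {
    (new - event_idx - Wrapping(1)) < (new - old)
}

fn main() {
    // used_event = 4 and num_added = 1 (so old == new - 1), as in the loop above.
    let event = Wrapping(4u16);
    // Crossing used_event (4 -> 5) requires a notification...
    assert!(need_event(event, Wrapping(5), Wrapping(4)));
    // ...but moving further past it does not.
    assert!(!need_event(event, Wrapping(6), Wrapping(5)));
    // End of the test: used_event = u16::MAX - 3 and next_used advances to
    // u16::MAX - 2 with num_added = 1, so old == used_event and we must notify.
    let event = Wrapping(u16::MAX - 3);
    assert!(need_event(event, Wrapping(u16::MAX - 2), Wrapping(u16::MAX - 3)));
}
```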
1031
    #[test]
1032
    fn test_enable_disable_notification() {
1033
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1034
        let vq = MockSplitQueue::new(mem, 16);
1035
1036
        let mut q: Queue = vq.create_queue().unwrap();
1037
        let used_addr = vq.used_addr();
1038
1039
        assert!(!q.event_idx_enabled);
1040
1041
        q.enable_notification(mem).unwrap();
1042
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
1043
        assert_eq!(v, 0);
1044
1045
        q.disable_notification(mem).unwrap();
1046
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
1047
        assert_eq!(v, VRING_USED_F_NO_NOTIFY as u16);
1048
1049
        q.enable_notification(mem).unwrap();
1050
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
1051
        assert_eq!(v, 0);
1052
1053
        q.set_event_idx(true);
1054
        let avail_addr = vq.avail_addr();
1055
        mem.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2))
1056
            .unwrap();
1057
1058
        assert!(q.enable_notification(mem).unwrap());
1059
        q.next_avail = Wrapping(2);
1060
        assert!(!q.enable_notification(mem).unwrap());
1061
1062
        mem.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2))
1063
            .unwrap();
1064
1065
        assert!(q.enable_notification(mem).unwrap());
1066
        q.next_avail = Wrapping(8);
1067
        assert!(!q.enable_notification(mem).unwrap());
1068
    }
1069
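Without EVENT_IDX, the suppression observed in `test_enable_disable_notification` is simply the device toggling `VRING_USED_F_NO_NOTIFY` in the first `u16` of the used ring (the flags word the test reads back). A minimal sketch of that toggle:

```rust
// Bit 0 of the used ring's flags word; the only flag defined without EVENT_IDX.
const VRING_USED_F_NO_NOTIFY: u16 = 0x1;

fn set_notification(flags_word: &mut u16, enabled: bool) {
    if enabled {
        *flags_word &= !VRING_USED_F_NO_NOTIFY; // ask the driver to notify us
    } else {
        *flags_word |= VRING_USED_F_NO_NOTIFY; // driver may skip notifications
    }
}

fn main() {
    let mut flags_word = 0u16;
    set_notification(&mut flags_word, false);
    assert_eq!(flags_word, VRING_USED_F_NO_NOTIFY); // what the test reads back
    set_notification(&mut flags_word, true);
    assert_eq!(flags_word, 0);
}
```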
1070
    #[test]
1071
    fn test_consume_chains_with_notif() {
1072
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1073
        let vq = MockSplitQueue::new(mem, 16);
1074
1075
        let mut q: Queue = vq.create_queue().unwrap();
1076
1077
        // q is currently valid.
1078
        assert!(q.is_valid(mem));
1079
1080
        // The chains are (0, 1), (2, 3, 4), (5, 6), (7, 8), (9, 10, 11, 12).
1081
        let mut descs = Vec::new();
1082
        for i in 0..13 {
1083
            let flags = match i {
1084
                1 | 4 | 6 | 8 | 12 => 0,
1085
                _ => VRING_DESC_F_NEXT,
1086
            };
1087
1088
            descs.push(Descriptor::new(
1089
                (0x1000 * (i + 1)) as u64,
1090
                0x1000,
1091
                flags as u16,
1092
                i + 1,
1093
            ));
1094
        }
1095
1096
        vq.add_desc_chains(&descs, 0).unwrap();
1097
        // Advance the available ring index past only the first two chains, not all of them.
1098
        // This enables us to consume chains in multiple iterations, as opposed to consuming
1099
        // all the driver-written chains at once.
1100
        vq.avail().idx().store(u16::to_le(2));
1101
        // No descriptor chains are consumed at this point.
1102
        assert_eq!(q.next_avail(), 0);
1103
1104
        let mut i = 0;
1105
1106
        loop {
1107
            i += 1;
1108
            q.disable_notification(mem).unwrap();
1109
1110
            while let Some(chain) = q.iter(mem).unwrap().next() {
1111
                // Process the descriptor chain, and then add entries to the
1112
                // used ring.
1113
                let head_index = chain.head_index();
1114
                let mut desc_len = 0;
1115
                chain.for_each(|d| {
1116
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
1117
                        desc_len += d.len();
1118
                    }
1119
                });
1120
                q.add_used(mem, head_index, desc_len).unwrap();
1121
            }
1122
            if !q.enable_notification(mem).unwrap() {
1123
                break;
1124
            }
1125
        }
1126
        // The chains should be consumed in a single loop iteration because there's nothing updating
1127
        // the `idx` field of the available ring in the meantime.
1128
        assert_eq!(i, 1);
1129
        // The next chain that can be consumed should have index 2.
1130
        assert_eq!(q.next_avail(), 2);
1131
        assert_eq!(q.next_used(), 2);
1132
        // Let the device know it can consume one more chain.
1133
        vq.avail().idx().store(u16::to_le(3));
1134
        i = 0;
1135
1136
        loop {
1137
            i += 1;
1138
            q.disable_notification(mem).unwrap();
1139
1140
            while let Some(chain) = q.iter(mem).unwrap().next() {
1141
                // Process the descriptor chain, and then add entries to the
1142
                // used ring.
1143
                let head_index = chain.head_index();
1144
                let mut desc_len = 0;
1145
                chain.for_each(|d| {
1146
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
1147
                        desc_len += d.len();
1148
                    }
1149
                });
1150
                q.add_used(mem, head_index, desc_len).unwrap();
1151
            }
1152
1153
            // For the simplicity of the test, we update the `idx` value of the available
1154
            // ring. Ideally this should be done on a separate thread.
1155
            // Because of this update, the loop should be iterated again to consume the new
1156
            // available descriptor chains.
1157
            vq.avail().idx().store(u16::to_le(4));
1158
            if !q.enable_notification(mem).unwrap() {
1159
                break;
1160
            }
1161
        }
1162
        assert_eq!(i, 2);
1163
        // The next chain that can be consumed should have index 4.
1164
        assert_eq!(q.next_avail(), 4);
1165
        assert_eq!(q.next_used(), 4);
1166
1167
        // Set an `idx` that is bigger than the number of entries added in the ring.
1168
        // This is an allowed scenario, but the indexes of the chain will have unexpected values.
1169
        vq.avail().idx().store(u16::to_le(7));
1170
        loop {
1171
            q.disable_notification(mem).unwrap();
1172
1173
            while let Some(chain) = q.iter(mem).unwrap().next() {
1174
                // Process the descriptor chain, and then add entries to the
1175
                // used ring.
1176
                let head_index = chain.head_index();
1177
                let mut desc_len = 0;
1178
                chain.for_each(|d| {
1179
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
1180
                        desc_len += d.len();
1181
                    }
1182
                });
1183
                q.add_used(mem, head_index, desc_len).unwrap();
1184
            }
1185
            if !q.enable_notification(mem).unwrap() {
1186
                break;
1187
            }
1188
        }
1189
        assert_eq!(q.next_avail(), 7);
1190
        assert_eq!(q.next_used(), 7);
1191
    }
1192
1193
    #[test]
1194
    fn test_invalid_avail_idx() {
1195
        // This is a negative test for the following MUST from the spec: `A driver MUST NOT
1196
        // decrement the available idx on a virtqueue (ie. there is no way to “unexpose” buffers).`.
1197
        // We validate that for this misconfiguration, the device does not panic.
1198
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1199
        let vq = MockSplitQueue::new(mem, 16);
1200
1201
        let mut q: Queue = vq.create_queue().unwrap();
1202
1203
        // q is currently valid.
1204
        assert!(q.is_valid(mem));
1205
1206
        // The chains are (0, 1), (2, 3, 4), (5, 6).
1207
        let mut descs = Vec::new();
1208
        for i in 0..7 {
1209
            let flags = match i {
1210
                1 | 4 | 6 => 0,
1211
                _ => VRING_DESC_F_NEXT,
1212
            };
1213
1214
            descs.push(Descriptor::new(
1215
                (0x1000 * (i + 1)) as u64,
1216
                0x1000,
1217
                flags as u16,
1218
                i + 1,
1219
            ));
1220
        }
1221
1222
        vq.add_desc_chains(&descs, 0).unwrap();
1223
        // Let the device know it can consume chains with the index < 3.
1224
        vq.avail().idx().store(u16::to_le(3));
1225
        // No descriptor chains are consumed at this point.
1226
        assert_eq!(q.next_avail(), 0);
1227
        assert_eq!(q.next_used(), 0);
1228
1229
        loop {
1230
            q.disable_notification(mem).unwrap();
1231
1232
            while let Some(chain) = q.iter(mem).unwrap().next() {
1233
                // Process the descriptor chain, and then add entries to the
1234
                // used ring.
1235
                let head_index = chain.head_index();
1236
                let mut desc_len = 0;
1237
                chain.for_each(|d| {
1238
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
1239
                        desc_len += d.len();
1240
                    }
1241
                });
1242
                q.add_used(mem, head_index, desc_len).unwrap();
1243
            }
1244
            if !q.enable_notification(mem).unwrap() {
1245
                break;
1246
            }
1247
        }
1248
        // The next chain that can be consumed should have index 3.
1249
        assert_eq!(q.next_avail(), 3);
1250
        assert_eq!(q.avail_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
1251
        assert_eq!(q.next_used(), 3);
1252
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
1253
        assert!(q.lock().ready());
1254
1255
        // Decrement `idx`, which the spec forbids. We don't enforce this, but we should
1256
        // test that we don't panic in case the driver decrements it.
1257
        vq.avail().idx().store(u16::to_le(1));
1258
        // Invalid available ring index
1259
        assert!(q.iter(mem).is_err());
1260
    }
1261
1262
    #[test]
1263
    fn test_iterator_and_avail_idx() {
1264
        // This test ensures constructing a descriptor chain iterator succeeds
1265
        // with valid available ring indexes and produces an error with invalid
1266
        // indexes.
1267
        let queue_size = 2;
1268
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1269
        let vq = MockSplitQueue::new(mem, queue_size);
1270
1271
        let mut q: Queue = vq.create_queue().unwrap();
1272
1273
        // q is currently valid.
1274
        assert!(q.is_valid(mem));
1275
1276
        // Create descriptors to fill up the queue
1277
        let mut descs = Vec::new();
1278
        for i in 0..queue_size {
1279
            descs.push(Descriptor::new(
1280
                (0x1000 * (i + 1)) as u64,
1281
                0x1000,
1282
                0_u16,
1283
                i + 1,
1284
            ));
1285
        }
1286
        vq.add_desc_chains(&descs, 0).unwrap();
1287
1288
        // Set the `next_avail` index to `u16::MAX` to test the wrapping scenarios.
1289
        q.set_next_avail(u16::MAX);
1290
1291
        // When the number of chains exposed by the driver is equal to or less than the queue
1292
        // size, the available ring index is valid and constructs an iterator successfully.
1293
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size);
1294
        vq.avail().idx().store(u16::to_le(avail_idx.0));
1295
        assert!(q.iter(mem).is_ok());
1296
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size - 1);
1297
        vq.avail().idx().store(u16::to_le(avail_idx.0));
1298
        assert!(q.iter(mem).is_ok());
1299
1300
        // When the number of chains exposed by the driver is larger than the queue size, the
1301
        // available ring index is invalid and constructing an iterator produces an error.
1302
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size + 1);
1303
        vq.avail().idx().store(u16::to_le(avail_idx.0));
1304
        assert!(q.iter(mem).is_err());
1305
    }
1306
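The pass/fail boundary in `test_iterator_and_avail_idx` reflects the validity condition applied when constructing the iterator: the number of chains the driver claims to have exposed but the device has not yet consumed, `avail_idx - next_avail` in wrapping `u16` arithmetic, must never exceed the queue size. A sketch of that condition (the helper name is illustrative):

```rust
use std::num::Wrapping;

/// Pending chains = avail_idx - next_avail (wrapping); more than `queue_size` is bogus.
fn avail_idx_is_valid(avail_idx: Wrapping<u16>, next_avail: Wrapping<u16>, queue_size: u16) -> bool {
    (avail_idx - next_avail).0 <= queue_size
}

fn main() {
    // The wrapping setup from the test: next_avail = u16::MAX, queue size = 2.
    let next_avail = Wrapping(u16::MAX);
    let qsize = 2u16;
    assert!(avail_idx_is_valid(next_avail + Wrapping(2), next_avail, qsize)); // == size: ok
    assert!(avail_idx_is_valid(next_avail + Wrapping(1), next_avail, qsize)); // < size: ok
    assert!(!avail_idx_is_valid(next_avail + Wrapping(3), next_avail, qsize)); // > size: error
}
```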
1307
    #[test]
1308
    fn test_descriptor_and_iterator() {
1309
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1310
        let vq = MockSplitQueue::new(m, 16);
1311
1312
        let mut q: Queue = vq.create_queue().unwrap();
1313
1314
        // q is currently valid
1315
        assert!(q.is_valid(m));
1316
1317
        // the chains are (0, 1), (2, 3, 4) and (5, 6)
1318
        let mut descs = Vec::new();
1319
        for j in 0..7 {
1320
            let flags = match j {
1321
                1 | 6 => 0,
1322
                2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
1323
                4 => VRING_DESC_F_WRITE,
1324
                _ => VRING_DESC_F_NEXT,
1325
            };
1326
1327
            descs.push(Descriptor::new(
1328
                (0x1000 * (j + 1)) as u64,
1329
                0x1000,
1330
                flags as u16,
1331
                j + 1,
1332
            ));
1333
        }
1334
1335
        vq.add_desc_chains(&descs, 0).unwrap();
1336
1337
        let mut i = q.iter(m).unwrap();
1338
1339
        {
1340
            let c = i.next().unwrap();
1341
            assert_eq!(c.head_index(), 0);
1342
1343
            let mut iter = c;
1344
            assert!(iter.next().is_some());
1345
            assert!(iter.next().is_some());
1346
            assert!(iter.next().is_none());
1347
            assert!(iter.next().is_none());
1348
        }
1349
1350
        {
1351
            let c = i.next().unwrap();
1352
            assert_eq!(c.head_index(), 2);
1353
1354
            let mut iter = c.writable();
1355
            assert!(iter.next().is_some());
1356
            assert!(iter.next().is_some());
1357
            assert!(iter.next().is_none());
1358
            assert!(iter.next().is_none());
1359
        }
1360
1361
        {
1362
            let c = i.next().unwrap();
1363
            assert_eq!(c.head_index(), 5);
1364
1365
            let mut iter = c.readable();
1366
            assert!(iter.next().is_some());
1367
            assert!(iter.next().is_none());
1368
            assert!(iter.next().is_none());
1369
        }
1370
    }
1371
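The counts asserted in `test_descriptor_and_iterator` hinge on `VRING_DESC_F_WRITE`: descriptors carrying that flag are device-writable, all others device-readable, and `writable()`/`readable()` filter on it. A standalone sketch over the flag values of the (2, 3, 4) chain from the test:

```rust
const VRING_DESC_F_NEXT: u16 = 0x1;
const VRING_DESC_F_WRITE: u16 = 0x2;

fn main() {
    // Flags of the (2, 3, 4) chain above: descriptors 2 and 4 are device-writable.
    let chain = [
        VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, // descriptor 2
        VRING_DESC_F_NEXT,                      // descriptor 3
        VRING_DESC_F_WRITE,                     // descriptor 4
    ];
    let writable = chain.iter().filter(|&&f| f & VRING_DESC_F_WRITE != 0).count();
    let readable = chain.iter().filter(|&&f| f & VRING_DESC_F_WRITE == 0).count();
    assert_eq!(writable, 2); // the two Some(_)s yielded by `c.writable()`
    assert_eq!(readable, 1); // descriptor 3 is the only device-readable entry
}
```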
1372
    #[test]
1373
    fn test_iterator() {
1374
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1375
        let vq = MockSplitQueue::new(m, 16);
1376
1377
        let mut q: Queue = vq.create_queue().unwrap();
1378
1379
        q.size = q.max_size;
1380
        q.desc_table = vq.desc_table_addr();
1381
        q.avail_ring = vq.avail_addr();
1382
        q.used_ring = vq.used_addr();
1383
        assert!(q.is_valid(m));
1384
1385
        {
1386
            // a queue that is not ready should fail to create an iterator
1387
            q.ready = false;
1388
            assert!(q.iter(m).is_err());
1389
        }
1390
1391
        q.ready = true;
1392
1393
        // now let's create two simple descriptor chains
1394
        // the chains are (0, 1) and (2, 3, 4)
1395
        {
1396
            let mut descs = Vec::new();
1397
            for j in 0..5u16 {
1398
                let flags = match j {
1399
                    1 | 4 => 0,
1400
                    _ => VRING_DESC_F_NEXT,
1401
                };
1402
1403
                descs.push(Descriptor::new(
1404
                    (0x1000 * (j + 1)) as u64,
1405
                    0x1000,
1406
                    flags as u16,
1407
                    j + 1,
1408
                ));
1409
            }
1410
            vq.add_desc_chains(&descs, 0).unwrap();
1411
1412
            let mut i = q.iter(m).unwrap();
1413
1414
            {
1415
                let mut c = i.next().unwrap();
1416
                assert_eq!(c.head_index(), 0);
1417
1418
                c.next().unwrap();
1419
                assert!(c.next().is_some());
1420
                assert!(c.next().is_none());
1421
                assert_eq!(c.head_index(), 0);
1422
            }
1423
1424
            {
1425
                let mut c = i.next().unwrap();
1426
                assert_eq!(c.head_index(), 2);
1427
1428
                c.next().unwrap();
1429
                c.next().unwrap();
1430
                c.next().unwrap();
1431
                assert!(c.next().is_none());
1432
                assert_eq!(c.head_index(), 2);
1433
            }
1434
1435
            // also test that go_to_previous_position() works as expected
1436
            {
1437
                assert!(i.next().is_none());
1438
                i.go_to_previous_position();
1439
                let mut c = q.iter(m).unwrap().next().unwrap();
1440
                c.next().unwrap();
1441
                c.next().unwrap();
1442
                c.next().unwrap();
1443
                assert!(c.next().is_none());
1444
            }
1445
        }
1446
1447
        // Test that iterating some broken descriptor chain does not exceed
1448
        // 2^32 bytes in total (VIRTIO spec version 1.2, 2.7.5.2:
1449
        // Drivers MUST NOT add a descriptor chain longer than 2^32 bytes in
1450
        // total)
1451
        {
1452
            let descs = vec![
1453
                Descriptor::new(0x1000, 0xffff_ffff, VRING_DESC_F_NEXT as u16, 1),
1454
                Descriptor::new(0x1000, 0x1234_5678, 0, 2),
1455
            ];
1456
            vq.add_desc_chains(&descs, 0).unwrap();
1457
            let mut yielded_bytes_by_iteration = 0_u32;
1458
            for d in q.iter(m).unwrap().next().unwrap() {
1459
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
1460
                    .checked_add(d.len())
1461
                    .expect("iterator should not yield more than 2^32 bytes");
1462
            }
1463
        }
1464
1465
        // Same as above, but test with a descriptor which is self-referential
1466
        {
1467
            let descs = vec![Descriptor::new(
1468
                0x1000,
1469
                0xffff_ffff,
1470
                VRING_DESC_F_NEXT as u16,
1471
                0,
1472
            )];
1473
            vq.add_desc_chains(&descs, 0).unwrap();
1474
            let mut yielded_bytes_by_iteration = 0_u32;
1475
            for d in q.iter(m).unwrap().next().unwrap() {
1476
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
1477
                    .checked_add(d.len())
1478
                    .expect("iterator should not yield more than 2^32 bytes");
1479
            }
1480
        }
1481
    }
1482
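The 2^32-byte cap exercised at the end of `test_iterator` can be enforced with a `u32` accumulator and `checked_add`, stopping iteration as soon as another descriptor would push the chain total past `u32::MAX`. A minimal sketch over the broken chain's lengths:

```rust
fn main() {
    // Lengths of the broken chain above: 0xffff_ffff alone fills u32,
    // so adding the second descriptor's length must stop the iteration.
    let lens = [0xffff_ffff_u32, 0x1234_5678];
    let mut total = 0u32;
    let mut yielded = 0;
    for len in lens {
        match total.checked_add(len) {
            Some(t) => {
                total = t;
                yielded += 1;
            }
            None => break, // chain would exceed 2^32 bytes in total
        }
    }
    assert_eq!(yielded, 1); // only the first descriptor is yielded
}
```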
1483
    #[test]
1484
    fn test_regression_iterator_division() {
1485
        // This is a regression test ensuring that the iterator does not try to divide
1486
        // by 0 when the queue size is 0
1487
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
1488
        let vq = MockSplitQueue::new(m, 1);
1489
        // This input was generated by the fuzzer, both for the `Queue` fields and the `Descriptor`.
1490
        let descriptors: Vec<Descriptor> = vec![Descriptor::new(
1491
            14178673876262995140,
1492
            3301229764,
1493
            50372,
1494
            50372,
1495
        )];
1496
        vq.build_desc_chain(&descriptors).unwrap();
1497
1498
        let mut q = Queue {
1499
            max_size: 38,
1500
            next_avail: Wrapping(0),
1501
            next_used: Wrapping(0),
1502
            event_idx_enabled: false,
1503
            num_added: Wrapping(0),
1504
            size: 0,
1505
            ready: false,
1506
            desc_table: GuestAddress(12837708984796196),
1507
            avail_ring: GuestAddress(0),
1508
            used_ring: GuestAddress(9943947977301164032),
1509
        };
1510
1511
        assert!(q.pop_descriptor_chain(m).is_none());
1512
    }
1513
1514
    #[test]
1515
    fn test_setters_error_cases() {
1516
        assert_eq!(Queue::new(15).unwrap_err(), Error::InvalidMaxSize);
1517
        let mut q = Queue::new(16).unwrap();
1518
1519
        let expected_val = q.desc_table.0;
1520
        assert_eq!(
1521
            q.try_set_desc_table_address(GuestAddress(0xf)).unwrap_err(),
1522
            Error::InvalidDescTableAlign
1523
        );
1524
        assert_eq!(q.desc_table(), expected_val);
1525
1526
        let expected_val = q.avail_ring.0;
1527
        assert_eq!(
1528
            q.try_set_avail_ring_address(GuestAddress(0x1)).unwrap_err(),
1529
            Error::InvalidAvailRingAlign
1530
        );
1531
        assert_eq!(q.avail_ring(), expected_val);
1532
1533
        let expected_val = q.used_ring.0;
1534
        assert_eq!(
1535
            q.try_set_used_ring_address(GuestAddress(0x3)).unwrap_err(),
1536
            Error::InvalidUsedRingAlign
1537
        );
1538
        assert_eq!(q.used_ring(), expected_val);
1539
1540
        let expected_val = q.size;
1541
        assert_eq!(q.try_set_size(15).unwrap_err(), Error::InvalidSize);
1542
        assert_eq!(q.size(), expected_val)
1543
    }
1544
1545
    #[test]
1546
    // This is a regression test for a fuzzing finding. If the driver requests a reset of the
1547
    // device but then does not re-initialize the queue, a subsequent call to process
1548
    // a request should yield no descriptors to process. Before this fix we were processing
1549
    // descriptors that were added to the queue before, and we were ending up processing 255
1550
    // descriptors per chain.
1551
    fn test_regression_timeout_after_reset() {
1552
        // The input below was generated by libfuzzer and adapted for this test.
1553
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x10000)]).unwrap();
1554
        let vq = MockSplitQueue::new(m, 1024);
1555
1556
        // The descriptor values below come from that fuzzer input.
1557
        let descriptors: Vec<Descriptor> = vec![
1558
            Descriptor::new(21508325467, 0, 1, 4),
1559
            Descriptor::new(2097152, 4096, 3, 0),
1560
            Descriptor::new(18374686479672737792, 4294967295, 65535, 29),
1561
            Descriptor::new(76842670169653248, 1114115, 0, 0),
1562
            Descriptor::new(16, 983040, 126, 3),
1563
            Descriptor::new(897648164864, 0, 0, 0),
1564
            Descriptor::new(111669149722, 0, 0, 0),
1565
        ];
1566
        vq.build_multiple_desc_chains(&descriptors).unwrap();
1567
1568
        let mut q: Queue = vq.create_queue().unwrap();
1569
1570
        // Setting the queue to ready should not allow consuming descriptors after reset.
1571
        q.reset();
1572
        q.set_ready(true);
1573
        let mut counter = 0;
1574
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
1575
            // this loop should not iterate; it checks that there are no side effects
1576
            // in terms of memory & execution time.
1577
            while desc_chain.next().is_some() {
1578
                counter += 1;
1579
            }
1580
        }
1581
        assert_eq!(counter, 0);
1582
1583
        // Setting the avail_addr to a valid address should not allow consuming descriptors after reset.
1584
        q.reset();
1585
        q.set_avail_ring_address(Some(0x1000), None);
1586
        assert_eq!(q.avail_ring, GuestAddress(0x1000));
1587
        counter = 0;
1588
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
1589
            // this loop should not iterate; it checks that there are no side effects
1590
            // in terms of memory & execution time.
1591
            while desc_chain.next().is_some() {
1592
                counter += 1;
1593
            }
1594
        }
1595
        assert_eq!(counter, 0);
1596
    }
1597
}