Coverage Report

Created: 2025-03-07 06:49

/rust/registry/src/index.crates.io-6f17d22bba15001f/virtio-queue-0.14.0/src/queue.rs

 Line|  Count|Source
    1|       |// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
    2|       |// Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
    3|       |// Copyright © 2019 Intel Corporation.
    4|       |// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
    5|       |// Use of this source code is governed by a BSD-style license that can be
    6|       |// found in the LICENSE-BSD-3-Clause file.
    7|       |//
    8|       |// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
    9|       |
   10|       |use std::mem::size_of;
   11|       |use std::num::Wrapping;
   12|       |use std::ops::Deref;
   13|       |use std::sync::atomic::{fence, Ordering};
   14|       |
   15|       |use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};
   16|       |
   17|       |use crate::defs::{
   18|       |    DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR,
   19|       |    VIRTQ_AVAIL_ELEMENT_SIZE, VIRTQ_AVAIL_RING_HEADER_SIZE, VIRTQ_AVAIL_RING_META_SIZE,
   20|       |    VIRTQ_USED_ELEMENT_SIZE, VIRTQ_USED_RING_HEADER_SIZE, VIRTQ_USED_RING_META_SIZE,
   21|       |};
   22|       |use crate::{
   23|       |    error, Descriptor, DescriptorChain, Error, QueueGuard, QueueOwnedT, QueueState, QueueT,
   24|       |    VirtqUsedElem,
   25|       |};
   26|       |use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
   27|       |
   28|       |/// The maximum queue size as defined in the Virtio Spec.
   29|       |pub const MAX_QUEUE_SIZE: u16 = 32768;
   30|       |
   31|       |/// Struct to maintain information and manipulate a virtio queue.
   32|       |///
   33|       |/// # Example
   34|       |///
   35|       |/// ```rust
   36|       |/// use virtio_queue::{Queue, QueueOwnedT, QueueT};
   37|       |/// use vm_memory::{Bytes, GuestAddress, GuestAddressSpace, GuestMemoryMmap};
   38|       |///
   39|       |/// let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
   40|       |/// let mut queue = Queue::new(1024).unwrap();
   41|       |///
   42|       |/// // First, the driver sets up the queue; this setup is done via writes on the bus (PCI, MMIO).
   43|       |/// queue.set_size(8);
   44|       |/// queue.set_desc_table_address(Some(0x1000), None);
   45|       |/// queue.set_avail_ring_address(Some(0x2000), None);
   46|       |/// queue.set_used_ring_address(Some(0x3000), None);
   47|       |/// queue.set_event_idx(true);
   48|       |/// queue.set_ready(true);
   49|       |/// // The user should check if the queue is valid before starting to use it.
   50|       |/// assert!(queue.is_valid(&m));
   51|       |///
   52|       |/// // Here the driver would add entries in the available ring and then update the `idx` field of
   53|       |/// // the available ring (address = 0x2000 + 2).
   54|       |/// m.write_obj(3, GuestAddress(0x2002));
   55|       |///
   56|       |/// loop {
   57|       |///     queue.disable_notification(&m).unwrap();
   58|       |///
   59|       |///     // Consume entries from the available ring.
   60|       |///     while let Some(chain) = queue.iter(&m).unwrap().next() {
   61|       |///         // Process the descriptor chain, and then add an entry to the used ring and optionally
   62|       |///         // notify the driver.
   63|       |///         queue.add_used(&m, chain.head_index(), 0x100).unwrap();
   64|       |///
   65|       |///         if queue.needs_notification(&m).unwrap() {
   66|       |///             // Here we would notify the driver it has new entries in the used ring to consume.
   67|       |///         }
   68|       |///     }
   69|       |///     if !queue.enable_notification(&m).unwrap() {
   70|       |///         break;
   71|       |///     }
   72|       |/// }
   73|       |///
   74|       |/// // We can reset the queue at some point.
   75|       |/// queue.reset();
   76|       |/// // The queue should not be ready after reset.
   77|       |/// assert!(!queue.ready());
   78|       |/// ```
   79|       |#[derive(Debug, Default, PartialEq, Eq)]
   80|       |pub struct Queue {
   81|       |    /// The maximum size in elements offered by the device.
   82|       |    max_size: u16,
   83|       |
   84|       |    /// Tail position of the available ring.
   85|       |    next_avail: Wrapping<u16>,
   86|       |
   87|       |    /// Head position of the used ring.
   88|       |    next_used: Wrapping<u16>,
   89|       |
   90|       |    /// VIRTIO_F_RING_EVENT_IDX negotiated.
   91|       |    event_idx_enabled: bool,
   92|       |
   93|       |    /// The number of descriptor chains placed in the used ring via `add_used`
   94|       |    /// since the last time `needs_notification` was called on the associated queue.
   95|       |    num_added: Wrapping<u16>,
   96|       |
   97|       |    /// The queue size in elements the driver selected.
   98|       |    size: u16,
   99|       |
  100|       |    /// Indicates if the queue is finished with configuration.
  101|       |    ready: bool,
  102|       |
  103|       |    /// Guest physical address of the descriptor table.
  104|       |    desc_table: GuestAddress,
  105|       |
  106|       |    /// Guest physical address of the available ring.
  107|       |    avail_ring: GuestAddress,
  108|       |
  109|       |    /// Guest physical address of the used ring.
  110|       |    used_ring: GuestAddress,
  111|       |}
  112|       |
  113|       |impl Queue {
  114|       |    /// Equivalent of [`QueueT::set_size`] returning an error in case of an invalid size.
  115|       |    ///
  116|       |    /// This should not be directly used, as the preferred method is part of the [`QueueT`]
  117|       |    /// interface. This is a convenience function for implementing save/restore capabilities.
  118|  9.39k|    pub fn try_set_size(&mut self, size: u16) -> Result<(), Error> {
  119|  9.39k|        if size > self.max_size() || size == 0 || (size & (size - 1)) != 0 {
  120|  9.01k|            return Err(Error::InvalidSize);
  121|   387 |        }
  122|   387 |        self.size = size;
  123|   387 |        Ok(())
  124|  9.39k|    }
  125|       |
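Note: the single branch on line 119 packs the whole size validation together; the `(size & (size - 1)) != 0` term is a standard bit trick for rejecting non-powers-of-two. A minimal standalone sketch of the same predicate (hypothetical helper, not part of the crate):

```rust
/// Mirrors the validity check in `try_set_size`: a queue size is accepted only
/// if it is non-zero, at most `max_size`, and a power of two. For a power of
/// two, clearing the lowest set bit via `size & (size - 1)` yields zero.
fn is_valid_queue_size(size: u16, max_size: u16) -> bool {
    size != 0 && size <= max_size && (size & (size - 1)) == 0
}

fn main() {
    assert!(is_valid_queue_size(8, 256)); // power of two, in range
    assert!(!is_valid_queue_size(11, 256)); // not a power of two
    assert!(!is_valid_queue_size(0, 256)); // zero is rejected
    assert!(!is_valid_queue_size(512, 256)); // exceeds max_size
}
```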
  126|       |    /// Tries to set the descriptor table address. In case of an invalid value, the address is
  127|       |    /// not updated.
  128|       |    ///
  129|       |    /// This should not be directly used, as the preferred method is
  130|       |    /// [`QueueT::set_desc_table_address`]. This is a convenience function for implementing
  131|       |    /// save/restore capabilities.
  132|  9.39k|    pub fn try_set_desc_table_address(&mut self, desc_table: GuestAddress) -> Result<(), Error> {
  133|  9.39k|        if desc_table.mask(0xf) != 0 {
  134|     0 |            return Err(Error::InvalidDescTableAlign);
  135|  9.39k|        }
  136|  9.39k|        self.desc_table = desc_table;
  137|  9.39k|
  138|  9.39k|        Ok(())
  139|  9.39k|    }
  140|       |
  141|       |    /// Tries to update the available ring address. In case of an invalid value, the address is
  142|       |    /// not updated.
  143|       |    ///
  144|       |    /// This should not be directly used, as the preferred method is
  145|       |    /// [`QueueT::set_avail_ring_address`]. This is a convenience function for implementing
  146|       |    /// save/restore capabilities.
  147|  9.39k|    pub fn try_set_avail_ring_address(&mut self, avail_ring: GuestAddress) -> Result<(), Error> {
  148|  9.39k|        if avail_ring.mask(0x1) != 0 {
  149|     0 |            return Err(Error::InvalidAvailRingAlign);
  150|  9.39k|        }
  151|  9.39k|        self.avail_ring = avail_ring;
  152|  9.39k|        Ok(())
  153|  9.39k|    }
  154|       |
  155|       |    /// Tries to update the used ring address. In case of an invalid value, the address is not
  156|       |    /// updated.
  157|       |    ///
  158|       |    /// This should not be directly used, as the preferred method is
  159|       |    /// [`QueueT::set_used_ring_address`]. This is a convenience function for implementing
  160|       |    /// save/restore capabilities.
  161|  9.39k|    pub fn try_set_used_ring_address(&mut self, used_ring: GuestAddress) -> Result<(), Error> {
  162|  9.39k|        if used_ring.mask(0x3) != 0 {
  163|     0 |            return Err(Error::InvalidUsedRingAlign);
  164|  9.39k|        }
  165|  9.39k|        self.used_ring = used_ring;
  166|  9.39k|        Ok(())
  167|  9.39k|    }
  168|       |
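Note: the three `try_set_*_address` setters above enforce the split-ring alignment rules (descriptor table 16-byte, available ring 2-byte, used ring 4-byte) by masking the low address bits. A small sketch of the same idea on a plain u64, assuming power-of-two alignments as in the virtio spec:

```rust
// Hypothetical standalone version of the alignment checks: `addr & (align - 1)`
// is nonzero exactly when `addr` is not a multiple of the power-of-two `align`,
// which is what `GuestAddress::mask(0xf / 0x1 / 0x3)` tests above.
fn is_aligned(addr: u64, align: u64) -> bool {
    debug_assert!(align.is_power_of_two());
    addr & (align - 1) == 0
}

fn main() {
    assert!(is_aligned(0x1000, 16)); // acceptable descriptor table address
    assert!(!is_aligned(0xf, 16)); // rejected: low four bits set
    assert!(is_aligned(0x2002, 2)); // the available ring only needs 2-byte alignment
    assert!(!is_aligned(0x3, 4)); // the used ring needs 4-byte alignment
}
```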
  169|       |    /// Returns the state of the `Queue`.
  170|       |    ///
  171|       |    /// This is useful for implementing save/restore capabilities.
  172|       |    /// The state does not have support for serialization, but this can be
  173|       |    /// added by VMMs locally through the use of a
  174|       |    /// [remote type](https://serde.rs/remote-derive.html).
  175|       |    ///
  176|       |    /// Alternatively, a version-aware and serializable/deserializable QueueState
  177|       |    /// is available in the `virtio-queue-ser` crate.
  178|     0 |    pub fn state(&self) -> QueueState {
  179|     0 |        QueueState {
  180|     0 |            max_size: self.max_size,
  181|     0 |            next_avail: self.next_avail(),
  182|     0 |            next_used: self.next_used(),
  183|     0 |            event_idx_enabled: self.event_idx_enabled,
  184|     0 |            size: self.size,
  185|     0 |            ready: self.ready,
  186|     0 |            desc_table: self.desc_table(),
  187|     0 |            avail_ring: self.avail_ring(),
  188|     0 |            used_ring: self.used_ring(),
  189|     0 |        }
  190|     0 |    }
  191|       |
  192|       |    // Helper method that writes `val` to the `avail_event` field of the used ring, using
  193|       |    // the provided ordering.
  194|     0 |    fn set_avail_event<M: GuestMemory>(
  195|     0 |        &self,
  196|     0 |        mem: &M,
  197|     0 |        val: u16,
  198|     0 |        order: Ordering,
  199|     0 |    ) -> Result<(), Error> {
  200|     0 |        // This cannot overflow a u64 since it is working with relatively small numbers compared
  201|     0 |        // to u64::MAX.
  202|     0 |        let avail_event_offset =
  203|     0 |            VIRTQ_USED_RING_HEADER_SIZE + VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size);
  204|     0 |        let addr = self
  205|     0 |            .used_ring
  206|     0 |            .checked_add(avail_event_offset)
  207|     0 |            .ok_or(Error::AddressOverflow)?;
  208|       |
  209|     0 |        mem.store(u16::to_le(val), addr, order)
  210|     0 |            .map_err(Error::GuestMemory)
  211|     0 |    }
  Unexecuted instantiation: <virtio_queue::queue::Queue>::set_avail_event::<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>
  Unexecuted instantiation: <virtio_queue::queue::Queue>::set_avail_event::<_>
  212|       |
  213|       |    // Set the value of the `flags` field of the used ring, applying the specified ordering.
  214| 45.5k |    fn set_used_flags<M: GuestMemory>(
  215| 45.5k |        &mut self,
  216| 45.5k |        mem: &M,
  217| 45.5k |        val: u16,
  218| 45.5k |        order: Ordering,
  219| 45.5k |    ) -> Result<(), Error> {
  220| 45.5k |        mem.store(u16::to_le(val), self.used_ring, order)
  221| 45.5k |            .map_err(Error::GuestMemory)
  222| 45.5k |    }
  Instantiation (counts identical to the listing above): <virtio_queue::queue::Queue>::set_used_flags::<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>
Unexecuted instantiation: <virtio_queue::queue::Queue>::set_used_flags::<_>
  223|       |
  224|       |    // Write the appropriate values to enable or disable notifications from the driver.
  225|       |    //
  226|       |    // Every access in this method uses `Relaxed` ordering because a fence is added by the caller
  227|       |    // when appropriate.
  228| 45.5k |    fn set_notification<M: GuestMemory>(&mut self, mem: &M, enable: bool) -> Result<(), Error> {
  229| 45.5k |        if enable {
  230| 45.5k |            if self.event_idx_enabled {
  231|       |                // We call `set_avail_event` using the `next_avail` value, instead of reading
  232|       |                // and using the current `avail_idx` to avoid missing notifications. More
  233|       |                // details in `enable_notification`.
  234|     0 |                self.set_avail_event(mem, self.next_avail.0, Ordering::Relaxed)
  235|       |            } else {
  236| 45.5k |                self.set_used_flags(mem, 0, Ordering::Relaxed)
  237|       |            }
  238|     0 |        } else if !self.event_idx_enabled {
  239|     0 |            self.set_used_flags(mem, VRING_USED_F_NO_NOTIFY as u16, Ordering::Relaxed)
  240|       |        } else {
  241|       |            // Notifications are effectively disabled by default after triggering once when
  242|       |            // `VIRTIO_F_EVENT_IDX` is negotiated, so we don't do anything in that case.
  243|     0 |            Ok(())
  244|       |        }
  245| 45.5k |    }
  Instantiation (counts identical to the listing above): <virtio_queue::queue::Queue>::set_notification::<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>
Unexecuted instantiation: <virtio_queue::queue::Queue>::set_notification::<_>
  246|       |
  247|       |    // Return the value present in the used_event field of the avail ring.
  248|       |    //
  249|       |    // If the VIRTIO_F_EVENT_IDX feature bit is not negotiated, the flags field in the available
  250|       |    // ring offers a crude mechanism for the driver to inform the device that it doesn't want
  251|       |    // interrupts when buffers are used. Otherwise virtq_avail.used_event is a more performant
  252|       |    // alternative where the driver specifies how far the device can progress before interrupting.
  253|       |    //
  254|       |    // Neither of these interrupt suppression methods is reliable, as they are not synchronized
  255|       |    // with the device, but they serve as useful optimizations. So we only ensure access to
  256|       |    // virtq_avail.used_event is atomic, but do not need to synchronize with other memory accesses.
  257|     0 |    fn used_event<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
  258|     0 |        // This cannot overflow a u64 since it is working with relatively small numbers compared
  259|     0 |        // to u64::MAX.
  260|     0 |        let used_event_offset =
  261|     0 |            VIRTQ_AVAIL_RING_HEADER_SIZE + u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE;
  262|     0 |        let used_event_addr = self
  263|     0 |            .avail_ring
  264|     0 |            .checked_add(used_event_offset)
  265|     0 |            .ok_or(Error::AddressOverflow)?;
  266|       |
  267|     0 |        mem.load(used_event_addr, order)
  268|     0 |            .map(u16::from_le)
  269|     0 |            .map(Wrapping)
  270|     0 |            .map_err(Error::GuestMemory)
  271|     0 |    }
  Unexecuted instantiation: <virtio_queue::queue::Queue>::used_event::<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>
  Unexecuted instantiation: <virtio_queue::queue::Queue>::used_event::<_>
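Note: `used_event` and `avail_event` are the two EVENT_IDX suppression fields, and each lives immediately after its ring's entries; that is where the offset computations in `set_avail_event` and `used_event` above come from. A worked sketch of those offsets, assuming the virtio split-ring sizes that the crate's VIRTQ_* constants encode (4-byte ring headers, 2-byte avail elements, 8-byte used elements):

```rust
// virtq_avail: flags(2) + idx(2) + ring[size] * 2 + used_event(2)
// virtq_used:  flags(2) + idx(2) + ring[size] * 8 + avail_event(2)
fn used_event_offset(queue_size: u16) -> u64 {
    4 + u64::from(queue_size) * 2 // right after the available ring entries
}

fn avail_event_offset(queue_size: u16) -> u64 {
    4 + u64::from(queue_size) * 8 // right after the used ring entries
}

fn main() {
    // For the 8-entry queue from the doc example near the top of the file:
    assert_eq!(used_event_offset(8), 20); // read from avail_ring + 20
    assert_eq!(avail_event_offset(8), 68); // written at used_ring + 68
}
```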
  272|       |}
  273|       |
  274|       |impl<'a> QueueGuard<'a> for Queue {
  275|       |    type G = &'a mut Self;
  276|       |}
  277|       |
  278|       |impl QueueT for Queue {
  279|  9.39k|    fn new(max_size: u16) -> Result<Self, Error> {
  280|  9.39k|        // We need to check that the max size is a power of 2 because we're setting this as the
  281|  9.39k|        // queue size, and the valid queue sizes are a power of 2 as per the specification.
  282|  9.39k|        if max_size == 0 || max_size > MAX_QUEUE_SIZE || (max_size & (max_size - 1)) != 0 {
  283|     0 |            return Err(Error::InvalidMaxSize);
  284|  9.39k|        }
  285|  9.39k|        Ok(Queue {
  286|  9.39k|            max_size,
  287|  9.39k|            size: max_size,
  288|  9.39k|            ready: false,
  289|  9.39k|            desc_table: GuestAddress(DEFAULT_DESC_TABLE_ADDR),
  290|  9.39k|            avail_ring: GuestAddress(DEFAULT_AVAIL_RING_ADDR),
  291|  9.39k|            used_ring: GuestAddress(DEFAULT_USED_RING_ADDR),
  292|  9.39k|            next_avail: Wrapping(0),
  293|  9.39k|            next_used: Wrapping(0),
  294|  9.39k|            event_idx_enabled: false,
  295|  9.39k|            num_added: Wrapping(0),
  296|  9.39k|        })
  297|  9.39k|    }
  298|       |
  299|     0 |    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
  300|     0 |        let queue_size = self.size as u64;
  301|     0 |        let desc_table = self.desc_table;
  302|     0 |        // The multiplication cannot overflow a u64 since we are multiplying a u16 with a
  303|     0 |        // small number.
  304|     0 |        let desc_table_size = size_of::<Descriptor>() as u64 * queue_size;
  305|     0 |        let avail_ring = self.avail_ring;
  306|     0 |        // The operations below cannot overflow a u64 since they're working with relatively small
  307|     0 |        // numbers compared to u64::MAX.
  308|     0 |        let avail_ring_size = VIRTQ_AVAIL_RING_META_SIZE + VIRTQ_AVAIL_ELEMENT_SIZE * queue_size;
  309|     0 |        let used_ring = self.used_ring;
  310|     0 |        let used_ring_size = VIRTQ_USED_RING_META_SIZE + VIRTQ_USED_ELEMENT_SIZE * queue_size;
  311|     0 |
  312|     0 |        if !self.ready {
  313|     0 |            error!("attempt to use virtio queue that is not marked ready");
  314|     0 |            false
  315|     0 |        } else if desc_table
  316|     0 |            .checked_add(desc_table_size)
  317|     0 |            .map_or(true, |v| !mem.address_in_range(v))
  318|       |        {
  319|     0 |            error!(
  320|     0 |                "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
  321|     0 |                desc_table.raw_value(),
  322|       |                desc_table_size
  323|       |            );
  324|     0 |            false
  325|     0 |        } else if avail_ring
  326|     0 |            .checked_add(avail_ring_size)
  327|     0 |            .map_or(true, |v| !mem.address_in_range(v))
  328|       |        {
  329|     0 |            error!(
  330|     0 |                "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
  331|     0 |                avail_ring.raw_value(),
  332|       |                avail_ring_size
  333|       |            );
  334|     0 |            false
  335|     0 |        } else if used_ring
  336|     0 |            .checked_add(used_ring_size)
  337|     0 |            .map_or(true, |v| !mem.address_in_range(v))
  338|       |        {
  339|     0 |            error!(
  340|     0 |                "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
  341|     0 |                used_ring.raw_value(),
  342|       |                used_ring_size
  343|       |            );
  344|     0 |            false
  345|       |        } else {
  346|     0 |            true
  347|       |        }
  348|     0 |    }
  Unexecuted instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueT>::is_valid::<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>
  Unexecuted instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueT>::is_valid::<_>
  (The three bounds-check closures of each instantiation are likewise unexecuted.)
  349|       |
  350|     0 |    fn reset(&mut self) {
  351|     0 |        self.ready = false;
  352|     0 |        self.size = self.max_size;
  353|     0 |        self.desc_table = GuestAddress(DEFAULT_DESC_TABLE_ADDR);
  354|     0 |        self.avail_ring = GuestAddress(DEFAULT_AVAIL_RING_ADDR);
  355|     0 |        self.used_ring = GuestAddress(DEFAULT_USED_RING_ADDR);
  356|     0 |        self.next_avail = Wrapping(0);
  357|     0 |        self.next_used = Wrapping(0);
  358|     0 |        self.num_added = Wrapping(0);
  359|     0 |        self.event_idx_enabled = false;
  360|     0 |    }
  361|       |
  362|     0 |    fn lock(&mut self) -> <Self as QueueGuard>::G {
  363|     0 |        self
  364|     0 |    }
  365|       |
  366|  9.39k|    fn max_size(&self) -> u16 {
  367|  9.39k|        self.max_size
  368|  9.39k|    }
  369|       |
  370|   816 |    fn size(&self) -> u16 {
  371|   816 |        self.size
  372|   816 |    }
  373|       |
  374|  9.39k|    fn set_size(&mut self, size: u16) {
  375|  9.39k|        if self.try_set_size(size).is_err() {
  376|  9.01k|            error!("virtio queue with invalid size: {}", size);
  377|   387 |        }
  378|  9.39k|    }
  379|       |
  380|     0 |    fn ready(&self) -> bool {
  381|     0 |        self.ready
  382|     0 |    }
  383|       |
  384|  9.39k|    fn set_ready(&mut self, ready: bool) {
  385|  9.39k|        self.ready = ready;
  386|  9.39k|    }
  387|       |
  388|     0 |    fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) {
  389|     0 |        let low = low.unwrap_or(self.desc_table.0 as u32) as u64;
  390|     0 |        let high = high.unwrap_or((self.desc_table.0 >> 32) as u32) as u64;
  391|     0 |
  392|     0 |        let desc_table = GuestAddress((high << 32) | low);
  393|     0 |        if self.try_set_desc_table_address(desc_table).is_err() {
  394|     0 |            error!("virtio queue descriptor table breaks alignment constraints");
  395|     0 |        }
  396|     0 |    }
  397|       |
  398|     0 |    fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
  399|     0 |        let low = low.unwrap_or(self.avail_ring.0 as u32) as u64;
  400|     0 |        let high = high.unwrap_or((self.avail_ring.0 >> 32) as u32) as u64;
  401|     0 |
  402|     0 |        let avail_ring = GuestAddress((high << 32) | low);
  403|     0 |        if self.try_set_avail_ring_address(avail_ring).is_err() {
  404|     0 |            error!("virtio queue available ring breaks alignment constraints");
  405|     0 |        }
  406|     0 |    }
  407|       |
  408|     0 |    fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
  409|     0 |        let low = low.unwrap_or(self.used_ring.0 as u32) as u64;
  410|     0 |        let high = high.unwrap_or((self.used_ring.0 >> 32) as u32) as u64;
  411|     0 |
  412|     0 |        let used_ring = GuestAddress((high << 32) | low);
  413|     0 |        if self.try_set_used_ring_address(used_ring).is_err() {
  414|     0 |            error!("virtio queue used ring breaks alignment constraints");
  415|     0 |        }
  416|     0 |    }
  417|       |
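Note: the three `set_*_address` methods model how a driver programs a 64-bit ring address through two 32-bit bus registers; whichever half is `None` keeps its previously stored bits. A minimal sketch of the merge (hypothetical helper, not part of the crate):

```rust
// Merge a 64-bit guest address from separate 32-bit low/high register writes,
// as the setters above do; a `None` half preserves the bits in `current`.
fn merge_address(current: u64, low: Option<u32>, high: Option<u32>) -> u64 {
    let low = low.unwrap_or(current as u32) as u64;
    let high = high.unwrap_or((current >> 32) as u32) as u64;
    (high << 32) | low
}

fn main() {
    let mut addr = 0u64;
    addr = merge_address(addr, Some(0x4000), None); // driver writes the low half first
    addr = merge_address(addr, None, Some(0x1)); // then the high half
    assert_eq!(addr, 0x1_0000_4000);
}
```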
  418| 11.7k |    fn set_event_idx(&mut self, enabled: bool) {
  419| 11.7k |        self.event_idx_enabled = enabled;
  420| 11.7k |    }
  421|       |
  422|  260k |    fn avail_idx<M>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error>
  423|  260k |    where
  424|  260k |        M: GuestMemory + ?Sized,
  425|  260k |    {
  426|  260k |        let addr = self
  427|  260k |            .avail_ring
  428|  260k |            .checked_add(2)
  429|  260k |            .ok_or(Error::AddressOverflow)?;
  430|       |
  431|  260k |        mem.load(addr, order)
  432|  260k |            .map(u16::from_le)
  433|  260k |            .map(Wrapping)
  434|  260k |            .map_err(Error::GuestMemory)
  435|  260k |    }
  Instantiation (counts identical to the listing above): <virtio_queue::queue::Queue as virtio_queue::QueueT>::avail_idx::<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>
Unexecuted instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueT>::avail_idx::<_>
  436|       |
  437|   754 |    fn used_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
  438|   754 |        let addr = self
  439|   754 |            .used_ring
  440|   754 |            .checked_add(2)
  441|   754 |            .ok_or(Error::AddressOverflow)?;
  442|       |
  443|   754 |        mem.load(addr, order)
  444|   754 |            .map(u16::from_le)
  445|   754 |            .map(Wrapping)
  446|   754 |            .map_err(Error::GuestMemory)
  447|   754 |    }
  Instantiation (counts identical to the listing above): <virtio_queue::queue::Queue as virtio_queue::QueueT>::used_idx::<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>
Unexecuted instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueT>::used_idx::<_>
  448|       |
  449|  192k |    fn add_used<M: GuestMemory>(
  450|  192k |        &mut self,
  451|  192k |        mem: &M,
  452|  192k |        head_index: u16,
  453|  192k |        len: u32,
  454|  192k |    ) -> Result<(), Error> {
  455|  192k |        if head_index >= self.size {
  456|   194 |            error!(
  457|     0 |                "attempted to add out of bounds descriptor to used ring: {}",
  458|       |                head_index
  459|       |            );
  460|   194 |            return Err(Error::InvalidDescriptorIndex);
  461|  192k |        }
  462|  192k |
  463|  192k |        let next_used_index = u64::from(self.next_used.0 % self.size);
  464|  192k |        // This cannot overflow a u64 since it is working with relatively small numbers compared
  465|  192k |        // to u64::MAX.
  466|  192k |        let offset = VIRTQ_USED_RING_HEADER_SIZE + next_used_index * VIRTQ_USED_ELEMENT_SIZE;
  467|  192k |        let addr = self
  468|  192k |            .used_ring
  469|  192k |            .checked_add(offset)
  470|  192k |            .ok_or(Error::AddressOverflow)?;
  471|  192k |        mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
  472|  192k |            .map_err(Error::GuestMemory)?;
  473|       |
  474|  192k |        self.next_used += Wrapping(1);
  475|  192k |        self.num_added += Wrapping(1);
  476|  192k |
  477|  192k |        mem.store(
  478|  192k |            u16::to_le(self.next_used.0),
  479|  192k |            self.used_ring
  480|  192k |                .checked_add(2)
  481|  192k |                .ok_or(Error::AddressOverflow)?,
  482|  192k |            Ordering::Release,
  483|  192k |        )
  484|  192k |        .map_err(Error::GuestMemory)
  485|  192k |    }
  Instantiation (counts identical to the listing above): <virtio_queue::queue::Queue as virtio_queue::QueueT>::add_used::<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>
Unexecuted instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueT>::add_used::<_>
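Note: `add_used` wraps `next_used` modulo the queue size to pick the element slot (line 463), writes the 8-byte used element there, and only then publishes the new index at `used_ring + 2` with `Release` ordering so the driver's `Acquire` read observes a fully written element. A small sketch of the slot arithmetic, assuming the usual split-ring sizes (4-byte used-ring header, 8-byte elements):

```rust
// Where the used element for a completion lands in guest memory.
fn used_elem_addr(used_ring: u64, next_used: u16, queue_size: u16) -> u64 {
    let slot = u64::from(next_used % queue_size); // ring slots wrap modulo the queue size
    used_ring + 4 + slot * 8
}

fn main() {
    // With a 16-entry used ring at 0x3000, the 17th completion wraps back to slot 0.
    assert_eq!(used_elem_addr(0x3000, 16, 16), 0x3004);
    assert_eq!(used_elem_addr(0x3000, 1, 16), 0x300c);
}
```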
  486|       |
  487| 45.5k |    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
  488| 45.5k |        self.set_notification(mem, true)?;
  489|       |        // Ensures the following read is not reordered before any previous write operation.
  490| 45.5k |        fence(Ordering::SeqCst);
  491| 45.5k |
  492| 45.5k |        // We double check here to avoid the situation where the available ring has been updated
  493| 45.5k |        // just before we re-enabled notifications, and it's possible to miss one. We compare the
  494| 45.5k |        // current `avail_idx` value to `self.next_avail` because it's where we stopped processing
  495| 45.5k |        // entries. There are situations where we intentionally avoid processing everything in the
  496| 45.5k |        // available ring (which will cause this method to return `true`), but in that case we'll
  497| 45.5k |        // probably not re-enable notifications as we already know there are pending entries.
  498| 45.5k |        self.avail_idx(mem, Ordering::Relaxed)
  499| 45.5k |            .map(|idx| idx != self.next_avail)
  500| 45.5k |    }
  Instantiation (counts identical to the listing above, including its closure): <virtio_queue::queue::Queue as virtio_queue::QueueT>::enable_notification::<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>
Unexecuted instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueT>::enable_notification::<_>
  501|       |
  502|     0 |    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
  503|     0 |        self.set_notification(mem, false)
  504|     0 |    }
  505|       |
  506|   968 |    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
  507|   968 |        let used_idx = self.next_used;
  508|   968 |
  509|   968 |        // Complete all the writes in add_used() before reading the event.
  510|   968 |        fence(Ordering::SeqCst);
  511|   968 |
  512|   968 |        // The VRING_AVAIL_F_NO_INTERRUPT flag isn't supported yet.
  513|   968 |
  514|   968 |        // When the `EVENT_IDX` feature is negotiated, the driver writes into `used_event`
  515|   968 |        // a value that's used by the device to determine whether a notification must
  516|   968 |        // be submitted after adding a descriptor chain to the used ring. According to the
  517|   968 |        // standard, the notification must be sent when `next_used == used_event + 1`, but
  518|   968 |        // various device model implementations rely on an inequality instead, most likely
  519|   968 |        // to also support use cases where a bunch of descriptor chains are added to the used
  520|   968 |        // ring first, and only afterwards the `needs_notification` logic is called. For example,
  521|   968 |        // the approach based on `num_added` below is taken from the Linux kernel implementation
  522|   968 |        // (i.e. https://elixir.bootlin.com/linux/v5.15.35/source/drivers/virtio/virtio_ring.c#L661)
  523|   968 |
  524|   968 |        // The `old` variable below is used to determine the value of `next_used` from when
  525|   968 |        // `needs_notification` was called last (each `needs_notification` call resets `num_added`
  526|   968 |        // to zero, while each `add_used` call increments it by one). Then, the logic below
  527|   968 |        // uses wrapped arithmetic to see whether `used_event` can be found between `old` and
  528|   968 |        // `next_used` in the circular sequence space of the used ring.
  529|   968 |        if self.event_idx_enabled {
  530|     0 |            let used_event = self.used_event(mem, Ordering::Relaxed)?;
  531|     0 |            let old = used_idx - self.num_added;
  532|     0 |            self.num_added = Wrapping(0);
  533|     0 |
  534|     0 |            return Ok(used_idx - used_event - Wrapping(1) < used_idx - old);
  535|   968 |        }
  536|   968 |
  537|   968 |        Ok(true)
  538|   968 |    }
  Instantiation (counts identical to the listing above): <virtio_queue::queue::Queue as virtio_queue::QueueT>::needs_notification::<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>
Unexecuted instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueT>::needs_notification::<_>
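Note: the return expression on line 534 is the split-ring event-index test (the counterpart of the kernel's `vring_need_event`): with wrapping u16 arithmetic it is true exactly when `used_event` lies in the window `(old, next_used]` of entries published since the last call. A worked sketch with hypothetical index values:

```rust
use std::num::Wrapping;

// True iff `used_event` is in the half-open window (old, new] of used-ring
// indices; wrapping arithmetic keeps it correct across u16 wrap-around.
fn need_event(used_event: Wrapping<u16>, new: Wrapping<u16>, old: Wrapping<u16>) -> bool {
    new - used_event - Wrapping(1) < new - old
}

fn main() {
    // Device published entries 11..=13 since the last check; the driver asked
    // to be notified once entry 11 was used, so a notification is due.
    assert!(need_event(Wrapping(11), Wrapping(13), Wrapping(10)));
    // The driver's event index is already caught up: no notification.
    assert!(!need_event(Wrapping(13), Wrapping(13), Wrapping(10)));
    // Still correct when the indices wrap around 65535.
    assert!(need_event(Wrapping(65535), Wrapping(1), Wrapping(65534)));
}
```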
  539|       |
  540|     0 |    fn next_avail(&self) -> u16 {
  541|     0 |        self.next_avail.0
  542|     0 |    }
  543|       |
  544|  9.39k|    fn set_next_avail(&mut self, next_avail: u16) {
  545|  9.39k|        self.next_avail = Wrapping(next_avail);
  546|  9.39k|    }
  547|       |
  548|     0 |    fn next_used(&self) -> u16 {
  549|     0 |        self.next_used.0
  550|     0 |    }
  551|       |
  552|  9.39k|    fn set_next_used(&mut self, next_used: u16) {
  553|  9.39k|        self.next_used = Wrapping(next_used);
  554|  9.39k|    }
  555|       |
  556|     0 |    fn desc_table(&self) -> u64 {
  557|     0 |        self.desc_table.0
  558|     0 |    }
  559|       |
  560|     0 |    fn avail_ring(&self) -> u64 {
  561|     0 |        self.avail_ring.0
  562|     0 |    }
  563|       |
  564|     0 |    fn used_ring(&self) -> u64 {
  565|     0 |        self.used_ring.0
  566|     0 |    }
  567|       |
  568|     0 |    fn event_idx_enabled(&self) -> bool {
  569|     0 |        self.event_idx_enabled
  570|     0 |    }
  571|       |
  572|  214k |    fn pop_descriptor_chain<M>(&mut self, mem: M) -> Option<DescriptorChain<M>>
  573|  214k |    where
  574|  214k |        M: Clone + Deref,
  575|  214k |        M::Target: GuestMemory,
  576|  214k |    {
  577|  214k |        // Default, iter-based impl. Will be subsequently improved.
  578|  214k |        match self.iter(mem) {
  579|  212k |            Ok(mut iter) => iter.next(),
  580| 1.29k |            Err(e) => {
  581| 1.29k |                error!("Iterator error {}", e);
  582| 1.29k |                None
  583|       |            }
  584|       |        }
  585|  214k |    }
  Instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueT>::pop_descriptor_chain::<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>> (189k calls: 188k reached `iter.next()`, 1.07k took the iterator error path)
  Instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueT>::pop_descriptor_chain::<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>> (24.2k calls: 24.0k reached `iter.next()`, 221 took the iterator error path)
Unexecuted instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueT>::pop_descriptor_chain::<_>
  586|       |}
  587|       |
  588|       |impl QueueOwnedT for Queue {
  589|  214k |    fn iter<M>(&mut self, mem: M) -> Result<AvailIter<'_, M>, Error>
  590|  214k |    where
  591|  214k |        M: Deref,
  592|  214k |        M::Target: GuestMemory,
  593|  214k |    {
  594|  214k |        // We're checking here that a reset did not happen without re-initializing the queue.
  595|  214k |        // TODO: In the future we might want to also check that the other parameters in the
  596|  214k |        // queue are valid.
  597|  214k |        if !self.ready || self.avail_ring == GuestAddress(0) {
  598|     0 |            return Err(Error::QueueNotReady);
  599|  214k |        }
  600|  214k |
  601|  214k |        self.avail_idx(mem.deref(), Ordering::Acquire)
  602|  214k |            .map(move |idx| AvailIter::new(mem, idx, self))?
  603|  214k |    }
  Instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueOwnedT>::iter::<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>> (189k calls, none rejected with QueueNotReady)
  Instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueOwnedT>::iter::<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>> (24.2k calls, none rejected with QueueNotReady)
Unexecuted instantiation: <virtio_queue::queue::Queue as virtio_queue::QueueOwnedT>::iter::<_>
  604|       |
  605|    46 |    fn go_to_previous_position(&mut self) {
  606|    46 |        self.next_avail -= Wrapping(1);
  607|    46 |    }
  608|       |}
  609|       |
  610|       |/// Consuming iterator over all available descriptor chain heads in the queue.
  611|       |///
  612|       |/// # Example
  613|       |///
  614|       |/// ```rust
  615|       |/// # use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE};
  616|       |/// # use virtio_queue::mock::MockSplitQueue;
  617|       |/// use virtio_queue::{Descriptor, Queue, QueueOwnedT};
  618|       |/// use vm_memory::{GuestAddress, GuestMemoryMmap};
  619|       |///
  620|       |/// # fn populate_queue(m: &GuestMemoryMmap) -> Queue {
  621|       |/// #    let vq = MockSplitQueue::new(m, 16);
  622|       |/// #    let mut q: Queue = vq.create_queue().unwrap();
  623|       |/// #
  624|       |/// #    // The chains are (0, 1), (2, 3, 4) and (5, 6).
  625|       |/// #    let mut descs = Vec::new();
  626|       |/// #    for i in 0..7 {
  627|       |/// #        let flags = match i {
  628|       |/// #            1 | 6 => 0,
  629|       |/// #            2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
  630|       |/// #            4 => VRING_DESC_F_WRITE,
  631|       |/// #            _ => VRING_DESC_F_NEXT,
  632|       |/// #        };
  633|       |/// #
  634|       |/// #        descs.push(Descriptor::new((0x1000 * (i + 1)) as u64, 0x1000, flags as u16, i + 1));
  635|       |/// #    }
  636|       |/// #
  637|       |/// #    vq.add_desc_chains(&descs, 0).unwrap();
  638|       |/// #    q
  639|       |/// # }
  640|       |/// let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
  641|       |/// // Populate the queue with descriptor chains and update the available ring accordingly.
  642|       |/// let mut queue = populate_queue(m);
  643|       |/// let mut i = queue.iter(m).unwrap();
  644|       |///
  645|       |/// {
  646|       |///     let mut c = i.next().unwrap();
  647|       |///     let _first_head_index = c.head_index();
  648|       |///     // We should have two descriptors in the first chain.
  649|       |///     let _desc1 = c.next().unwrap();
  650|       |///     let _desc2 = c.next().unwrap();
  651|       |/// }
  652|       |///
  653|       |/// {
  654|       |///     let c = i.next().unwrap();
  655|       |///     let _second_head_index = c.head_index();
  656|       |///
  657|       |///     let mut iter = c.writable();
  658|       |///     // We should have two writable descriptors in the second chain.
  659|       |///     let _desc1 = iter.next().unwrap();
  660|       |///     let _desc2 = iter.next().unwrap();
  661|       |/// }
  662|       |///
  663|       |/// {
  664|       |///     let c = i.next().unwrap();
  665|       |///     let _third_head_index = c.head_index();
  666|       |///
  667|       |///     let mut iter = c.readable();
  668|       |///     // We should have one readable descriptor in the third chain.
  669|       |///     let _desc1 = iter.next().unwrap();
  670|       |/// }
  671|       |/// // Let's go back one position in the available ring.
  672|       |/// i.go_to_previous_position();
  673|       |/// // We should be able to access again the third descriptor chain.
  674|       |/// let c = i.next().unwrap();
  675|       |/// let _third_head_index = c.head_index();
  676|       |/// ```
  677|       |#[derive(Debug)]
  678|       |pub struct AvailIter<'b, M> {
  679|       |    mem: M,
  680|       |    desc_table: GuestAddress,
  681|       |    avail_ring: GuestAddress,
  682|       |    queue_size: u16,
  683|       |    last_index: Wrapping<u16>,
  684|       |    next_avail: &'b mut Wrapping<u16>,
  685|       |}
  686|       |
  687|       |impl<'b, M> AvailIter<'b, M>
  688|       |where
  689|       |    M: Deref,
  690|       |    M::Target: GuestMemory,
  691|       |{
  692|       |    /// Create a new instance of `AvailIter`.
  693|       |    ///
  694|       |    /// # Arguments
  695|       |    /// * `mem` - the `GuestMemory` object that can be used to access the queue buffers.
  696|       |    /// * `idx` - the index of the available ring entry where the driver would put the next
  697|       |    ///           available descriptor chain.
  698|       |    /// * `queue` - the `Queue` object from which the needed data to create the `AvailIter` can
  699|       |    ///             be retrieved.
  700|  214k |    pub(crate) fn new(mem: M, idx: Wrapping<u16>, queue: &'b mut Queue) -> Result<Self, Error> {
  701|  214k |        // The number of descriptor chain heads to process should always
  702|  214k |        // be smaller than or equal to the queue size, as the driver should
  703|  214k |        // never ask the VMM to process an available ring entry more than
  704|  214k |        // once. Checking and reporting such incorrect driver behavior
  705|  214k |        // can prevent potential hanging and Denial-of-Service from
  706|  214k |        // happening on the VMM side.
  707|  214k |        if (idx - queue.next_avail).0 > queue.size {
  708| 1.29k |            return Err(Error::InvalidAvailRingIndex);
  709|  212k |        }
  710|  212k |
  711|  212k |        Ok(AvailIter {
  712|  212k |            mem,
  713|  212k |            desc_table: queue.desc_table,
  714|  212k |            avail_ring: queue.avail_ring,
  715|  212k |            queue_size: queue.size,
  716|  212k |            last_index: idx,
  717|  212k |            next_avail: &mut queue.next_avail,
  718|  212k |        })
  719|  214k |    }
  Instantiation: <virtio_queue::queue::AvailIter<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>>::new (189k calls: 188k succeeded, 1.07k returned InvalidAvailRingIndex)
  Instantiation: <virtio_queue::queue::AvailIter<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>::new (24.2k calls: 24.0k succeeded, 221 returned InvalidAvailRingIndex)
Unexecuted instantiation: <virtio_queue::queue::AvailIter<_>>::new
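Note: the guard on line 707 measures, with wrapping subtraction, how far the driver's published index has run ahead of the device's `next_avail`; a distance above the queue size can only come from a buggy or malicious driver, and rejecting it (the 1.29k InvalidAvailRingIndex hits above) keeps the iterator from spinning over stale entries. A small sketch of the distance check:

```rust
use std::num::Wrapping;

// Wrapped distance between the driver's avail idx and the device's position.
fn pending_chains(idx: Wrapping<u16>, next_avail: Wrapping<u16>) -> u16 {
    (idx - next_avail).0
}

fn main() {
    // Driver published three new descriptor chain heads.
    assert_eq!(pending_chains(Wrapping(5), Wrapping(2)), 3);
    // Still three when the 16-bit indices have wrapped past 65535.
    assert_eq!(pending_chains(Wrapping(1), Wrapping(65534)), 3);
    // A distance beyond the queue size (16 here) must be rejected.
    assert!(pending_chains(Wrapping(400), Wrapping(0)) > 16);
}
```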
  720|       |
  721|       |    /// Goes back one position in the available descriptor chain offered by the driver.
  722|       |    ///
  723|       |    /// Rust does not support bidirectional iterators. This is the only way to revert the effect
  724|       |    /// of an iterator increment on the queue.
  725|       |    ///
  726|       |    /// Note: this method assumes there's only one thread manipulating the queue, so it should only
  727|       |    /// be invoked in single-threaded context.
  728|     0 |    pub fn go_to_previous_position(&mut self) {
  729|     0 |        *self.next_avail -= Wrapping(1);
  730|     0 |    }
  731|       |}
  732|       |
  733|       |impl<'b, M> Iterator for AvailIter<'b, M>
  734|       |where
  735|       |    M: Clone + Deref,
  736|       |    M::Target: GuestMemory,
  737|       |{
  738|       |    type Item = DescriptorChain<M>;
  739|       |
  740|  212k |    fn next(&mut self) -> Option<Self::Item> {
  741|  212k |        if *self.next_avail == self.last_index {
  742| 1.69k |            return None;
  743|  211k |        }
  744|       |
  745|       |        // These two operations cannot overflow a u64 since they're working with relatively small
  746|       |        // numbers compared to u64::MAX.
  747|  211k |        let elem_off =
  748|  211k |            u64::from(self.next_avail.0.checked_rem(self.queue_size)?) * VIRTQ_AVAIL_ELEMENT_SIZE;
  749|  211k |        let offset = VIRTQ_AVAIL_RING_HEADER_SIZE + elem_off;
  750|       |
  751|  211k |        let addr = self.avail_ring.checked_add(offset)?;
  752|  211k |        let head_index: u16 = self
  753|  211k |            .mem
  754|  211k |            .load(addr, Ordering::Acquire)
  755|  211k |            .map(u16::from_le)
  756|  211k |            .map_err(|_| error!("Failed to read from memory {:x}", addr.raw_value()))
  Unexecuted instantiation: <virtio_queue::queue::AvailIter<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>> as core::iter::traits::iterator::Iterator>::next::{closure#0}
  Unexecuted instantiation: <virtio_queue::queue::AvailIter<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>> as core::iter::traits::iterator::Iterator>::next::{closure#0}
  Unexecuted instantiation: <virtio_queue::queue::AvailIter<_> as core::iter::traits::iterator::Iterator>::next::{closure#0}
  757|  211k |            .ok()?;
  758|       |
  759|  211k |        *self.next_avail += Wrapping(1);
  760|  211k |
  761|  211k |        Some(DescriptorChain::new(
  762|  211k |            self.mem.clone(),
  763|  211k |            self.desc_table,
  764|  211k |            self.queue_size,
  765|  211k |            head_index,
  766|  211k |        ))
  767|  212k |    }
  Instantiation: <virtio_queue::queue::AvailIter<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>> as core::iter::traits::iterator::Iterator>::next (188k calls: 1.60k returned None at the end of the ring, 187k yielded a descriptor chain)
  Instantiation: <virtio_queue::queue::AvailIter<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>> as core::iter::traits::iterator::Iterator>::next (24.0k calls: 93 returned None, 23.9k yielded a descriptor chain)
Unexecuted instantiation: <virtio_queue::queue::AvailIter<_> as core::iter::traits::iterator::Iterator>::next
  768|       |}
  769|       |
  770|       |#[cfg(any(test, feature = "test-utils"))]
  771|       |// It is convenient for tests to implement `PartialEq`, but it is not a
  772|       |// proper implementation as `GuestMemory` errors cannot implement `PartialEq`.
  773|       |impl PartialEq for Error {
  774|       |    fn eq(&self, other: &Self) -> bool {
  775|       |        format!("{}", &self) == format!("{}", other)
  776|       |    }
  777|       |}
  778|       |
  779|       |#[cfg(test)]
  780|       |mod tests {
  781|       |    use super::*;
  782|       |    use crate::defs::{DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR};
  783|       |    use crate::mock::MockSplitQueue;
  784|       |    use crate::Descriptor;
  785|       |    use virtio_bindings::bindings::virtio_ring::{
  786|       |        VRING_DESC_F_NEXT, VRING_DESC_F_WRITE, VRING_USED_F_NO_NOTIFY,
  787|       |    };
  788|       |
  789|       |    use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
  790|       |

    #[test]
    fn test_queue_is_valid() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        // q is currently valid
        assert!(q.is_valid(m));

        // shouldn't be valid when not marked as ready
        q.set_ready(false);
        assert!(!q.ready());
        assert!(!q.is_valid(m));
        q.set_ready(true);

        // shouldn't be allowed to set a size > max_size
        q.set_size(q.max_size() << 1);
        assert_eq!(q.size, q.max_size());

        // or set the size to 0
        q.set_size(0);
        assert_eq!(q.size, q.max_size());

        // or set a size which is not a power of 2
        q.set_size(11);
        assert_eq!(q.size, q.max_size());

        // but should be allowed to set a size if 0 < size <= max_size and size is a power of two
        q.set_size(4);
        assert_eq!(q.size, 4);
        q.size = q.max_size();

        // shouldn't be allowed to set an address that breaks the alignment constraint
        q.set_desc_table_address(Some(0xf), None);
        assert_eq!(q.desc_table.0, vq.desc_table_addr().0);
        // should be allowed to set an aligned out of bounds address
        q.set_desc_table_address(Some(0xffff_fff0), None);
        assert_eq!(q.desc_table.0, 0xffff_fff0);
        // but shouldn't be valid
        assert!(!q.is_valid(m));
        // but should be allowed to set a valid descriptor table address
        q.set_desc_table_address(Some(0x10), None);
        assert_eq!(q.desc_table.0, 0x10);
        assert!(q.is_valid(m));
        let addr = vq.desc_table_addr().0;
        q.set_desc_table_address(Some(addr as u32), Some((addr >> 32) as u32));

        // shouldn't be allowed to set an address that breaks the alignment constraint
        q.set_avail_ring_address(Some(0x1), None);
        assert_eq!(q.avail_ring.0, vq.avail_addr().0);
        // should be allowed to set an aligned out of bounds address
        q.set_avail_ring_address(Some(0xffff_fffe), None);
        assert_eq!(q.avail_ring.0, 0xffff_fffe);
        // but shouldn't be valid
        assert!(!q.is_valid(m));
        // but should be allowed to set a valid available ring address
        q.set_avail_ring_address(Some(0x2), None);
        assert_eq!(q.avail_ring.0, 0x2);
        assert!(q.is_valid(m));
        let addr = vq.avail_addr().0;
        q.set_avail_ring_address(Some(addr as u32), Some((addr >> 32) as u32));

        // shouldn't be allowed to set an address that breaks the alignment constraint
        q.set_used_ring_address(Some(0x3), None);
        assert_eq!(q.used_ring.0, vq.used_addr().0);
        // should be allowed to set an aligned out of bounds address
        q.set_used_ring_address(Some(0xffff_fffc), None);
        assert_eq!(q.used_ring.0, 0xffff_fffc);
        // but shouldn't be valid
        assert!(!q.is_valid(m));
        // but should be allowed to set a valid used ring address
        q.set_used_ring_address(Some(0x4), None);
        assert_eq!(q.used_ring.0, 0x4);
        let addr = vq.used_addr().0;
        q.set_used_ring_address(Some(addr as u32), Some((addr >> 32) as u32));
        assert!(q.is_valid(m));
    }
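
    // The alignment rules exercised above mirror the split-ring constraints from
    // the VIRTIO spec: the descriptor table must be 16-byte aligned, the available
    // ring 2-byte aligned, and the used ring 4-byte aligned. A minimal sketch of
    // the same checks, using a hypothetical helper rather than the crate's
    // internals:
    #[test]
    fn test_ring_alignment_sketch() {
        // Power-of-two alignment check: an address is aligned to `align` iff its
        // low `log2(align)` bits are zero.
        fn is_aligned(addr: u64, align: u64) -> bool {
            addr & (align - 1) == 0
        }
        assert!(is_aligned(0x10, 16) && !is_aligned(0xf, 16)); // descriptor table
        assert!(is_aligned(0x2, 2) && !is_aligned(0x1, 2)); // available ring
        assert!(is_aligned(0x4, 4) && !is_aligned(0x3, 4)); // used ring
    }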

    #[test]
    fn test_add_used() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(0));
        assert_eq!(u16::from_le(vq.used().idx().load()), 0);

        // index too large
        assert!(q.add_used(mem, 16, 0x1000).is_err());
        assert_eq!(u16::from_le(vq.used().idx().load()), 0);

        // should be ok
        q.add_used(mem, 1, 0x1000).unwrap();
        assert_eq!(q.next_used, Wrapping(1));
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(1));
        assert_eq!(u16::from_le(vq.used().idx().load()), 1);

        let x = vq.used().ring().ref_at(0).unwrap().load();
        assert_eq!(x.id(), 1);
        assert_eq!(x.len(), 0x1000);
    }
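
    // A sketch of where `add_used` wrote in the scenario above, based on the
    // split-ring layout: the used ring starts with `flags: u16` and `idx: u16`
    // (4 bytes of header), followed by 8-byte elements of `id: u32` + `len: u32`.
    // With `next_used` = 0, the element { id: 1, len: 0x1000 } lands at
    // used_addr + 4 + 0 * 8, and only afterwards is `idx` bumped to 1, so the
    // driver cannot observe the new index before the element it refers to.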

    #[test]
    fn test_reset_queue() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);
        let mut q: Queue = vq.create_queue().unwrap();

        q.set_size(8);
        // The address set by `MockSplitQueue` for the descriptor table is DEFAULT_DESC_TABLE_ADDR,
        // so let's change it for testing the reset.
        q.set_desc_table_address(Some(0x5000), None);
        // Same for `event_idx_enabled`, `next_avail`, `next_used` and `num_added`.
        q.set_event_idx(true);
        q.set_next_avail(2);
        q.set_next_used(4);
        q.num_added = Wrapping(15);
        assert_eq!(q.size, 8);
        // `create_queue` also marks the queue as ready.
        assert!(q.ready);
        assert_ne!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
        assert_ne!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
        assert_ne!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
        assert_ne!(q.next_avail, Wrapping(0));
        assert_ne!(q.next_used, Wrapping(0));
        assert_ne!(q.num_added, Wrapping(0));
        assert!(q.event_idx_enabled);

        q.reset();
        assert_eq!(q.size, 16);
        assert!(!q.ready);
        assert_eq!(q.desc_table, GuestAddress(DEFAULT_DESC_TABLE_ADDR));
        assert_eq!(q.avail_ring, GuestAddress(DEFAULT_AVAIL_RING_ADDR));
        assert_eq!(q.used_ring, GuestAddress(DEFAULT_USED_RING_ADDR));
        assert_eq!(q.next_avail, Wrapping(0));
        assert_eq!(q.next_used, Wrapping(0));
        assert_eq!(q.num_added, Wrapping(0));
        assert!(!q.event_idx_enabled);
    }

    #[test]
    fn test_needs_notification() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let qsize = 16;
        let vq = MockSplitQueue::new(mem, qsize);
        let mut q: Queue = vq.create_queue().unwrap();
        let avail_addr = vq.avail_addr();

        // It should always return true when EVENT_IDX isn't enabled.
        for i in 0..qsize {
            q.next_used = Wrapping(i);
            assert!(q.needs_notification(mem).unwrap());
        }

        mem.write_obj::<u16>(
            u16::to_le(4),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();
        q.set_event_idx(true);

        // Incrementing up to this value causes a `u16` to wrap back to 0.
        let wrap = u32::from(u16::MAX) + 1;

        for i in 0..wrap + 12 {
            q.next_used = Wrapping(i as u16);
            // Let's test wrapping around the maximum index value as well.
            // `num_added` needs to be at least `1` to represent the fact that new descriptor
            // chains have been added to the used ring since the last time `needs_notification`
            // returned.
            q.num_added = Wrapping(1);
            let expected = i == 5 || i == (5 + wrap);
            assert_eq!((q.needs_notification(mem).unwrap(), i), (expected, i));
        }

        mem.write_obj::<u16>(
            u16::to_le(8),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();

        // Returns `false` because the current `used_event` value is behind both `next_used` and
        // the value of `next_used` at the time when `needs_notification` last returned (which is
        // computed based on `num_added`, as described in the comments for `needs_notification`).
        assert!(!q.needs_notification(mem).unwrap());

        mem.write_obj::<u16>(
            u16::to_le(15),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();

        q.num_added = Wrapping(1);
        assert!(!q.needs_notification(mem).unwrap());

        q.next_used = Wrapping(15);
        q.num_added = Wrapping(1);
        assert!(!q.needs_notification(mem).unwrap());

        q.next_used = Wrapping(16);
        q.num_added = Wrapping(1);
        assert!(q.needs_notification(mem).unwrap());

        // Calling `needs_notification` again immediately returns `false`.
        assert!(!q.needs_notification(mem).unwrap());

        mem.write_obj::<u16>(
            u16::to_le(u16::MAX - 3),
            avail_addr.unchecked_add(4 + qsize as u64 * 2),
        )
        .unwrap();
        q.next_used = Wrapping(u16::MAX - 2);
        q.num_added = Wrapping(1);
        // Returns `true` because, when looking at the circular sequence of indices of the used
        // ring, the value we wrote in `used_event` appears between the "old" value of `next_used`
        // (i.e. `next_used` - `num_added`) and the current `next_used`, thus suggesting that we
        // need to notify the driver.
        assert!(q.needs_notification(mem).unwrap());
    }
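
    // The EVENT_IDX decisions exercised above reduce to the spec's `vring_need_event`
    // predicate: notify iff `used_event` falls in the wrapped window (old, new],
    // where old = new - num_added. A minimal sketch of that predicate (hypothetical
    // helper mirroring the spec formula, not the crate's internal implementation):
    #[test]
    fn test_vring_need_event_sketch() {
        fn need_event(event_idx: u16, new_idx: u16, old_idx: u16) -> bool {
            // All arithmetic is wrapping, exactly like the u16 ring indices.
            new_idx.wrapping_sub(event_idx).wrapping_sub(1) < new_idx.wrapping_sub(old_idx)
        }
        // used_event = 15: moving 15 -> 16 crosses it, 14 -> 15 does not.
        assert!(need_event(15, 16, 15));
        assert!(!need_event(15, 15, 14));
        // The wrapping case from the end of the test above.
        assert!(need_event(u16::MAX - 3, u16::MAX - 2, u16::MAX - 3));
    }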

    #[test]
    fn test_enable_disable_notification() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();
        let used_addr = vq.used_addr();

        assert!(!q.event_idx_enabled);

        q.enable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, 0);

        q.disable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, VRING_USED_F_NO_NOTIFY as u16);

        q.enable_notification(mem).unwrap();
        let v = mem.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, 0);

        q.set_event_idx(true);
        let avail_addr = vq.avail_addr();
        mem.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2))
            .unwrap();

        assert!(q.enable_notification(mem).unwrap());
        q.next_avail = Wrapping(2);
        assert!(!q.enable_notification(mem).unwrap());

        mem.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2))
            .unwrap();

        assert!(q.enable_notification(mem).unwrap());
        q.next_avail = Wrapping(8);
        assert!(!q.enable_notification(mem).unwrap());
    }
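
    // Two suppression mechanisms are exercised above. Without EVENT_IDX, the device
    // toggles VRING_USED_F_NO_NOTIFY in the used ring's `flags` field to tell the
    // driver not to send notifications. With EVENT_IDX negotiated, the flag is left
    // alone and the device instead publishes the `avail_event` index; in that mode
    // `enable_notification` returns whether the driver advanced the available
    // ring's `idx` in the meantime, which is why the test expects `true` right
    // after each write to `idx` and `false` once `next_avail` catches up.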

    #[test]
    fn test_consume_chains_with_notif() {
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        // q is currently valid.
        assert!(q.is_valid(mem));

        // The chains are (0, 1), (2, 3, 4), (5, 6), (7, 8), (9, 10, 11, 12).
        let mut descs = Vec::new();
        for i in 0..13 {
            let flags = match i {
                1 | 4 | 6 | 8 | 12 => 0,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(Descriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                flags as u16,
                i + 1,
            ));
        }

        vq.add_desc_chains(&descs, 0).unwrap();
        // Update the index of the chain that can be consumed to not be the last one.
        // This enables us to consume chains in multiple iterations as opposed to consuming
        // all the driver-written chains at once.
        vq.avail().idx().store(u16::to_le(2));
        // No descriptor chains are consumed at this point.
        assert_eq!(q.next_avail(), 0);

        let mut i = 0;

        loop {
            i += 1;
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                // Process the descriptor chain, and then add entries to the
                // used ring.
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        // The chains should be consumed in a single loop iteration because there's nothing
        // updating the `idx` field of the available ring in the meantime.
        assert_eq!(i, 1);
        // The next chain that can be consumed should have index 2.
        assert_eq!(q.next_avail(), 2);
        assert_eq!(q.next_used(), 2);
        // Let the device know it can consume one more chain.
        vq.avail().idx().store(u16::to_le(3));
        i = 0;

        loop {
            i += 1;
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                // Process the descriptor chain, and then add entries to the
                // used ring.
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }

            // For the simplicity of the test, we update the `idx` value of the available ring
            // here. Ideally this should be done on a separate thread.
            // Because of this update, the loop should be iterated again to consume the newly
            // available descriptor chains.
            vq.avail().idx().store(u16::to_le(4));
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(i, 2);
        // The next chain that can be consumed should have index 4.
        assert_eq!(q.next_avail(), 4);
        assert_eq!(q.next_used(), 4);

        // Set an `idx` that is bigger than the number of entries added in the ring.
        // This is an allowed scenario, but the indexes of the chains will have unexpected values.
        vq.avail().idx().store(u16::to_le(7));
        loop {
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                // Process the descriptor chain, and then add entries to the
                // used ring.
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        assert_eq!(q.next_avail(), 7);
        assert_eq!(q.next_used(), 7);
    }

    #[test]
    fn test_invalid_avail_idx() {
        // This is a negative test for the following MUST from the spec: `A driver MUST NOT
        // decrement the available idx on a virtqueue (ie. there is no way to “unexpose” buffers).`.
        // We validate that for this misconfiguration, the device does not panic.
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        // q is currently valid.
        assert!(q.is_valid(mem));

        // The chains are (0, 1), (2, 3, 4), (5, 6).
        let mut descs = Vec::new();
        for i in 0..7 {
            let flags = match i {
                1 | 4 | 6 => 0,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(Descriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                flags as u16,
                i + 1,
            ));
        }

        vq.add_desc_chains(&descs, 0).unwrap();
        // Let the device know it can consume chains with the index < 3.
        vq.avail().idx().store(u16::to_le(3));
        // No descriptor chains are consumed at this point.
        assert_eq!(q.next_avail(), 0);
        assert_eq!(q.next_used(), 0);

        loop {
            q.disable_notification(mem).unwrap();

            while let Some(chain) = q.iter(mem).unwrap().next() {
                // Process the descriptor chain, and then add entries to the
                // used ring.
                let head_index = chain.head_index();
                let mut desc_len = 0;
                chain.for_each(|d| {
                    if d.flags() as u32 & VRING_DESC_F_WRITE == VRING_DESC_F_WRITE {
                        desc_len += d.len();
                    }
                });
                q.add_used(mem, head_index, desc_len).unwrap();
            }
            if !q.enable_notification(mem).unwrap() {
                break;
            }
        }
        // The next chain that can be consumed should have index 3.
        assert_eq!(q.next_avail(), 3);
        assert_eq!(q.avail_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
        assert_eq!(q.next_used(), 3);
        assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(3));
        assert!(q.lock().ready());

        // Decrement `idx`, which should be forbidden. We don't enforce this, but we should
        // test that we don't panic in case the driver decrements it.
        vq.avail().idx().store(u16::to_le(1));
        // Invalid available ring index.
        assert!(q.iter(mem).is_err());
    }
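
    // Why the decrement is caught: the device computes the number of pending chains
    // as `avail_idx - next_avail` in wrapping u16 arithmetic. After the decrement,
    // that is 1 - 3 = 65534 (mod 2^16), which is far larger than the queue size of
    // 16, so constructing the iterator fails instead of the device consuming stale
    // (or unbounded) entries.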

    #[test]
    fn test_iterator_and_avail_idx() {
        // This test ensures that constructing a descriptor chain iterator succeeds
        // with valid available ring indexes, while producing an error with invalid
        // indexes.
        let queue_size = 2;
        let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(mem, queue_size);

        let mut q: Queue = vq.create_queue().unwrap();

        // q is currently valid.
        assert!(q.is_valid(mem));

        // Create descriptors to fill up the queue.
        let mut descs = Vec::new();
        for i in 0..queue_size {
            descs.push(Descriptor::new(
                (0x1000 * (i + 1)) as u64,
                0x1000,
                0_u16,
                i + 1,
            ));
        }
        vq.add_desc_chains(&descs, 0).unwrap();

        // Set the `next_avail` index to `u16::MAX` to test the wrapping scenarios.
        q.set_next_avail(u16::MAX);

        // When the number of chains exposed by the driver is equal to or less than the queue
        // size, the available ring index is valid and constructing an iterator succeeds.
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_ok());
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size - 1);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_ok());

        // When the number of chains exposed by the driver is larger than the queue size, the
        // available ring index is invalid and constructing an iterator produces an error.
        let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size + 1);
        vq.avail().idx().store(u16::to_le(avail_idx.0));
        assert!(q.iter(mem).is_err());
    }
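
    // The validity check above boils down to wrapping u16 arithmetic: the number
    // of pending chains is `avail_idx - next_avail` (mod 2^16), and anything
    // larger than the queue size is rejected. A small sketch of the same
    // arithmetic (hypothetical helper, not the crate's API):
    #[test]
    fn test_pending_chain_count_sketch() {
        fn pending(avail_idx: u16, next_avail: u16) -> u16 {
            avail_idx.wrapping_sub(next_avail)
        }
        // next_avail = u16::MAX, the driver exposes 2 chains: `idx` wraps to 1.
        assert_eq!(pending(1, u16::MAX), 2);
        // 3 pending chains on a 2-entry queue is invalid.
        assert!(pending(2, u16::MAX) > 2);
    }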

    #[test]
    fn test_descriptor_and_iterator() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        // q is currently valid
        assert!(q.is_valid(m));

        // the chains are (0, 1), (2, 3, 4) and (5, 6)
        let mut descs = Vec::new();
        for j in 0..7 {
            let flags = match j {
                1 | 6 => 0,
                2 | 5 => VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
                4 => VRING_DESC_F_WRITE,
                _ => VRING_DESC_F_NEXT,
            };

            descs.push(Descriptor::new(
                (0x1000 * (j + 1)) as u64,
                0x1000,
                flags as u16,
                j + 1,
            ));
        }

        vq.add_desc_chains(&descs, 0).unwrap();

        let mut i = q.iter(m).unwrap();

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 0);

            let mut iter = c;
            assert!(iter.next().is_some());
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 2);

            let mut iter = c.writable();
            assert!(iter.next().is_some());
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }

        {
            let c = i.next().unwrap();
            assert_eq!(c.head_index(), 5);

            let mut iter = c.readable();
            assert!(iter.next().is_some());
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        }
    }

    #[test]
    fn test_iterator() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 16);

        let mut q: Queue = vq.create_queue().unwrap();

        q.size = q.max_size;
        q.desc_table = vq.desc_table_addr();
        q.avail_ring = vq.avail_addr();
        q.used_ring = vq.used_addr();
        assert!(q.is_valid(m));

        {
            // a queue that is not ready should return an error instead of an iterator
            q.ready = false;
            assert!(q.iter(m).is_err());
        }

        q.ready = true;

        // now let's create two simple descriptor chains
        // the chains are (0, 1) and (2, 3, 4)
        {
            let mut descs = Vec::new();
            for j in 0..5u16 {
                let flags = match j {
                    1 | 4 => 0,
                    _ => VRING_DESC_F_NEXT,
                };

                descs.push(Descriptor::new(
                    (0x1000 * (j + 1)) as u64,
                    0x1000,
                    flags as u16,
                    j + 1,
                ));
            }
            vq.add_desc_chains(&descs, 0).unwrap();

            let mut i = q.iter(m).unwrap();

            {
                let mut c = i.next().unwrap();
                assert_eq!(c.head_index(), 0);

                c.next().unwrap();
                assert!(c.next().is_some());
                assert!(c.next().is_none());
                assert_eq!(c.head_index(), 0);
            }

            {
                let mut c = i.next().unwrap();
                assert_eq!(c.head_index(), 2);

                c.next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                assert!(c.next().is_none());
                assert_eq!(c.head_index(), 2);
            }

            // also test that go_to_previous_position() works as expected
            {
                assert!(i.next().is_none());
                i.go_to_previous_position();
                let mut c = q.iter(m).unwrap().next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                c.next().unwrap();
                assert!(c.next().is_none());
            }
        }

        // Test that iterating some broken descriptor chain does not exceed
        // 2^32 bytes in total (VIRTIO spec version 1.2, 2.7.5.2:
        // Drivers MUST NOT add a descriptor chain longer than 2^32 bytes in
        // total)
        {
            let descs = vec![
                Descriptor::new(0x1000, 0xffff_ffff, VRING_DESC_F_NEXT as u16, 1),
                Descriptor::new(0x1000, 0x1234_5678, 0, 2),
            ];
            vq.add_desc_chains(&descs, 0).unwrap();
            let mut yielded_bytes_by_iteration = 0_u32;
            for d in q.iter(m).unwrap().next().unwrap() {
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
                    .checked_add(d.len())
                    .expect("iterator should not yield more than 2^32 bytes");
            }
        }

        // Same as above, but test with a descriptor which is self-referential
        {
            let descs = vec![Descriptor::new(
                0x1000,
                0xffff_ffff,
                VRING_DESC_F_NEXT as u16,
                0,
            )];
            vq.add_desc_chains(&descs, 0).unwrap();
            let mut yielded_bytes_by_iteration = 0_u32;
            for d in q.iter(m).unwrap().next().unwrap() {
                yielded_bytes_by_iteration = yielded_bytes_by_iteration
                    .checked_add(d.len())
                    .expect("iterator should not yield more than 2^32 bytes");
            }
        }
    }
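
    // Worked numbers for the length-cap blocks above: 0xffff_ffff + 0x1234_5678
    // exceeds u32::MAX, so an iterator honoring the spec's 2^32 total-length limit
    // must stop yielding descriptors before the sum overflows; otherwise the
    // `checked_add` in the loop would return `None` and the `expect` would panic.
    // The self-referential descriptor is the degenerate case of the same rule.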

    #[test]
    fn test_regression_iterator_division() {
        // This is a regression test checking that the iterator does not try to divide
        // by 0 when the queue size is 0.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 1);
        // This input was generated by the fuzzer, both for the Queue and the Descriptor.
        let descriptors: Vec<Descriptor> = vec![Descriptor::new(
            14178673876262995140,
            3301229764,
            50372,
            50372,
        )];
        vq.build_desc_chain(&descriptors).unwrap();

        let mut q = Queue {
            max_size: 38,
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            event_idx_enabled: false,
            num_added: Wrapping(0),
            size: 0,
            ready: false,
            desc_table: GuestAddress(12837708984796196),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(9943947977301164032),
        };

        assert!(q.pop_descriptor_chain(m).is_none());
    }
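
    // The guard being regression-tested above is the `checked_rem` used when
    // computing the available-ring slot: with `size: 0`, a plain `%` would panic
    // with a division-by-zero error, while `checked_rem` returns `None`, which
    // `pop_descriptor_chain` surfaces as "no chain available". For illustration:
    //
    //     assert_eq!(5u16.checked_rem(0), None); // `5 % 0` would panic instead
    //     assert_eq!(5u16.checked_rem(8), Some(5));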

    #[test]
    fn test_setters_error_cases() {
        assert_eq!(Queue::new(15).unwrap_err(), Error::InvalidMaxSize);
        let mut q = Queue::new(16).unwrap();

        let expected_val = q.desc_table.0;
        assert_eq!(
            q.try_set_desc_table_address(GuestAddress(0xf)).unwrap_err(),
            Error::InvalidDescTableAlign
        );
        assert_eq!(q.desc_table(), expected_val);

        let expected_val = q.avail_ring.0;
        assert_eq!(
            q.try_set_avail_ring_address(GuestAddress(0x1)).unwrap_err(),
            Error::InvalidAvailRingAlign
        );
        assert_eq!(q.avail_ring(), expected_val);

        let expected_val = q.used_ring.0;
        assert_eq!(
            q.try_set_used_ring_address(GuestAddress(0x3)).unwrap_err(),
            Error::InvalidUsedRingAlign
        );
        assert_eq!(q.used_ring(), expected_val);

        let expected_val = q.size;
        assert_eq!(q.try_set_size(15).unwrap_err(), Error::InvalidSize);
        assert_eq!(q.size(), expected_val)
    }
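
    // Note the two setter families: the `set_*` methods used on the bus write path
    // (exercised in `test_queue_is_valid`) silently keep the previous value on an
    // invalid write, while the `try_set_*` variants return the specific error and
    // likewise leave the field untouched, as asserted above.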

    #[test]
    // This is a regression test for a fuzzing finding. If the driver requests a reset of the
    // device, but then does not re-initialize the queue, a subsequent call to process a
    // request should yield no descriptors. Before this fix, we were processing descriptors
    // that had been added to the queue earlier, ending up processing 255 descriptors
    // per chain.
    fn test_regression_timeout_after_reset() {
        // The input below was generated by libfuzzer and adapted for this test.
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x10000)]).unwrap();
        let vq = MockSplitQueue::new(m, 1024);

        // The input below was generated by the fuzzer.
        let descriptors: Vec<Descriptor> = vec![
            Descriptor::new(21508325467, 0, 1, 4),
            Descriptor::new(2097152, 4096, 3, 0),
            Descriptor::new(18374686479672737792, 4294967295, 65535, 29),
            Descriptor::new(76842670169653248, 1114115, 0, 0),
            Descriptor::new(16, 983040, 126, 3),
            Descriptor::new(897648164864, 0, 0, 0),
            Descriptor::new(111669149722, 0, 0, 0),
        ];
        vq.build_multiple_desc_chains(&descriptors).unwrap();

        let mut q: Queue = vq.create_queue().unwrap();

        // Setting the queue to ready should not allow consuming descriptors after reset.
        q.reset();
        q.set_ready(true);
        let mut counter = 0;
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
            // This inner loop is here to check that there are no side effects
            // in terms of memory & execution time.
            while desc_chain.next().is_some() {
                counter += 1;
            }
        }
        assert_eq!(counter, 0);

        // Setting the avail_addr to a valid value should not allow consuming descriptors
        // after reset either.
        q.reset();
        q.set_avail_ring_address(Some(0x1000), None);
        assert_eq!(q.avail_ring, GuestAddress(0x1000));
        counter = 0;
        while let Some(mut desc_chain) = q.pop_descriptor_chain(m) {
            // This inner loop is here to check that there are no side effects
            // in terms of memory & execution time.
            while desc_chain.next().is_some() {
                counter += 1;
            }
        }
        assert_eq!(counter, 0);
    }
}