Coverage Report

Created: 2026-01-22 07:12

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/rust/registry/src/index.crates.io-1949cf8c6b5b557f/virtio-queue-0.16.0/src/chain.rs
Line
Count
Source
1
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE-BSD-3-Clause file.
4
//
5
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
6
//
7
// Copyright © 2019 Intel Corporation
8
//
9
// Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
10
//
11
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
12
13
use std::fmt::{self, Debug};
14
use std::mem::size_of;
15
use std::ops::Deref;
16
17
use vm_memory::bitmap::{BitmapSlice, WithBitmapSlice};
18
use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryRegion};
19
20
use crate::{desc::split::Descriptor, Error, Reader, Writer};
21
use virtio_bindings::bindings::virtio_ring::VRING_DESC_ALIGN_SIZE;
22
23
/// A virtio descriptor chain.
24
#[derive(Clone, Debug)]
25
pub struct DescriptorChain<M> {
26
    mem: M,
27
    desc_table: GuestAddress,
28
    queue_size: u16,
29
    head_index: u16,
30
    next_index: u16,
31
    ttl: u16,
32
    yielded_bytes: u32,
33
    is_indirect: bool,
34
}
35
36
impl<M> DescriptorChain<M>
37
where
38
    M: Deref,
39
    M::Target: GuestMemory,
40
{
41
170k
    fn with_ttl(
42
170k
        mem: M,
43
170k
        desc_table: GuestAddress,
44
170k
        queue_size: u16,
45
170k
        ttl: u16,
46
170k
        head_index: u16,
47
170k
    ) -> Self {
48
170k
        DescriptorChain {
49
170k
            mem,
50
170k
            desc_table,
51
170k
            queue_size,
52
170k
            head_index,
53
170k
            next_index: head_index,
54
170k
            ttl,
55
170k
            is_indirect: false,
56
170k
            yielded_bytes: 0,
57
170k
        }
58
170k
    }
<virtio_queue::chain::DescriptorChain<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>>::with_ttl
Line
Count
Source
41
155k
    fn with_ttl(
42
155k
        mem: M,
43
155k
        desc_table: GuestAddress,
44
155k
        queue_size: u16,
45
155k
        ttl: u16,
46
155k
        head_index: u16,
47
155k
    ) -> Self {
48
155k
        DescriptorChain {
49
155k
            mem,
50
155k
            desc_table,
51
155k
            queue_size,
52
155k
            head_index,
53
155k
            next_index: head_index,
54
155k
            ttl,
55
155k
            is_indirect: false,
56
155k
            yielded_bytes: 0,
57
155k
        }
58
155k
    }
<virtio_queue::chain::DescriptorChain<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>::with_ttl
Line
Count
Source
41
15.7k
    fn with_ttl(
42
15.7k
        mem: M,
43
15.7k
        desc_table: GuestAddress,
44
15.7k
        queue_size: u16,
45
15.7k
        ttl: u16,
46
15.7k
        head_index: u16,
47
15.7k
    ) -> Self {
48
15.7k
        DescriptorChain {
49
15.7k
            mem,
50
15.7k
            desc_table,
51
15.7k
            queue_size,
52
15.7k
            head_index,
53
15.7k
            next_index: head_index,
54
15.7k
            ttl,
55
15.7k
            is_indirect: false,
56
15.7k
            yielded_bytes: 0,
57
15.7k
        }
58
15.7k
    }
Unexecuted instantiation: <virtio_queue::chain::DescriptorChain<_>>::with_ttl
59
60
    /// Create a new `DescriptorChain` instance.
61
    ///
62
    /// # Arguments
63
    /// * `mem` - the `GuestMemory` object that can be used to access the buffers pointed to by the
64
    ///           descriptor chain.
65
    /// * `desc_table` - the address of the descriptor table.
66
    /// * `queue_size` - the size of the queue, which is also the maximum size of a descriptor
67
    ///                  chain.
68
    /// * `head_index` - the descriptor index of the chain head.
69
170k
    pub(crate) fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
70
170k
        Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index)
71
170k
    }
<virtio_queue::chain::DescriptorChain<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>>::new
Line
Count
Source
69
155k
    pub(crate) fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
70
155k
        Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index)
71
155k
    }
<virtio_queue::chain::DescriptorChain<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>::new
Line
Count
Source
69
15.7k
    pub(crate) fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
70
15.7k
        Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index)
71
15.7k
    }
Unexecuted instantiation: <virtio_queue::chain::DescriptorChain<_>>::new
72
73
    /// Get the descriptor index of the chain head.
74
209k
    pub fn head_index(&self) -> u16 {
75
209k
        self.head_index
76
209k
    }
<virtio_queue::chain::DescriptorChain<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>>::head_index
Line
Count
Source
74
194k
    pub fn head_index(&self) -> u16 {
75
194k
        self.head_index
76
194k
    }
<virtio_queue::chain::DescriptorChain<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>::head_index
Line
Count
Source
74
15.2k
    pub fn head_index(&self) -> u16 {
75
15.2k
        self.head_index
76
15.2k
    }
Unexecuted instantiation: <virtio_queue::chain::DescriptorChain<_>>::head_index
77
78
    /// Return a `GuestMemory` object that can be used to access the buffers pointed to by the
79
    /// descriptor chain.
80
6.35M
    pub fn memory(&self) -> &M::Target {
81
6.35M
        self.mem.deref()
82
6.35M
    }
<virtio_queue::chain::DescriptorChain<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>>::memory
Line
Count
Source
80
4.75M
    pub fn memory(&self) -> &M::Target {
81
4.75M
        self.mem.deref()
82
4.75M
    }
<virtio_queue::chain::DescriptorChain<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>::memory
Line
Count
Source
80
1.60M
    pub fn memory(&self) -> &M::Target {
81
1.60M
        self.mem.deref()
82
1.60M
    }
Unexecuted instantiation: <virtio_queue::chain::DescriptorChain<_>>::memory
83
84
    /// Return an iterator that only yields the readable descriptors in the chain.
85
0
    pub fn readable(self) -> DescriptorChainRwIter<M> {
86
0
        DescriptorChainRwIter {
87
0
            chain: self,
88
0
            writable: false,
89
0
        }
90
0
    }
91
92
    /// Return a new instance of Writer
93
0
    pub fn writer<'a, B: BitmapSlice>(self, mem: &'a M::Target) -> Result<Writer<'a, B>, Error>
94
0
    where
95
0
        M::Target: Sized,
96
0
        <<M::Target as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
97
    {
98
0
        Writer::new(mem, self).map_err(|_| Error::InvalidChain)
99
0
    }
100
101
    /// Return a new instance of Reader
102
0
    pub fn reader<'a, B: BitmapSlice>(self, mem: &'a M::Target) -> Result<Reader<'a, B>, Error>
103
0
    where
104
0
        M::Target: Sized,
105
0
        <<M::Target as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
106
    {
107
0
        Reader::new(mem, self).map_err(|_| Error::InvalidChain)
108
0
    }
109
110
    /// Return an iterator that only yields the writable descriptors in the chain.
111
0
    pub fn writable(self) -> DescriptorChainRwIter<M> {
112
0
        DescriptorChainRwIter {
113
0
            chain: self,
114
0
            writable: true,
115
0
        }
116
0
    }
117
118
    // Alters the internal state of the `DescriptorChain` to switch iterating over an
119
    // indirect descriptor table defined by `desc`.
120
35.1k
    fn switch_to_indirect_table(&mut self, desc: Descriptor) -> Result<(), Error> {
121
        // Check the VIRTQ_DESC_F_INDIRECT flag (i.e., is_indirect) is not set inside
122
        // an indirect descriptor.
123
        // (see VIRTIO Spec, Section 2.6.5.3.1 Driver Requirements: Indirect Descriptors)
124
35.1k
        if self.is_indirect {
125
861
            return Err(Error::InvalidIndirectDescriptor);
126
34.2k
        }
127
128
        // Alignment requirements for vring elements start from virtio 1.0,
129
        // but this is not necessary for address of indirect descriptor.
130
34.2k
        if desc.len() & (VRING_DESC_ALIGN_SIZE - 1) != 0 {
131
2.16k
            return Err(Error::InvalidIndirectDescriptorTable);
132
32.1k
        }
133
134
        // It is safe to do a plain division since we checked above that desc.len() is a multiple of
135
        // VRING_DESC_ALIGN_SIZE, and VRING_DESC_ALIGN_SIZE is != 0.
136
32.1k
        let table_len = desc.len() / VRING_DESC_ALIGN_SIZE;
137
32.1k
        if table_len > u32::from(u16::MAX) {
138
1.53k
            return Err(Error::InvalidIndirectDescriptorTable);
139
30.5k
        }
140
141
30.5k
        self.desc_table = desc.addr();
142
        // try_from cannot fail as we've checked table_len above
143
30.5k
        self.queue_size = u16::try_from(table_len).expect("invalid table_len");
144
30.5k
        self.next_index = 0;
145
30.5k
        self.ttl = self.queue_size;
146
30.5k
        self.is_indirect = true;
147
148
30.5k
        Ok(())
149
35.1k
    }
<virtio_queue::chain::DescriptorChain<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>>::switch_to_indirect_table
Line
Count
Source
120
26.4k
    fn switch_to_indirect_table(&mut self, desc: Descriptor) -> Result<(), Error> {
121
        // Check the VIRTQ_DESC_F_INDIRECT flag (i.e., is_indirect) is not set inside
122
        // an indirect descriptor.
123
        // (see VIRTIO Spec, Section 2.6.5.3.1 Driver Requirements: Indirect Descriptors)
124
26.4k
        if self.is_indirect {
125
601
            return Err(Error::InvalidIndirectDescriptor);
126
25.8k
        }
127
128
        // Alignment requirements for vring elements start from virtio 1.0,
129
        // but this is not necessary for address of indirect descriptor.
130
25.8k
        if desc.len() & (VRING_DESC_ALIGN_SIZE - 1) != 0 {
131
540
            return Err(Error::InvalidIndirectDescriptorTable);
132
25.2k
        }
133
134
        // It is safe to do a plain division since we checked above that desc.len() is a multiple of
135
        // VRING_DESC_ALIGN_SIZE, and VRING_DESC_ALIGN_SIZE is != 0.
136
25.2k
        let table_len = desc.len() / VRING_DESC_ALIGN_SIZE;
137
25.2k
        if table_len > u32::from(u16::MAX) {
138
512
            return Err(Error::InvalidIndirectDescriptorTable);
139
24.7k
        }
140
141
24.7k
        self.desc_table = desc.addr();
142
        // try_from cannot fail as we've checked table_len above
143
24.7k
        self.queue_size = u16::try_from(table_len).expect("invalid table_len");
144
24.7k
        self.next_index = 0;
145
24.7k
        self.ttl = self.queue_size;
146
24.7k
        self.is_indirect = true;
147
148
24.7k
        Ok(())
149
26.4k
    }
<virtio_queue::chain::DescriptorChain<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>>::switch_to_indirect_table
Line
Count
Source
120
8.70k
    fn switch_to_indirect_table(&mut self, desc: Descriptor) -> Result<(), Error> {
121
        // Check the VIRTQ_DESC_F_INDIRECT flag (i.e., is_indirect) is not set inside
122
        // an indirect descriptor.
123
        // (see VIRTIO Spec, Section 2.6.5.3.1 Driver Requirements: Indirect Descriptors)
124
8.70k
        if self.is_indirect {
125
260
            return Err(Error::InvalidIndirectDescriptor);
126
8.44k
        }
127
128
        // Alignment requirements for vring elements start from virtio 1.0,
129
        // but this is not necessary for address of indirect descriptor.
130
8.44k
        if desc.len() & (VRING_DESC_ALIGN_SIZE - 1) != 0 {
131
1.62k
            return Err(Error::InvalidIndirectDescriptorTable);
132
6.82k
        }
133
134
        // It is safe to do a plain division since we checked above that desc.len() is a multiple of
135
        // VRING_DESC_ALIGN_SIZE, and VRING_DESC_ALIGN_SIZE is != 0.
136
6.82k
        let table_len = desc.len() / VRING_DESC_ALIGN_SIZE;
137
6.82k
        if table_len > u32::from(u16::MAX) {
138
1.01k
            return Err(Error::InvalidIndirectDescriptorTable);
139
5.80k
        }
140
141
5.80k
        self.desc_table = desc.addr();
142
        // try_from cannot fail as we've checked table_len above
143
5.80k
        self.queue_size = u16::try_from(table_len).expect("invalid table_len");
144
5.80k
        self.next_index = 0;
145
5.80k
        self.ttl = self.queue_size;
146
5.80k
        self.is_indirect = true;
147
148
5.80k
        Ok(())
149
8.70k
    }
Unexecuted instantiation: <virtio_queue::chain::DescriptorChain<_>>::switch_to_indirect_table
150
}
151
152
impl<M> Iterator for DescriptorChain<M>
153
where
154
    M: Deref,
155
    M::Target: GuestMemory,
156
{
157
    type Item = Descriptor;
158
159
    /// Return the next descriptor in this descriptor chain, if there is one.
160
    ///
161
    /// Note that this is distinct from the next descriptor chain returned by
162
    /// [`AvailIter`](struct.AvailIter.html), which is the head of the next
163
    /// _available_ descriptor chain.
164
2.13M
    fn next(&mut self) -> Option<Self::Item> {
165
2.13M
        if self.ttl == 0 || self.next_index >= self.queue_size {
166
11.9k
            return None;
167
2.12M
        }
168
169
2.12M
        let desc_addr = self
170
2.12M
            .desc_table
171
            // The multiplication can not overflow an u64 since we are multiplying an u16 with a
172
            // small number.
173
2.12M
            .checked_add(self.next_index as u64 * size_of::<Descriptor>() as u64)?;
174
175
        // The guest device driver should not touch the descriptor once submitted, so it's safe
176
        // to use read_obj() here.
177
2.12M
        let desc = self.mem.read_obj::<Descriptor>(desc_addr).ok()?;
178
179
2.11M
        if desc.refers_to_indirect_table() {
180
35.1k
            self.switch_to_indirect_table(desc).ok()?;
181
30.5k
            return self.next();
182
2.07M
        }
183
184
        // constructing a chain that is longer than 2^32 bytes is illegal,
185
        // let's terminate the iteration if something violated this.
186
        // (VIRTIO v1.2, 2.7.5.2: "Drivers MUST NOT add a descriptor chain
187
        // longer than 2^32 bytes in total;")
188
2.07M
        match self.yielded_bytes.checked_add(desc.len()) {
189
2.07M
            Some(yielded_bytes) => self.yielded_bytes = yielded_bytes,
190
1.46k
            None => return None,
191
        };
192
193
2.07M
        if desc.has_next() {
194
1.97M
            self.next_index = desc.next();
195
1.97M
            // It's ok to decrement `self.ttl` here because we check at the start of the method
196
1.97M
            // that it's greater than 0.
197
1.97M
            self.ttl -= 1;
198
1.97M
        } else {
199
105k
            self.ttl = 0;
200
105k
        }
201
202
2.07M
        Some(desc)
203
2.13M
    }
<virtio_queue::chain::DescriptorChain<vm_memory::atomic::GuestMemoryLoadGuard<vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>>> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
164
531k
    fn next(&mut self) -> Option<Self::Item> {
165
531k
        if self.ttl == 0 || self.next_index >= self.queue_size {
166
5.17k
            return None;
167
526k
        }
168
169
526k
        let desc_addr = self
170
526k
            .desc_table
171
            // The multiplication can not overflow an u64 since we are multiplying an u16 with a
172
            // small number.
173
526k
            .checked_add(self.next_index as u64 * size_of::<Descriptor>() as u64)?;
174
175
        // The guest device driver should not touch the descriptor once submitted, so it's safe
176
        // to use read_obj() here.
177
526k
        let desc = self.mem.read_obj::<Descriptor>(desc_addr).ok()?;
178
179
520k
        if desc.refers_to_indirect_table() {
180
26.4k
            self.switch_to_indirect_table(desc).ok()?;
181
24.7k
            return self.next();
182
493k
        }
183
184
        // constructing a chain that is longer than 2^32 bytes is illegal,
185
        // let's terminate the iteration if something violated this.
186
        // (VIRTIO v1.2, 2.7.5.2: "Drivers MUST NOT add a descriptor chain
187
        // longer than 2^32 bytes in total;")
188
493k
        match self.yielded_bytes.checked_add(desc.len()) {
189
493k
            Some(yielded_bytes) => self.yielded_bytes = yielded_bytes,
190
314
            None => return None,
191
        };
192
193
493k
        if desc.has_next() {
194
390k
            self.next_index = desc.next();
195
390k
            // It's ok to decrement `self.ttl` here because we check at the start of the method
196
390k
            // that it's greater than 0.
197
390k
            self.ttl -= 1;
198
390k
        } else {
199
102k
            self.ttl = 0;
200
102k
        }
201
202
493k
        Some(desc)
203
531k
    }
<virtio_queue::chain::DescriptorChain<&vm_memory::mmap::GuestMemoryMmap<vm_memory::bitmap::backend::atomic_bitmap::AtomicBitmap>> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
164
1.60M
    fn next(&mut self) -> Option<Self::Item> {
165
1.60M
        if self.ttl == 0 || self.next_index >= self.queue_size {
166
6.82k
            return None;
167
1.59M
        }
168
169
1.59M
        let desc_addr = self
170
1.59M
            .desc_table
171
            // The multiplication can not overflow an u64 since we are multiplying an u16 with a
172
            // small number.
173
1.59M
            .checked_add(self.next_index as u64 * size_of::<Descriptor>() as u64)?;
174
175
        // The guest device driver should not touch the descriptor once submitted, so it's safe
176
        // to use read_obj() here.
177
1.59M
        let desc = self.mem.read_obj::<Descriptor>(desc_addr).ok()?;
178
179
1.59M
        if desc.refers_to_indirect_table() {
180
8.70k
            self.switch_to_indirect_table(desc).ok()?;
181
5.80k
            return self.next();
182
1.58M
        }
183
184
        // constructing a chain that is longer than 2^32 bytes is illegal,
185
        // let's terminate the iteration if something violated this.
186
        // (VIRTIO v1.2, 2.7.5.2: "Drivers MUST NOT add a descriptor chain
187
        // longer than 2^32 bytes in total;")
188
1.58M
        match self.yielded_bytes.checked_add(desc.len()) {
189
1.58M
            Some(yielded_bytes) => self.yielded_bytes = yielded_bytes,
190
1.14k
            None => return None,
191
        };
192
193
1.58M
        if desc.has_next() {
194
1.58M
            self.next_index = desc.next();
195
1.58M
            // It's ok to decrement `self.ttl` here because we check at the start of the method
196
1.58M
            // that it's greater than 0.
197
1.58M
            self.ttl -= 1;
198
1.58M
        } else {
199
2.34k
            self.ttl = 0;
200
2.34k
        }
201
202
1.58M
        Some(desc)
203
1.60M
    }
Unexecuted instantiation: <virtio_queue::chain::DescriptorChain<_> as core::iter::traits::iterator::Iterator>::next
204
}
205
206
/// An iterator for readable or writable descriptors.
207
#[derive(Clone)]
208
pub struct DescriptorChainRwIter<M> {
209
    chain: DescriptorChain<M>,
210
    writable: bool,
211
}
212
213
impl<M> Iterator for DescriptorChainRwIter<M>
214
where
215
    M: Deref,
216
    M::Target: GuestMemory,
217
{
218
    type Item = Descriptor;
219
220
    /// Return the next readable/writeable descriptor (depending on the `writable` value) in this
221
    /// descriptor chain, if there is one.
222
    ///
223
    /// Note that this is distinct from the next descriptor chain returned by
224
    /// [`AvailIter`](struct.AvailIter.html), which is the head of the next
225
    /// _available_ descriptor chain.
226
0
    fn next(&mut self) -> Option<Self::Item> {
227
        loop {
228
0
            match self.chain.next() {
229
0
                Some(v) => {
230
0
                    if v.is_write_only() == self.writable {
231
0
                        return Some(v);
232
0
                    }
233
                }
234
0
                None => return None,
235
            }
236
        }
237
0
    }
238
}
239
240
// We can't derive Debug, because rustc doesn't generate the `M::T: Debug` constraint
241
impl<M> Debug for DescriptorChainRwIter<M>
242
where
243
    M: Debug,
244
{
245
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
246
0
        f.debug_struct("DescriptorChainRwIter")
247
0
            .field("chain", &self.chain)
248
0
            .field("writable", &self.writable)
249
0
            .finish()
250
0
    }
251
}
252
253
#[cfg(test)]
254
mod tests {
255
    use super::*;
256
    use crate::desc::{split::Descriptor as SplitDescriptor, RawDescriptor};
257
    use crate::mock::{DescriptorTable, MockSplitQueue};
258
    use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_INDIRECT, VRING_DESC_F_NEXT};
259
    use vm_memory::GuestMemoryMmap;
260
261
    #[test]
262
    fn test_checked_new_descriptor_chain() {
263
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
264
        let vq = MockSplitQueue::new(m, 16);
265
266
        assert!(vq.end().0 < 0x1000);
267
268
        // index >= queue_size
269
        assert!(
270
            DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 16)
271
                .next()
272
                .is_none()
273
        );
274
275
        // desc_table address is way off
276
        assert!(
277
            DescriptorChain::<&GuestMemoryMmap>::new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0)
278
                .next()
279
                .is_none()
280
        );
281
282
        {
283
            // the first desc has a normal len, and the next_descriptor flag is set
284
            // but the index of the next descriptor is too large
285
            let desc = RawDescriptor::from(SplitDescriptor::new(
286
                0x1000,
287
                0x1000,
288
                VRING_DESC_F_NEXT as u16,
289
                16,
290
            ));
291
            vq.desc_table().store(0, desc).unwrap();
292
293
            let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
294
            c.next().unwrap();
295
            assert!(c.next().is_none());
296
        }
297
298
        // finally, let's test an ok chain
299
        {
300
            let desc = RawDescriptor::from(SplitDescriptor::new(
301
                0x1000,
302
                0x1000,
303
                VRING_DESC_F_NEXT as u16,
304
                1,
305
            ));
306
            vq.desc_table().store(0, desc).unwrap();
307
308
            let desc = RawDescriptor::from(SplitDescriptor::new(0x2000, 0x1000, 0, 0));
309
            vq.desc_table().store(1, desc).unwrap();
310
311
            let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
312
313
            assert_eq!(
314
                c.memory() as *const GuestMemoryMmap,
315
                m as *const GuestMemoryMmap
316
            );
317
318
            assert_eq!(c.desc_table, vq.start());
319
            assert_eq!(c.queue_size, 16);
320
            assert_eq!(c.ttl, c.queue_size);
321
322
            let desc = c.next().unwrap();
323
            assert_eq!(desc.addr(), GuestAddress(0x1000));
324
            assert_eq!(desc.len(), 0x1000);
325
            assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
326
            assert_eq!(desc.next(), 1);
327
            assert_eq!(c.ttl, c.queue_size - 1);
328
329
            assert!(c.next().is_some());
330
            // The descriptor above was the last from the chain, so `ttl` should be 0 now.
331
            assert_eq!(c.ttl, 0);
332
            assert!(c.next().is_none());
333
            assert_eq!(c.ttl, 0);
334
        }
335
    }
336
337
    #[test]
338
    fn test_ttl_wrap_around() {
339
        const QUEUE_SIZE: u16 = 16;
340
341
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x100000)]).unwrap();
342
        let vq = MockSplitQueue::new(m, QUEUE_SIZE);
343
344
        // Populate the entire descriptor table with entries. Only the last one should not have the
345
        // VIRTQ_DESC_F_NEXT set.
346
        for i in 0..QUEUE_SIZE - 1 {
347
            let desc = RawDescriptor::from(SplitDescriptor::new(
348
                0x1000 * (i + 1) as u64,
349
                0x1000,
350
                VRING_DESC_F_NEXT as u16,
351
                i + 1,
352
            ));
353
            vq.desc_table().store(i, desc).unwrap();
354
        }
355
        let desc = RawDescriptor::from(SplitDescriptor::new((0x1000 * 16) as u64, 0x1000, 0, 0));
356
        vq.desc_table().store(QUEUE_SIZE - 1, desc).unwrap();
357
358
        let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), QUEUE_SIZE, 0);
359
        assert_eq!(c.ttl, c.queue_size);
360
361
        // Validate that `ttl` wraps around even when the entire descriptor table is populated.
362
        for i in 0..QUEUE_SIZE {
363
            let _desc = c.next().unwrap();
364
            assert_eq!(c.ttl, c.queue_size - i - 1);
365
        }
366
        assert!(c.next().is_none());
367
    }
368
369
    #[test]
370
    fn test_new_from_indirect_descriptor() {
371
        // This is testing that chaining an indirect table works as expected. It is also a negative
372
        // test for the following requirement from the spec:
373
        // `A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags.`. In
374
        // case the driver is setting both of these flags, we check that the device doesn't panic.
375
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
376
        let vq = MockSplitQueue::new(m, 16);
377
        let dtable = vq.desc_table();
378
379
        // Create a chain with one normal descriptor and one pointing to an indirect table.
380
        let desc = RawDescriptor::from(SplitDescriptor::new(
381
            0x6000,
382
            0x1000,
383
            VRING_DESC_F_NEXT as u16,
384
            1,
385
        ));
386
        dtable.store(0, desc).unwrap();
387
        // The spec forbids setting both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags. We do
388
        // not currently enforce this rule, we just ignore the VIRTQ_DESC_F_NEXT flag.
389
        let desc = RawDescriptor::from(SplitDescriptor::new(
390
            0x7000,
391
            0x1000,
392
            (VRING_DESC_F_INDIRECT | VRING_DESC_F_NEXT) as u16,
393
            2,
394
        ));
395
        dtable.store(1, desc).unwrap();
396
        let desc = RawDescriptor::from(SplitDescriptor::new(0x8000, 0x1000, 0, 0));
397
        dtable.store(2, desc).unwrap();
398
399
        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);
400
401
        // create an indirect table with 4 chained descriptors
402
        let idtable = DescriptorTable::new(m, GuestAddress(0x7000), 4);
403
        for i in 0..4u16 {
404
            let desc: RawDescriptor = if i < 3 {
405
                RawDescriptor::from(SplitDescriptor::new(
406
                    0x1000 * i as u64,
407
                    0x1000,
408
                    VRING_DESC_F_NEXT as u16,
409
                    i + 1,
410
                ))
411
            } else {
412
                RawDescriptor::from(SplitDescriptor::new(0x1000 * i as u64, 0x1000, 0, 0))
413
            };
414
            idtable.store(i, desc).unwrap();
415
        }
416
417
        assert_eq!(c.head_index(), 0);
418
        // Consume the first descriptor.
419
        c.next().unwrap();
420
421
        // The chain logic hasn't parsed the indirect descriptor yet.
422
        assert!(!c.is_indirect);
423
424
        // Try to iterate through the indirect descriptor chain.
425
        for i in 0..4 {
426
            let desc = c.next().unwrap();
427
            assert!(c.is_indirect);
428
            if i < 3 {
429
                assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
430
                assert_eq!(desc.next(), i + 1);
431
            }
432
        }
433
        // Even though we added a new descriptor after the one that is pointing to the indirect
434
        // table, this descriptor won't be available when parsing the chain.
435
        assert!(c.next().is_none());
436
    }
437
438
    #[test]
439
    fn test_indirect_descriptor_address_noaligned() {
440
        // Alignment requirements for vring elements start from virtio 1.0,
441
        // but this is not necessary for address of indirect descriptor.
442
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
443
        let vq = MockSplitQueue::new(m, 16);
444
        let dtable = vq.desc_table();
445
446
        // Create a chain with a descriptor pointing to an indirect table with unaligned address.
447
        let desc = RawDescriptor::from(SplitDescriptor::new(
448
            0x7001,
449
            0x1000,
450
            (VRING_DESC_F_INDIRECT | VRING_DESC_F_NEXT) as u16,
451
            2,
452
        ));
453
        dtable.store(0, desc).unwrap();
454
455
        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);
456
457
        // Create an indirect table with 4 chained descriptors.
458
        let idtable = DescriptorTable::new(m, GuestAddress(0x7001), 4);
459
        for i in 0..4u16 {
460
            let desc: RawDescriptor = if i < 3 {
461
                RawDescriptor::from(SplitDescriptor::new(
462
                    0x1000 * i as u64,
463
                    0x1000,
464
                    VRING_DESC_F_NEXT as u16,
465
                    i + 1,
466
                ))
467
            } else {
468
                RawDescriptor::from(SplitDescriptor::new(0x1000 * i as u64, 0x1000, 0, 0))
469
            };
470
            idtable.store(i, desc).unwrap();
471
        }
472
473
        // Try to iterate through the indirect descriptor chain.
474
        for i in 0..4 {
475
            let desc = c.next().unwrap();
476
            assert!(c.is_indirect);
477
            if i < 3 {
478
                assert_eq!(desc.flags(), VRING_DESC_F_NEXT as u16);
479
                assert_eq!(desc.next(), i + 1);
480
            }
481
        }
482
    }
483
484
    #[test]
485
    fn test_indirect_descriptor_err() {
486
        // We are testing here different misconfigurations of the indirect table. For these error
487
        // case scenarios, the iterator over the descriptor chain won't return a new descriptor.
488
        {
489
            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
490
            let vq = MockSplitQueue::new(m, 16);
491
492
            // Create a chain with a descriptor pointing to an invalid indirect table: len not a
493
            // multiple of descriptor size.
494
            let desc = RawDescriptor::from(SplitDescriptor::new(
495
                0x1000,
496
                0x1001,
497
                VRING_DESC_F_INDIRECT as u16,
498
                0,
499
            ));
500
            vq.desc_table().store(0, desc).unwrap();
501
502
            let mut c: DescriptorChain<&GuestMemoryMmap> =
503
                DescriptorChain::new(m, vq.start(), 16, 0);
504
505
            assert!(c.next().is_none());
506
        }
507
508
        {
509
            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
510
            let vq = MockSplitQueue::new(m, 16);
511
512
            // Create a chain with a descriptor pointing to an invalid indirect table: table len >
513
            // u16::MAX.
514
            let desc = RawDescriptor::from(SplitDescriptor::new(
515
                0x1000,
516
                (u16::MAX as u32 + 1) * VRING_DESC_ALIGN_SIZE,
517
                VRING_DESC_F_INDIRECT as u16,
518
                0,
519
            ));
520
            vq.desc_table().store(0, desc).unwrap();
521
522
            let mut c: DescriptorChain<&GuestMemoryMmap> =
523
                DescriptorChain::new(m, vq.start(), 16, 0);
524
525
            assert!(c.next().is_none());
526
        }
527
528
        {
529
            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
530
            let vq = MockSplitQueue::new(m, 16);
531
532
            // Create a chain with a descriptor pointing to an indirect table.
533
            let desc = RawDescriptor::from(SplitDescriptor::new(
534
                0x1000,
535
                0x1000,
536
                VRING_DESC_F_INDIRECT as u16,
537
                0,
538
            ));
539
            vq.desc_table().store(0, desc).unwrap();
540
            // It's ok for an indirect descriptor to have flags = 0.
541
            let desc = RawDescriptor::from(SplitDescriptor::new(0x3000, 0x1000, 0, 0));
542
            m.write_obj(desc, GuestAddress(0x1000)).unwrap();
543
544
            let mut c: DescriptorChain<&GuestMemoryMmap> =
545
                DescriptorChain::new(m, vq.start(), 16, 0);
546
            assert!(c.next().is_some());
547
548
            // But it's not allowed to have an indirect descriptor that points to another indirect
549
            // table.
550
            let desc = RawDescriptor::from(SplitDescriptor::new(
551
                0x3000,
552
                0x1000,
553
                VRING_DESC_F_INDIRECT as u16,
554
                0,
555
            ));
556
            m.write_obj(desc, GuestAddress(0x1000)).unwrap();
557
558
            let mut c: DescriptorChain<&GuestMemoryMmap> =
559
                DescriptorChain::new(m, vq.start(), 16, 0);
560
561
            assert!(c.next().is_none());
562
        }
563
    }
564
}