Coverage Report

Created: 2025-03-07 06:49

/src/cloud-hypervisor/virtio-devices/src/vsock/packet.rs
All executable lines below report an execution count of 0; the file is entirely uncovered.
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//

//! `VsockPacket` provides a thin wrapper over the buffers exchanged via virtio queues.
//! There are two components to a vsock packet, each using its own descriptor in a
//! virtio queue:
//! - the packet header; and
//! - the packet data/buffer.
//!
//! There is a 1:1 relation between descriptor chains and packets: the first (chain head) holds
//! the header, and an optional second descriptor holds the data. The second descriptor is only
//! present for data packets (VSOCK_OP_RW).
//!
//! `VsockPacket` wraps these two buffers and provides direct access to the data stored
//! in guest memory. This is done to avoid unnecessarily copying data from guest memory
//! to temporary buffers, before passing it on to the vsock backend.

use std::ops::Deref;
use std::sync::Arc;

use byteorder::{ByteOrder, LittleEndian};
use virtio_queue::DescriptorChain;
use vm_memory::{Address, GuestMemory};
use vm_virtio::{AccessPlatform, Translatable};

use super::{defs, Result, VsockError};
use crate::get_host_address_range;

// The vsock packet header is defined by the C struct:
//
// ```C
// struct virtio_vsock_hdr {
//     le64 src_cid;
//     le64 dst_cid;
//     le32 src_port;
//     le32 dst_port;
//     le32 len;
//     le16 type;
//     le16 op;
//     le32 flags;
//     le32 buf_alloc;
//     le32 fwd_cnt;
// };
// ```
//
// This struct will occupy the buffer pointed to by the head descriptor. We'll be accessing it
// as a byte slice. To that end, we define below the offsets for each struct field, as well as
// the packed struct size, as a bunch of `usize` consts.
// Note that these offsets are only used privately by the `VsockPacket` struct; the public
// interface consists of getter and setter methods for each struct field, which also handle the
// correct endianness.

/// The vsock packet header struct size (when packed).
pub const VSOCK_PKT_HDR_SIZE: usize = 44;

// Source CID.
const HDROFF_SRC_CID: usize = 0;

// Destination CID.
const HDROFF_DST_CID: usize = 8;

// Source port.
const HDROFF_SRC_PORT: usize = 16;

// Destination port.
const HDROFF_DST_PORT: usize = 20;

// Data length (in bytes) - may be 0, if there is no data buffer.
const HDROFF_LEN: usize = 24;

// Socket type. Currently, only connection-oriented streams are defined by the vsock protocol.
const HDROFF_TYPE: usize = 28;

// Operation ID - one of the VSOCK_OP_* values; e.g.
// - VSOCK_OP_RW: a data packet;
// - VSOCK_OP_REQUEST: connection request;
// - VSOCK_OP_RST: forceful connection termination;
// etc (see `super::defs::uapi` for the full list).
const HDROFF_OP: usize = 30;

// Additional options (flags) associated with the current operation (`op`).
// Currently, only used with shutdown requests (VSOCK_OP_SHUTDOWN).
const HDROFF_FLAGS: usize = 32;

// Size (in bytes) of the packet sender receive buffer (for the connection to which this packet
// belongs).
const HDROFF_BUF_ALLOC: usize = 36;

// Number of bytes the sender has received and consumed (for the connection to which this packet
// belongs). For instance, for our Unix backend, this counter would be the total number of bytes
// we have successfully written to a backing Unix socket.
const HDROFF_FWD_CNT: usize = 40;

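// Illustrative sketch, not from the original packet.rs: the packed header size follows directly
// from the `virtio_vsock_hdr` field widths above,
//   8 + 8 + 4 + 4 + 4 + 2 + 2 + 4 + 4 + 4 = 44 bytes,
// so the last offset plus the 4-byte `fwd_cnt` field must equal `VSOCK_PKT_HDR_SIZE`. A
// compile-time check along these lines could encode that invariant:
const _: () = assert!(HDROFF_FWD_CNT + 4 == VSOCK_PKT_HDR_SIZE);
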
/// The vsock packet, implemented as a wrapper over a virtq descriptor chain:
/// - the chain head, holding the packet header; and
/// - (an optional) data/buffer descriptor, only present for data packets (VSOCK_OP_RW).
///
pub struct VsockPacket {
    hdr: *mut u8,
    buf: Option<*mut u8>,
    buf_size: usize,
}

impl VsockPacket {
    /// Create the packet wrapper from a TX virtq chain head.
    ///
    /// The chain head is expected to hold valid packet header data. A following packet buffer
    /// descriptor can optionally end the chain. Bounds and pointer checks are performed when
    /// creating the wrapper.
    ///
    pub fn from_tx_virtq_head<M>(
        desc_chain: &mut DescriptorChain<M>,
        access_platform: Option<&Arc<dyn AccessPlatform>>,
    ) -> Result<Self>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;

        // All buffers in the TX queue must be readable.
        //
        if head.is_write_only() {
            return Err(VsockError::UnreadableDescriptor);
        }

        // The packet header should fit inside the head descriptor.
        if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
            return Err(VsockError::HdrDescTooSmall(head.len()));
        }

        let mut pkt = Self {
            hdr: get_host_address_range(
                desc_chain.memory(),
                head.addr()
                    .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
                VSOCK_PKT_HDR_SIZE,
            )
            .ok_or(VsockError::GuestMemory)?,
            buf: None,
            buf_size: 0,
        };

        // No point looking for a data/buffer descriptor, if the packet is zero-length.
        if pkt.is_empty() {
            return Ok(pkt);
        }

        // Reject weirdly-sized packets.
        //
        if pkt.len() > defs::MAX_PKT_BUF_SIZE as u32 {
            return Err(VsockError::InvalidPktLen(pkt.len()));
        }

        // Prior to Linux v6.3 there are two descriptors
        if head.has_next() {
            let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;

            // TX data should be read-only.
            if buf_desc.is_write_only() {
                return Err(VsockError::UnreadableDescriptor);
            }

            // The data buffer should be large enough to fit the size of the data, as described by
            // the header descriptor.
            if buf_desc.len() < pkt.len() {
                return Err(VsockError::BufDescTooSmall);
            }
            let buf_size = buf_desc.len() as usize;
            pkt.buf_size = buf_size;
            pkt.buf = Some(
                get_host_address_range(
                    desc_chain.memory(),
                    buf_desc.addr().translate_gva(access_platform, buf_size),
                    pkt.buf_size,
                )
                .ok_or(VsockError::GuestMemory)?,
            );
        } else {
            let buf_size: usize = head.len() as usize - VSOCK_PKT_HDR_SIZE;
            pkt.buf_size = buf_size;
            pkt.buf = Some(
                get_host_address_range(
                    desc_chain.memory(),
                    head.addr()
                        .checked_add(VSOCK_PKT_HDR_SIZE as u64)
                        .unwrap()
                        .translate_gva(access_platform, buf_size),
                    buf_size,
                )
                .ok_or(VsockError::GuestMemory)?,
            );
        }

        Ok(pkt)
    }

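    // Illustrative summary, not from the original packet.rs, of the two TX chain layouts the
    // constructor above handles:
    //
    //   pre-Linux v6.3 guest:  [ hdr, 44 bytes, read-only ] -> [ data buffer, read-only ]
    //   Linux v6.3+ guest:     [ hdr, 44 bytes | data ... ]    (one larger read-only descriptor)
    //
    // In the single-descriptor case the data simply starts at head.addr() + VSOCK_PKT_HDR_SIZE,
    // which is what the `else` branch computes.
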
    /// Create the packet wrapper from an RX virtq chain head.
    ///
    /// The chain head must be writable and large enough to hold the packet header. Guests older
    /// than Linux v6.3 add a second writable descriptor for the data; newer guests may instead
    /// provide a single, larger descriptor holding both header and data. Bounds and pointer
    /// checks are performed when creating the wrapper.
    ///
    pub fn from_rx_virtq_head<M>(
        desc_chain: &mut DescriptorChain<M>,
        access_platform: Option<&Arc<dyn AccessPlatform>>,
    ) -> Result<Self>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;

        // All RX buffers must be writable.
        //
        if !head.is_write_only() {
            return Err(VsockError::UnwritableDescriptor);
        }

        // The packet header should fit inside the head descriptor.
        if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
            return Err(VsockError::HdrDescTooSmall(head.len()));
        }

        // Prior to Linux v6.3 there are two descriptors
        if head.has_next() {
            let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
            let buf_size = buf_desc.len() as usize;

            Ok(Self {
                hdr: get_host_address_range(
                    desc_chain.memory(),
                    head.addr()
                        .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
                    VSOCK_PKT_HDR_SIZE,
                )
                .ok_or(VsockError::GuestMemory)?,
                buf: Some(
                    get_host_address_range(
                        desc_chain.memory(),
                        buf_desc.addr().translate_gva(access_platform, buf_size),
                        buf_size,
                    )
                    .ok_or(VsockError::GuestMemory)?,
                ),
                buf_size,
            })
        } else {
            let buf_size: usize = head.len() as usize - VSOCK_PKT_HDR_SIZE;
            Ok(Self {
                hdr: get_host_address_range(
                    desc_chain.memory(),
                    head.addr()
                        .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
                    VSOCK_PKT_HDR_SIZE,
                )
                .ok_or(VsockError::GuestMemory)?,
                buf: Some(
                    get_host_address_range(
                        desc_chain.memory(),
                        head.addr()
                            .checked_add(VSOCK_PKT_HDR_SIZE as u64)
                            .unwrap()
                            .translate_gva(access_platform, buf_size),
                        buf_size,
                    )
                    .ok_or(VsockError::GuestMemory)?,
                ),
                buf_size,
            })
        }
    }

    /// Provides in-place, byte-slice access to the vsock packet header.
    ///
    pub fn hdr(&self) -> &[u8] {
        // SAFETY: bound checks have already been performed when creating the packet
        // from the virtq descriptor.
        unsafe { std::slice::from_raw_parts(self.hdr as *const u8, VSOCK_PKT_HDR_SIZE) }
    }

    /// Provides in-place, byte-slice, mutable access to the vsock packet header.
    ///
    pub fn hdr_mut(&mut self) -> &mut [u8] {
        // SAFETY: bound checks have already been performed when creating the packet
        // from the virtq descriptor.
        unsafe { std::slice::from_raw_parts_mut(self.hdr, VSOCK_PKT_HDR_SIZE) }
    }

    /// Provides in-place, byte-slice access to the vsock packet data buffer.
    ///
    /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
    ///       For those packets, this method will return `None`.
    /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
    ///            (and often is) larger than the length of the packet data. The packet data length
    ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
    pub fn buf(&self) -> Option<&[u8]> {
        self.buf.map(|ptr| {
            // SAFETY: bound checks have already been performed when creating the packet
            // from the virtq descriptor.
            unsafe { std::slice::from_raw_parts(ptr as *const u8, self.buf_size) }
        })
    }

    /// Provides in-place, byte-slice, mutable access to the vsock packet data buffer.
    ///
    /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
    ///       For those packets, this method will return `None`.
    /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
    ///            (and often is) larger than the length of the packet data. The packet data length
    ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
    pub fn buf_mut(&mut self) -> Option<&mut [u8]> {
        self.buf.map(|ptr| {
            // SAFETY: bound checks have already been performed when creating the packet
            // from the virtq descriptor.
            unsafe { std::slice::from_raw_parts_mut(ptr, self.buf_size) }
        })
    }

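    // Illustrative sketch, not from the original packet.rs: since `buf()` exposes the whole
    // descriptor buffer, a consumer that wants exactly the packet payload would slice it by the
    // header `len` field (for TX packets, the constructor has already checked that the buffer
    // holds at least `len()` bytes). `payload_sketch` is a hypothetical helper name.
    #[allow(dead_code)]
    fn payload_sketch(&self) -> Option<&[u8]> {
        self.buf().map(|b| &b[..self.len() as usize])
    }
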
    pub fn src_cid(&self) -> u64 {
        LittleEndian::read_u64(&self.hdr()[HDROFF_SRC_CID..])
    }

    pub fn set_src_cid(&mut self, cid: u64) -> &mut Self {
        LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_SRC_CID..], cid);
        self
    }

    pub fn dst_cid(&self) -> u64 {
        LittleEndian::read_u64(&self.hdr()[HDROFF_DST_CID..])
    }

    pub fn set_dst_cid(&mut self, cid: u64) -> &mut Self {
        LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_DST_CID..], cid);
        self
    }

    pub fn src_port(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_SRC_PORT..])
    }

    pub fn set_src_port(&mut self, port: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_SRC_PORT..], port);
        self
    }

    pub fn dst_port(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_DST_PORT..])
    }

    pub fn set_dst_port(&mut self, port: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_DST_PORT..], port);
        self
    }

    pub fn len(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_LEN..])
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    pub fn set_len(&mut self, len: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_LEN..], len);
        self
    }

    pub fn type_(&self) -> u16 {
        LittleEndian::read_u16(&self.hdr()[HDROFF_TYPE..])
    }

    pub fn set_type(&mut self, type_: u16) -> &mut Self {
        LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_TYPE..], type_);
        self
    }

    pub fn op(&self) -> u16 {
        LittleEndian::read_u16(&self.hdr()[HDROFF_OP..])
    }

    pub fn set_op(&mut self, op: u16) -> &mut Self {
        LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_OP..], op);
        self
    }

    pub fn flags(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_FLAGS..])
    }

    pub fn set_flags(&mut self, flags: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FLAGS..], flags);
        self
    }

    pub fn set_flag(&mut self, flag: u32) -> &mut Self {
        self.set_flags(self.flags() | flag);
        self
    }

    pub fn buf_alloc(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_BUF_ALLOC..])
    }

    pub fn set_buf_alloc(&mut self, buf_alloc: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_BUF_ALLOC..], buf_alloc);
        self
    }

    pub fn fwd_cnt(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_FWD_CNT..])
    }

    pub fn set_fwd_cnt(&mut self, fwd_cnt: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FWD_CNT..], fwd_cnt);
        self
    }
}

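// Illustrative usage sketch, not from the original packet.rs: the setters above return
// `&mut Self`, so a backend can fill a reply header builder-style. `host_cid` and `guest_cid`
// are hypothetical caller-supplied values; `VSOCK_OP_RST` is assumed to live in
// `super::defs::uapi`, per the `HDROFF_OP` comment near the top of this file.
#[allow(dead_code)]
fn fill_rst_header_sketch(pkt: &mut VsockPacket, host_cid: u64, guest_cid: u64) {
    pkt.set_src_cid(host_cid)
        .set_dst_cid(guest_cid)
        .set_len(0)
        .set_op(defs::uapi::VSOCK_OP_RST)
        .set_flags(0);
}
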
#[cfg(test)]
#[allow(clippy::undocumented_unsafe_blocks)]
mod tests {
    use virtio_bindings::virtio_ring::VRING_DESC_F_WRITE;
    use virtio_queue::QueueOwnedT;
    use vm_memory::GuestAddress;
    use vm_virtio::queue::testing::VirtqDesc as GuestQDesc;

    use super::super::tests::TestContext;
    use super::*;
    use crate::vsock::defs::MAX_PKT_BUF_SIZE;
    use crate::GuestMemoryMmap;

    macro_rules! create_context {
        ($test_ctx:ident, $handler_ctx:ident) => {
            let $test_ctx = TestContext::new();
            let mut $handler_ctx = $test_ctx.create_epoll_handler_context();
            // For TX packets, hdr.len should be set to a valid value.
            set_pkt_len(1024, &$handler_ctx.guest_txvq.dtable[0], &$test_ctx.mem);
        };
    }

    macro_rules! expect_asm_error {
        (tx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_tx_virtq_head, 1);
        };
        (rx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_rx_virtq_head, 0);
        };
        ($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => {
            match VsockPacket::$ctor(
                &mut $handler_ctx.handler.queues[$vq]
                    .iter(&$test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            ) {
                Err($err) => (),
                Ok(_) => panic!("Packet assembly should've failed!"),
                Err(other) => panic!("Packet assembly failed with: {:?}", other),
            }
        };
    }

    fn set_pkt_len(len: u32, guest_desc: &GuestQDesc, mem: &GuestMemoryMmap) {
        let hdr_gpa = guest_desc.addr.get();
        let hdr_ptr =
            get_host_address_range(mem, GuestAddress(hdr_gpa), VSOCK_PKT_HDR_SIZE).unwrap();
        let len_ptr = unsafe { hdr_ptr.add(HDROFF_LEN) };

        LittleEndian::write_u32(unsafe { std::slice::from_raw_parts_mut(len_ptr, 4) }, len);
    }

    #[test]
    fn test_tx_packet_assembly() {
        // Test case: successful TX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);

            let pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_txvq.dtable[1].len.get() as usize
            );
        }

        // Test case: error on write-only hdr descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: header descriptor has insufficient space to hold the packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }

        // Test case: zero-length TX packet.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            let mut pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert!(pkt.buf().is_none());
            assert!(pkt.buf_mut().is_none());
        }

        // Test case: TX packet has more data than we can handle.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(
                MAX_PKT_BUF_SIZE as u32 + 1,
                &handler_ctx.guest_txvq.dtable[0],
                &test_ctx.mem,
            );
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::InvalidPktLen(_));
        }

        // Test case: error on write-only buf descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[1]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: the buffer descriptor cannot fit all the data advertised by the
        // packet header `len` field.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(8 * 1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            handler_ctx.guest_txvq.dtable[1].len.set(4 * 1024);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::BufDescTooSmall);
        }
    }

    #[test]
    fn test_rx_packet_assembly() {
        // Test case: successful RX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);
            let pkt = VsockPacket::from_rx_virtq_head(
                &mut handler_ctx.handler.queues[0]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_rxvq.dtable[1].len.get() as usize
            );
        }

        // Test case: read-only RX packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0].flags.set(0);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::UnwritableDescriptor);
        }

        // Test case: RX descriptor head cannot fit the entire packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }
    }

    #[test]
    fn test_packet_hdr_accessors() {
        const SRC_CID: u64 = 1;
        const DST_CID: u64 = 2;
        const SRC_PORT: u32 = 3;
        const DST_PORT: u32 = 4;
        const LEN: u32 = 5;
        const TYPE: u16 = 6;
        const OP: u16 = 7;
        const FLAGS: u32 = 8;
        const BUF_ALLOC: u32 = 9;
        const FWD_CNT: u32 = 10;

        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        // Test field accessors.
        pkt.set_src_cid(SRC_CID)
            .set_dst_cid(DST_CID)
            .set_src_port(SRC_PORT)
            .set_dst_port(DST_PORT)
            .set_len(LEN)
            .set_type(TYPE)
            .set_op(OP)
            .set_flags(FLAGS)
            .set_buf_alloc(BUF_ALLOC)
            .set_fwd_cnt(FWD_CNT);

        assert_eq!(pkt.src_cid(), SRC_CID);
        assert_eq!(pkt.dst_cid(), DST_CID);
        assert_eq!(pkt.src_port(), SRC_PORT);
        assert_eq!(pkt.dst_port(), DST_PORT);
        assert_eq!(pkt.len(), LEN);
        assert_eq!(pkt.type_(), TYPE);
        assert_eq!(pkt.op(), OP);
        assert_eq!(pkt.flags(), FLAGS);
        assert_eq!(pkt.buf_alloc(), BUF_ALLOC);
        assert_eq!(pkt.fwd_cnt(), FWD_CNT);

        // Test individual flag setting.
        let flags = pkt.flags() | 0b1000;
        pkt.set_flag(0b1000);
        assert_eq!(pkt.flags(), flags);

        // Test packet header as-slice access.
        //

        assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);

        assert_eq!(
            SRC_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_SRC_CID..])
        );
        assert_eq!(
            DST_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_DST_CID..])
        );
        assert_eq!(
            SRC_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_SRC_PORT..])
        );
        assert_eq!(
            DST_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_DST_PORT..])
        );
        assert_eq!(LEN, LittleEndian::read_u32(&pkt.hdr()[HDROFF_LEN..]));
        assert_eq!(TYPE, LittleEndian::read_u16(&pkt.hdr()[HDROFF_TYPE..]));
        assert_eq!(OP, LittleEndian::read_u16(&pkt.hdr()[HDROFF_OP..]));
        assert_eq!(FLAGS, LittleEndian::read_u32(&pkt.hdr()[HDROFF_FLAGS..]));
        assert_eq!(
            BUF_ALLOC,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_BUF_ALLOC..])
        );
        assert_eq!(
            FWD_CNT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_FWD_CNT..])
        );

        assert_eq!(pkt.hdr_mut().len(), VSOCK_PKT_HDR_SIZE);
        for b in pkt.hdr_mut() {
            *b = 0;
        }
        assert_eq!(pkt.src_cid(), 0);
        assert_eq!(pkt.dst_cid(), 0);
        assert_eq!(pkt.src_port(), 0);
        assert_eq!(pkt.dst_port(), 0);
        assert_eq!(pkt.len(), 0);
        assert_eq!(pkt.type_(), 0);
        assert_eq!(pkt.op(), 0);
        assert_eq!(pkt.flags(), 0);
        assert_eq!(pkt.buf_alloc(), 0);
        assert_eq!(pkt.fwd_cnt(), 0);
    }

    #[test]
    fn test_packet_buf() {
        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        assert_eq!(
            pkt.buf().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );
        assert_eq!(
            pkt.buf_mut().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );

        for i in 0..pkt.buf().unwrap().len() {
            pkt.buf_mut().unwrap()[i] = (i % 0x100) as u8;
            assert_eq!(pkt.buf().unwrap()[i], (i % 0x100) as u8);
        }
    }
}