/src/cloud-hypervisor/fuzz/fuzz_targets/mem.rs
Line | Count | Source |
1 | | // Copyright © 2022 Intel Corporation |
2 | | // |
3 | | // SPDX-License-Identifier: Apache-2.0 |
4 | | |
5 | | #![no_main] |
6 | | |
7 | | use std::os::unix::io::{AsRawFd, FromRawFd}; |
8 | | use std::sync::{Arc, Mutex}; |
9 | | |
10 | | use libfuzzer_sys::{fuzz_target, Corpus}; |
11 | | use seccompiler::SeccompAction; |
12 | | use virtio_devices::{BlocksState, Mem, VirtioDevice, VirtioInterrupt, VirtioInterruptType}; |
13 | | use virtio_queue::{Queue, QueueT}; |
14 | | use vm_memory::bitmap::AtomicBitmap; |
15 | | use vm_memory::{Bytes, GuestAddress, GuestMemoryAtomic}; |
16 | | use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK}; |
17 | | |
// Aliases selecting the `AtomicBitmap`-parameterized variants of the
// vm-memory types, so the rest of the file can use the short names.
type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>;
20 | | |
// Round `$n` up to the next multiple of `$align` (`$align` must be non-zero).
macro_rules! align {
    ($n:expr, $align:expr) => {{
        $n.div_ceil($align) * $align
    }};
}

// Number of fuzz-input bytes consumed for the virtio-mem device config.
const VIRTIO_MEM_DATA_SIZE: usize = 1;
// Number of fuzz-input bytes consumed for the virt-queue state.
const QUEUE_DATA_SIZE: usize = 4;
// The size of the guest memory for the virtio-mem region
const MEM_SIZE: usize = 128 * 1024 * 1024;
// The start address of the virtio-mem region in the guest memory
const VIRTIO_MEM_REGION_ADDRESS: u64 = 0;

// Max entries in the queue.
const QUEUE_SIZE: u16 = 64;
// Descriptor table alignment
const DESC_TABLE_ALIGN_SIZE: u64 = 16;
// Available ring alignment
const AVAIL_RING_ALIGN_SIZE: u64 = 2;
// Used ring alignment
const USED_RING_ALIGN_SIZE: u64 = 4;
// Descriptor table size: 16 bytes per descriptor.
const DESC_TABLE_SIZE: u64 = 16_u64 * QUEUE_SIZE as u64;
// Available ring size: flags + idx + used_event (6 bytes) plus one u16 entry
// per descriptor.
const AVAIL_RING_SIZE: u64 = 6_u64 + 2 * QUEUE_SIZE as u64;
// Used ring size: flags + idx + avail_event (6 bytes) plus one 8-byte element
// per descriptor.
const USED_RING_SIZE: u64 = 6_u64 + 8 * QUEUE_SIZE as u64;

// Guest memory gap (1 MiB) left between the virtio-mem region and the
// virt-queue structures. (Was `1 * 1024 * 1024`; the `1 *` was a no-op.)
const GUEST_MEM_GAP: u64 = 1024 * 1024;
// Guest physical address for descriptor table.
const DESC_TABLE_ADDR: u64 = align!(MEM_SIZE as u64 + GUEST_MEM_GAP, DESC_TABLE_ALIGN_SIZE);
// Guest physical address for available ring
const AVAIL_RING_ADDR: u64 = align!(DESC_TABLE_ADDR + DESC_TABLE_SIZE, AVAIL_RING_ALIGN_SIZE);
// Guest physical address for used ring
const USED_RING_ADDR: u64 = align!(AVAIL_RING_ADDR + AVAIL_RING_SIZE, USED_RING_ALIGN_SIZE);
// Virtio-queue size in bytes (descriptor table start through used ring end)
const QUEUE_BYTES_SIZE: usize = (USED_RING_ADDR + USED_RING_SIZE - DESC_TABLE_ADDR) as usize;
59 | | |
fuzz_target!(|bytes: &[u8]| -> Corpus {
    // The input must supply at least the device-config byte, the queue-state
    // bytes, and a full queue memory image; everything after that (capped at
    // MEM_SIZE) seeds the virtio-mem region contents.
    if bytes.len() < VIRTIO_MEM_DATA_SIZE + QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE
        || bytes.len() > (VIRTIO_MEM_DATA_SIZE + QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE + MEM_SIZE)
    {
        return Corpus::Reject;
    }

    // Split the input: [device config | queue state | queue memory | region bytes]
    let virtio_mem_data = &bytes[..VIRTIO_MEM_DATA_SIZE];
    let queue_data = &bytes[VIRTIO_MEM_DATA_SIZE..VIRTIO_MEM_DATA_SIZE + QUEUE_DATA_SIZE];
    let queue_bytes = &bytes[VIRTIO_MEM_DATA_SIZE + QUEUE_DATA_SIZE
        ..VIRTIO_MEM_DATA_SIZE + QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE];
    let mem_bytes = &bytes[VIRTIO_MEM_DATA_SIZE + QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE..];

    // Create a virtio-mem device based on the input bytes;
    let (mut virtio_mem, virtio_mem_region) =
        create_dummy_virtio_mem(virtio_mem_data.try_into().unwrap());

    // Setup the virt queue with the input bytes
    let q = setup_virt_queue(queue_data.try_into().unwrap());

    // Setup the guest memory with the input bytes. The first region holds the
    // descriptor table / avail ring / used ring at their fixed addresses.
    let mem = GuestMemoryMmap::from_ranges(&[
        (GuestAddress(DESC_TABLE_ADDR), QUEUE_BYTES_SIZE), // guest region for the virt queue
    ])
    .unwrap();
    if mem
        .write_slice(queue_bytes, GuestAddress(DESC_TABLE_ADDR))
        .is_err()
    {
        return Corpus::Reject;
    }
    // Add the memory region for the virtio-mem device
    let mem = mem.insert_region(virtio_mem_region).unwrap();
    if mem
        .write_slice(mem_bytes, GuestAddress(VIRTIO_MEM_REGION_ADDRESS))
        .is_err()
    {
        return Corpus::Reject;
    }
    let guest_memory = GuestMemoryAtomic::new(mem);

    let evt = EventFd::new(0).unwrap();
    // SAFETY: libc::dup returns a freshly-owned fd (or -1 on failure, in which
    // case subsequent EventFd operations would error rather than alias `evt`).
    let queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(evt.as_raw_fd())) };

    // Kick the 'queue' event before activate the virtio-mem device
    queue_evt.write(1).unwrap();

    // Activation failure is acceptable for fuzzing — hence `.ok()`.
    virtio_mem
        .activate(
            guest_memory,
            Arc::new(NoopVirtioInterrupt {}),
            vec![(0, q, evt)],
        )
        .ok();

    // Wait for the events to finish and virtio-mem device worker thread to return
    virtio_mem.wait_for_epoll_threads();

    return Corpus::Keep;
});
120 | | |
// Interrupt sink for fuzzing: the harness only drives the device and never
// observes guest-facing interrupts, so every trigger is discarded.
pub struct NoopVirtioInterrupt {}

impl VirtioInterrupt for NoopVirtioInterrupt {
    // Swallow the notification and report success unconditionally.
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        Ok(())
    }
}
128 | | |
129 | | // Create a dummy virtio-mem device for fuzzing purpose only |
// Create a dummy virtio-mem device for fuzzing purpose only.
// Returns the device together with the guest RAM region backing it.
fn create_dummy_virtio_mem(bytes: &[u8; VIRTIO_MEM_DATA_SIZE]) -> (Mem, Arc<GuestRegionMmap>) {
    // The single fuzz byte only toggles whether the device is associated
    // with a NUMA node (node 0) or none at all.
    let numa_id = if bytes[0] % 2 != 0 { Some(0) } else { None };

    // Anonymous RAM region of MEM_SIZE bytes at VIRTIO_MEM_REGION_ADDRESS.
    // NOTE(review): the positional bool/None arguments follow
    // MemoryManager::create_ram_region's signature — confirm against the vmm
    // crate before reordering anything here.
    let region = vmm::memory_manager::MemoryManager::create_ram_region(
        &None,
        0,
        GuestAddress(VIRTIO_MEM_REGION_ADDRESS),
        MEM_SIZE,
        false,
        false,
        false,
        None,
        numa_id,
        None,
        false,
    )
    .unwrap();

    // Per-block plug state tracker, sized to cover the whole region.
    let blocks_state = Arc::new(Mutex::new(BlocksState::new(region.size() as u64)));

    (
        Mem::new(
            "fuzzer_mem".to_owned(),
            &region,
            SeccompAction::Allow,
            numa_id.map(|i| i as u16),
            0,
            false,
            EventFd::new(EFD_NONBLOCK).unwrap(),
            blocks_state.clone(),
            None,
        )
        .unwrap(),
        region,
    )
}
166 | | |
167 | 719 | fn setup_virt_queue(bytes: &[u8; QUEUE_DATA_SIZE]) -> Queue { |
168 | 719 | let mut q = Queue::new(QUEUE_SIZE).unwrap(); |
169 | 719 | q.set_next_avail(bytes[0] as u16); // 'u8' is enough given the 'QUEUE_SIZE' is small |
170 | 719 | q.set_next_used(bytes[1] as u16); |
171 | 719 | q.set_event_idx(bytes[2] % 2 != 0); |
172 | 719 | q.set_size(bytes[3] as u16 % QUEUE_SIZE); |
173 | | |
174 | 719 | q.try_set_desc_table_address(GuestAddress(DESC_TABLE_ADDR)) |
175 | 719 | .unwrap(); |
176 | 719 | q.try_set_avail_ring_address(GuestAddress(AVAIL_RING_ADDR)) |
177 | 719 | .unwrap(); |
178 | 719 | q.try_set_used_ring_address(GuestAddress(USED_RING_ADDR)) |
179 | 719 | .unwrap(); |
180 | 719 | q.set_ready(true); |
181 | | |
182 | 719 | q |
183 | 719 | } |