/src/cloud-hypervisor/fuzz/fuzz_targets/console.rs
Line | Count | Source |
1 | | // Copyright © 2022 Intel Corporation |
2 | | // |
3 | | // SPDX-License-Identifier: Apache-2.0 |
4 | | |
5 | | #![no_main] |
6 | | |
7 | | use std::fs::File; |
8 | | use std::io::Write; |
9 | | use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; |
10 | | use std::sync::Arc; |
11 | | |
12 | | use libfuzzer_sys::{fuzz_target, Corpus}; |
13 | | use seccompiler::SeccompAction; |
14 | | use virtio_devices::{VirtioDevice, VirtioInterrupt, VirtioInterruptType}; |
15 | | use virtio_queue::{Queue, QueueT}; |
16 | | use vm_memory::bitmap::AtomicBitmap; |
17 | | use vm_memory::{Bytes, GuestAddress, GuestMemoryAtomic}; |
18 | | use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK}; |
19 | | |
// Guest memory tracked with an atomic dirty-page bitmap (cloud-hypervisor's variant).
type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
21 | | |
// Round `$n` up to the next multiple of `$align` (`$align` must be non-zero).
macro_rules! align {
    ($n:expr, $align:expr) => {{
        $n.div_ceil($align) * $align
    }};
}

// Number of bytes carved from the head of the fuzz input to feed the console.
const CONSOLE_INPUT_SIZE: usize = 128;
// Bytes of fuzz input used to seed each queue's state (next_avail, next_used,
// event_idx, size).
const QUEUE_DATA_SIZE: usize = 4;
// Size of the guest RAM region starting at physical address 0.
const MEM_SIZE: usize = 32 * 1024 * 1024;
// Guest memory gap between the RAM region and the virtqueue region.
const GUEST_MEM_GAP: u64 = 1024 * 1024;
// Guest physical address for the first virt queue
const BASE_VIRT_QUEUE_ADDR: u64 = MEM_SIZE as u64 + GUEST_MEM_GAP;
// Number of queues
const QUEUE_NUM: usize = 2;
// Max entries in the queue.
const QUEUE_SIZE: u16 = 256;
// Descriptor table alignment
const DESC_TABLE_ALIGN_SIZE: u64 = 16;
// Used ring alignment
const USED_RING_ALIGN_SIZE: u64 = 4;
// Descriptor table size: 16 bytes per descriptor (virtio split-queue layout).
const DESC_TABLE_SIZE: u64 = 16_u64 * QUEUE_SIZE as u64;
// Available ring size: 6 bytes of header/footer plus 2 bytes per entry.
const AVAIL_RING_SIZE: u64 = 6_u64 + 2 * QUEUE_SIZE as u64;
// Padding size before used ring
const PADDING_SIZE: u64 = align!(AVAIL_RING_SIZE, USED_RING_ALIGN_SIZE) - AVAIL_RING_SIZE;
// Used ring size: 6 bytes of header/footer plus 8 bytes per entry.
const USED_RING_SIZE: u64 = 6_u64 + 8 * QUEUE_SIZE as u64;
// Virtio-queue size in bytes
const QUEUE_BYTES_SIZE: usize = align!(
    DESC_TABLE_SIZE + AVAIL_RING_SIZE + PADDING_SIZE + USED_RING_SIZE,
    DESC_TABLE_ALIGN_SIZE
) as usize;
56 | | |
57 | | fuzz_target!(|bytes: &[u8]| -> Corpus { |
58 | | if bytes.len() < (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM + CONSOLE_INPUT_SIZE |
59 | | || bytes.len() |
60 | | > (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM + CONSOLE_INPUT_SIZE + MEM_SIZE |
61 | | { |
62 | | return Corpus::Reject; |
63 | | } |
64 | | |
65 | | let (pipe_rx, mut pipe_tx) = create_pipe().unwrap(); |
66 | | let output = unsafe { |
67 | | File::from_raw_fd( |
68 | | memfd_create(&std::ffi::CString::new("fuzz_console_output").unwrap()).unwrap(), |
69 | | ) |
70 | | }; |
71 | | let endpoint = virtio_devices::Endpoint::FilePair(Arc::new(output), Arc::new(pipe_rx)); |
72 | | |
73 | | let (mut console, _) = virtio_devices::Console::new( |
74 | | "fuzzer_console".to_owned(), |
75 | | endpoint, |
76 | | None, // resize_pipe |
77 | | false, // iommu |
78 | | SeccompAction::Allow, |
79 | | EventFd::new(EFD_NONBLOCK).unwrap(), |
80 | | None, |
81 | | ) |
82 | | .unwrap(); |
83 | | |
84 | | let console_input_bytes = &bytes[..CONSOLE_INPUT_SIZE]; |
85 | | let queue_data = &bytes[CONSOLE_INPUT_SIZE..CONSOLE_INPUT_SIZE + QUEUE_DATA_SIZE * QUEUE_NUM]; |
86 | | let queue_bytes = &bytes[CONSOLE_INPUT_SIZE + QUEUE_DATA_SIZE * QUEUE_NUM |
87 | | ..CONSOLE_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM]; |
88 | | let mem_bytes = &bytes[CONSOLE_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM..]; |
89 | | |
90 | | // Setup the virt queues with the input bytes |
91 | | let mut queues = setup_virt_queues( |
92 | | &[ |
93 | | &queue_data[..QUEUE_DATA_SIZE].try_into().unwrap(), |
94 | | &queue_data[QUEUE_DATA_SIZE..QUEUE_DATA_SIZE * 2] |
95 | | .try_into() |
96 | | .unwrap(), |
97 | | ], |
98 | | BASE_VIRT_QUEUE_ADDR, |
99 | | ); |
100 | | |
101 | | // Setup the guest memory with the input bytes |
102 | | let mem = GuestMemoryMmap::from_ranges(&[ |
103 | | (GuestAddress(0), MEM_SIZE), |
104 | | (GuestAddress(BASE_VIRT_QUEUE_ADDR), queue_bytes.len()), |
105 | | ]) |
106 | | .unwrap(); |
107 | | if mem |
108 | | .write_slice(queue_bytes, GuestAddress(BASE_VIRT_QUEUE_ADDR)) |
109 | | .is_err() |
110 | | { |
111 | | return Corpus::Reject; |
112 | | } |
113 | | if mem.write_slice(mem_bytes, GuestAddress(0 as u64)).is_err() { |
114 | | return Corpus::Reject; |
115 | | } |
116 | | let guest_memory = GuestMemoryAtomic::new(mem); |
117 | | |
118 | | let input_queue = queues.remove(0); |
119 | | let input_evt = EventFd::new(0).unwrap(); |
120 | | let input_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(input_evt.as_raw_fd())) }; |
121 | | let output_queue = queues.remove(0); |
122 | | let output_evt = EventFd::new(0).unwrap(); |
123 | | let output_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(output_evt.as_raw_fd())) }; |
124 | | |
125 | | // Kick the 'queue' events and endpoint event before activate the console device |
126 | | input_queue_evt.write(1).unwrap(); |
127 | | output_queue_evt.write(1).unwrap(); |
128 | | pipe_tx.write_all(console_input_bytes).unwrap(); // To use fuzzed data; |
129 | | |
130 | | console |
131 | | .activate( |
132 | | guest_memory, |
133 | | Arc::new(NoopVirtioInterrupt {}), |
134 | | vec![(0, input_queue, input_evt), (1, output_queue, output_evt)], |
135 | | ) |
136 | | .unwrap(); |
137 | | |
138 | | // Wait for the events to finish and console device worker thread to return |
139 | | console.wait_for_epoll_threads(); |
140 | | |
141 | | Corpus::Keep |
142 | | }); |
143 | | |
/// Interrupt sink that discards every interrupt the device raises.
///
/// The fuzzer has no guest to notify, so device-triggered interrupts are
/// simply acknowledged and dropped.
pub struct NoopVirtioInterrupt {}

impl VirtioInterrupt for NoopVirtioInterrupt {
    // Always succeed without delivering anything.
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        Ok(())
    }
}
151 | | |
152 | 476 | fn setup_virt_queues(bytes: &[&[u8; QUEUE_DATA_SIZE]], base_addr: u64) -> Vec<Queue> { |
153 | 476 | let mut queues = Vec::new(); |
154 | 952 | for (i, b) in bytes.iter().enumerate() { |
155 | 952 | let mut q = Queue::new(QUEUE_SIZE).unwrap(); |
156 | 952 | |
157 | 952 | let desc_table_addr = base_addr + (QUEUE_BYTES_SIZE * i) as u64; |
158 | 952 | let avail_ring_addr = desc_table_addr + DESC_TABLE_SIZE; |
159 | 952 | let used_ring_addr = avail_ring_addr + PADDING_SIZE + AVAIL_RING_SIZE; |
160 | 952 | q.try_set_desc_table_address(GuestAddress(desc_table_addr)) |
161 | 952 | .unwrap(); |
162 | 952 | q.try_set_avail_ring_address(GuestAddress(avail_ring_addr)) |
163 | 952 | .unwrap(); |
164 | 952 | q.try_set_used_ring_address(GuestAddress(used_ring_addr)) |
165 | 952 | .unwrap(); |
166 | 952 | |
167 | 952 | q.set_next_avail(b[0] as u16); // 'u8' is enough given the 'QUEUE_SIZE' is small |
168 | 952 | q.set_next_used(b[1] as u16); |
169 | 952 | q.set_event_idx(b[2] % 2 != 0); |
170 | 952 | q.set_size(b[3] as u16 % QUEUE_SIZE); |
171 | 952 | |
172 | 952 | q.set_ready(true); |
173 | 952 | queues.push(q); |
174 | 952 | } |
175 | | |
176 | 476 | queues |
177 | 476 | } |
178 | | |
179 | 476 | fn memfd_create(name: &std::ffi::CStr) -> Result<RawFd, std::io::Error> { |
180 | 476 | let res = unsafe { libc::syscall(libc::SYS_memfd_create, name.as_ptr(), 0) }; |
181 | | |
182 | 476 | if res < 0 { |
183 | 0 | Err(std::io::Error::last_os_error()) |
184 | | } else { |
185 | 476 | Ok(res as RawFd) |
186 | | } |
187 | 476 | } |
188 | | |
189 | 476 | fn create_pipe() -> Result<(File, File), std::io::Error> { |
190 | 476 | let mut pipe = [-1; 2]; |
191 | 476 | if unsafe { libc::pipe2(pipe.as_mut_ptr(), libc::O_CLOEXEC) } == -1 { |
192 | 0 | return Err(std::io::Error::last_os_error()); |
193 | 476 | } |
194 | 476 | let rx = unsafe { File::from_raw_fd(pipe[0]) }; |
195 | 476 | let tx = unsafe { File::from_raw_fd(pipe[1]) }; |
196 | | |
197 | 476 | Ok((rx, tx)) |
198 | 476 | } |