/src/MigTD/deps/td-shim/td-exception/src/interrupt.rs
Line | Count | Source |
1 | | // Copyright (c) 2021 Intel Corporation |
2 | | // |
3 | | // SPDX-License-Identifier: BSD-2-Clause-Patent |
4 | | |
5 | | use core::arch::asm; |
6 | | use spin::Mutex; |
7 | | #[cfg(feature = "tdx")] |
8 | | use tdx_tdcall::tdx; |
9 | | |
10 | | use crate::{idt::IDT_ENTRY_COUNT, ExceptionError}; |
11 | | |
// Caller-saved ("scratch") registers captured on interrupt entry.
// The field order is aligned with scratch_push!() and scratch_pop!() in the
// assembly entry stubs — do not reorder fields without updating those macros.
#[repr(C, packed)]
pub struct ScratchRegisters {
    pub r11: usize,
    pub r10: usize,
    pub r9: usize,
    pub r8: usize,
    pub rsi: usize,
    pub rdi: usize,
    pub rdx: usize,
    pub rcx: usize,
    pub rax: usize,
}
25 | | |
impl ScratchRegisters {
    /// Log every scratch register, one per line, zero-padded to 16 hex digits.
    pub fn dump(&self) {
        // Each field is wrapped in `{ ... }` to copy the value out of this
        // `#[repr(C, packed)]` struct instead of taking a (possibly unaligned)
        // reference to it.
        log::info!("RAX: {:>016X}\n", { self.rax });
        log::info!("RCX: {:>016X}\n", { self.rcx });
        log::info!("RDX: {:>016X}\n", { self.rdx });
        log::info!("RDI: {:>016X}\n", { self.rdi });
        log::info!("RSI: {:>016X}\n", { self.rsi });
        log::info!("R8: {:>016X}\n", { self.r8 });
        log::info!("R9: {:>016X}\n", { self.r9 });
        log::info!("R10: {:>016X}\n", { self.r10 });
        log::info!("R11: {:>016X}\n", { self.r11 });
    }
}
39 | | |
// Callee-saved ("preserved") registers captured on interrupt entry.
// Field order mirrors the push order of the assembly entry stubs — do not
// reorder (TODO confirm against the preserved_push!/pop! macros, not visible here).
#[repr(C, packed)]
pub struct PreservedRegisters {
    pub r15: usize,
    pub r14: usize,
    pub r13: usize,
    pub r12: usize,
    pub rbp: usize,
    pub rbx: usize,
}
49 | | |
impl PreservedRegisters {
    /// Log every preserved register, one per line, zero-padded to 16 hex digits.
    pub fn dump(&self) {
        // `{ ... }` copies each value out of the packed struct to avoid
        // creating an unaligned reference.
        log::info!("RBX: {:>016X}\n", { self.rbx });
        log::info!("RBP: {:>016X}\n", { self.rbp });
        log::info!("R12: {:>016X}\n", { self.r12 });
        log::info!("R13: {:>016X}\n", { self.r13 });
        log::info!("R14: {:>016X}\n", { self.r14 });
        log::info!("R15: {:>016X}\n", { self.r15 });
    }
}
60 | | |
// The frame the CPU pushes on exception/interrupt delivery and consumes on
// IRET: return RIP, code segment, and RFLAGS (lowest address first).
#[repr(packed)]
pub struct IretRegisters {
    pub rip: usize,
    pub cs: usize,
    pub rflags: usize,
}
67 | | |
impl IretRegisters {
    /// Log the IRET frame (RFLAGS, CS, RIP), zero-padded to 16 hex digits.
    fn dump(&self) {
        // `{ ... }` copies each value out of the packed struct to avoid an
        // unaligned reference.
        log::info!("RFLAG: {:>016X}\n", { self.rflags });
        log::info!("CS: {:>016X}\n", { self.cs });
        log::info!("RIP: {:>016X}\n", { self.rip });
    }
}
75 | | |
// Complete trap frame as seen by `generic_interrupt_handler`, lowest address
// first: GPRs saved by the entry stubs, then the vector number and error code
// pushed by the per-vector entry, then the CPU-pushed IRET frame.
#[repr(packed)]
pub struct InterruptStack {
    pub preserved: PreservedRegisters,
    pub scratch: ScratchRegisters,
    // Interrupt/exception vector number.
    pub vector: usize,
    // Error code; for vectors without one this slot is presumably a
    // stub-pushed placeholder — TODO confirm against the entry stubs.
    pub code: usize,
    pub iret: IretRegisters,
}
84 | | |
impl InterruptStack {
    /// Log the whole trap frame: IRET frame, error code, vector, then all GPRs.
    pub fn dump(&self) {
        self.iret.dump();
        // `{ ... }` copies the values out of the packed struct (no unaligned refs).
        log::info!("CODE: {:>016X}\n", { self.code });
        log::info!("VECTOR: {:>016X}\n", { self.vector });
        self.scratch.dump();
        self.preserved.dump();
    }
}
94 | | |
// A single interrupt-handler entry: a plain function pointer invoked with the
// trap frame. `Copy` is required so the callback table can be array-initialized.
#[derive(Debug, Copy, Clone)]
pub struct InterruptCallback {
    func: fn(&mut InterruptStack),
}
99 | | |
100 | | impl InterruptCallback { |
101 | 0 | pub const fn new(func: fn(&mut InterruptStack)) -> Self { |
102 | 0 | InterruptCallback { func } |
103 | 0 | } |
104 | | } |
105 | | |
// One callback per IDT vector; indexed by the vector number.
struct InterruptCallbackTable {
    table: [InterruptCallback; IDT_ENTRY_COUNT],
}
109 | | |
110 | | impl InterruptCallbackTable { |
111 | 0 | const fn init() -> Self { |
112 | 0 | InterruptCallbackTable { |
113 | 0 | table: [InterruptCallback::new(default_callback); IDT_ENTRY_COUNT], |
114 | 0 | } |
115 | 0 | } |
116 | | } |
117 | | |
// Global per-vector callback table, guarded by a spinlock. Handlers copy the
// function pointer out and drop the lock before calling it (see
// `generic_interrupt_handler`), so handler re-entry does not deadlock.
static CALLBACK_TABLE: Mutex<InterruptCallbackTable> = Mutex::new(InterruptCallbackTable::init());
119 | | |
120 | 0 | pub(crate) fn init_interrupt_callbacks() { |
121 | 0 | let mut callbacks = CALLBACK_TABLE.lock(); |
122 | | // Set up exceptions handler according to Intel64 & IA32 Software Developer Manual |
123 | | // Reference: https://www.intel.com/content/www/us/en/developer/articles/technical/intel-sdm.html |
124 | 0 | callbacks.table[0].func = divide_by_zero; |
125 | 0 | callbacks.table[1].func = debug; |
126 | 0 | callbacks.table[2].func = non_maskable; |
127 | 0 | callbacks.table[3].func = breakpoint; |
128 | 0 | callbacks.table[4].func = overflow; |
129 | 0 | callbacks.table[5].func = bound_range; |
130 | 0 | callbacks.table[6].func = invalid_opcode; |
131 | 0 | callbacks.table[7].func = device_not_available; |
132 | 0 | callbacks.table[8].func = double_fault; |
133 | | // 9 no longer available |
134 | 0 | callbacks.table[10].func = invalid_tss; |
135 | 0 | callbacks.table[11].func = segment_not_present; |
136 | 0 | callbacks.table[12].func = stack_segment; |
137 | 0 | callbacks.table[13].func = protection; |
138 | 0 | callbacks.table[14].func = page; |
139 | | // 15 reserved |
140 | 0 | callbacks.table[16].func = fpu; |
141 | 0 | callbacks.table[17].func = alignment_check; |
142 | 0 | callbacks.table[18].func = machine_check; |
143 | 0 | callbacks.table[19].func = simd; |
144 | | #[cfg(feature = "tdx")] |
145 | 0 | { |
146 | 0 | callbacks.table[20].func = virtualization; |
147 | 0 | } |
148 | 0 | callbacks.table[21].func = control_flow; |
149 | 0 | } |
150 | | |
151 | 0 | pub fn register_interrupt_callback( |
152 | 0 | index: usize, |
153 | 0 | callback: InterruptCallback, |
154 | 0 | ) -> Result<(), ExceptionError> { |
155 | 0 | if index > IDT_ENTRY_COUNT { |
156 | 0 | return Err(ExceptionError::InvalidParameter); |
157 | 0 | } |
158 | 0 | CALLBACK_TABLE.lock().table[index] = callback; |
159 | 0 | Ok(()) |
160 | 0 | } |
161 | | |
/// Signal end-of-interrupt by writing 0 to the x2APIC EOI MSR (0x80B).
///
/// Must run at the end of an external-interrupt handler, sometime before the
/// IRET instruction.
fn eoi() {
    // SAFETY: WRMSR to MSR 0x80B with EDX:EAX = 0 is the architectural x2APIC
    // EOI write. The input registers are declared as operands, so the compiler
    // knows ECX/EAX/EDX are used — the previous version moved into those
    // registers inside the asm string without declaring them, silently
    // clobbering live values (undefined behavior under Rust inline-asm rules).
    unsafe {
        asm!(
            "wrmsr",
            in("ecx") 0x80Bu32,
            in("eax") 0u32,
            in("edx") 0u32,
            options(nostack, preserves_flags),
        );
    }
}
176 | | |
177 | | #[no_mangle] |
178 | 0 | fn generic_interrupt_handler(stack: &mut InterruptStack) { |
179 | 0 | if stack.vector >= IDT_ENTRY_COUNT { |
180 | 0 | log::error!("Invalid interrupt vector number!\n"); |
181 | 0 | return; |
182 | 0 | } |
183 | | |
184 | | // We need to allow the re-entry of this handler. For example, virtualization exception may |
185 | | // happen in a timer interrupt handler. So we need to copy the function pointer out and |
186 | | // release the lock. |
187 | 0 | let func = CALLBACK_TABLE.lock().table[stack.vector].func; |
188 | 0 | func(stack); |
189 | | |
190 | | // If we are handling an interrupt, signal a end-of-interrupt before return. |
191 | 0 | if stack.vector > 31 { |
192 | 0 | eoi(); |
193 | 0 | } |
194 | 0 | } |
195 | | |
196 | 0 | fn default_callback(stack: &mut InterruptStack) { |
197 | 0 | log::info!("default interrupt callback\n"); |
198 | 0 | stack.dump(); |
199 | 0 | deadloop(); |
200 | 0 | } |
201 | | |
/// #DE handler used by the integration test: count the event and skip the
/// faulting instruction instead of halting, so the test can continue.
#[cfg(feature = "integration-test")]
fn divide_by_zero(stack: &mut InterruptStack) {
    log::info!("Divide by zero\n");
    crate::DIVIDED_BY_ZERO_EVENT_COUNT.fetch_add(1, core::sync::atomic::Ordering::AcqRel);
    // Advance RIP past the faulting instruction. NOTE(review): 7 bytes is
    // presumably the encoded length of the specific divide instruction the
    // integration test executes — confirm against the test code; this is not
    // a general #DE recovery path.
    stack.iret.rip += 7;
    log::info!("divide_by_zero done\n");
}
210 | | |
211 | | #[cfg(not(feature = "integration-test"))] |
212 | 0 | fn divide_by_zero(stack: &mut InterruptStack) { |
213 | 0 | log::info!("Divide by zero\n"); |
214 | 0 | stack.dump(); |
215 | 0 | deadloop(); |
216 | 0 | } |
217 | | |
218 | 0 | fn debug(stack: &mut InterruptStack) { |
219 | 0 | log::info!("Debug trap\n"); |
220 | 0 | stack.dump(); |
221 | 0 | deadloop(); |
222 | 0 | } |
223 | | |
224 | 0 | fn non_maskable(stack: &mut InterruptStack) { |
225 | 0 | log::info!("Non-maskable interrupt\n"); |
226 | 0 | stack.dump(); |
227 | 0 | deadloop(); |
228 | 0 | } |
229 | | |
230 | 0 | fn breakpoint(stack: &mut InterruptStack) { |
231 | 0 | log::info!("Breakpoint trap\n"); |
232 | 0 | stack.dump(); |
233 | 0 | deadloop(); |
234 | 0 | } |
235 | | |
236 | 0 | fn overflow(stack: &mut InterruptStack) { |
237 | 0 | log::info!("Overflow trap\n"); |
238 | 0 | stack.dump(); |
239 | 0 | deadloop(); |
240 | 0 | } |
241 | | |
242 | 0 | fn bound_range(stack: &mut InterruptStack) { |
243 | 0 | log::info!("Bound range exceeded fault\n"); |
244 | 0 | stack.dump(); |
245 | 0 | deadloop(); |
246 | 0 | } |
247 | | |
248 | 0 | fn invalid_opcode(stack: &mut InterruptStack) { |
249 | 0 | log::info!("Invalid opcode fault\n"); |
250 | 0 | stack.dump(); |
251 | 0 | deadloop(); |
252 | 0 | } |
253 | | |
254 | 0 | fn device_not_available(stack: &mut InterruptStack) { |
255 | 0 | log::info!("Device not available fault\n"); |
256 | 0 | stack.dump(); |
257 | 0 | deadloop(); |
258 | 0 | } |
259 | | |
260 | 0 | fn double_fault(stack: &mut InterruptStack) { |
261 | 0 | log::info!("Double fault\n"); |
262 | 0 | stack.dump(); |
263 | 0 | deadloop(); |
264 | 0 | } |
265 | | |
266 | 0 | fn invalid_tss(stack: &mut InterruptStack) { |
267 | 0 | log::info!("Invalid TSS fault\n"); |
268 | 0 | stack.dump(); |
269 | 0 | deadloop(); |
270 | 0 | } |
271 | | |
272 | 0 | fn segment_not_present(stack: &mut InterruptStack) { |
273 | 0 | log::info!("Segment not present fault\n"); |
274 | 0 | stack.dump(); |
275 | 0 | deadloop(); |
276 | 0 | } |
277 | | |
278 | 0 | fn stack_segment(stack: &mut InterruptStack) { |
279 | 0 | log::info!("Stack segment fault\n"); |
280 | 0 | stack.dump(); |
281 | 0 | deadloop(); |
282 | 0 | } |
283 | | |
284 | 0 | fn protection(stack: &mut InterruptStack) { |
285 | 0 | log::info!("Protection fault\n"); |
286 | 0 | stack.dump(); |
287 | 0 | deadloop(); |
288 | 0 | } |
289 | | |
290 | 0 | fn page(stack: &mut InterruptStack) { |
291 | | let cr2: usize; |
292 | 0 | unsafe { |
293 | 0 | asm!("mov {}, cr2", out(reg) cr2); |
294 | 0 | } |
295 | 0 | log::info!("Page fault: {:>016X}\n", cr2); |
296 | 0 | stack.dump(); |
297 | 0 | deadloop(); |
298 | 0 | } |
299 | | |
300 | 0 | fn fpu(stack: &mut InterruptStack) { |
301 | 0 | log::info!("FPU floating point fault\n"); |
302 | 0 | stack.dump(); |
303 | 0 | deadloop(); |
304 | 0 | } |
305 | | |
306 | 0 | fn alignment_check(stack: &mut InterruptStack) { |
307 | 0 | log::info!("Alignment check fault\n"); |
308 | 0 | stack.dump(); |
309 | 0 | deadloop(); |
310 | 0 | } |
311 | | |
312 | 0 | fn machine_check(stack: &mut InterruptStack) { |
313 | 0 | log::info!("Machine check fault\n"); |
314 | 0 | stack.dump(); |
315 | 0 | deadloop(); |
316 | 0 | } |
317 | | |
318 | 0 | fn simd(stack: &mut InterruptStack) { |
319 | 0 | log::info!("SIMD floating point fault\n"); |
320 | 0 | stack.dump(); |
321 | 0 | deadloop(); |
322 | 0 | } |
323 | | |
324 | 0 | fn control_flow(stack: &mut InterruptStack) { |
325 | 0 | log::info!("Control Flow Exception\n"); |
326 | 0 | stack.dump(); |
327 | 0 | deadloop(); |
328 | 0 | } |
329 | | |
// VM exit reasons reported in the #VE information structure. Only the subset
// the #VE handler below distinguishes is listed; values are presumably the
// VMX basic exit reasons (Intel SDM, Appendix C) — TODO confirm.
#[cfg(feature = "tdx")]
const EXIT_REASON_CPUID: u32 = 10;
#[cfg(feature = "tdx")]
const EXIT_REASON_HLT: u32 = 12;
#[cfg(feature = "tdx")]
const EXIT_REASON_RDPMC: u32 = 15;
#[cfg(feature = "tdx")]
const EXIT_REASON_VMCALL: u32 = 18;
#[cfg(feature = "tdx")]
const EXIT_REASON_IO_INSTRUCTION: u32 = 30;
#[cfg(feature = "tdx")]
const EXIT_REASON_MSR_READ: u32 = 31;
#[cfg(feature = "tdx")]
const EXIT_REASON_MSR_WRITE: u32 = 32;
#[cfg(feature = "tdx")]
const EXIT_REASON_MWAIT_INSTRUCTION: u32 = 36;
#[cfg(feature = "tdx")]
const EXIT_REASON_MONITOR_INSTRUCTION: u32 = 39;
#[cfg(feature = "tdx")]
const EXIT_REASON_WBINVD: u32 = 54;
350 | | |
/// #VE — virtualization exception handler (vector 20) for TDX guests.
///
/// Fetches the #VE info from the TDX module, emulates the trapped operation
/// via TDVMCALL (unless the `no-tdvmcall` feature disables that), advances RIP
/// past the trapped instruction, and — when CET shadow stacks are enabled —
/// patches the saved LIP on the shadow stack to match.
#[cfg(feature = "tdx")]
fn virtualization(stack: &mut InterruptStack) {
    // Firstly get VE information from TDX module, halt it error occurs
    let ve_info = tdx::tdcall_get_ve_info().expect("#VE handler: fail to get VE info\n");

    #[cfg(not(feature = "no-tdvmcall"))]
    match ve_info.exit_reason {
        EXIT_REASON_HLT => {
            tdx::tdvmcall_halt();
        }
        EXIT_REASON_IO_INSTRUCTION => {
            // Emulate the port I/O; halt if the exit qualification is invalid.
            if !handle_tdx_ioexit(&ve_info, stack) {
                tdx::tdvmcall_halt();
            }
        }
        EXIT_REASON_MSR_READ => {
            // Guest RCX holds the MSR index; result goes back in EDX:EAX.
            let msr = tdx::tdvmcall_rdmsr(stack.scratch.rcx as u32)
                .expect("fail to perform RDMSR operation\n");
            stack.scratch.rax = (msr as u32 & u32::MAX) as usize; // EAX
            stack.scratch.rdx = ((msr >> 32) as u32 & u32::MAX) as usize; // EDX
        }
        EXIT_REASON_MSR_WRITE => {
            let data = stack.scratch.rax as u64 | ((stack.scratch.rdx as u64) << 32); // EDX:EAX
            tdx::tdvmcall_wrmsr(stack.scratch.rcx as u32, data)
                .expect("fail to perform WRMSR operation\n");
        }
        EXIT_REASON_CPUID => {
            // CPUID leaf in EAX, sub-leaf in ECX; only the low 32 bits of each
            // result register are replaced, upper halves are preserved.
            let cpuid = tdx::tdvmcall_cpuid(stack.scratch.rax as u32, stack.scratch.rcx as u32);
            let mask = 0xFFFF_FFFF_0000_0000_usize;
            stack.scratch.rax = (stack.scratch.rax & mask) | cpuid.eax as usize;
            stack.preserved.rbx = (stack.preserved.rbx & mask) | cpuid.ebx as usize;
            stack.scratch.rcx = (stack.scratch.rcx & mask) | cpuid.ecx as usize;
            stack.scratch.rdx = (stack.scratch.rdx & mask) | cpuid.edx as usize;
        }
        // NOTE(review): these arms return *before* the RIP adjustment below,
        // so the trapped instruction will re-execute and trigger #VE again —
        // confirm this early return is intentional.
        EXIT_REASON_VMCALL
        | EXIT_REASON_MWAIT_INSTRUCTION
        | EXIT_REASON_MONITOR_INSTRUCTION
        | EXIT_REASON_WBINVD
        | EXIT_REASON_RDPMC => return,
        // Unknown
        // And currently CPUID and MMIO handler is not implemented
        // Only VMCall is supported
        _ => {
            log::warn!("Unsupported #VE exit reason {:#x} ", ve_info.exit_reason);
            log::info!("Virtualization fault\n");
            stack.dump();
            deadloop();
        }
    };

    // With TDVMCALL disabled, nothing can be emulated: every recognized exit
    // reason simply returns (again without advancing RIP), unknown reasons halt.
    #[cfg(feature = "no-tdvmcall")]
    match ve_info.exit_reason {
        EXIT_REASON_HLT
        | EXIT_REASON_IO_INSTRUCTION
        | EXIT_REASON_MSR_READ
        | EXIT_REASON_MSR_WRITE
        | EXIT_REASON_CPUID
        | EXIT_REASON_VMCALL
        | EXIT_REASON_MWAIT_INSTRUCTION
        | EXIT_REASON_MONITOR_INSTRUCTION
        | EXIT_REASON_WBINVD
        | EXIT_REASON_RDPMC => return,
        // Unknown
        // And currently CPUID and MMIO handler is not implemented
        // Only VMCall is supported
        _ => {
            log::warn!("Unsupported #VE exit reason {:#x} ", ve_info.exit_reason);
            log::info!("Virtualization fault\n");
            stack.dump();
            deadloop();
        }
    };

    // Skip the emulated instruction on return from the exception.
    stack.iret.rip += ve_info.exit_instruction_length as usize;

    // If CET shadow stack is enabled, processor will compare the `LIP` value saved in the shadow
    // stack and the `RIP` value saved in the normal stack when executing a return from an
    // exception handler and cause a control protection exception if they do not match.
    #[cfg(feature = "cet-shstk")]
    unsafe {
        use x86_64::registers::control::{Cr4, Cr4Flags};
        use x86_64::registers::model_specific::Msr;

        const MSR_IA32_S_CET: u32 = 0x6A2;
        const SH_STK_EN: u64 = 1;
        const WR_SHSTK_E: u64 = 1 << 1;

        let mut msr_cet = Msr::new(MSR_IA32_S_CET);

        // If shadow stack is not enabled, return
        if (msr_cet.read() & SH_STK_EN) == 0
            || (Cr4::read() & Cr4Flags::CONTROL_FLOW_ENFORCEMENT).is_empty()
        {
            return;
        }

        // Read the Shadow Stack Pointer
        let mut ssp: u64;
        asm!(
            "rdsspq {ssp}",
            ssp = out(reg) ssp,
        );

        // Expected shadow-stack layout relative to SSP (hence the 0x18 offset
        // to reach LIP — assumes exactly this call depth; TODO confirm the
        // offset if the call chain into this handler ever changes):
        // SSP -> return address of func [virtualization]
        //        return address of func [generic_interrupt_handler]
        //        SSP
        //        LIP
        //        CS
        let lip_ptr = ssp + 0x18;
        let lip = *(lip_ptr as *const u64) + ve_info.exit_instruction_length as u64;

        // Enables the WRSSD/WRSSQ instructions by setting the `WR_SHSTK_E`
        // to 1, then we can write the shadow stack
        msr_cet.write(msr_cet.read() | WR_SHSTK_E);

        // Write the new LIP to the shadow stack
        asm!(
            "wrssq [{lip_ptr}], {lip}",
            lip_ptr = in(reg) lip_ptr,
            lip = in(reg) lip,
        );

        // Clear the `WR_SHSTK_E`
        msr_cet.write(msr_cet.read() & !WR_SHSTK_E);
    }
}
477 | | |
// Handle IO exit from TDX Module
//
// Use TDVMCALL to realize IO read/write operation
// Return false if VE info is invalid
#[cfg(all(feature = "tdx", not(feature = "no-tdvmcall")))]
fn handle_tdx_ioexit(ve_info: &tdx::TdVeInfo, stack: &mut InterruptStack) -> bool {
    // Decode the I/O-instruction exit qualification (bit layout presumably per
    // Intel SDM Table "Exit Qualification for I/O Instructions" — TODO confirm):
    // bits 0..2: access size minus one; bit 3: direction (1 = IN);
    // bit 4: string op (INS/OUTS); bit 5: REP prefix; bit 6: operand encoding;
    // bits 16..31: port number.
    let size = ((ve_info.exit_qualification & 0x7) + 1) as usize; // 0 - 1bytes, 1 - 2bytes, 3 - 4bytes
    let read = (ve_info.exit_qualification >> 3) & 0x1 == 1;
    let string = (ve_info.exit_qualification >> 4) & 0x1 == 1;
    let _operand = (ve_info.exit_qualification >> 6) & 0x1 == 0; // 0 = DX, 1 = immediate
    let port = (ve_info.exit_qualification >> 16) as u16;
    // REP count comes from guest RCX; without REP the count is 0.
    // NOTE(review): a non-REP INS/OUTS therefore executes zero iterations
    // below, i.e. is silently dropped — confirm this is intended.
    let repeat = if (ve_info.exit_qualification >> 5) & 0x1 == 1 {
        stack.scratch.rcx
    } else {
        0
    };

    // Size of access should be 1/2/4 bytes
    if size != 1 && size != 2 && size != 4 {
        return false;
    }

    // Define closure to perform IO port read with different size operands
    let io_read = |size, port| match size {
        1 => tdx::tdvmcall_io_read_8(port) as u32,
        2 => tdx::tdvmcall_io_read_16(port) as u32,
        4 => tdx::tdvmcall_io_read_32(port),
        _ => 0,
    };

    // Define closure to perform IO port write with different size operands
    let io_write = |size, port, data| match size {
        1 => tdx::tdvmcall_io_write_8(port, data as u8),
        2 => tdx::tdvmcall_io_write_16(port, data as u16),
        4 => tdx::tdvmcall_io_write_32(port, data),
        _ => {}
    };

    // INS / OUTS: move `size` bytes per iteration through the guest buffer at
    // RDI (INS) or RSI (OUTS), updating the pointer register and decrementing
    // RCX exactly as the real string instruction would.
    // NOTE(review): RDI/RSI are assumed to advance upward, i.e. RFLAGS.DF is
    // assumed clear — confirm.
    if string {
        for _ in 0..repeat {
            if read {
                let val = io_read(size, port);
                unsafe {
                    let rsi = core::slice::from_raw_parts_mut(stack.scratch.rdi as *mut u8, size);
                    // Safety: size is smaller than 4
                    rsi.copy_from_slice(&u32::to_le_bytes(val)[..size])
                }
                stack.scratch.rdi += size;
            } else {
                let mut val = 0;
                unsafe {
                    let rsi = core::slice::from_raw_parts(stack.scratch.rsi as *mut u8, size);
                    for (idx, byte) in rsi.iter().enumerate() {
                        val |= (*byte as u32) << (idx * 8);
                    }
                }
                io_write(size, port, val);
                stack.scratch.rsi += size;
            }
            stack.scratch.rcx -= 1;
        }
    } else if read {
        // Write the IO read result to the low $size-bytes of rax
        stack.scratch.rax = (stack.scratch.rax & !(2_usize.pow(size as u32 * 8) - 1))
            | (io_read(size, port) as usize & (2_usize.pow(size as u32 * 8) - 1));
    } else {
        io_write(size, port, stack.scratch.rax as u32);
    }

    true
}
550 | | |
/// Halt the CPU forever. Interrupts are re-enabled before each HLT so pending
/// interrupts can still be delivered while parked.
fn deadloop() {
    #[allow(clippy::empty_loop)]
    loop {
        x86_64::instructions::interrupts::enable();
        x86_64::instructions::hlt();
    }
}