/src/MigTD/deps/td-shim/td-payload/src/mm/shared.rs
Line | Count | Source |
1 | | // Copyright (c) 2022 Intel Corporation |
2 | | // |
3 | | // SPDX-License-Identifier: BSD-2-Clause-Patent |
4 | | |
5 | | use core::{alloc::Layout, ptr::NonNull}; |
6 | | use linked_list_allocator::LockedHeap; |
7 | | use spin::Once; |
8 | | |
9 | | use super::SIZE_4K; |
10 | | use crate::arch::shared::decrypt; |
11 | | |
// Allocator that hands out pages from the host-visible (shared) memory pool.
static SHARED_MEMORY_ALLOCATOR: LockedHeap = LockedHeap::empty();
// Base address of the shared region; set once by `init_shared_memory_with_shadow`.
static SHARED_START: Once<usize> = Once::new();
// Base address of the private shadow region that mirrors the shared region.
static SHADOW_START: Once<usize> = Once::new();
15 | | |
16 | 0 | pub fn init_shared_memory(start: u64, size: usize) { |
17 | 0 | if size % SIZE_4K != 0 { |
18 | 0 | panic!("Failed to initialize shared memory: size needs to be aligned with 0x1000"); |
19 | 0 | } |
20 | | |
21 | | // Set the shared memory region to be shared |
22 | 0 | decrypt(start, size); |
23 | | // Initialize the shared memory allocator |
24 | 0 | unsafe { |
25 | 0 | SHARED_MEMORY_ALLOCATOR.lock().init(start as *mut u8, size); |
26 | 0 | } |
27 | 0 | } |
28 | | |
/// Initialize shared memory (see `init_shared_memory`) and record the base
/// addresses used to map shared pages onto their private shadow pages.
///
/// Note: `Once::call_once` stores a value only on the first call, so repeated
/// calls keep the originally registered bases and silently ignore new ones.
pub fn init_shared_memory_with_shadow(start: u64, size: usize, shadow_start: u64) {
    init_shared_memory(start, size);
    SHARED_START.call_once(|| start as usize);
    SHADOW_START.call_once(|| shadow_start as usize);
}
34 | | |
/// RAII handle to an allocation of 4 KiB shared pages, optionally paired with
/// a private shadow copy; the shared pages are freed when the handle drops.
pub struct SharedMemory {
    // Base address of the shared allocation.
    addr: usize,
    // Base address of the matching private shadow pages; `None` when the
    // shadow region was not configured via `init_shared_memory_with_shadow`.
    shadow_addr: Option<usize>,
    // Allocation size in bytes (num_page * SIZE_4K).
    size: usize,
}
40 | | |
impl SharedMemory {
    /// Allocate `num_page` zeroed shared pages, returning `None` on allocation
    /// failure. A shadow address is attached only when the shadow mapping has
    /// been configured.
    pub fn new(num_page: usize) -> Option<Self> {
        // SAFETY: ownership of the pages is held by `Self` and the matching
        // `free_shared_pages` call happens in `Drop`.
        let addr = unsafe { alloc_shared_pages(num_page)? };
        let shadow_addr = alloc_private_shadow_pages(addr);

        Some(Self {
            addr,
            shadow_addr,
            size: num_page * SIZE_4K,
        })
    }

    /// Copy the current contents of the shared pages into the private shadow
    /// and return the shadow as a byte slice, or `None` when no shadow region
    /// is configured.
    ///
    /// NOTE(review): this writes `self.size` bytes at the derived shadow
    /// address with no visible check that the shadow region is at least that
    /// large — confirm the shadow region fully mirrors the shared region.
    pub fn copy_to_private_shadow(&mut self) -> Option<&[u8]> {
        self.shadow_addr.map(|addr| {
            // Snapshot the shared bytes into private memory before use.
            let shadow = unsafe { core::slice::from_raw_parts_mut(addr as *mut u8, self.size) };
            shadow.copy_from_slice(self.as_bytes());

            &shadow[..]
        })
    }

    /// View the shared pages as an immutable byte slice.
    pub fn as_bytes(&self) -> &[u8] {
        unsafe { core::slice::from_raw_parts(self.addr as *const u8, self.size) }
    }

    /// View the shared pages as a mutable byte slice.
    pub fn as_mut_bytes(&mut self) -> &mut [u8] {
        unsafe { core::slice::from_raw_parts_mut(self.addr as *mut u8, self.size) }
    }
}
70 | | |
impl Drop for SharedMemory {
    fn drop(&mut self) {
        // Return the pages to the shared pool; `size` is always a multiple of
        // SIZE_4K (see `new`), so the page count is exact. The shadow address
        // is a fixed mapping derived from the shared address and is not freed.
        unsafe { free_shared_pages(self.addr, self.size / SIZE_4K) }
    }
}
76 | | |
77 | | /// # Safety |
78 | | /// The caller needs to explicitly call the `free_shared_pages` function after use |
79 | 0 | pub unsafe fn alloc_shared_pages(num: usize) -> Option<usize> { |
80 | 0 | let size = SIZE_4K.checked_mul(num)?; |
81 | | |
82 | 0 | let addr = SHARED_MEMORY_ALLOCATOR |
83 | 0 | .lock() |
84 | 0 | .allocate_first_fit(Layout::from_size_align(size, SIZE_4K).ok()?) |
85 | 0 | .map(|ptr| ptr.as_ptr() as usize) |
86 | 0 | .ok()?; |
87 | | |
88 | 0 | core::slice::from_raw_parts_mut(addr as *mut u8, size).fill(0); |
89 | | |
90 | 0 | Some(addr) |
91 | 0 | } |
92 | | |
/// Allocate a single zeroed 4 KiB page from the shared-memory pool.
///
/// # Safety
/// The caller needs to explicitly call the `free_shared_page` function after use
pub unsafe fn alloc_shared_page() -> Option<usize> {
    alloc_shared_pages(1)
}
98 | | |
99 | | /// # Safety |
100 | | /// The caller needs to ensure the correctness of the addr and page num |
101 | 0 | pub unsafe fn free_shared_pages(addr: usize, num: usize) { |
102 | 0 | let size = SIZE_4K.checked_mul(num).expect("Invalid page num"); |
103 | | |
104 | 0 | SHARED_MEMORY_ALLOCATOR.lock().deallocate( |
105 | 0 | NonNull::new(addr as *mut u8).unwrap(), |
106 | 0 | Layout::from_size_align(size, SIZE_4K).unwrap(), |
107 | 0 | ); |
108 | 0 | } |
109 | | |
/// Free a single shared page previously obtained via `alloc_shared_page`.
///
/// # Safety
/// The caller needs to ensure the correctness of the addr
pub unsafe fn free_shared_page(addr: usize) {
    free_shared_pages(addr, 1)
}
115 | | |
116 | 0 | fn alloc_private_shadow_pages(shared_addr: usize) -> Option<usize> { |
117 | 0 | let offset = shared_addr.checked_sub(*SHARED_START.get()?)?; |
118 | 0 | Some(SHADOW_START.get()? + offset) |
119 | 0 | } |