Coverage Report

Created: 2025-11-16 06:39

/src/MigTD/deps/td-shim/td-paging/src/page_table.rs
All instrumented lines in this file have an execution count of 0; none of the code in page_table.rs was exercised in this run.

// Copyright (c) 2021 Intel Corporation
//
// SPDX-License-Identifier: BSD-2-Clause-Patent

use core::cmp::min;
use log::{info, trace};
use x86_64::{
    structures::paging::{mapper::MapToError, PageTableFlags as Flags},
    structures::paging::{
        mapper::MappedFrame, mapper::TranslateResult, Mapper, OffsetPageTable, Page, PageSize,
        PhysFrame, Size1GiB, Size2MiB, Size4KiB, Translate,
    },
    PhysAddr, VirtAddr,
};

use super::frame::{BMFrameAllocator, FRAME_ALLOCATOR};
use crate::{Error, Result};

const ALIGN_4K_BITS: u64 = 12;
const ALIGN_4K: u64 = 1 << ALIGN_4K_BITS;
const ALIGN_2M_BITS: u64 = 21;
const ALIGN_2M: u64 = 1 << ALIGN_2M_BITS;
const ALIGN_1G_BITS: u64 = 30;
const ALIGN_1G: u64 = 1 << ALIGN_1G_BITS;
/// Write physical address of level 4 page table page to `CR3`.
pub fn cr3_write(addr: u64) {
    unsafe {
        x86::controlregs::cr3_write(addr);
    }
    info!("Cr3 - {:x}\n", unsafe { x86::controlregs::cr3() });
}

// Map a frame of size S from physical address to virtual address.
fn map_page<'a, S: PageSize>(
    pt: &mut OffsetPageTable<'a>,
    pa: PhysAddr,
    va: VirtAddr,
    flags: Flags,
    allocator: &mut BMFrameAllocator,
) -> Result<()>
where
    OffsetPageTable<'a>: Mapper<S>,
{
    let page: Page<S> = Page::containing_address(va);
    let frame: PhysFrame<S> = PhysFrame::containing_address(pa);
    trace!(
        "Mapping {:016x} to {:016x} with granularity: {:x}\n",
        pa.as_u64(),
        va.as_u64(),
        S::SIZE,
    );
    unsafe {
        match pt.map_to(page, frame, flags, allocator) {
            Ok(mapper) => mapper.flush(),
            Err(e) => match e {
                MapToError::PageAlreadyMapped(_) => {}
                _ => return Err(Error::MappingError(pa.as_u64(), S::SIZE)),
            },
        }
    }

    Ok(())
}

Unexecuted instantiation: td_paging::page_table::map_page::<x86_64::structures::paging::page::Size1GiB>
Unexecuted instantiation: td_paging::page_table::map_page::<x86_64::structures::paging::page::Size2MiB>
Unexecuted instantiation: td_paging::page_table::map_page::<x86_64::structures::paging::page::Size4KiB>
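
The three notes above record that map_page was never executed for any of its page-size instantiations. Because create_mapping_with_flags (below) picks the granularity from the common alignment of the addresses and the remaining size, a single well-aligned request can exercise all three. A hedged sketch of such a request, reusing the create_pt() helper and the constants from the tests module at the end of this file, and assuming an environment where the page-table region at TD_PAYLOAD_PAGE_TABLE_BASE is actually writable:

    // Illustrative request only, not part of the source: a 1 GiB-aligned base with
    // size 1 GiB + 2 MiB + 4 KiB makes the mapping loop emit one page of each size,
    // executing map_page::<Size1GiB>, ::<Size2MiB> and ::<Size4KiB> in turn.
    init(TD_PAYLOAD_PAGE_TABLE_BASE, PAGE_TABLE_SIZE).unwrap();
    let mut pt = create_pt(TD_PAYLOAD_PAGE_TABLE_BASE, PHYS_VIRT_OFFSET as u64);
    create_mapping(
        &mut pt,
        PhysAddr::new(0x4000_0000),
        VirtAddr::new(0x4000_0000),
        ALIGN_1G,
        ALIGN_1G + ALIGN_2M + ALIGN_4K,
    )
    .unwrap();
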

/// Create page table entries to map `[va, va + sz)` to `[pa, pa + sz)` with page size `ps` and
/// page attribute `flags`.
///
/// # Errors
/// Returns `Error::InvalidArguments` if:
/// - `pa + sz` wraps around.
/// - `va + sz` wraps around.
/// - `pa`, `va` or `sz` is not page aligned.
pub fn create_mapping_with_flags(
    pt: &mut OffsetPageTable,
    mut pa: PhysAddr,
    mut va: VirtAddr,
    ps: u64,
    mut sz: u64,
    flags: Flags,
) -> Result<()> {
    let allocator: &mut BMFrameAllocator = &mut FRAME_ALLOCATOR.lock();

    if pa.as_u64().checked_add(sz).is_none()
        || va.as_u64().checked_add(sz).is_none()
        || pa.as_u64() & (ALIGN_4K - 1) != 0
        || va.as_u64() & (ALIGN_4K - 1) != 0
        || sz & (ALIGN_4K - 1) != 0
        || ps.count_ones() != 1
        || ps < ALIGN_4K
    {
        return Err(Error::InvalidArguments);
    }

    while sz > 0 {
        let addr_align = min(
            ps.trailing_zeros(),
            min(pa.as_u64().trailing_zeros(), va.as_u64().trailing_zeros()),
        ) as u64;
        let mapped_size = if addr_align >= ALIGN_1G_BITS && sz >= ALIGN_1G {
            map_page::<Size1GiB>(pt, pa, va, flags, allocator)?;
            Size1GiB::SIZE
        } else if addr_align >= ALIGN_2M_BITS && sz >= ALIGN_2M {
            map_page::<Size2MiB>(pt, pa, va, flags, allocator)?;
            Size2MiB::SIZE
        } else {
            map_page::<Size4KiB>(pt, pa, va, flags, allocator)?;
            Size4KiB::SIZE
        };

        sz = sz.checked_sub(mapped_size).ok_or(Error::InvalidArguments)?;
        pa += mapped_size;
        va += mapped_size;
    }

    Ok(())
}

/// Create page table entries to map `[va, va + sz)` to `[pa, pa + sz)` with page size `ps` and
/// mark pages as `PRESENT | WRITABLE`.
///
/// Note: the caller must ensure `pa + sz` and `va + sz` don't wrap around.
pub fn create_mapping(
    pt: &mut OffsetPageTable,
    pa: PhysAddr,
    va: VirtAddr,
    ps: u64,
    sz: u64,
) -> Result<()> {
    let flags = Flags::PRESENT | Flags::WRITABLE;

    create_mapping_with_flags(pt, pa, va, ps, sz, flags)
}
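
In a td-shim-style boot flow this wrapper is the usual entry point: build the mapping first, then activate the new root table with cr3_write above. A hedged sketch of that flow; the helper name and the 64 MiB size are illustrative and not taken from this report.

    // Hypothetical boot-time helper (illustrative only): identity-map the first
    // 64 MiB as PRESENT | WRITABLE using pages of up to 2 MiB, then load CR3 with
    // the physical address of the new level 4 table.
    fn setup_identity_map(pt: &mut OffsetPageTable, page_table_base: u64) -> Result<()> {
        create_mapping(
            pt,
            PhysAddr::new(0),
            VirtAddr::new(0),
            ALIGN_2M,
            64 * 1024 * 1024,
        )?;
        cr3_write(page_table_base);
        Ok(())
    }
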

/// Modify page flags for all 4K PTEs in the virtual address range `[va, va + size)`, stopping at
/// the first hole (unmapped page).
pub fn set_page_flags(pt: &mut OffsetPageTable, mut va: VirtAddr, mut size: i64, flag: Flags) {
    let mut page_size: u64;

    while size > 0 {
        if let TranslateResult::Mapped { frame, .. } = pt.translate(va) {
            match frame {
                MappedFrame::Size4KiB(..) => {
                    type S = Size4KiB;
                    page_size = S::SIZE;
                    let page: Page<S> = Page::containing_address(va);
                    unsafe {
                        pt.update_flags(page, flag).unwrap().flush();
                    }
                }
                MappedFrame::Size2MiB(..) => {
                    type S = Size2MiB;
                    page_size = S::SIZE;
                }
                MappedFrame::Size1GiB(..) => {
                    type S = Size1GiB;
                    page_size = S::SIZE;
                }
            }
        } else {
            break;
        }
        size = size.checked_sub(page_size as i64).unwrap();
        va += page_size;
    }
}
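
set_page_flags rewrites the flags of each 4 KiB PTE to exactly `flag`; 2 MiB and 1 GiB mappings are skipped (only the cursor advances) and the walk ends at the first unmapped page. A hedged usage sketch follows; the helper name and the guard-page idea are illustrative, not part of this crate.

    // Hypothetical helper (illustrative only): make a single 4 KiB page that was
    // previously mapped PRESENT | WRITABLE read-only by rewriting its PTE flags
    // to PRESENT alone.
    fn protect_guard_page(pt: &mut OffsetPageTable, guard_va: VirtAddr) {
        set_page_flags(pt, guard_va, ALIGN_4K as i64, Flags::PRESENT);
    }
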

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{frame, init, PAGE_SIZE_4K, PHYS_VIRT_OFFSET};
    use x86_64::structures::paging::PageTable;

    const TD_PAYLOAD_PAGE_TABLE_BASE: u64 = 0x800000;
    const PAGE_TABLE_SIZE: usize = 0x800000;

    fn create_pt(base: u64, offset: u64) -> OffsetPageTable<'static> {
        let pt = unsafe {
            OffsetPageTable::new(&mut *(base as *mut PageTable), VirtAddr::new(offset as u64))
        };
        frame::FRAME_ALLOCATOR.lock().reserve(base);

        pt
    }

    #[test]
    #[should_panic]
    fn test_invalid_pa_sz() {
        init(TD_PAYLOAD_PAGE_TABLE_BASE, PAGE_TABLE_SIZE).unwrap();
        let mut pt = create_pt(TD_PAYLOAD_PAGE_TABLE_BASE, PHYS_VIRT_OFFSET as u64);
        assert!(create_mapping(
            &mut pt,
            PhysAddr::new(0x1000000),
            VirtAddr::new(0),
            PAGE_SIZE_4K as u64,
            u64::MAX,
        )
        .is_ok())
    }

    #[test]
    #[should_panic]
    fn test_invalid_va_sz() {
        init(TD_PAYLOAD_PAGE_TABLE_BASE, PAGE_TABLE_SIZE).unwrap();
        let mut pt = create_pt(TD_PAYLOAD_PAGE_TABLE_BASE, PHYS_VIRT_OFFSET as u64);
        assert!(create_mapping(
            &mut pt,
            PhysAddr::new(0x0),
            VirtAddr::new(0x1000000),
            PAGE_SIZE_4K as u64,
            u64::MAX,
        )
        .is_ok());
    }
}
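
Both tests above drive only the argument-validation failure path: `sz = u64::MAX` is rejected by create_mapping_with_flags before any page is mapped, so map_page is never reached. A success-path test would also cover the mapping loop; the sketch below is hypothetical (not part of the source) and assumes the test environment allows writes to the page-table region at TD_PAYLOAD_PAGE_TABLE_BASE.

    // Hypothetical addition to the tests module above.
    #[test]
    fn test_create_mapping_4k() {
        init(TD_PAYLOAD_PAGE_TABLE_BASE, PAGE_TABLE_SIZE).unwrap();
        let mut pt = create_pt(TD_PAYLOAD_PAGE_TABLE_BASE, PHYS_VIRT_OFFSET as u64);
        // A single 4 KiB-aligned page is expected to map successfully, executing
        // map_page::<Size4KiB> along the way.
        assert!(create_mapping(
            &mut pt,
            PhysAddr::new(0x100_0000),
            VirtAddr::new(0x100_0000),
            PAGE_SIZE_4K as u64,
            PAGE_SIZE_4K as u64,
        )
        .is_ok());
    }
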