/rust/registry/src/index.crates.io-1949cf8c6b5b557f/x86-0.47.0/src/bits64/paging.rs
Line | Count | Source |
1 | | //! Description of the data-structures for IA-32e paging mode. |
2 | | |
3 | | use bitflags::*; |
4 | | |
5 | | use core::convert::{From, Into}; |
6 | | use core::fmt; |
7 | | use core::hash::{Hash, Hasher}; |
8 | | use core::ops; |
9 | | |
10 | | macro_rules! check_flag { |
11 | | ($doc:meta, $fun:ident, $flag:expr) => { |
12 | | #[$doc] |
13 | 0 | pub fn $fun(self) -> bool { |
14 | 0 | self.flags().contains($flag) |
15 | 0 | }
16 | | }; |
17 | | } |
18 | | |
19 | | /// Align address downwards. |
20 | | /// |
21 | | /// Returns the greatest x with alignment `align` so that x <= addr. |
22 | | /// The alignment must be a power of 2. |
23 | | #[inline(always)] |
24 | 0 | fn align_down(addr: u64, align: u64) -> u64 { |
25 | 0 | addr & !(align - 1) |
26 | 0 | } |
27 | | |
28 | | /// Align address upwards. |
29 | | /// |
30 | | /// Returns the smallest x with alignment `align` so that x >= addr. |
31 | | /// The alignment must be a power of 2. |
32 | | #[inline(always)] |
33 | 0 | fn align_up(addr: u64, align: u64) -> u64 { |
34 | 0 | let align_mask = align - 1; |
35 | 0 | if addr & align_mask == 0 { |
36 | 0 | addr |
37 | | } else { |
38 | 0 | (addr | align_mask) + 1 |
39 | | } |
40 | 0 | } |
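As a quick illustration of the two helpers above, the sketch below shows the expected rounding behaviour for a 4 KiB alignment. The functions are private to this module, so a check like this would have to live in the module's own test code; it is not part of the crate.

// Hypothetical in-module test exercising align_down/align_up (not in the crate).
#[test]
fn align_helpers_round_as_documented() {
    // align_down clears the low bits: 0x1234 rounded down to 4 KiB is 0x1000.
    assert_eq!(align_down(0x1234, 4096), 0x1000);
    // align_up rounds to the next boundary unless the address is already aligned.
    assert_eq!(align_up(0x1234, 4096), 0x2000);
    assert_eq!(align_up(0x2000, 4096), 0x2000);
}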
41 | | |
42 | | /// A wrapper for a physical address. |
43 | | #[repr(transparent)] |
44 | | #[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)] |
45 | | pub struct PAddr(pub u64); |
46 | | |
47 | | impl PAddr { |
48 | | /// Convert to `u64` |
49 | 0 | pub fn as_u64(self) -> u64 { |
50 | 0 | self.0 |
51 | 0 | } |
52 | | |
53 | | /// Convert to `usize` |
54 | 0 | pub fn as_usize(self) -> usize { |
55 | 0 | self.0 as usize |
56 | 0 | } |
57 | | |
58 | | /// Physical Address zero. |
59 | 0 | pub const fn zero() -> Self { |
60 | 0 | PAddr(0) |
61 | 0 | } |
62 | | |
63 | | /// Is zero? |
64 | 0 | pub fn is_zero(self) -> bool { |
65 | 0 | self == PAddr::zero() |
66 | 0 | } |
67 | | |
68 | | /// Split `PAddr` into lower and higher 32-bits. |
69 | 0 | pub fn split(&self) -> (u32, u32) { |
70 | 0 | (self.0 as u32, (self.0 >> 32) as u32) |
71 | 0 | } |
72 | | |
73 | 0 | fn align_up<U>(self, align: U) -> Self |
74 | 0 | where |
75 | 0 | U: Into<u64>, |
76 | | { |
77 | 0 | PAddr(align_up(self.0, align.into())) |
78 | 0 | } |
79 | | |
80 | 0 | fn align_down<U>(self, align: U) -> Self |
81 | 0 | where |
82 | 0 | U: Into<u64>, |
83 | | { |
84 | 0 | PAddr(align_down(self.0, align.into())) |
85 | 0 | } |
86 | | |
87 | | /// Offset within the 4 KiB page. |
88 | 0 | pub fn base_page_offset(self) -> u64 { |
89 | 0 | self.0 & (BASE_PAGE_SIZE as u64 - 1) |
90 | 0 | } |
91 | | |
92 | | /// Offset within the 2 MiB page. |
93 | 0 | pub fn large_page_offset(self) -> u64 { |
94 | 0 | self.0 & (LARGE_PAGE_SIZE as u64 - 1) |
95 | 0 | } |
96 | | |
97 | | /// Offset within the 1 GiB page. |
98 | 0 | pub fn huge_page_offset(self) -> u64 { |
99 | 0 | self.0 & (HUGE_PAGE_SIZE as u64 - 1) |
100 | 0 | } |
101 | | |
102 | | /// Return address of nearest 4 KiB page (less than or equal to self). |
103 | 0 | pub fn align_down_to_base_page(self) -> Self { |
104 | 0 | self.align_down(BASE_PAGE_SIZE as u64) |
105 | 0 | } |
106 | | |
107 | | /// Return address of nearest 2 MiB page (less than or equal to self). |
108 | 0 | pub fn align_down_to_large_page(self) -> Self { |
109 | 0 | self.align_down(LARGE_PAGE_SIZE as u64) |
110 | 0 | } |
111 | | |
112 | | /// Return address of nearest 1 GiB page (less than or equal to self). |
113 | 0 | pub fn align_down_to_huge_page(self) -> Self { |
114 | 0 | self.align_down(HUGE_PAGE_SIZE as u64) |
115 | 0 | } |
116 | | |
117 | | /// Return address of nearest 4 KiB page (greater than or equal to self). |
118 | 0 | pub fn align_up_to_base_page(self) -> Self { |
119 | 0 | self.align_up(BASE_PAGE_SIZE as u64) |
120 | 0 | } |
121 | | |
122 | | /// Return address of nearest 2 MiB page (greater than or equal to self). |
123 | 0 | pub fn align_up_to_large_page(self) -> Self { |
124 | 0 | self.align_up(LARGE_PAGE_SIZE as u64) |
125 | 0 | } |
126 | | |
127 | | /// Return address of nearest 1 GiB page (greater than or equal to self). |
128 | 0 | pub fn align_up_to_huge_page(self) -> Self { |
129 | 0 | self.align_up(HUGE_PAGE_SIZE as u64) |
130 | 0 | } |
131 | | |
132 | | /// Is this address aligned to a 4 KiB page? |
133 | 0 | pub fn is_base_page_aligned(self) -> bool { |
134 | 0 | self.align_down(BASE_PAGE_SIZE as u64) == self |
135 | 0 | } |
136 | | |
137 | | /// Is this address aligned to a 2 MiB page? |
138 | 0 | pub fn is_large_page_aligned(self) -> bool { |
139 | 0 | self.align_down(LARGE_PAGE_SIZE as u64) == self |
140 | 0 | } |
141 | | |
142 | | /// Is this address aligned to a 1 GiB page? |
143 | 0 | pub fn is_huge_page_aligned(self) -> bool { |
144 | 0 | self.align_down(HUGE_PAGE_SIZE as u64) == self |
145 | 0 | } |
146 | | |
147 | | /// Is this address aligned to `align`? |
148 | | /// |
149 | | /// # Note |
150 | | /// `align` must be a power of two. |
151 | 0 | pub fn is_aligned<U>(self, align: U) -> bool |
152 | 0 | where |
153 | 0 | U: Into<u64> + Copy, |
154 | | { |
155 | 0 | if !align.into().is_power_of_two() { |
156 | 0 | return false; |
157 | 0 | } |
158 | | |
159 | 0 | self.align_down(align) == self |
160 | 0 | } |
161 | | } |
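The alignment and offset helpers above compose as one would expect; a minimal sketch with arbitrary example addresses (not taken from the crate's own tests):

// Illustrative only: the physical addresses are made up.
fn paddr_usage_sketch() {
    let p = PAddr::from(0x2000_1234u64);
    assert_eq!(p.base_page_offset(), 0x234);                     // offset inside the 4 KiB page
    assert_eq!(p.align_down_to_base_page(), PAddr(0x2000_1000)); // round down to 4 KiB
    assert_eq!(p.align_up_to_base_page(), PAddr(0x2000_2000));   // round up to 4 KiB
    assert!(!p.is_base_page_aligned());
    assert_eq!(p.split(), (0x2000_1234, 0x0));                   // (low 32 bits, high 32 bits)
    assert!(PAddr(2 * 1024 * 1024).is_large_page_aligned());     // exactly on a 2 MiB boundary
}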
162 | | |
163 | | impl From<u64> for PAddr { |
164 | 0 | fn from(num: u64) -> Self { |
165 | 0 | PAddr(num) |
166 | 0 | } |
167 | | } |
168 | | |
169 | | impl From<usize> for PAddr { |
170 | 0 | fn from(num: usize) -> Self { |
171 | 0 | PAddr(num as u64) |
172 | 0 | } |
173 | | } |
174 | | |
175 | | impl From<i32> for PAddr { |
176 | 0 | fn from(num: i32) -> Self { |
177 | 0 | PAddr(num as u64) |
178 | 0 | } |
179 | | } |
180 | | |
181 | | #[allow(clippy::from_over_into)] |
182 | | impl Into<u64> for PAddr { |
183 | 0 | fn into(self) -> u64 { |
184 | 0 | self.0 |
185 | 0 | } |
186 | | } |
187 | | |
188 | | #[allow(clippy::from_over_into)] |
189 | | impl Into<usize> for PAddr { |
190 | 0 | fn into(self) -> usize { |
191 | 0 | self.0 as usize |
192 | 0 | } |
193 | | } |
194 | | |
195 | | impl ops::Add for PAddr { |
196 | | type Output = PAddr; |
197 | | |
198 | 0 | fn add(self, rhs: PAddr) -> Self::Output { |
199 | 0 | PAddr(self.0 + rhs.0) |
200 | 0 | } |
201 | | } |
202 | | |
203 | | impl ops::Add<u64> for PAddr { |
204 | | type Output = PAddr; |
205 | | |
206 | 0 | fn add(self, rhs: u64) -> Self::Output { |
207 | 0 | PAddr::from(self.0 + rhs) |
208 | 0 | } |
209 | | } |
210 | | |
211 | | impl ops::Add<usize> for PAddr { |
212 | | type Output = PAddr; |
213 | | |
214 | 0 | fn add(self, rhs: usize) -> Self::Output { |
215 | 0 | PAddr::from(self.0 + rhs as u64) |
216 | 0 | } |
217 | | } |
218 | | |
219 | | impl ops::AddAssign for PAddr { |
220 | 0 | fn add_assign(&mut self, other: PAddr) { |
221 | 0 | *self = PAddr::from(self.0 + other.0); |
222 | 0 | } |
223 | | } |
224 | | |
225 | | impl ops::AddAssign<u64> for PAddr { |
226 | 0 | fn add_assign(&mut self, offset: u64) { |
227 | 0 | *self = PAddr::from(self.0 + offset); |
228 | 0 | } |
229 | | } |
230 | | |
231 | | impl ops::Sub for PAddr { |
232 | | type Output = PAddr; |
233 | | |
234 | 0 | fn sub(self, rhs: PAddr) -> Self::Output { |
235 | 0 | PAddr::from(self.0 - rhs.0) |
236 | 0 | } |
237 | | } |
238 | | |
239 | | impl ops::Sub<u64> for PAddr { |
240 | | type Output = PAddr; |
241 | | |
242 | 0 | fn sub(self, rhs: u64) -> Self::Output { |
243 | 0 | PAddr::from(self.0 - rhs) |
244 | 0 | } |
245 | | } |
246 | | |
247 | | impl ops::Sub<usize> for PAddr { |
248 | | type Output = PAddr; |
249 | | |
250 | 0 | fn sub(self, rhs: usize) -> Self::Output { |
251 | 0 | PAddr::from(self.0 - rhs as u64) |
252 | 0 | } |
253 | | } |
254 | | |
255 | | impl ops::Rem for PAddr { |
256 | | type Output = PAddr; |
257 | | |
258 | 0 | fn rem(self, rhs: PAddr) -> Self::Output { |
259 | 0 | PAddr(self.0 % rhs.0) |
260 | 0 | } |
261 | | } |
262 | | |
263 | | impl ops::Rem<u64> for PAddr { |
264 | | type Output = u64; |
265 | | |
266 | 0 | fn rem(self, rhs: u64) -> Self::Output { |
267 | 0 | self.0 % rhs |
268 | 0 | } |
269 | | } |
270 | | |
271 | | impl ops::Rem<usize> for PAddr { |
272 | | type Output = u64; |
273 | | |
274 | 0 | fn rem(self, rhs: usize) -> Self::Output { |
275 | 0 | self.0 % (rhs as u64) |
276 | 0 | } |
277 | | } |
278 | | |
279 | | impl ops::BitAnd for PAddr { |
280 | | type Output = Self; |
281 | | |
282 | 0 | fn bitand(self, rhs: Self) -> Self { |
283 | 0 | PAddr(self.0 & rhs.0) |
284 | 0 | } |
285 | | } |
286 | | |
287 | | impl ops::BitAnd<u64> for PAddr { |
288 | | type Output = u64; |
289 | | |
290 | 0 | fn bitand(self, rhs: u64) -> Self::Output { |
291 | 0 | Into::<u64>::into(self) & rhs |
292 | 0 | } |
293 | | } |
294 | | |
295 | | impl ops::BitOr for PAddr { |
296 | | type Output = PAddr; |
297 | | |
298 | 0 | fn bitor(self, rhs: PAddr) -> Self::Output { |
299 | 0 | PAddr(self.0 | rhs.0) |
300 | 0 | } |
301 | | } |
302 | | |
303 | | impl ops::BitOr<u64> for PAddr { |
304 | | type Output = u64; |
305 | | |
306 | 0 | fn bitor(self, rhs: u64) -> Self::Output { |
307 | 0 | self.0 | rhs |
308 | 0 | } |
309 | | } |
310 | | |
311 | | impl ops::Shr<u64> for PAddr { |
312 | | type Output = u64; |
313 | | |
314 | 0 | fn shr(self, rhs: u64) -> Self::Output { |
315 | 0 | self.0 >> rhs |
316 | 0 | } |
317 | | } |
318 | | |
319 | | impl fmt::Binary for PAddr { |
320 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
321 | 0 | self.0.fmt(f) |
322 | 0 | } |
323 | | } |
324 | | |
325 | | impl fmt::Display for PAddr { |
326 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
327 | 0 | self.0.fmt(f) |
328 | 0 | } |
329 | | } |
330 | | |
331 | | impl fmt::Debug for PAddr { |
332 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
333 | 0 | write!(f, "{:#x}", self.0) |
334 | 0 | } |
335 | | } |
336 | | |
337 | | impl fmt::LowerHex for PAddr { |
338 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
339 | 0 | self.0.fmt(f) |
340 | 0 | } |
341 | | } |
342 | | |
343 | | impl fmt::Octal for PAddr { |
344 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
345 | 0 | self.0.fmt(f) |
346 | 0 | } |
347 | | } |
348 | | |
349 | | impl fmt::UpperHex for PAddr { |
350 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
351 | 0 | self.0.fmt(f) |
352 | 0 | } |
353 | | } |
354 | | |
355 | | impl fmt::Pointer for PAddr { |
356 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
357 | | use core::fmt::LowerHex; |
358 | 0 | self.0.fmt(f) |
359 | 0 | } |
360 | | } |
361 | | |
362 | | #[allow(clippy::derive_hash_xor_eq)] |
363 | | impl Hash for PAddr { |
364 | 0 | fn hash<H: Hasher>(&self, state: &mut H) { |
365 | 0 | self.0.hash(state); |
366 | 0 | } |
367 | | } |
368 | | |
369 | | /// A wrapper for an IO address (IOVA / DMA Address for devices) |
370 | | #[repr(transparent)] |
371 | | #[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)] |
372 | | pub struct IOAddr(pub u64); |
373 | | |
374 | | impl IOAddr { |
375 | | /// Convert to `u64` |
376 | 0 | pub fn as_u64(self) -> u64 { |
377 | 0 | self.0 |
378 | 0 | } |
379 | | |
380 | | /// Convert to `usize` |
381 | 0 | pub fn as_usize(self) -> usize { |
382 | 0 | self.0 as usize |
383 | 0 | } |
384 | | |
385 | | /// IO Address zero. |
386 | 0 | pub const fn zero() -> Self { |
387 | 0 | IOAddr(0) |
388 | 0 | } |
389 | | |
390 | | /// Is zero? |
391 | 0 | pub fn is_zero(self) -> bool { |
392 | 0 | self == IOAddr::zero() |
393 | 0 | } |
394 | | |
395 | | /// Split `IOAddr` into lower and higher 32-bits. |
396 | 0 | pub fn split(&self) -> (u32, u32) { |
397 | 0 | (self.0 as u32, (self.0 >> 32) as u32) |
398 | 0 | } |
399 | | |
400 | 0 | fn align_up<U>(self, align: U) -> Self |
401 | 0 | where |
402 | 0 | U: Into<u64>, |
403 | | { |
404 | 0 | IOAddr(align_up(self.0, align.into())) |
405 | 0 | } |
406 | | |
407 | 0 | fn align_down<U>(self, align: U) -> Self |
408 | 0 | where |
409 | 0 | U: Into<u64>, |
410 | | { |
411 | 0 | IOAddr(align_down(self.0, align.into())) |
412 | 0 | } |
413 | | |
414 | | /// Offset within the 4 KiB page. |
415 | 0 | pub fn base_page_offset(self) -> u64 { |
416 | 0 | self.0 & (BASE_PAGE_SIZE as u64 - 1) |
417 | 0 | } |
418 | | |
419 | | /// Offset within the 2 MiB page. |
420 | 0 | pub fn large_page_offset(self) -> u64 { |
421 | 0 | self.0 & (LARGE_PAGE_SIZE as u64 - 1) |
422 | 0 | } |
423 | | |
424 | | /// Offset within the 1 GiB page. |
425 | 0 | pub fn huge_page_offset(self) -> u64 { |
426 | 0 | self.0 & (HUGE_PAGE_SIZE as u64 - 1) |
427 | 0 | } |
428 | | |
429 | | /// Return address of nearest 4 KiB page (less than or equal to self). |
430 | 0 | pub fn align_down_to_base_page(self) -> Self { |
431 | 0 | self.align_down(BASE_PAGE_SIZE as u64) |
432 | 0 | } |
433 | | |
434 | | /// Return address of nearest 2 MiB page (less than or equal to self). |
435 | 0 | pub fn align_down_to_large_page(self) -> Self { |
436 | 0 | self.align_down(LARGE_PAGE_SIZE as u64) |
437 | 0 | } |
438 | | |
439 | | /// Return address of nearest 1 GiB page (less than or equal to self). |
440 | 0 | pub fn align_down_to_huge_page(self) -> Self { |
441 | 0 | self.align_down(HUGE_PAGE_SIZE as u64) |
442 | 0 | } |
443 | | |
444 | | /// Return address of nearest 4 KiB page (greater than or equal to self). |
445 | 0 | pub fn align_up_to_base_page(self) -> Self { |
446 | 0 | self.align_up(BASE_PAGE_SIZE as u64) |
447 | 0 | } |
448 | | |
449 | | /// Return address of nearest 2 MiB page (greater than or equal to self). |
450 | 0 | pub fn align_up_to_large_page(self) -> Self { |
451 | 0 | self.align_up(LARGE_PAGE_SIZE as u64) |
452 | 0 | } |
453 | | |
454 | | /// Return address of nearest 1 GiB page (greater than or equal to self). |
455 | 0 | pub fn align_up_to_huge_page(self) -> Self { |
456 | 0 | self.align_up(HUGE_PAGE_SIZE as u64) |
457 | 0 | } |
458 | | |
459 | | /// Is this address aligned to a 4 KiB page? |
460 | 0 | pub fn is_base_page_aligned(self) -> bool { |
461 | 0 | self.align_down(BASE_PAGE_SIZE as u64) == self |
462 | 0 | } |
463 | | |
464 | | /// Is this address aligned to a 2 MiB page? |
465 | 0 | pub fn is_large_page_aligned(self) -> bool { |
466 | 0 | self.align_down(LARGE_PAGE_SIZE as u64) == self |
467 | 0 | } |
468 | | |
469 | | /// Is this address aligned to a 1 GiB page? |
470 | 0 | pub fn is_huge_page_aligned(self) -> bool { |
471 | 0 | self.align_down(HUGE_PAGE_SIZE as u64) == self |
472 | 0 | } |
473 | | |
474 | | /// Is this address aligned to `align`? |
475 | | /// |
476 | | /// # Note |
477 | | /// `align` must be a power of two. |
478 | 0 | pub fn is_aligned<U>(self, align: U) -> bool |
479 | 0 | where |
480 | 0 | U: Into<u64> + Copy, |
481 | | { |
482 | 0 | if !align.into().is_power_of_two() { |
483 | 0 | return false; |
484 | 0 | } |
485 | | |
486 | 0 | self.align_down(align) == self |
487 | 0 | } |
488 | | } |
489 | | |
490 | | impl From<u64> for IOAddr { |
491 | 0 | fn from(num: u64) -> Self { |
492 | 0 | IOAddr(num) |
493 | 0 | } |
494 | | } |
495 | | |
496 | | impl From<usize> for IOAddr { |
497 | 0 | fn from(num: usize) -> Self { |
498 | 0 | IOAddr(num as u64) |
499 | 0 | } |
500 | | } |
501 | | |
502 | | impl From<i32> for IOAddr { |
503 | 0 | fn from(num: i32) -> Self { |
504 | 0 | IOAddr(num as u64) |
505 | 0 | } |
506 | | } |
507 | | |
508 | | #[allow(clippy::from_over_into)] |
509 | | impl Into<u64> for IOAddr { |
510 | 0 | fn into(self) -> u64 { |
511 | 0 | self.0 |
512 | 0 | } |
513 | | } |
514 | | |
515 | | #[allow(clippy::from_over_into)] |
516 | | impl Into<usize> for IOAddr { |
517 | 0 | fn into(self) -> usize { |
518 | 0 | self.0 as usize |
519 | 0 | } |
520 | | } |
521 | | |
522 | | impl ops::Add for IOAddr { |
523 | | type Output = IOAddr; |
524 | | |
525 | 0 | fn add(self, rhs: IOAddr) -> Self::Output { |
526 | 0 | IOAddr(self.0 + rhs.0) |
527 | 0 | } |
528 | | } |
529 | | |
530 | | impl ops::Add<u64> for IOAddr { |
531 | | type Output = IOAddr; |
532 | | |
533 | 0 | fn add(self, rhs: u64) -> Self::Output { |
534 | 0 | IOAddr::from(self.0 + rhs) |
535 | 0 | } |
536 | | } |
537 | | |
538 | | impl ops::Add<usize> for IOAddr { |
539 | | type Output = IOAddr; |
540 | | |
541 | 0 | fn add(self, rhs: usize) -> Self::Output { |
542 | 0 | IOAddr::from(self.0 + rhs as u64) |
543 | 0 | } |
544 | | } |
545 | | |
546 | | impl ops::AddAssign for IOAddr { |
547 | 0 | fn add_assign(&mut self, other: IOAddr) { |
548 | 0 | *self = IOAddr::from(self.0 + other.0); |
549 | 0 | } |
550 | | } |
551 | | |
552 | | impl ops::AddAssign<u64> for IOAddr { |
553 | 0 | fn add_assign(&mut self, offset: u64) { |
554 | 0 | *self = IOAddr::from(self.0 + offset); |
555 | 0 | } |
556 | | } |
557 | | |
558 | | impl ops::Sub for IOAddr { |
559 | | type Output = IOAddr; |
560 | | |
561 | 0 | fn sub(self, rhs: IOAddr) -> Self::Output { |
562 | 0 | IOAddr::from(self.0 - rhs.0) |
563 | 0 | } |
564 | | } |
565 | | |
566 | | impl ops::Sub<u64> for IOAddr { |
567 | | type Output = IOAddr; |
568 | | |
569 | 0 | fn sub(self, rhs: u64) -> Self::Output { |
570 | 0 | IOAddr::from(self.0 - rhs) |
571 | 0 | } |
572 | | } |
573 | | |
574 | | impl ops::Sub<usize> for IOAddr { |
575 | | type Output = IOAddr; |
576 | | |
577 | 0 | fn sub(self, rhs: usize) -> Self::Output { |
578 | 0 | IOAddr::from(self.0 - rhs as u64) |
579 | 0 | } |
580 | | } |
581 | | |
582 | | impl ops::Rem for IOAddr { |
583 | | type Output = IOAddr; |
584 | | |
585 | 0 | fn rem(self, rhs: IOAddr) -> Self::Output { |
586 | 0 | IOAddr(self.0 % rhs.0) |
587 | 0 | } |
588 | | } |
589 | | |
590 | | impl ops::Rem<u64> for IOAddr { |
591 | | type Output = u64; |
592 | | |
593 | 0 | fn rem(self, rhs: u64) -> Self::Output { |
594 | 0 | self.0 % rhs |
595 | 0 | } |
596 | | } |
597 | | |
598 | | impl ops::Rem<usize> for IOAddr { |
599 | | type Output = u64; |
600 | | |
601 | 0 | fn rem(self, rhs: usize) -> Self::Output { |
602 | 0 | self.0 % (rhs as u64) |
603 | 0 | } |
604 | | } |
605 | | |
606 | | impl ops::BitAnd for IOAddr { |
607 | | type Output = Self; |
608 | | |
609 | 0 | fn bitand(self, rhs: Self) -> Self { |
610 | 0 | IOAddr(self.0 & rhs.0) |
611 | 0 | } |
612 | | } |
613 | | |
614 | | impl ops::BitAnd<u64> for IOAddr { |
615 | | type Output = u64; |
616 | | |
617 | 0 | fn bitand(self, rhs: u64) -> Self::Output { |
618 | 0 | Into::<u64>::into(self) & rhs |
619 | 0 | } |
620 | | } |
621 | | |
622 | | impl ops::BitOr for IOAddr { |
623 | | type Output = IOAddr; |
624 | | |
625 | 0 | fn bitor(self, rhs: IOAddr) -> Self::Output { |
626 | 0 | IOAddr(self.0 | rhs.0) |
627 | 0 | } |
628 | | } |
629 | | |
630 | | impl ops::BitOr<u64> for IOAddr { |
631 | | type Output = u64; |
632 | | |
633 | 0 | fn bitor(self, rhs: u64) -> Self::Output { |
634 | 0 | self.0 | rhs |
635 | 0 | } |
636 | | } |
637 | | |
638 | | impl ops::Shr<u64> for IOAddr { |
639 | | type Output = u64; |
640 | | |
641 | 0 | fn shr(self, rhs: u64) -> Self::Output { |
642 | 0 | self.0 >> rhs |
643 | 0 | } |
644 | | } |
645 | | |
646 | | impl fmt::Binary for IOAddr { |
647 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
648 | 0 | self.0.fmt(f) |
649 | 0 | } |
650 | | } |
651 | | |
652 | | impl fmt::Display for IOAddr { |
653 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
654 | 0 | self.0.fmt(f) |
655 | 0 | } |
656 | | } |
657 | | |
658 | | impl fmt::Debug for IOAddr { |
659 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
660 | 0 | write!(f, "{:#x}", self.0) |
661 | 0 | } |
662 | | } |
663 | | |
664 | | impl fmt::LowerHex for IOAddr { |
665 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
666 | 0 | self.0.fmt(f) |
667 | 0 | } |
668 | | } |
669 | | |
670 | | impl fmt::Octal for IOAddr { |
671 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
672 | 0 | self.0.fmt(f) |
673 | 0 | } |
674 | | } |
675 | | |
676 | | impl fmt::UpperHex for IOAddr { |
677 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
678 | 0 | self.0.fmt(f) |
679 | 0 | } |
680 | | } |
681 | | |
682 | | impl fmt::Pointer for IOAddr { |
683 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
684 | | use core::fmt::LowerHex; |
685 | 0 | self.0.fmt(f) |
686 | 0 | } |
687 | | } |
688 | | |
689 | | #[allow(clippy::derive_hash_xor_eq)] |
690 | | impl Hash for IOAddr { |
691 | 0 | fn hash<H: Hasher>(&self, state: &mut H) { |
692 | 0 | self.0.hash(state); |
693 | 0 | } |
694 | | } |
695 | | |
696 | | /// A wrapper for a virtual address. |
697 | | #[repr(transparent)] |
698 | | #[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)] |
699 | | pub struct VAddr(pub u64); |
700 | | |
701 | | impl VAddr { |
702 | | /// Convert from `u64` |
703 | 0 | pub const fn from_u64(v: u64) -> Self { |
704 | 0 | VAddr(v) |
705 | 0 | } |
706 | | |
707 | | /// Convert from `usize` |
708 | 0 | pub const fn from_usize(v: usize) -> Self { |
709 | 0 | VAddr(v as u64) |
710 | 0 | } |
711 | | |
712 | | /// Convert to `u64` |
713 | 0 | pub const fn as_u64(self) -> u64 { |
714 | 0 | self.0 |
715 | 0 | } |
716 | | |
717 | | /// Convert to `usize` |
718 | 0 | pub const fn as_usize(self) -> usize { |
719 | 0 | self.0 as usize |
720 | 0 | } |
721 | | |
722 | | /// Convert to mutable pointer. |
723 | 0 | pub fn as_mut_ptr<T>(self) -> *mut T { |
724 | 0 | self.0 as *mut T |
725 | 0 | } |
726 | | |
727 | | /// Convert to pointer. |
728 | 0 | pub fn as_ptr<T>(self) -> *const T { |
729 | 0 | self.0 as *const T |
730 | 0 | } |
731 | | |
732 | | /// Virtual Address zero. |
733 | 0 | pub const fn zero() -> Self { |
734 | 0 | VAddr(0) |
735 | 0 | } |
736 | | |
737 | | /// Is zero? |
738 | 0 | pub fn is_zero(self) -> bool { |
739 | 0 | self == VAddr::zero() |
740 | 0 | } |
741 | | |
742 | 0 | fn align_up<U>(self, align: U) -> Self |
743 | 0 | where |
744 | 0 | U: Into<u64>, |
745 | | { |
746 | 0 | VAddr(align_up(self.0, align.into())) |
747 | 0 | } |
748 | | |
749 | 0 | fn align_down<U>(self, align: U) -> Self |
750 | 0 | where |
751 | 0 | U: Into<u64>, |
752 | | { |
753 | 0 | VAddr(align_down(self.0, align.into())) |
754 | 0 | } |
755 | | |
756 | | /// Offset within the 4 KiB page. |
757 | 0 | pub fn base_page_offset(self) -> u64 { |
758 | 0 | self.0 & (BASE_PAGE_SIZE as u64 - 1) |
759 | 0 | } |
760 | | |
761 | | /// Offset within the 2 MiB page. |
762 | 0 | pub fn large_page_offset(self) -> u64 { |
763 | 0 | self.0 & (LARGE_PAGE_SIZE as u64 - 1) |
764 | 0 | } |
765 | | |
766 | | /// Offset within the 1 GiB page. |
767 | 0 | pub fn huge_page_offset(self) -> u64 { |
768 | 0 | self.0 & (HUGE_PAGE_SIZE as u64 - 1) |
769 | 0 | } |
770 | | |
771 | | /// Return address of nearest 4 KiB page (less than or equal to self). |
772 | 0 | pub fn align_down_to_base_page(self) -> Self { |
773 | 0 | self.align_down(BASE_PAGE_SIZE as u64) |
774 | 0 | } |
775 | | |
776 | | /// Return address of nearest 2 MiB page (less than or equal to self). |
777 | 0 | pub fn align_down_to_large_page(self) -> Self { |
778 | 0 | self.align_down(LARGE_PAGE_SIZE as u64) |
779 | 0 | } |
780 | | |
781 | | /// Return address of nearest 1 GiB page (less than or equal to self). |
782 | 0 | pub fn align_down_to_huge_page(self) -> Self { |
783 | 0 | self.align_down(HUGE_PAGE_SIZE as u64) |
784 | 0 | } |
785 | | |
786 | | /// Return address of nearest 4 KiB page (greater than or equal to self). |
787 | 0 | pub fn align_up_to_base_page(self) -> Self { |
788 | 0 | self.align_up(BASE_PAGE_SIZE as u64) |
789 | 0 | } |
790 | | |
791 | | /// Return address of nearest 2 MiB page (greater than or equal to self). |
792 | 0 | pub fn align_up_to_large_page(self) -> Self { |
793 | 0 | self.align_up(LARGE_PAGE_SIZE as u64) |
794 | 0 | } |
795 | | |
796 | | /// Return address of nearest 1 GiB page (greater than or equal to self). |
797 | 0 | pub fn align_up_to_huge_page(self) -> Self { |
798 | 0 | self.align_up(HUGE_PAGE_SIZE as u64) |
799 | 0 | } |
800 | | |
801 | | /// Is this address aligned to a 4 KiB page? |
802 | 0 | pub fn is_base_page_aligned(self) -> bool { |
803 | 0 | self.align_down(BASE_PAGE_SIZE as u64) == self |
804 | 0 | } |
805 | | |
806 | | /// Is this address aligned to a 2 MiB page? |
807 | 0 | pub fn is_large_page_aligned(self) -> bool { |
808 | 0 | self.align_down(LARGE_PAGE_SIZE as u64) == self |
809 | 0 | } |
810 | | |
811 | | /// Is this address aligned to a 1 GiB page? |
812 | 0 | pub fn is_huge_page_aligned(self) -> bool { |
813 | 0 | self.align_down(HUGE_PAGE_SIZE as u64) == self |
814 | 0 | } |
815 | | |
816 | | /// Is this address aligned to `align`? |
817 | | /// |
818 | | /// # Note |
819 | | /// `align` must be a power of two. |
820 | 0 | pub fn is_aligned<U>(self, align: U) -> bool |
821 | 0 | where |
822 | 0 | U: Into<u64> + Copy, |
823 | | { |
824 | 0 | if !align.into().is_power_of_two() { |
825 | 0 | return false; |
826 | 0 | } |
827 | | |
828 | 0 | self.align_down(align) == self |
829 | 0 | } |
830 | | } |
831 | | |
832 | | impl From<u64> for VAddr { |
833 | 0 | fn from(num: u64) -> Self { |
834 | 0 | VAddr(num) |
835 | 0 | } |
836 | | } |
837 | | |
838 | | impl From<i32> for VAddr { |
839 | 0 | fn from(num: i32) -> Self { |
840 | 0 | VAddr(num as u64) |
841 | 0 | } |
842 | | } |
843 | | |
844 | | #[allow(clippy::from_over_into)] |
845 | | impl Into<u64> for VAddr { |
846 | 0 | fn into(self) -> u64 { |
847 | 0 | self.0 |
848 | 0 | } |
849 | | } |
850 | | |
851 | | impl From<usize> for VAddr { |
852 | 0 | fn from(num: usize) -> Self { |
853 | 0 | VAddr(num as u64) |
854 | 0 | } |
855 | | } |
856 | | |
857 | | #[allow(clippy::from_over_into)] |
858 | | impl Into<usize> for VAddr { |
859 | 0 | fn into(self) -> usize { |
860 | 0 | self.0 as usize |
861 | 0 | } |
862 | | } |
863 | | |
864 | | impl ops::Add for VAddr { |
865 | | type Output = VAddr; |
866 | | |
867 | 0 | fn add(self, rhs: VAddr) -> Self::Output { |
868 | 0 | VAddr(self.0 + rhs.0) |
869 | 0 | } |
870 | | } |
871 | | |
872 | | impl ops::Add<u64> for VAddr { |
873 | | type Output = VAddr; |
874 | | |
875 | 0 | fn add(self, rhs: u64) -> Self::Output { |
876 | 0 | VAddr(self.0 + rhs) |
877 | 0 | } |
878 | | } |
879 | | |
880 | | impl ops::Add<usize> for VAddr { |
881 | | type Output = VAddr; |
882 | | |
883 | 0 | fn add(self, rhs: usize) -> Self::Output { |
884 | 0 | VAddr::from(self.0 + rhs as u64) |
885 | 0 | } |
886 | | } |
887 | | |
888 | | impl ops::AddAssign for VAddr { |
889 | 0 | fn add_assign(&mut self, other: VAddr) { |
890 | 0 | *self = VAddr::from(self.0 + other.0); |
891 | 0 | } |
892 | | } |
893 | | |
894 | | impl ops::AddAssign<u64> for VAddr { |
895 | 0 | fn add_assign(&mut self, offset: u64) { |
896 | 0 | *self = VAddr::from(self.0 + offset); |
897 | 0 | } |
898 | | } |
899 | | |
900 | | impl ops::AddAssign<usize> for VAddr { |
901 | 0 | fn add_assign(&mut self, offset: usize) { |
902 | 0 | *self = VAddr::from(self.0 + offset as u64); |
903 | 0 | } |
904 | | } |
905 | | |
906 | | impl ops::Sub for VAddr { |
907 | | type Output = VAddr; |
908 | | |
909 | 0 | fn sub(self, rhs: VAddr) -> Self::Output { |
910 | 0 | VAddr::from(self.0 - rhs.0) |
911 | 0 | } |
912 | | } |
913 | | |
914 | | impl ops::Sub<u64> for VAddr { |
915 | | type Output = VAddr; |
916 | | |
917 | 0 | fn sub(self, rhs: u64) -> Self::Output { |
918 | 0 | VAddr::from(self.0 - rhs) |
919 | 0 | } |
920 | | } |
921 | | |
922 | | impl ops::Sub<usize> for VAddr { |
923 | | type Output = VAddr; |
924 | | |
925 | 0 | fn sub(self, rhs: usize) -> Self::Output { |
926 | 0 | VAddr::from(self.0 - rhs as u64) |
927 | 0 | } |
928 | | } |
929 | | |
930 | | impl ops::Rem for VAddr { |
931 | | type Output = VAddr; |
932 | | |
933 | 0 | fn rem(self, rhs: VAddr) -> Self::Output { |
934 | 0 | VAddr(self.0 % rhs.0) |
935 | 0 | } |
936 | | } |
937 | | |
938 | | impl ops::Rem<u64> for VAddr { |
939 | | type Output = u64; |
940 | | |
941 | 0 | fn rem(self, rhs: Self::Output) -> Self::Output { |
942 | 0 | self.0 % rhs |
943 | 0 | } |
944 | | } |
945 | | |
946 | | impl ops::Rem<usize> for VAddr { |
947 | | type Output = usize; |
948 | | |
949 | 0 | fn rem(self, rhs: Self::Output) -> Self::Output { |
950 | 0 | self.as_usize() % rhs |
951 | 0 | } |
952 | | } |
953 | | |
954 | | impl ops::BitAnd for VAddr { |
955 | | type Output = Self; |
956 | | |
957 | 0 | fn bitand(self, rhs: Self) -> Self::Output { |
958 | 0 | VAddr(self.0 & rhs.0) |
959 | 0 | } |
960 | | } |
961 | | |
962 | | impl ops::BitAnd<u64> for VAddr { |
963 | | type Output = VAddr; |
964 | | |
965 | 0 | fn bitand(self, rhs: u64) -> Self::Output { |
966 | 0 | VAddr(self.0 & rhs) |
967 | 0 | } |
968 | | } |
969 | | |
970 | | impl ops::BitAnd<usize> for VAddr { |
971 | | type Output = VAddr; |
972 | | |
973 | 0 | fn bitand(self, rhs: usize) -> Self::Output { |
974 | 0 | VAddr(self.0 & rhs as u64) |
975 | 0 | } |
976 | | } |
977 | | |
978 | | impl ops::BitAnd<i32> for VAddr { |
979 | | type Output = VAddr; |
980 | | |
981 | 0 | fn bitand(self, rhs: i32) -> Self::Output { |
982 | 0 | VAddr(self.0 & rhs as u64) |
983 | 0 | } |
984 | | } |
985 | | |
986 | | impl ops::BitOr for VAddr { |
987 | | type Output = VAddr; |
988 | | |
989 | 0 | fn bitor(self, rhs: VAddr) -> VAddr { |
990 | 0 | VAddr(self.0 | rhs.0) |
991 | 0 | } |
992 | | } |
993 | | |
994 | | impl ops::BitOr<u64> for VAddr { |
995 | | type Output = VAddr; |
996 | | |
997 | 0 | fn bitor(self, rhs: u64) -> Self::Output { |
998 | 0 | VAddr(self.0 | rhs) |
999 | 0 | } |
1000 | | } |
1001 | | |
1002 | | impl ops::BitOr<usize> for VAddr { |
1003 | | type Output = VAddr; |
1004 | | |
1005 | 0 | fn bitor(self, rhs: usize) -> Self::Output { |
1006 | 0 | VAddr(self.0 | rhs as u64) |
1007 | 0 | } |
1008 | | } |
1009 | | |
1010 | | impl ops::Shr<u64> for VAddr { |
1011 | | type Output = u64; |
1012 | | |
1013 | 0 | fn shr(self, rhs: u64) -> Self::Output { |
1014 | 0 | self.0 >> rhs as u64 |
1015 | 0 | } |
1016 | | } |
1017 | | |
1018 | | impl ops::Shr<usize> for VAddr { |
1019 | | type Output = u64; |
1020 | | |
1021 | 0 | fn shr(self, rhs: usize) -> Self::Output { |
1022 | 0 | self.0 >> rhs as u64 |
1023 | 0 | } |
1024 | | } |
1025 | | |
1026 | | impl ops::Shr<i32> for VAddr { |
1027 | | type Output = u64; |
1028 | | |
1029 | 0 | fn shr(self, rhs: i32) -> Self::Output { |
1030 | 0 | self.0 >> rhs as u64 |
1031 | 0 | } |
1032 | | } |
1033 | | |
1034 | | impl fmt::Binary for VAddr { |
1035 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1036 | 0 | self.0.fmt(f) |
1037 | 0 | } |
1038 | | } |
1039 | | |
1040 | | impl fmt::Display for VAddr { |
1041 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1042 | 0 | write!(f, "{:#x}", self.0) |
1043 | 0 | } |
1044 | | } |
1045 | | |
1046 | | impl fmt::Debug for VAddr { |
1047 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1048 | 0 | write!(f, "{:#x}", self.0) |
1049 | 0 | } |
1050 | | } |
1051 | | |
1052 | | impl fmt::LowerHex for VAddr { |
1053 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1054 | 0 | self.0.fmt(f) |
1055 | 0 | } |
1056 | | } |
1057 | | |
1058 | | impl fmt::Octal for VAddr { |
1059 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1060 | 0 | self.0.fmt(f) |
1061 | 0 | } |
1062 | | } |
1063 | | |
1064 | | impl fmt::UpperHex for VAddr { |
1065 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1066 | 0 | self.0.fmt(f) |
1067 | 0 | } |
1068 | | } |
1069 | | |
1070 | | impl fmt::Pointer for VAddr { |
1071 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1072 | | use core::fmt::LowerHex; |
1073 | 0 | self.0.fmt(f) |
1074 | 0 | } |
1075 | | } |
1076 | | |
1077 | | #[allow(clippy::derive_hash_xor_eq)] |
1078 | | impl Hash for VAddr { |
1079 | 0 | fn hash<H: Hasher>(&self, state: &mut H) { |
1080 | 0 | self.0.hash(state); |
1081 | 0 | } |
1082 | | } |
1083 | | |
1084 | | /// Log2 of base page size (12 bits). |
1085 | | pub const BASE_PAGE_SHIFT: usize = 12; |
1086 | | |
1087 | | /// Size of a base page (4 KiB) |
1088 | | pub const BASE_PAGE_SIZE: usize = 4096; |
1089 | | |
1090 | | /// Size of a large page (2 MiB) |
1091 | | pub const LARGE_PAGE_SIZE: usize = 1024 * 1024 * 2; |
1092 | | |
1093 | | /// Size of a huge page (1 GiB) |
1094 | | pub const HUGE_PAGE_SIZE: usize = 1024 * 1024 * 1024; |
1095 | | |
1096 | | /// Size of a region covered by a PML4 Entry (512 GiB) |
1097 | | #[cfg(target_arch = "x86_64")] |
1098 | | pub const PML4_SLOT_SIZE: usize = HUGE_PAGE_SIZE * 512; |
1099 | | |
1100 | | /// Size of a cache-line |
1101 | | pub const CACHE_LINE_SIZE: usize = 64; |
1102 | | |
1103 | | /// A type wrapping a base page with a 4 KiB buffer. |
1104 | | pub struct Page([u8; BASE_PAGE_SIZE]); |
1105 | | |
1106 | | /// A type wrapping a large page with a 2 MiB buffer. |
1107 | | pub struct LargePage([u8; LARGE_PAGE_SIZE]); |
1108 | | |
1109 | | /// A type wrapping a huge page with a 1 GiB buffer. |
1110 | | pub struct HugePage([u8; HUGE_PAGE_SIZE]); |
1111 | | |
1112 | | /// MAXPHYADDR, which is at most 52 (use CPUID to determine the system's actual value). |
1113 | | pub const MAXPHYADDR: u64 = 52; |
1114 | | |
1115 | | /// Mask to extract the physical address stored in a page-table entry. |
1116 | | const ADDRESS_MASK: u64 = ((1 << MAXPHYADDR) - 1) & !0xfff; |
1117 | | |
1118 | | /// Page tables have 512 = 4096 / 8 entries (each entry is 8 bytes). |
1119 | | pub const PAGE_SIZE_ENTRIES: usize = 512; |
1120 | | |
1121 | | /// A PML4 table. |
1122 | | pub type PML4 = [PML4Entry; PAGE_SIZE_ENTRIES]; |
1123 | | |
1124 | | /// A PML5 table |
1125 | | pub type PML5 = [PML5Entry; PAGE_SIZE_ENTRIES]; |
1126 | | |
1127 | | /// A page directory pointer table. |
1128 | | #[allow(clippy::upper_case_acronyms)] |
1129 | | pub type PDPT = [PDPTEntry; PAGE_SIZE_ENTRIES]; |
1130 | | |
1131 | | /// A page directory. |
1132 | | pub type PD = [PDEntry; PAGE_SIZE_ENTRIES]; |
1133 | | |
1134 | | /// A page table. |
1135 | | pub type PT = [PTEntry; PAGE_SIZE_ENTRIES]; |
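Because every entry type is a Copy newtype over u64, a freshly zeroed table can be written as a plain array; a small sketch (placing the backing memory on a 4 KiB aligned physical frame is the caller's job and is outside the scope of these type aliases):

// Sketch: an all-zero PML4 maps nothing, since every entry has P = 0.
fn empty_pml4() -> PML4 {
    [PML4Entry(0); PAGE_SIZE_ENTRIES]
}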
1136 | | |
1137 | | /// Given virtual address calculate corresponding entry in PML4. |
1138 | | #[cfg(target_arch = "x86_64")] |
1139 | | #[inline] |
1140 | 0 | pub fn pml4_index(addr: VAddr) -> usize { |
1141 | 0 | ((addr >> 39usize) & 0b111111111) as usize |
1142 | 0 | } |
1143 | | |
1144 | | /// Given virtual address calculate corresponding entry in PML5. |
1145 | | #[cfg(target_arch = "x86_64")] |
1146 | | #[inline] |
1147 | 0 | pub fn pml5_index(addr: VAddr) -> usize { |
1148 | 0 | ((addr >> 48usize) & 0b111111111) as usize |
1149 | 0 | } |
1150 | | |
1151 | | /// Given virtual address calculate corresponding entry in PDPT. |
1152 | | #[inline] |
1153 | 0 | pub fn pdpt_index(addr: VAddr) -> usize { |
1154 | 0 | ((addr >> 30usize) & 0b111111111) as usize |
1155 | 0 | } |
1156 | | |
1157 | | /// Given virtual address calculate corresponding entry in PD. |
1158 | | #[inline] |
1159 | 0 | pub fn pd_index(addr: VAddr) -> usize { |
1160 | 0 | ((addr >> 21usize) & 0b111111111) as usize |
1161 | 0 | } |
1162 | | |
1163 | | /// Given virtual address calculate corresponding entry in PT. |
1164 | | #[inline] |
1165 | 0 | pub fn pt_index(addr: VAddr) -> usize { |
1166 | 0 | ((addr >> 12usize) & 0b111111111) as usize |
1167 | 0 | } |
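Taken together, the index helpers above split a canonical virtual address into its table indices and page offset; a sketch assuming an x86_64 target and 4-level paging (the address is made up for illustration):

// Decompose a virtual address for a 4-level page-table walk (PML5 omitted).
fn table_indices(v: VAddr) -> (usize, usize, usize, usize, u64) {
    (pml4_index(v), pdpt_index(v), pd_index(v), pt_index(v), v.base_page_offset())
}

// For example, VAddr::from(0xffff_8000_0000_2234u64) yields (256, 0, 0, 2, 0x234):
// bits 47..39, 38..30, 29..21, 20..12 and 11..0 respectively.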
1168 | | |
1169 | | bitflags! { |
1170 | | /// PML4 configuration bit description. |
1171 | | #[repr(transparent)] |
1172 | | pub struct PML4Flags: u64 { |
1173 | | /// Present; must be 1 to reference a page-directory-pointer table |
1174 | | const P = bit!(0); |
1175 | | /// Read/write; if 0, writes may not be allowed to the 512-GByte region |
1176 | | /// controlled by this entry (see Section 4.6) |
1177 | | const RW = bit!(1); |
1178 | | /// User/supervisor; if 0, user-mode accesses are not allowed |
1179 | | /// to the 512-GByte region controlled by this entry. |
1180 | | const US = bit!(2); |
1181 | | /// Page-level write-through; indirectly determines the memory type used to |
1182 | | /// access the page-directory-pointer table referenced by this entry. |
1183 | | const PWT = bit!(3); |
1184 | | /// Page-level cache disable; indirectly determines the memory type used to |
1185 | | /// access the page-directory-pointer table referenced by this entry. |
1186 | | const PCD = bit!(4); |
1187 | | /// Accessed; indicates whether this entry has been used for linear-address translation. |
1188 | | const A = bit!(5); |
1189 | | /// If IA32_EFER.NXE = 1, execute-disable |
1190 | | /// If 1, instruction fetches are not allowed from the 512-GByte region. |
1191 | | const XD = bit!(63); |
1192 | | } |
1193 | | } |
1194 | | |
1195 | | bitflags! { |
1196 | | /// PML5 configuration bit description. |
1197 | | #[repr(transparent)] |
1198 | | pub struct PML5Flags: u64 { |
1199 | | /// Present; must be 1 to reference a PML4 table |
1200 | | const P = bit!(0); |
1201 | | /// Read/write; if 0, writes may not be allowed to the 256-TByte region |
1202 | | /// controlled by this entry (see Section 4.6) |
1203 | | const RW = bit!(1); |
1204 | | /// User/supervisor; if 0, user-mode accesses are not allowed |
1205 | | /// to the 256-TByte region controlled by this entry. |
1206 | | const US = bit!(2); |
1207 | | /// Page-level write-through; indirectly determines the memory type used to |
1208 | | /// access the PML4 table referenced by this entry. |
1209 | | const PWT = bit!(3); |
1210 | | /// Page-level cache disable; indirectly determines the memory type used to |
1211 | | /// access the PML4 table referenced by this entry. |
1212 | | const PCD = bit!(4); |
1213 | | /// Accessed; indicates whether this entry has been used for linear-address translation. |
1214 | | const A = bit!(5); |
1215 | | /// If IA32_EFER.NXE = 1, execute-disable |
1216 | | /// If 1, instruction fetches are not allowed from the 256-TByte region. |
1217 | | const XD = bit!(63); |
1218 | | } |
1219 | | } |
1220 | | |
1221 | | /// A PML4 Entry consists of an address and a bunch of flags. |
1222 | | #[repr(transparent)] |
1223 | | #[derive(Clone, Copy)] |
1224 | | pub struct PML4Entry(pub u64); |
1225 | | |
1226 | | impl fmt::Debug for PML4Entry { |
1227 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1228 | 0 | write!( |
1229 | 0 | f, |
1230 | 0 | "PML4Entry {{ {:#x}, {:?} }}", |
1231 | 0 | self.address(), |
1232 | 0 | self.flags() |
1233 | | ) |
1234 | 0 | } |
1235 | | } |
1236 | | |
1237 | | impl PML4Entry { |
1238 | | /// Creates a new PML4Entry. |
1239 | | /// |
1240 | | /// # Arguments |
1241 | | /// |
1242 | | /// * `pml4` - The physical address of the page-directory-pointer table this entry points to. |
1243 | | /// * `flags`- Additional flags for the entry. |
1244 | 0 | pub fn new(pml4: PAddr, flags: PML4Flags) -> PML4Entry { |
1245 | 0 | let pml4_val = pml4 & ADDRESS_MASK; |
1246 | 0 | assert!(pml4_val == pml4.into()); |
1247 | 0 | assert!(pml4 % BASE_PAGE_SIZE == 0); |
1248 | 0 | PML4Entry(pml4_val | flags.bits) |
1249 | 0 | } |
1250 | | |
1251 | | /// Retrieves the physical address in this entry. |
1252 | 0 | pub fn address(self) -> PAddr { |
1253 | 0 | PAddr::from(self.0 & ADDRESS_MASK) |
1254 | 0 | } |
1255 | | |
1256 | 0 | pub fn flags(self) -> PML4Flags { |
1257 | 0 | PML4Flags::from_bits_truncate(self.0) |
1258 | 0 | } |
1259 | | |
1260 | | check_flag!(doc = "Is page present?", is_present, PML4Flags::P); |
1261 | | check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 512-GByte region controlled by this entry (see Section 4.6)", |
1262 | | is_writeable, PML4Flags::RW); |
1263 | | check_flag!(doc = "User/supervisor; if 0, user-mode accesses are not allowed to the 512-GByte region controlled by this entry.", |
1264 | | is_user_mode_allowed, PML4Flags::US); |
1265 | | check_flag!(doc = "Page-level write-through; indirectly determines the memory type used to access the page-directory-pointer table referenced by this entry.", |
1266 | | is_page_write_through, PML4Flags::PWT); |
1267 | | check_flag!(doc = "Page-level cache disable; indirectly determines the memory type used to access the page-directory-pointer table referenced by this entry.", |
1268 | | is_page_level_cache_disabled, PML4Flags::PCD); |
1269 | | check_flag!( |
1270 | | doc = |
1271 | | "Accessed; indicates whether this entry has been used for linear-address translation.", |
1272 | | is_accessed, |
1273 | | PML4Flags::A |
1274 | | ); |
1275 | | check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 512-GByte region.", |
1276 | | is_instruction_fetching_disabled, PML4Flags::XD); |
1277 | | } |
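A short usage sketch for the constructor and the generated flag accessors above; the PDPT frame address is made up, but base-page aligned as `new` asserts:

fn pml4_entry_sketch() {
    let pdpt_frame = PAddr::from(0x5_9000u64); // hypothetical, 4 KiB aligned
    let e = PML4Entry::new(pdpt_frame, PML4Flags::P | PML4Flags::RW | PML4Flags::US);
    assert_eq!(e.address(), pdpt_frame);
    assert!(e.is_present() && e.is_writeable() && e.is_user_mode_allowed());
    assert!(!e.is_instruction_fetching_disabled()); // XD was not set
}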
1278 | | |
1279 | | /// A PML5 Entry consists of an address and a bunch of flags. |
1280 | | #[repr(transparent)] |
1281 | | #[derive(Clone, Copy)] |
1282 | | pub struct PML5Entry(pub u64); |
1283 | | |
1284 | | impl fmt::Debug for PML5Entry { |
1285 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1286 | 0 | write!( |
1287 | 0 | f, |
1288 | 0 | "PML5Entry {{ {:#x}, {:?} }}", |
1289 | 0 | self.address(), |
1290 | 0 | self.flags() |
1291 | | ) |
1292 | 0 | } |
1293 | | } |
1294 | | |
1295 | | impl PML5Entry { |
1296 | | /// Creates a new PML5Entry. |
1297 | | /// |
1298 | | /// # Arguments |
1299 | | /// |
1300 | | /// * `pml4` - The physical address of the PML4 table that this entry points to. |
1301 | | /// * `flags` - Additional flags for the entry. |
1302 | 0 | pub fn new(pml4: PAddr, flags: PML5Flags) -> PML5Entry { |
1303 | 0 | let pml5_val = pml4 & ADDRESS_MASK; |
1304 | 0 | assert!(pml5_val == pml4.into()); |
1305 | 0 | assert!(pml4 % BASE_PAGE_SIZE == 0); |
1306 | 0 | PML5Entry(pml5_val | flags.bits) |
1307 | 0 | } |
1308 | | |
1309 | | /// Retrieves the physical address in this entry. |
1310 | 0 | pub fn address(self) -> PAddr { |
1311 | 0 | PAddr::from(self.0 & ADDRESS_MASK) |
1312 | 0 | } |
1313 | | |
1314 | 0 | pub fn flags(self) -> PML5Flags { |
1315 | 0 | PML5Flags::from_bits_truncate(self.0) |
1316 | 0 | } |
1317 | | |
1318 | | check_flag!(doc = "Is page present?", is_present, PML5Flags::P); |
1319 | | check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 256-TByte region controlled by this entry (see Section 4.6)", |
1320 | | is_writeable, PML5Flags::RW); |
1321 | | check_flag!(doc = "User/supervisor; if 0, user-mode accesses are not allowed to the 256-TByte region controlled by this entry.", |
1322 | | is_user_mode_allowed, PML5Flags::US); |
1323 | | check_flag!(doc = "Page-level write-through; indirectly determines the memory type used to access the PML4 table referenced by this entry.", |
1324 | | is_page_write_through, PML5Flags::PWT); |
1325 | | check_flag!(doc = "Page-level cache disable; indirectly determines the memory type used to access the PML4 table referenced by this entry.", |
1326 | | is_page_level_cache_disabled, PML5Flags::PCD); |
1327 | | check_flag!( |
1328 | | doc = |
1329 | | "Accessed; indicates whether this entry has been used for linear-address translation.", |
1330 | | is_accessed, |
1331 | | PML5Flags::A |
1332 | | ); |
1333 | | check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 256-TByte region.", |
1334 | | is_instruction_fetching_disabled, PML5Flags::XD); |
1335 | | } |
1336 | | |
1337 | | bitflags! { |
1338 | | /// PDPT configuration bit description. |
1339 | | #[repr(transparent)] |
1340 | | pub struct PDPTFlags: u64 { |
1341 | | /// Present; must be 1 to map a 1-GByte page or reference a page directory. |
1342 | | const P = bit!(0); |
1343 | | /// Read/write; if 0, writes may not be allowed to the 1-GByte region controlled by this entry |
1344 | | const RW = bit!(1); |
1345 | | /// User/supervisor; user-mode accesses are not allowed to the 1-GByte region controlled by this entry. |
1346 | | const US = bit!(2); |
1347 | | /// Page-level write-through. |
1348 | | const PWT = bit!(3); |
1349 | | /// Page-level cache disable. |
1350 | | const PCD = bit!(4); |
1351 | | /// Accessed; if PS set indicates whether software has accessed the 1-GByte page |
1352 | | /// else indicates whether this entry has been used for linear-address translation |
1353 | | const A = bit!(5); |
1354 | | /// Dirty; if PS indicates whether software has written to the 1-GByte page referenced by this entry. |
1355 | | /// else ignored. |
1356 | | const D = bit!(6); |
1357 | | /// Page size; if set this entry maps a 1-GByte page; otherwise, this entry references a page directory. |
1358 | | /// if not PS this is ignored. |
1359 | | const PS = bit!(7); |
1360 | | /// Global; if PS && CR4.PGE = 1, determines whether the translation is global; ignored otherwise |
1361 | | /// if not PS this is ignored. |
1362 | | const G = bit!(8); |
1363 | | /// Indirectly determines the memory type used to access the 1-GByte page referenced by this entry. |
1364 | | const PAT = bit!(12); |
1365 | | /// If IA32_EFER.NXE = 1, execute-disable |
1366 | | /// If 1, instruction fetches are not allowed from the 1-GByte region controlled by this entry. |
1367 | | const XD = bit!(63); |
1368 | | } |
1369 | | } |
1370 | | |
1371 | | /// A PDPT Entry consists of an address and a bunch of flags. |
1372 | | #[repr(transparent)] |
1373 | | #[derive(Clone, Copy)] |
1374 | | pub struct PDPTEntry(pub u64); |
1375 | | |
1376 | | impl fmt::Debug for PDPTEntry { |
1377 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1378 | 0 | write!( |
1379 | 0 | f, |
1380 | 0 | "PDPTEntry {{ {:#x}, {:?} }}", |
1381 | 0 | self.address(), |
1382 | 0 | self.flags() |
1383 | | ) |
1384 | 0 | } |
1385 | | } |
1386 | | |
1387 | | impl PDPTEntry { |
1388 | | /// Creates a new PDPTEntry. |
1389 | | /// |
1390 | | /// # Arguments |
1391 | | /// |
1392 | | /// * `pd` - The physical address of the page directory. |
1393 | | /// * `flags`- Additional flags for the entry. |
1394 | 0 | pub fn new(pd: PAddr, flags: PDPTFlags) -> PDPTEntry { |
1395 | 0 | let pd_val = pd & ADDRESS_MASK; |
1396 | 0 | assert!(pd_val == pd.into()); |
1397 | 0 | assert!(pd % BASE_PAGE_SIZE == 0); |
1398 | 0 | PDPTEntry(pd_val | flags.bits) |
1399 | 0 | } |
1400 | | |
1401 | | /// Retrieves the physical address in this entry. |
1402 | 0 | pub fn address(self) -> PAddr { |
1403 | 0 | PAddr::from(self.0 & ADDRESS_MASK) |
1404 | 0 | } |
1405 | | |
1406 | | /// Returns the flags corresponding to this entry. |
1407 | 0 | pub fn flags(self) -> PDPTFlags { |
1408 | 0 | PDPTFlags::from_bits_truncate(self.0) |
1409 | 0 | } |
1410 | | |
1411 | | check_flag!(doc = "Is page present?", is_present, PDPTFlags::P); |
1412 | | check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 1-GByte region controlled by this entry.", |
1413 | | is_writeable, PDPTFlags::RW); |
1414 | | check_flag!(doc = "User/supervisor; user-mode accesses are not allowed to the 1-GByte region controlled by this entry.", |
1415 | | is_user_mode_allowed, PDPTFlags::US); |
1416 | | check_flag!( |
1417 | | doc = "Page-level write-through.", |
1418 | | is_page_write_through, |
1419 | | PDPTFlags::PWT |
1420 | | ); |
1421 | | check_flag!( |
1422 | | doc = "Page-level cache disable.", |
1423 | | is_page_level_cache_disabled, |
1424 | | PDPTFlags::PCD |
1425 | | ); |
1426 | | check_flag!( |
1427 | | doc = |
1428 | | "Accessed; indicates whether this entry has been used for linear-address translation.", |
1429 | | is_accessed, |
1430 | | PDPTFlags::A |
1431 | | ); |
1432 | | check_flag!(doc = "Indirectly determines the memory type used to access the 1-GByte page referenced by this entry. if not PS this is ignored.", |
1433 | | is_pat, PDPTFlags::PAT); |
1434 | | check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 1-GByte region controlled by this entry.", |
1435 | | is_instruction_fetching_disabled, PDPTFlags::XD); |
1436 | | check_flag!(doc = "Page size; if set this entry maps a 1-GByte page; otherwise, this entry references a page directory.", |
1437 | | is_page, PDPTFlags::PS); |
1438 | | } |
1439 | | |
1440 | | bitflags! { |
1441 | | /// PD configuration bits description. |
1442 | | #[repr(transparent)] |
1443 | | pub struct PDFlags: u64 { |
1444 | | /// Present; must be 1 to map a 2-MByte page or reference a page table. |
1445 | | const P = bit!(0); |
1446 | | /// Read/write; if 0, writes may not be allowed to the 2-MByte region controlled by this entry |
1447 | | const RW = bit!(1); |
1448 | | /// User/supervisor; user-mode accesses are not allowed to the 2-MByte region controlled by this entry. |
1449 | | const US = bit!(2); |
1450 | | /// Page-level write-through. |
1451 | | const PWT = bit!(3); |
1452 | | /// Page-level cache disable. |
1453 | | const PCD = bit!(4); |
1454 | | /// Accessed; if PS set indicates whether software has accessed the 2-MByte page |
1455 | | /// else indicates whether this entry has been used for linear-address translation |
1456 | | const A = bit!(5); |
1457 | | /// Dirty; if PS indicates whether software has written to the 2-MByte page referenced by this entry. |
1458 | | /// else ignored. |
1459 | | const D = bit!(6); |
1460 | | /// Page size; if set this entry maps a 2-MByte page; otherwise, this entry references a page table. |
1461 | | const PS = bit!(7); |
1462 | | /// Global; if PS && CR4.PGE = 1, determines whether the translation is global; ignored otherwise |
1463 | | /// if not PS this is ignored. |
1464 | | const G = bit!(8); |
1465 | | /// Indirectly determines the memory type used to access the 2-MByte page referenced by this entry. |
1466 | | /// if not PS this is ignored. |
1467 | | const PAT = bit!(12); |
1468 | | /// If IA32_EFER.NXE = 1, execute-disable |
1469 | | /// If 1, instruction fetches are not allowed from the 2-MByte region controlled by this entry. |
1470 | | const XD = bit!(63); |
1471 | | } |
1472 | | } |
1473 | | |
1474 | | /// A PD Entry consists of an address and a bunch of flags. |
1475 | | #[repr(transparent)] |
1476 | | #[derive(Clone, Copy)] |
1477 | | pub struct PDEntry(pub u64); |
1478 | | |
1479 | | impl fmt::Debug for PDEntry { |
1480 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1481 | 0 | write!(f, "PDEntry {{ {:#x}, {:?} }}", self.address(), self.flags()) |
1482 | 0 | } |
1483 | | } |
1484 | | |
1485 | | impl PDEntry { |
1486 | | /// Creates a new PDEntry. |
1487 | | /// |
1488 | | /// # Arguments |
1489 | | /// |
1490 | | /// * `pt` - The physical address of the page table. |
1491 | | /// * `flags`- Additional flags for the entry. |
1492 | 0 | pub fn new(pt: PAddr, flags: PDFlags) -> PDEntry { |
1493 | 0 | let pt_val = pt & ADDRESS_MASK; |
1494 | 0 | assert!(pt_val == pt.into()); |
1495 | 0 | assert!(pt % BASE_PAGE_SIZE == 0); |
1496 | 0 | PDEntry(pt_val | flags.bits) |
1497 | 0 | } |
1498 | | |
1499 | | /// Retrieves the physical address in this entry. |
1500 | 0 | pub fn address(self) -> PAddr { |
1501 | 0 | PAddr::from(self.0 & ADDRESS_MASK) |
1502 | 0 | } |
1503 | | |
1504 | | /// Returns the flags corresponding to this entry. |
1505 | 0 | pub fn flags(self) -> PDFlags { |
1506 | 0 | PDFlags::from_bits_truncate(self.0) |
1507 | 0 | } |
1508 | | |
1509 | | check_flag!( |
1510 | | doc = "Present; must be 1 to map a 2-MByte page or reference a page table.", |
1511 | | is_present, |
1512 | | PDFlags::P |
1513 | | ); |
1514 | | check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 2-MByte region controlled by this entry", |
1515 | | is_writeable, PDFlags::RW); |
1516 | | check_flag!(doc = "User/supervisor; user-mode accesses are not allowed to the 2-MByte region controlled by this entry.", |
1517 | | is_user_mode_allowed, PDFlags::US); |
1518 | | check_flag!( |
1519 | | doc = "Page-level write-through.", |
1520 | | is_page_write_through, |
1521 | | PDFlags::PWT |
1522 | | ); |
1523 | | check_flag!( |
1524 | | doc = "Page-level cache disable.", |
1525 | | is_page_level_cache_disabled, |
1526 | | PDFlags::PCD |
1527 | | ); |
1528 | | check_flag!(doc = "Accessed; if PS set indicates whether software has accessed the 2-MByte page else indicates whether this entry has been used for linear-address translation.", |
1529 | | is_accessed, PDFlags::A); |
1530 | | check_flag!(doc = "Dirty; if PS set indicates whether software has written to the 2-MByte page referenced by this entry else ignored.", |
1531 | | is_dirty, PDFlags::D); |
1532 | | check_flag!(doc = "Page size; if set this entry maps a 2-MByte page; otherwise, this entry references a page table.", |
1533 | | is_page, PDFlags::PS); |
1534 | | check_flag!(doc = "Global; if PS && CR4.PGE = 1, determines whether the translation is global; ignored otherwise.", |
1535 | | is_global, PDFlags::G); |
1536 | | check_flag!(doc = "Indirectly determines the memory type used to access the 2-MByte page referenced by this entry. if not PS this is ignored.", |
1537 | | is_pat, PDFlags::PAT); |
1538 | | check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 2-Mbyte region.", |
1539 | | is_instruction_fetching_disabled, PDFlags::XD); |
1540 | | } |
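With PS set, a PD entry maps a 2 MiB page directly instead of pointing at a page table; a sketch using an illustrative, 2 MiB aligned frame:

fn large_page_pd_entry_sketch() {
    let frame = PAddr::from(2 * LARGE_PAGE_SIZE as u64); // 4 MiB, so 2 MiB aligned
    let e = PDEntry::new(frame, PDFlags::P | PDFlags::RW | PDFlags::PS);
    assert!(e.is_present() && e.is_page()); // is_page() reports the PS bit
    assert_eq!(e.address(), frame);
}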
1541 | | |
1542 | | bitflags! { |
1543 | | /// PT Entry bits description. |
1544 | | #[repr(transparent)] |
1545 | | pub struct PTFlags: u64 { |
1546 | | /// Present; must be 1 to map a 4-KByte page. |
1547 | | const P = bit!(0); |
1548 | | /// Read/write; if 0, writes may not be allowed to the 4-KByte region controlled by this entry |
1549 | | const RW = bit!(1); |
1550 | | /// User/supervisor; user-mode accesses are not allowed to the 4-KByte region controlled by this entry. |
1551 | | const US = bit!(2); |
1552 | | /// Page-level write-through. |
1553 | | const PWT = bit!(3); |
1554 | | /// Page-level cache disable. |
1555 | | const PCD = bit!(4); |
1556 | | /// Accessed; indicates whether software has accessed the 4-KByte page |
1557 | | const A = bit!(5); |
1558 | | /// Dirty; indicates whether software has written to the 4-KByte page referenced by this entry. |
1559 | | const D = bit!(6); |
1560 | | /// Global; if CR4.PGE = 1, determines whether the translation is global (see Section 4.10); ignored otherwise |
1561 | | const G = bit!(8); |
1562 | | /// If IA32_EFER.NXE = 1, execute-disable |
1563 | | /// If 1, instruction fetches are not allowed from the 4-KByte region controlled by this entry.
1564 | | const XD = bit!(63); |
1565 | | } |
1566 | | } |
1567 | | |
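Illustrative sketch only: the flag constants above track the hardware bit positions of a 4-KByte PT entry, so a raw entry value can be decoded with from_bits_truncate (as the flags() accessor further below does).

    use x86::bits64::paging::PTFlags;

    // Bits 0 (P) and 1 (RW) set, all other bits clear.
    let raw: u64 = 0b11;
    let decoded = PTFlags::from_bits_truncate(raw);

    assert!(decoded.contains(PTFlags::P | PTFlags::RW));
    assert!(!decoded.contains(PTFlags::US));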
1568 | | /// A PT Entry consists of an address and a bunch of flags. |
1569 | | #[repr(transparent)] |
1570 | | #[derive(Clone, Copy)] |
1571 | | pub struct PTEntry(pub u64); |
1572 | | |
1573 | | impl fmt::Debug for PTEntry { |
1574 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1575 | 0 | write!(f, "PTEntry {{ {:#x}, {:?} }}", self.address(), self.flags()) |
1576 | 0 | } |
1577 | | } |
1578 | | |
1579 | | impl PTEntry { |
1580 | | /// Creates a new PTEntry. |
1581 | | /// |
1582 | | /// # Arguments |
1583 | | /// |
1584 | | /// * `page` - The physical address of the backing 4 KiB page. |
1585 | | /// * `flags` - Additional flags for the entry.
1586 | 0 | pub fn new(page: PAddr, flags: PTFlags) -> PTEntry { |
1587 | 0 | let page_val = page & ADDRESS_MASK; |
1588 | 0 | assert!(page_val == page.into()); |
1589 | 0 | assert!(page % BASE_PAGE_SIZE == 0); |
1590 | 0 | PTEntry(page_val | flags.bits) |
1591 | 0 | } |
1592 | | |
1593 | | /// Retrieves the physical address in this entry. |
1594 | 0 | pub fn address(self) -> PAddr { |
1595 | 0 | PAddr::from(self.0 & ADDRESS_MASK) |
1596 | 0 | } |
1597 | | |
1598 | | /// Returns the flags corresponding to this entry. |
1599 | 0 | pub fn flags(self) -> PTFlags { |
1600 | 0 | PTFlags::from_bits_truncate(self.0) |
1601 | 0 | } |
1602 | | |
1603 | | check_flag!( |
1604 | | doc = "Present; must be 1 to map a 4-KByte page.",
1605 | | is_present, |
1606 | | PTFlags::P |
1607 | | ); |
1608 | | check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 4-KByte region controlled by this entry", |
1609 | | is_writeable, PTFlags::RW); |
1610 | | check_flag!(doc = "User/supervisor; if 0, user-mode accesses are not allowed to the 4-KByte region controlled by this entry.",
1611 | | is_user_mode_allowed, PTFlags::US); |
1612 | | check_flag!( |
1613 | | doc = "Page-level write-through.", |
1614 | | is_page_write_through, |
1615 | | PTFlags::PWT |
1616 | | ); |
1617 | | check_flag!( |
1618 | | doc = "Page-level cache disable.", |
1619 | | is_page_level_cache_disabled, |
1620 | | PTFlags::PCD |
1621 | | ); |
1622 | | check_flag!(doc = "Accessed; indicates whether software has accessed the 4-KByte page referenced by this entry.",
1623 | | is_accessed, PTFlags::A);
1624 | | check_flag!(doc = "Dirty; indicates whether software has written to the 4-KByte page referenced by this entry.",
1625 | | is_dirty, PTFlags::D);
1626 | | check_flag!(doc = "Global; if CR4.PGE = 1, determines whether the translation is global; ignored otherwise.",
1627 | | is_global, PTFlags::G);
1628 | | check_flag!(doc = "If IA32_EFER.NXE = 1, execute-disable. If 1, instruction fetches are not allowed from the 4-KByte region.", |
1629 | | is_instruction_fetching_disabled, PTFlags::XD); |
1630 | | } |
1631 | | |
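Analogous usage sketch (illustrative, not taken from the crate's own tests): a 4-KByte user-mode mapping with execute-disable, built with PTEntry::new under the same alignment assertions as above.

    use x86::bits64::paging::{PAddr, PTEntry, PTFlags};

    // Map a 4-KiB page at physical address 0x1000 for user-mode read/write,
    // with instruction fetches disabled via the XD bit.
    let frame = PAddr::from(0x1000);
    let pte = PTEntry::new(frame, PTFlags::P | PTFlags::RW | PTFlags::US | PTFlags::XD);

    assert!(pte.is_present() && pte.is_user_mode_allowed());
    assert!(pte.is_instruction_fetching_disabled());
    assert_eq!(pte.address(), frame);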
1632 | | #[cfg(all(test, feature = "utest"))] |
1633 | | mod test { |
1634 | | use super::*; |
1635 | | |
1636 | | #[test] |
1637 | | fn paddr_align() { |
1638 | | let base = PAddr::from(0x1000); |
1639 | | assert_eq!(base.base_page_offset(), 0x0); |
1640 | | assert_eq!(base.large_page_offset(), 0x1000); |
1641 | | assert_eq!(base.huge_page_offset(), 0x1000); |
1642 | | assert_eq!(base.align_down_to_base_page(), PAddr::from(0x1000)); |
1643 | | assert_eq!(base.align_down_to_large_page(), PAddr::from(0x0)); |
1644 | | assert_eq!(base.align_down_to_huge_page(), PAddr::from(0x0)); |
1645 | | assert_eq!(base.align_up_to_base_page(), PAddr::from(0x1000)); |
1646 | | assert_eq!(base.align_up_to_large_page(), PAddr::from(0x200000)); |
1647 | | assert_eq!(base.align_up_to_huge_page(), PAddr::from(1073741824)); |
1648 | | assert!(base.is_base_page_aligned()); |
1649 | | assert!(!base.is_large_page_aligned()); |
1650 | | assert!(!base.is_huge_page_aligned()); |
1651 | | assert!(base.is_aligned(0x1u64)); |
1652 | | assert!(base.is_aligned(0x2u64)); |
1653 | | assert!(!base.is_aligned(0x3u64)); |
1654 | | assert!(base.is_aligned(0x4u64)); |
1655 | | |
1656 | | let base = PAddr::from(0x1001); |
1657 | | assert_eq!(base.base_page_offset(), 0x1); |
1658 | | assert_eq!(base.large_page_offset(), 0x1001); |
1659 | | assert_eq!(base.huge_page_offset(), 0x1001); |
1660 | | assert_eq!(base.align_down_to_base_page(), PAddr::from(0x1000)); |
1661 | | assert_eq!(base.align_down_to_large_page(), PAddr::from(0x0)); |
1662 | | assert_eq!(base.align_down_to_huge_page(), PAddr::from(0x0)); |
1663 | | assert_eq!(base.align_up_to_base_page(), PAddr::from(0x2000)); |
1664 | | assert_eq!(base.align_up_to_large_page(), PAddr::from(0x200000)); |
1665 | | assert_eq!(base.align_up_to_huge_page(), PAddr::from(1073741824)); |
1666 | | assert!(!base.is_base_page_aligned()); |
1667 | | assert!(!base.is_large_page_aligned()); |
1668 | | assert!(!base.is_huge_page_aligned()); |
1669 | | assert!(base.is_aligned(0x1u64)); |
1670 | | assert!(!base.is_aligned(0x2u64)); |
1671 | | assert!(!base.is_aligned(0x3u64)); |
1672 | | assert!(!base.is_aligned(0x4u64)); |
1673 | | |
1674 | | let base = PAddr::from(0x200000); |
1675 | | assert_eq!(base.base_page_offset(), 0x0); |
1676 | | assert_eq!(base.large_page_offset(), 0x0); |
1677 | | assert_eq!(base.huge_page_offset(), 0x200000); |
1678 | | assert_eq!(base.align_down_to_base_page(), PAddr::from(0x200000)); |
1679 | | assert_eq!(base.align_down_to_large_page(), PAddr::from(0x200000)); |
1680 | | assert_eq!(base.align_down_to_huge_page(), PAddr::from(0x0)); |
1681 | | assert_eq!(base.align_up_to_base_page(), PAddr::from(0x200000)); |
1682 | | assert_eq!(base.align_up_to_large_page(), PAddr::from(0x200000)); |
1683 | | assert_eq!(base.align_up_to_huge_page(), PAddr::from(1073741824)); |
1684 | | assert!(base.is_base_page_aligned()); |
1685 | | assert!(base.is_large_page_aligned()); |
1686 | | assert!(!base.is_huge_page_aligned()); |
1687 | | assert!(base.is_aligned(0x1u64)); |
1688 | | assert!(base.is_aligned(0x2u64)); |
1689 | | assert!(!base.is_aligned(0x3u64)); |
1690 | | assert!(base.is_aligned(0x4u64)); |
1691 | | |
1692 | | let base = PAddr::from(0x200002); |
1693 | | assert_eq!(base.base_page_offset(), 0x2); |
1694 | | assert_eq!(base.large_page_offset(), 0x2); |
1695 | | assert_eq!(base.huge_page_offset(), 0x200002); |
1696 | | assert_eq!(base.align_down_to_base_page(), PAddr::from(0x200000)); |
1697 | | assert_eq!(base.align_down_to_large_page(), PAddr::from(0x200000)); |
1698 | | assert_eq!(base.align_down_to_huge_page(), PAddr::from(0x0)); |
1699 | | assert_eq!(base.align_up_to_base_page(), PAddr::from(0x201000)); |
1700 | | assert_eq!(base.align_up_to_large_page(), PAddr::from(0x400000)); |
1701 | | assert_eq!(base.align_up_to_huge_page(), PAddr::from(1073741824)); |
1702 | | assert!(!base.is_base_page_aligned()); |
1703 | | assert!(!base.is_large_page_aligned()); |
1704 | | assert!(!base.is_huge_page_aligned()); |
1705 | | assert!(base.is_aligned(0x1u64)); |
1706 | | assert!(base.is_aligned(0x2u64)); |
1707 | | assert!(!base.is_aligned(0x3u64)); |
1708 | | assert!(!base.is_aligned(0x4u64)); |
1709 | | } |
1710 | | |
1711 | | #[test] |
1712 | | fn ioaddr_align() { |
1713 | | let base = IOAddr::from(0x1000); |
1714 | | assert_eq!(base.base_page_offset(), 0x0); |
1715 | | assert_eq!(base.large_page_offset(), 0x1000); |
1716 | | assert_eq!(base.huge_page_offset(), 0x1000); |
1717 | | assert_eq!(base.align_down_to_base_page(), IOAddr::from(0x1000)); |
1718 | | assert_eq!(base.align_down_to_large_page(), IOAddr::from(0x0)); |
1719 | | assert_eq!(base.align_down_to_huge_page(), IOAddr::from(0x0)); |
1720 | | assert_eq!(base.align_up_to_base_page(), IOAddr::from(0x1000)); |
1721 | | assert_eq!(base.align_up_to_large_page(), IOAddr::from(0x200000)); |
1722 | | assert_eq!(base.align_up_to_huge_page(), IOAddr::from(1073741824)); |
1723 | | assert!(base.is_base_page_aligned()); |
1724 | | assert!(!base.is_large_page_aligned()); |
1725 | | assert!(!base.is_huge_page_aligned()); |
1726 | | assert!(base.is_aligned(0x1u64)); |
1727 | | assert!(base.is_aligned(0x2u64)); |
1728 | | assert!(!base.is_aligned(0x3u64)); |
1729 | | assert!(base.is_aligned(0x4u64)); |
1730 | | |
1731 | | let base = IOAddr::from(0x1001); |
1732 | | assert_eq!(base.base_page_offset(), 0x1); |
1733 | | assert_eq!(base.large_page_offset(), 0x1001); |
1734 | | assert_eq!(base.huge_page_offset(), 0x1001); |
1735 | | assert_eq!(base.align_down_to_base_page(), IOAddr::from(0x1000)); |
1736 | | assert_eq!(base.align_down_to_large_page(), IOAddr::from(0x0)); |
1737 | | assert_eq!(base.align_down_to_huge_page(), IOAddr::from(0x0)); |
1738 | | assert_eq!(base.align_up_to_base_page(), IOAddr::from(0x2000)); |
1739 | | assert_eq!(base.align_up_to_large_page(), IOAddr::from(0x200000)); |
1740 | | assert_eq!(base.align_up_to_huge_page(), IOAddr::from(1073741824)); |
1741 | | assert!(!base.is_base_page_aligned()); |
1742 | | assert!(!base.is_large_page_aligned()); |
1743 | | assert!(!base.is_huge_page_aligned()); |
1744 | | assert!(base.is_aligned(0x1u64)); |
1745 | | assert!(!base.is_aligned(0x2u64)); |
1746 | | assert!(!base.is_aligned(0x3u64)); |
1747 | | assert!(!base.is_aligned(0x4u64)); |
1748 | | |
1749 | | let base = IOAddr::from(0x200000); |
1750 | | assert_eq!(base.base_page_offset(), 0x0); |
1751 | | assert_eq!(base.large_page_offset(), 0x0); |
1752 | | assert_eq!(base.huge_page_offset(), 0x200000); |
1753 | | assert_eq!(base.align_down_to_base_page(), IOAddr::from(0x200000)); |
1754 | | assert_eq!(base.align_down_to_large_page(), IOAddr::from(0x200000)); |
1755 | | assert_eq!(base.align_down_to_huge_page(), IOAddr::from(0x0)); |
1756 | | assert_eq!(base.align_up_to_base_page(), IOAddr::from(0x200000)); |
1757 | | assert_eq!(base.align_up_to_large_page(), IOAddr::from(0x200000)); |
1758 | | assert_eq!(base.align_up_to_huge_page(), IOAddr::from(1073741824)); |
1759 | | assert!(base.is_base_page_aligned()); |
1760 | | assert!(base.is_large_page_aligned()); |
1761 | | assert!(!base.is_huge_page_aligned()); |
1762 | | assert!(base.is_aligned(0x1u64)); |
1763 | | assert!(base.is_aligned(0x2u64)); |
1764 | | assert!(!base.is_aligned(0x3u64)); |
1765 | | assert!(base.is_aligned(0x4u64)); |
1766 | | |
1767 | | let base = IOAddr::from(0x200002); |
1768 | | assert_eq!(base.base_page_offset(), 0x2); |
1769 | | assert_eq!(base.large_page_offset(), 0x2); |
1770 | | assert_eq!(base.huge_page_offset(), 0x200002); |
1771 | | assert_eq!(base.align_down_to_base_page(), IOAddr::from(0x200000)); |
1772 | | assert_eq!(base.align_down_to_large_page(), IOAddr::from(0x200000)); |
1773 | | assert_eq!(base.align_down_to_huge_page(), IOAddr::from(0x0)); |
1774 | | assert_eq!(base.align_up_to_base_page(), IOAddr::from(0x201000)); |
1775 | | assert_eq!(base.align_up_to_large_page(), IOAddr::from(0x400000)); |
1776 | | assert_eq!(base.align_up_to_huge_page(), IOAddr::from(1073741824)); |
1777 | | assert!(!base.is_base_page_aligned()); |
1778 | | assert!(!base.is_large_page_aligned()); |
1779 | | assert!(!base.is_huge_page_aligned()); |
1780 | | assert!(base.is_aligned(0x1u64)); |
1781 | | assert!(base.is_aligned(0x2u64)); |
1782 | | assert!(!base.is_aligned(0x3u64)); |
1783 | | assert!(!base.is_aligned(0x4u64)); |
1784 | | } |
1785 | | |
1786 | | #[test] |
1787 | | fn vaddr_align() { |
1788 | | let base = VAddr::from(0x1000); |
1789 | | assert_eq!(base.base_page_offset(), 0x0); |
1790 | | assert_eq!(base.large_page_offset(), 0x1000); |
1791 | | assert_eq!(base.huge_page_offset(), 0x1000); |
1792 | | assert_eq!(base.align_down_to_base_page(), VAddr::from(0x1000)); |
1793 | | assert_eq!(base.align_down_to_large_page(), VAddr::from(0x0)); |
1794 | | assert_eq!(base.align_down_to_huge_page(), VAddr::from(0x0)); |
1795 | | assert_eq!(base.align_up_to_base_page(), VAddr::from(0x1000)); |
1796 | | assert_eq!(base.align_up_to_large_page(), VAddr::from(0x200000)); |
1797 | | assert_eq!(base.align_up_to_huge_page(), VAddr::from(1073741824)); |
1798 | | assert!(base.is_base_page_aligned()); |
1799 | | assert!(!base.is_large_page_aligned()); |
1800 | | assert!(!base.is_huge_page_aligned()); |
1801 | | assert!(base.is_aligned(0x1u64)); |
1802 | | assert!(base.is_aligned(0x2u64)); |
1803 | | assert!(!base.is_aligned(0x3u64)); |
1804 | | assert!(base.is_aligned(0x4u64)); |
1805 | | |
1806 | | let base = VAddr::from(0x1001); |
1807 | | assert_eq!(base.base_page_offset(), 0x1); |
1808 | | assert_eq!(base.large_page_offset(), 0x1001); |
1809 | | assert_eq!(base.huge_page_offset(), 0x1001); |
1810 | | assert_eq!(base.align_down_to_base_page(), VAddr::from(0x1000)); |
1811 | | assert_eq!(base.align_down_to_large_page(), VAddr::from(0x0)); |
1812 | | assert_eq!(base.align_down_to_huge_page(), VAddr::from(0x0)); |
1813 | | assert_eq!(base.align_up_to_base_page(), VAddr::from(0x2000)); |
1814 | | assert_eq!(base.align_up_to_large_page(), VAddr::from(0x200000)); |
1815 | | assert_eq!(base.align_up_to_huge_page(), VAddr::from(1073741824)); |
1816 | | assert!(!base.is_base_page_aligned()); |
1817 | | assert!(!base.is_large_page_aligned()); |
1818 | | assert!(!base.is_huge_page_aligned()); |
1819 | | assert!(base.is_aligned(0x1u64)); |
1820 | | assert!(!base.is_aligned(0x2u64)); |
1821 | | assert!(!base.is_aligned(0x3u64)); |
1822 | | assert!(!base.is_aligned(0x4u64)); |
1823 | | |
1824 | | let base = VAddr::from(0x200000); |
1825 | | assert_eq!(base.base_page_offset(), 0x0); |
1826 | | assert_eq!(base.large_page_offset(), 0x0); |
1827 | | assert_eq!(base.huge_page_offset(), 0x200000); |
1828 | | assert_eq!(base.align_down_to_base_page(), VAddr::from(0x200000)); |
1829 | | assert_eq!(base.align_down_to_large_page(), VAddr::from(0x200000)); |
1830 | | assert_eq!(base.align_down_to_huge_page(), VAddr::from(0x0)); |
1831 | | assert_eq!(base.align_up_to_base_page(), VAddr::from(0x200000)); |
1832 | | assert_eq!(base.align_up_to_large_page(), VAddr::from(0x200000)); |
1833 | | assert_eq!(base.align_up_to_huge_page(), VAddr::from(1073741824)); |
1834 | | assert!(base.is_base_page_aligned()); |
1835 | | assert!(base.is_large_page_aligned()); |
1836 | | assert!(!base.is_huge_page_aligned()); |
1837 | | assert!(base.is_aligned(0x1u64)); |
1838 | | assert!(base.is_aligned(0x2u64)); |
1839 | | assert!(!base.is_aligned(0x3u64)); |
1840 | | assert!(base.is_aligned(0x4u64)); |
1841 | | |
1842 | | let base = VAddr::from(0x200002); |
1843 | | assert_eq!(base.base_page_offset(), 0x2); |
1844 | | assert_eq!(base.large_page_offset(), 0x2); |
1845 | | assert_eq!(base.huge_page_offset(), 0x200002); |
1846 | | assert_eq!(base.align_down_to_base_page(), VAddr::from(0x200000)); |
1847 | | assert_eq!(base.align_down_to_large_page(), VAddr::from(0x200000)); |
1848 | | assert_eq!(base.align_down_to_huge_page(), VAddr::from(0x0)); |
1849 | | assert_eq!(base.align_up_to_base_page(), VAddr::from(0x201000)); |
1850 | | assert_eq!(base.align_up_to_large_page(), VAddr::from(0x400000)); |
1851 | | assert_eq!(base.align_up_to_huge_page(), VAddr::from(1073741824)); |
1852 | | assert!(!base.is_base_page_aligned()); |
1853 | | assert!(!base.is_large_page_aligned()); |
1854 | | assert!(!base.is_huge_page_aligned()); |
1855 | | assert!(base.is_aligned(0x1u64)); |
1856 | | assert!(base.is_aligned(0x2u64)); |
1857 | | assert!(!base.is_aligned(0x3u64)); |
1858 | | assert!(!base.is_aligned(0x4u64)); |
1859 | | } |
1860 | | } |