/rust/registry/src/index.crates.io-1949cf8c6b5b557f/x86-0.47.0/src/bits32/paging.rs
Line | Count | Source |
1 | | //! Description of the data-structures for IA-32 paging mode. |
2 | | |
3 | | use bitflags::*; |
4 | | |
5 | | use core::convert::{From, Into}; |
6 | | use core::fmt; |
7 | | use core::hash::{Hash, Hasher}; |
8 | | use core::ops; |
9 | | |
10 | | macro_rules! check_flag { |
11 | | ($doc:meta, $fun:ident, $flag:expr) => { |
12 | | #[$doc] |
13 | 0 | pub fn $fun(self) -> bool { |
14 | 0 | self.flags().contains($flag) |
15 | 0 | } |
Unexecuted instantiation: <x86::bits32::paging::PDEntry>::is_present
Unexecuted instantiation: <x86::bits32::paging::PDEntry>::is_accessed
Unexecuted instantiation: <x86::bits32::paging::PDEntry>::is_writeable
Unexecuted instantiation: <x86::bits32::paging::PDEntry>::is_user_mode_allowed
Unexecuted instantiation: <x86::bits32::paging::PDEntry>::is_page_write_through
Unexecuted instantiation: <x86::bits32::paging::PDEntry>::is_page_level_cache_disabled
Unexecuted instantiation: <x86::bits32::paging::PDEntry>::is_pat
Unexecuted instantiation: <x86::bits32::paging::PDEntry>::is_page
Unexecuted instantiation: <x86::bits32::paging::PDEntry>::is_dirty
Unexecuted instantiation: <x86::bits32::paging::PDEntry>::is_global
Unexecuted instantiation: <x86::bits32::paging::PTEntry>::is_present
Unexecuted instantiation: <x86::bits32::paging::PTEntry>::is_accessed
Unexecuted instantiation: <x86::bits32::paging::PTEntry>::is_writeable
Unexecuted instantiation: <x86::bits32::paging::PTEntry>::is_user_mode_allowed
Unexecuted instantiation: <x86::bits32::paging::PTEntry>::is_page_write_through
Unexecuted instantiation: <x86::bits32::paging::PTEntry>::is_page_level_cache_disabled
Unexecuted instantiation: <x86::bits32::paging::PTEntry>::is_pat
Unexecuted instantiation: <x86::bits32::paging::PTEntry>::is_dirty
Unexecuted instantiation: <x86::bits32::paging::PTEntry>::is_global
16 | | }; |
17 | | } |
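// Illustration (not part of the original source): each `check_flag!` invocation further
// below expands to a small predicate on `self.flags()`. For example,
// `check_flag!(doc = "Present; must be 1 ...", is_present, PDFlags::P)` generates roughly:
//
//     /// Present; must be 1 ...
//     pub fn is_present(self) -> bool {
//         self.flags().contains(PDFlags::P)
//     }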
18 | | |
19 | | /// Align address downwards. |
20 | | /// |
21 | | /// Returns the greatest x with alignment `align` so that x <= addr. |
22 | | /// The alignment must be a power of 2. |
23 | | #[inline(always)] |
24 | 0 | fn align_down(addr: u32, align: u32) -> u32 { |
25 | 0 | addr & !(align - 1) |
26 | 0 | } |
27 | | |
28 | | /// Align address upwards. |
29 | | /// |
30 | | /// Returns the smallest x with alignment `align` so that x >= addr. |
31 | | /// The alignment must be a power of 2. |
32 | | #[inline(always)] |
33 | 0 | fn align_up(addr: u32, align: u32) -> u32 { |
34 | 0 | let align_mask = align - 1; |
35 | 0 | if addr & align_mask == 0 { |
36 | 0 | addr |
37 | | } else { |
38 | 0 | (addr | align_mask) + 1 |
39 | | } |
40 | 0 | } |
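// An illustrative sketch (not in the original file; the module name is ours) showing the
// effect of the two helpers above for a power-of-two alignment.
#[cfg(all(test, feature = "utest"))]
mod align_examples {
    use super::*;

    #[test]
    fn round_to_4k_boundary() {
        assert_eq!(align_down(0x1234, 0x1000), 0x1000); // clear the low 12 bits
        assert_eq!(align_up(0x1234, 0x1000), 0x2000); // round up to the next boundary
        assert_eq!(align_up(0x2000, 0x1000), 0x2000); // already aligned: returned unchanged
    }
}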
41 | | |
42 | | /// A wrapper for a physical address. |
43 | | #[repr(transparent)] |
44 | | #[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)] |
45 | | pub struct PAddr(pub u32); |
46 | | |
47 | | impl PAddr { |
48 | | /// Convert to `u32` |
49 | 0 | pub fn as_u32(self) -> u32 { |
50 | 0 | self.0 |
51 | 0 | } |
52 | | |
53 | | /// Convert to `usize` |
54 | 0 | pub fn as_usize(self) -> usize { |
55 | 0 | self.0 as usize |
56 | 0 | } |
57 | | |
58 | | /// Convert to mutable pointer. |
59 | 0 | pub fn as_mut_ptr<T>(self) -> *mut T { |
60 | 0 | self.0 as *mut T |
61 | 0 | } |
62 | | |
63 | | /// Convert to pointer. |
64 | 0 | pub fn as_ptr<T>(self) -> *const T { |
65 | 0 | self.0 as *const T |
66 | 0 | } |
67 | | |
68 | | /// Physical Address zero. |
69 | 0 | pub const fn zero() -> Self { |
70 | 0 | PAddr(0) |
71 | 0 | } |
72 | | |
73 | | /// Is zero? |
74 | 0 | pub fn is_zero(self) -> bool { |
75 | 0 | self == PAddr::zero() |
76 | 0 | } |
77 | | |
78 | 0 | fn align_up<U>(self, align: U) -> Self |
79 | 0 | where |
80 | 0 | U: Into<u32>, |
81 | | { |
82 | 0 | PAddr(align_up(self.0, align.into())) |
83 | 0 | } |
84 | | |
85 | 0 | fn align_down<U>(self, align: U) -> Self |
86 | 0 | where |
87 | 0 | U: Into<u32>, |
88 | | { |
89 | 0 | PAddr(align_down(self.0, align.into())) |
90 | 0 | } |
91 | | |
92 | | /// Offset within the 4 KiB page. |
93 | 0 | pub fn base_page_offset(self) -> u32 { |
94 | 0 | self.0 & (BASE_PAGE_SIZE as u32 - 1) |
95 | 0 | } |
96 | | |
97 | | /// Offset within the 4 MiB page. |
98 | 0 | pub fn large_page_offset(self) -> u32 { |
99 | 0 | self.0 & (LARGE_PAGE_SIZE as u32 - 1) |
100 | 0 | } |
101 | | |
102 | | /// Return address of nearest 4 KiB page (lower than or equal to `self`). |
103 | 0 | pub fn align_down_to_base_page(self) -> Self { |
104 | 0 | self.align_down(BASE_PAGE_SIZE as u32) |
105 | 0 | } |
106 | | |
107 | | /// Return address of nearest 4 MiB page (lower than or equal to `self`). |
108 | 0 | pub fn align_down_to_large_page(self) -> Self { |
109 | 0 | self.align_down(LARGE_PAGE_SIZE as u32) |
110 | 0 | } |
111 | | |
112 | | /// Return address of nearest 4 KiB page (higher than or equal to `self`). |
113 | 0 | pub fn align_up_to_base_page(self) -> Self { |
114 | 0 | self.align_up(BASE_PAGE_SIZE as u32) |
115 | 0 | } |
116 | | |
117 | | /// Return address of nearest 4 MiB page (higher than or equal to `self`). |
118 | 0 | pub fn align_up_to_large_page(self) -> Self { |
119 | 0 | self.align_up(LARGE_PAGE_SIZE as u32) |
120 | 0 | } |
121 | | |
122 | | /// Is this address aligned to a 4 KiB page? |
123 | 0 | pub fn is_base_page_aligned(self) -> bool { |
124 | 0 | self.align_down(BASE_PAGE_SIZE as u32) == self |
125 | 0 | } |
126 | | |
127 | | /// Is this address aligned to a 4 MiB page? |
128 | 0 | pub fn is_large_page_aligned(self) -> bool { |
129 | 0 | self.align_down(LARGE_PAGE_SIZE as u32) == self |
130 | 0 | } |
131 | | |
132 | | /// Is this address aligned to `align`? |
133 | | /// |
134 | | /// # Note |
135 | | /// `align` must be a power of two. |
136 | 0 | pub fn is_aligned<U>(self, align: U) -> bool |
137 | 0 | where |
138 | 0 | U: Into<u32> + Copy, |
139 | | { |
140 | 0 | if !align.into().is_power_of_two() { |
141 | 0 | return false; |
142 | 0 | } |
143 | | |
144 | 0 | self.align_down(align) == self |
145 | 0 | } |
146 | | } |
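// A small usage sketch (not part of the crate; module name is ours): every `PAddr` splits
// into its 4 KiB page base plus the offset inside that page.
#[cfg(all(test, feature = "utest"))]
mod paddr_examples {
    use super::*;

    #[test]
    fn page_base_plus_offset() {
        let p = PAddr::from(0x0040_1abcu32);
        assert_eq!(p.align_down_to_base_page(), PAddr::from(0x0040_1000u32));
        assert_eq!(p.base_page_offset(), 0xabc);
        // Recomposing base and offset yields the original address.
        assert_eq!(p.align_down_to_base_page() + p.base_page_offset(), p);
    }
}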
147 | | |
148 | | impl From<u32> for PAddr { |
149 | 0 | fn from(num: u32) -> Self { |
150 | 0 | PAddr(num) |
151 | 0 | } |
152 | | } |
153 | | |
154 | | impl From<usize> for PAddr { |
155 | 0 | fn from(num: usize) -> Self { |
156 | 0 | PAddr(num as u32) |
157 | 0 | } |
158 | | } |
159 | | |
160 | | impl From<i32> for PAddr { |
161 | 0 | fn from(num: i32) -> Self { |
162 | 0 | PAddr(num as u32) |
163 | 0 | } |
164 | | } |
165 | | |
166 | | #[allow(clippy::from_over_into)] |
167 | | impl Into<u32> for PAddr { |
168 | 0 | fn into(self) -> u32 { |
169 | 0 | self.0 |
170 | 0 | } |
171 | | } |
172 | | |
173 | | #[allow(clippy::from_over_into)] |
174 | | impl Into<usize> for PAddr { |
175 | 0 | fn into(self) -> usize { |
176 | 0 | self.0 as usize |
177 | 0 | } |
178 | | } |
179 | | |
180 | | impl ops::Add for PAddr { |
181 | | type Output = PAddr; |
182 | | |
183 | 0 | fn add(self, rhs: PAddr) -> Self::Output { |
184 | 0 | PAddr(self.0 + rhs.0) |
185 | 0 | } |
186 | | } |
187 | | |
188 | | impl ops::Add<u32> for PAddr { |
189 | | type Output = PAddr; |
190 | | |
191 | 0 | fn add(self, rhs: u32) -> Self::Output { |
192 | 0 | PAddr::from(self.0 + rhs) |
193 | 0 | } |
194 | | } |
195 | | |
196 | | impl ops::Add<usize> for PAddr { |
197 | | type Output = PAddr; |
198 | | |
199 | 0 | fn add(self, rhs: usize) -> Self::Output { |
200 | 0 | PAddr::from(self.0 + rhs as u32) |
201 | 0 | } |
202 | | } |
203 | | |
204 | | impl ops::AddAssign for PAddr { |
205 | 0 | fn add_assign(&mut self, other: PAddr) { |
206 | 0 | *self = PAddr::from(self.0 + other.0); |
207 | 0 | } |
208 | | } |
209 | | |
210 | | impl ops::AddAssign<u32> for PAddr { |
211 | 0 | fn add_assign(&mut self, offset: u32) { |
212 | 0 | *self = PAddr::from(self.0 + offset); |
213 | 0 | } |
214 | | } |
215 | | |
216 | | impl ops::Sub for PAddr { |
217 | | type Output = PAddr; |
218 | | |
219 | 0 | fn sub(self, rhs: PAddr) -> Self::Output { |
220 | 0 | PAddr::from(self.0 - rhs.0) |
221 | 0 | } |
222 | | } |
223 | | |
224 | | impl ops::Sub<u32> for PAddr { |
225 | | type Output = PAddr; |
226 | | |
227 | 0 | fn sub(self, rhs: u32) -> Self::Output { |
228 | 0 | PAddr::from(self.0 - rhs) |
229 | 0 | } |
230 | | } |
231 | | |
232 | | impl ops::Sub<usize> for PAddr { |
233 | | type Output = PAddr; |
234 | | |
235 | 0 | fn sub(self, rhs: usize) -> Self::Output { |
236 | 0 | PAddr::from(self.0 - rhs as u32) |
237 | 0 | } |
238 | | } |
239 | | |
240 | | impl ops::Rem for PAddr { |
241 | | type Output = PAddr; |
242 | | |
243 | 0 | fn rem(self, rhs: PAddr) -> Self::Output { |
244 | 0 | PAddr(self.0 % rhs.0) |
245 | 0 | } |
246 | | } |
247 | | |
248 | | impl ops::Rem<u32> for PAddr { |
249 | | type Output = u32; |
250 | | |
251 | 0 | fn rem(self, rhs: u32) -> Self::Output { |
252 | 0 | self.0 % rhs |
253 | 0 | } |
254 | | } |
255 | | |
256 | | impl ops::Rem<usize> for PAddr { |
257 | | type Output = u32; |
258 | | |
259 | 0 | fn rem(self, rhs: usize) -> Self::Output { |
260 | 0 | self.0 % (rhs as u32) |
261 | 0 | } |
262 | | } |
263 | | |
264 | | impl ops::BitAnd for PAddr { |
265 | | type Output = Self; |
266 | | |
267 | 0 | fn bitand(self, rhs: Self) -> Self { |
268 | 0 | PAddr(self.0 & rhs.0) |
269 | 0 | } |
270 | | } |
271 | | |
272 | | impl ops::BitAnd<u32> for PAddr { |
273 | | type Output = u32; |
274 | | |
275 | 0 | fn bitand(self, rhs: u32) -> Self::Output { |
276 | 0 | Into::<u32>::into(self) & rhs |
277 | 0 | } |
278 | | } |
279 | | |
280 | | impl ops::BitOr for PAddr { |
281 | | type Output = PAddr; |
282 | | |
283 | 0 | fn bitor(self, rhs: PAddr) -> Self::Output { |
284 | 0 | PAddr(self.0 | rhs.0) |
285 | 0 | } |
286 | | } |
287 | | |
288 | | impl ops::BitOr<u32> for PAddr { |
289 | | type Output = u32; |
290 | | |
291 | 0 | fn bitor(self, rhs: u32) -> Self::Output { |
292 | 0 | self.0 | rhs |
293 | 0 | } |
294 | | } |
295 | | |
296 | | impl ops::Shr<u32> for PAddr { |
297 | | type Output = u32; |
298 | | |
299 | 0 | fn shr(self, rhs: u32) -> Self::Output { |
300 | 0 | self.0 >> rhs |
301 | 0 | } |
302 | | } |
303 | | |
304 | | impl fmt::Binary for PAddr { |
305 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
306 | 0 | self.0.fmt(f) |
307 | 0 | } |
308 | | } |
309 | | |
310 | | impl fmt::Display for PAddr { |
311 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
312 | 0 | self.0.fmt(f) |
313 | 0 | } |
314 | | } |
315 | | |
316 | | impl fmt::Debug for PAddr { |
317 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
318 | 0 | write!(f, "{:#x}", self.0) |
319 | 0 | } |
320 | | } |
321 | | |
322 | | impl fmt::LowerHex for PAddr { |
323 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
324 | 0 | self.0.fmt(f) |
325 | 0 | } |
326 | | } |
327 | | |
328 | | impl fmt::Octal for PAddr { |
329 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
330 | 0 | self.0.fmt(f) |
331 | 0 | } |
332 | | } |
333 | | |
334 | | impl fmt::UpperHex for PAddr { |
335 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
336 | 0 | self.0.fmt(f) |
337 | 0 | } |
338 | | } |
339 | | |
340 | | impl fmt::Pointer for PAddr { |
341 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
342 | | use core::fmt::LowerHex; |
343 | 0 | self.0.fmt(f) |
344 | 0 | } |
345 | | } |
346 | | |
347 | | #[allow(clippy::derive_hash_xor_eq)] |
348 | | impl Hash for PAddr { |
349 | 0 | fn hash<H: Hasher>(&self, state: &mut H) { |
350 | 0 | self.0.hash(state); |
351 | 0 | } |
352 | | } |
353 | | |
354 | | /// A wrapper for an IO address (IOVA / DMA Address for devices) |
355 | | #[repr(transparent)] |
356 | | #[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)] |
357 | | pub struct IOAddr(pub u32); |
358 | | |
359 | | impl IOAddr { |
360 | | /// Convert to `u32` |
361 | 0 | pub fn as_u32(self) -> u32 { |
362 | 0 | self.0 |
363 | 0 | } |
364 | | |
365 | | /// Convert to `usize` |
366 | 0 | pub fn as_usize(self) -> usize { |
367 | 0 | self.0 as usize |
368 | 0 | } |
369 | | |
370 | | /// IO Address zero. |
371 | 0 | pub const fn zero() -> Self { |
372 | 0 | IOAddr(0) |
373 | 0 | } |
374 | | |
375 | | /// Is zero? |
376 | 0 | pub fn is_zero(self) -> bool { |
377 | 0 | self == IOAddr::zero() |
378 | 0 | } |
379 | | |
380 | 0 | fn align_up<U>(self, align: U) -> Self |
381 | 0 | where |
382 | 0 | U: Into<u32>, |
383 | | { |
384 | 0 | IOAddr(align_up(self.0, align.into())) |
385 | 0 | } |
386 | | |
387 | 0 | fn align_down<U>(self, align: U) -> Self |
388 | 0 | where |
389 | 0 | U: Into<u32>, |
390 | | { |
391 | 0 | IOAddr(align_down(self.0, align.into())) |
392 | 0 | } |
393 | | |
394 | | /// Offset within the 4 KiB page. |
395 | 0 | pub fn base_page_offset(self) -> u32 { |
396 | 0 | self.0 & (BASE_PAGE_SIZE as u32 - 1) |
397 | 0 | } |
398 | | |
399 | | /// Offset within the 4 MiB page. |
400 | 0 | pub fn large_page_offset(self) -> u32 { |
401 | 0 | self.0 & (LARGE_PAGE_SIZE as u32 - 1) |
402 | 0 | } |
403 | | |
404 | | /// Return address of nearest 4 KiB page (lower than or equal to `self`). |
405 | 0 | pub fn align_down_to_base_page(self) -> Self { |
406 | 0 | self.align_down(BASE_PAGE_SIZE as u32) |
407 | 0 | } |
408 | | |
409 | | /// Return address of nearest 4 MiB page (lower than or equal to `self`). |
410 | 0 | pub fn align_down_to_large_page(self) -> Self { |
411 | 0 | self.align_down(LARGE_PAGE_SIZE as u32) |
412 | 0 | } |
413 | | |
414 | | /// Return address of nearest 4 KiB page (higher than or equal to `self`). |
415 | 0 | pub fn align_up_to_base_page(self) -> Self { |
416 | 0 | self.align_up(BASE_PAGE_SIZE as u32) |
417 | 0 | } |
418 | | |
419 | | /// Return address of nearest 4 MiB page (higher than or equal to `self`). |
420 | 0 | pub fn align_up_to_large_page(self) -> Self { |
421 | 0 | self.align_up(LARGE_PAGE_SIZE as u32) |
422 | 0 | } |
423 | | |
424 | | /// Is this address aligned to a 4 KiB page? |
425 | 0 | pub fn is_base_page_aligned(self) -> bool { |
426 | 0 | self.align_down(BASE_PAGE_SIZE as u32) == self |
427 | 0 | } |
428 | | |
429 | | /// Is this address aligned to a 4 MiB page? |
430 | 0 | pub fn is_large_page_aligned(self) -> bool { |
431 | 0 | self.align_down(LARGE_PAGE_SIZE as u32) == self |
432 | 0 | } |
433 | | |
434 | | /// Is this address aligned to `align`? |
435 | | /// |
436 | | /// # Note |
437 | | /// `align` must be a power of two. |
438 | 0 | pub fn is_aligned<U>(self, align: U) -> bool |
439 | 0 | where |
440 | 0 | U: Into<u32> + Copy, |
441 | | { |
442 | 0 | if !align.into().is_power_of_two() { |
443 | 0 | return false; |
444 | 0 | } |
445 | | |
446 | 0 | self.align_down(align) == self |
447 | 0 | } |
448 | | } |
449 | | |
450 | | impl From<u32> for IOAddr { |
451 | 0 | fn from(num: u32) -> Self { |
452 | 0 | IOAddr(num) |
453 | 0 | } |
454 | | } |
455 | | |
456 | | impl From<usize> for IOAddr { |
457 | 0 | fn from(num: usize) -> Self { |
458 | 0 | IOAddr(num as u32) |
459 | 0 | } |
460 | | } |
461 | | |
462 | | impl From<i32> for IOAddr { |
463 | 0 | fn from(num: i32) -> Self { |
464 | 0 | IOAddr(num as u32) |
465 | 0 | } |
466 | | } |
467 | | |
468 | | #[allow(clippy::from_over_into)] |
469 | | impl Into<u32> for IOAddr { |
470 | 0 | fn into(self) -> u32 { |
471 | 0 | self.0 |
472 | 0 | } |
473 | | } |
474 | | |
475 | | #[allow(clippy::from_over_into)] |
476 | | impl Into<usize> for IOAddr { |
477 | 0 | fn into(self) -> usize { |
478 | 0 | self.0 as usize |
479 | 0 | } |
480 | | } |
481 | | |
482 | | impl ops::Add for IOAddr { |
483 | | type Output = IOAddr; |
484 | | |
485 | 0 | fn add(self, rhs: IOAddr) -> Self::Output { |
486 | 0 | IOAddr(self.0 + rhs.0) |
487 | 0 | } |
488 | | } |
489 | | |
490 | | impl ops::Add<u32> for IOAddr { |
491 | | type Output = IOAddr; |
492 | | |
493 | 0 | fn add(self, rhs: u32) -> Self::Output { |
494 | 0 | IOAddr::from(self.0 + rhs) |
495 | 0 | } |
496 | | } |
497 | | |
498 | | impl ops::Add<usize> for IOAddr { |
499 | | type Output = IOAddr; |
500 | | |
501 | 0 | fn add(self, rhs: usize) -> Self::Output { |
502 | 0 | IOAddr::from(self.0 + rhs as u32) |
503 | 0 | } |
504 | | } |
505 | | |
506 | | impl ops::AddAssign for IOAddr { |
507 | 0 | fn add_assign(&mut self, other: IOAddr) { |
508 | 0 | *self = IOAddr::from(self.0 + other.0); |
509 | 0 | } |
510 | | } |
511 | | |
512 | | impl ops::AddAssign<u32> for IOAddr { |
513 | 0 | fn add_assign(&mut self, offset: u32) { |
514 | 0 | *self = IOAddr::from(self.0 + offset); |
515 | 0 | } |
516 | | } |
517 | | |
518 | | impl ops::Sub for IOAddr { |
519 | | type Output = IOAddr; |
520 | | |
521 | 0 | fn sub(self, rhs: IOAddr) -> Self::Output { |
522 | 0 | IOAddr::from(self.0 - rhs.0) |
523 | 0 | } |
524 | | } |
525 | | |
526 | | impl ops::Sub<u32> for IOAddr { |
527 | | type Output = IOAddr; |
528 | | |
529 | 0 | fn sub(self, rhs: u32) -> Self::Output { |
530 | 0 | IOAddr::from(self.0 - rhs) |
531 | 0 | } |
532 | | } |
533 | | |
534 | | impl ops::Sub<usize> for IOAddr { |
535 | | type Output = IOAddr; |
536 | | |
537 | 0 | fn sub(self, rhs: usize) -> Self::Output { |
538 | 0 | IOAddr::from(self.0 - rhs as u32) |
539 | 0 | } |
540 | | } |
541 | | |
542 | | impl ops::Rem for IOAddr { |
543 | | type Output = IOAddr; |
544 | | |
545 | 0 | fn rem(self, rhs: IOAddr) -> Self::Output { |
546 | 0 | IOAddr(self.0 % rhs.0) |
547 | 0 | } |
548 | | } |
549 | | |
550 | | impl ops::Rem<u32> for IOAddr { |
551 | | type Output = u32; |
552 | | |
553 | 0 | fn rem(self, rhs: u32) -> Self::Output { |
554 | 0 | self.0 % rhs |
555 | 0 | } |
556 | | } |
557 | | |
558 | | impl ops::Rem<usize> for IOAddr { |
559 | | type Output = u32; |
560 | | |
561 | 0 | fn rem(self, rhs: usize) -> Self::Output { |
562 | 0 | self.0 % (rhs as u32) |
563 | 0 | } |
564 | | } |
565 | | |
566 | | impl ops::BitAnd for IOAddr { |
567 | | type Output = Self; |
568 | | |
569 | 0 | fn bitand(self, rhs: Self) -> Self { |
570 | 0 | IOAddr(self.0 & rhs.0) |
571 | 0 | } |
572 | | } |
573 | | |
574 | | impl ops::BitAnd<u32> for IOAddr { |
575 | | type Output = u32; |
576 | | |
577 | 0 | fn bitand(self, rhs: u32) -> Self::Output { |
578 | 0 | Into::<u32>::into(self) & rhs |
579 | 0 | } |
580 | | } |
581 | | |
582 | | impl ops::BitOr for IOAddr { |
583 | | type Output = IOAddr; |
584 | | |
585 | 0 | fn bitor(self, rhs: IOAddr) -> Self::Output { |
586 | 0 | IOAddr(self.0 | rhs.0) |
587 | 0 | } |
588 | | } |
589 | | |
590 | | impl ops::BitOr<u32> for IOAddr { |
591 | | type Output = u32; |
592 | | |
593 | 0 | fn bitor(self, rhs: u32) -> Self::Output { |
594 | 0 | self.0 | rhs |
595 | 0 | } |
596 | | } |
597 | | |
598 | | impl ops::Shr<u32> for IOAddr { |
599 | | type Output = u32; |
600 | | |
601 | 0 | fn shr(self, rhs: u32) -> Self::Output { |
602 | 0 | self.0 >> rhs |
603 | 0 | } |
604 | | } |
605 | | |
606 | | impl fmt::Binary for IOAddr { |
607 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
608 | 0 | self.0.fmt(f) |
609 | 0 | } |
610 | | } |
611 | | |
612 | | impl fmt::Display for IOAddr { |
613 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
614 | 0 | self.0.fmt(f) |
615 | 0 | } |
616 | | } |
617 | | |
618 | | impl fmt::Debug for IOAddr { |
619 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
620 | 0 | write!(f, "{:#x}", self.0) |
621 | 0 | } |
622 | | } |
623 | | |
624 | | impl fmt::LowerHex for IOAddr { |
625 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
626 | 0 | self.0.fmt(f) |
627 | 0 | } |
628 | | } |
629 | | |
630 | | impl fmt::Octal for IOAddr { |
631 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
632 | 0 | self.0.fmt(f) |
633 | 0 | } |
634 | | } |
635 | | |
636 | | impl fmt::UpperHex for IOAddr { |
637 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
638 | 0 | self.0.fmt(f) |
639 | 0 | } |
640 | | } |
641 | | |
642 | | impl fmt::Pointer for IOAddr { |
643 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
644 | | use core::fmt::LowerHex; |
645 | 0 | self.0.fmt(f) |
646 | 0 | } |
647 | | } |
648 | | |
649 | | #[allow(clippy::derive_hash_xor_eq)] |
650 | | impl Hash for IOAddr { |
651 | 0 | fn hash<H: Hasher>(&self, state: &mut H) { |
652 | 0 | self.0.hash(state); |
653 | 0 | } |
654 | | } |
655 | | |
656 | | /// A wrapper for a virtual address. |
657 | | #[repr(transparent)] |
658 | | #[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)] |
659 | | pub struct VAddr(pub u32); |
660 | | |
661 | | impl VAddr { |
662 | | /// Convert from `u32` |
663 | 0 | pub const fn from_u32(v: u32) -> Self { |
664 | 0 | VAddr(v) |
665 | 0 | } |
666 | | |
667 | | /// Convert from `usize` |
668 | 0 | pub const fn from_usize(v: usize) -> Self { |
669 | 0 | VAddr(v as u32) |
670 | 0 | } |
671 | | |
672 | | /// Convert to `u32` |
673 | 0 | pub const fn as_u32(self) -> u32 { |
674 | 0 | self.0 |
675 | 0 | } |
676 | | |
677 | | /// Convert to `usize` |
678 | 0 | pub const fn as_usize(self) -> usize { |
679 | 0 | self.0 as usize |
680 | 0 | } |
681 | | |
682 | | /// Convert to mutable pointer. |
683 | 0 | pub fn as_mut_ptr<T>(self) -> *mut T { |
684 | 0 | self.0 as *mut T |
685 | 0 | } |
686 | | |
687 | | /// Convert to pointer. |
688 | 0 | pub fn as_ptr<T>(self) -> *const T { |
689 | 0 | self.0 as *const T |
690 | 0 | } |
691 | | |
692 | | /// Virtual Address zero. |
693 | 0 | pub const fn zero() -> Self { |
694 | 0 | VAddr(0) |
695 | 0 | } |
696 | | |
697 | | /// Is zero? |
698 | 0 | pub fn is_zero(self) -> bool { |
699 | 0 | self == VAddr::zero() |
700 | 0 | } |
701 | | |
702 | 0 | fn align_up<U>(self, align: U) -> Self |
703 | 0 | where |
704 | 0 | U: Into<u32>, |
705 | | { |
706 | 0 | VAddr(align_up(self.0, align.into())) |
707 | 0 | } |
708 | | |
709 | 0 | fn align_down<U>(self, align: U) -> Self |
710 | 0 | where |
711 | 0 | U: Into<u32>, |
712 | | { |
713 | 0 | VAddr(align_down(self.0, align.into())) |
714 | 0 | } |
715 | | |
716 | | /// Offset within the 4 KiB page. |
717 | 0 | pub fn base_page_offset(self) -> u32 { |
718 | 0 | self.0 & (BASE_PAGE_SIZE as u32 - 1) |
719 | 0 | } |
720 | | |
721 | | /// Offset within the 4 MiB page. |
722 | 0 | pub fn large_page_offset(self) -> u32 { |
723 | 0 | self.0 & (LARGE_PAGE_SIZE as u32 - 1) |
724 | 0 | } |
725 | | |
726 | | /// Return address of nearest 4 KiB page (lower than or equal to `self`). |
727 | 0 | pub fn align_down_to_base_page(self) -> Self { |
728 | 0 | self.align_down(BASE_PAGE_SIZE as u32) |
729 | 0 | } |
730 | | |
731 | | /// Return address of nearest 4 MiB page (lower than or equal to `self`). |
732 | 0 | pub fn align_down_to_large_page(self) -> Self { |
733 | 0 | self.align_down(LARGE_PAGE_SIZE as u32) |
734 | 0 | } |
735 | | |
736 | | /// Return address of nearest 4 KiB page (higher than or equal to `self`). |
737 | 0 | pub fn align_up_to_base_page(self) -> Self { |
738 | 0 | self.align_up(BASE_PAGE_SIZE as u32) |
739 | 0 | } |
740 | | |
741 | | /// Return address of nearest 4 MiB page (higher than or equal to `self`). |
742 | 0 | pub fn align_up_to_large_page(self) -> Self { |
743 | 0 | self.align_up(LARGE_PAGE_SIZE as u32) |
744 | 0 | } |
745 | | |
746 | | /// Is this address aligned to a 4 KiB page? |
747 | 0 | pub fn is_base_page_aligned(self) -> bool { |
748 | 0 | self.align_down(BASE_PAGE_SIZE as u32) == self |
749 | 0 | } |
750 | | |
751 | | /// Is this address aligned to a 4 MiB page? |
752 | 0 | pub fn is_large_page_aligned(self) -> bool { |
753 | 0 | self.align_down(LARGE_PAGE_SIZE as u32) == self |
754 | 0 | } |
755 | | |
756 | | /// Is this address aligned to `align`? |
757 | | /// |
758 | | /// # Note |
759 | | /// `align` must be a power of two. |
760 | 0 | pub fn is_aligned<U>(self, align: U) -> bool |
761 | 0 | where |
762 | 0 | U: Into<u32> + Copy, |
763 | | { |
764 | 0 | if !align.into().is_power_of_two() { |
765 | 0 | return false; |
766 | 0 | } |
767 | | |
768 | 0 | self.align_down(align) == self |
769 | 0 | } |
770 | | } |
771 | | |
772 | | impl From<u32> for VAddr { |
773 | 0 | fn from(num: u32) -> Self { |
774 | 0 | VAddr(num) |
775 | 0 | } |
776 | | } |
777 | | |
778 | | impl From<i32> for VAddr { |
779 | 0 | fn from(num: i32) -> Self { |
780 | 0 | VAddr(num as u32) |
781 | 0 | } |
782 | | } |
783 | | |
784 | | #[allow(clippy::from_over_into)] |
785 | | impl Into<u32> for VAddr { |
786 | 0 | fn into(self) -> u32 { |
787 | 0 | self.0 |
788 | 0 | } |
789 | | } |
790 | | |
791 | | impl From<usize> for VAddr { |
792 | 0 | fn from(num: usize) -> Self { |
793 | 0 | VAddr(num as u32) |
794 | 0 | } |
795 | | } |
796 | | |
797 | | #[allow(clippy::from_over_into)] |
798 | | impl Into<usize> for VAddr { |
799 | 0 | fn into(self) -> usize { |
800 | 0 | self.0 as usize |
801 | 0 | } |
802 | | } |
803 | | |
804 | | impl ops::Add for VAddr { |
805 | | type Output = VAddr; |
806 | | |
807 | 0 | fn add(self, rhs: VAddr) -> Self::Output { |
808 | 0 | VAddr(self.0 + rhs.0) |
809 | 0 | } |
810 | | } |
811 | | |
812 | | impl ops::Add<u32> for VAddr { |
813 | | type Output = VAddr; |
814 | | |
815 | 0 | fn add(self, rhs: u32) -> Self::Output { |
816 | 0 | VAddr(self.0 + rhs) |
817 | 0 | } |
818 | | } |
819 | | |
820 | | impl ops::Add<usize> for VAddr { |
821 | | type Output = VAddr; |
822 | | |
823 | 0 | fn add(self, rhs: usize) -> Self::Output { |
824 | 0 | VAddr::from(self.0 + rhs as u32) |
825 | 0 | } |
826 | | } |
827 | | |
828 | | impl ops::AddAssign for VAddr { |
829 | 0 | fn add_assign(&mut self, other: VAddr) { |
830 | 0 | *self = VAddr::from(self.0 + other.0); |
831 | 0 | } |
832 | | } |
833 | | |
834 | | impl ops::AddAssign<u32> for VAddr { |
835 | 0 | fn add_assign(&mut self, offset: u32) { |
836 | 0 | *self = VAddr::from(self.0 + offset); |
837 | 0 | } |
838 | | } |
839 | | |
840 | | impl ops::AddAssign<usize> for VAddr { |
841 | 0 | fn add_assign(&mut self, offset: usize) { |
842 | 0 | *self = VAddr::from(self.0 + offset as u32); |
843 | 0 | } |
844 | | } |
845 | | |
846 | | impl ops::Sub for VAddr { |
847 | | type Output = VAddr; |
848 | | |
849 | 0 | fn sub(self, rhs: VAddr) -> Self::Output { |
850 | 0 | VAddr::from(self.0 - rhs.0) |
851 | 0 | } |
852 | | } |
853 | | |
854 | | impl ops::Sub<u32> for VAddr { |
855 | | type Output = VAddr; |
856 | | |
857 | 0 | fn sub(self, rhs: u32) -> Self::Output { |
858 | 0 | VAddr::from(self.0 - rhs) |
859 | 0 | } |
860 | | } |
861 | | |
862 | | impl ops::Sub<usize> for VAddr { |
863 | | type Output = VAddr; |
864 | | |
865 | 0 | fn sub(self, rhs: usize) -> Self::Output { |
866 | 0 | VAddr::from(self.0 - rhs as u32) |
867 | 0 | } |
868 | | } |
869 | | |
870 | | impl ops::Rem for VAddr { |
871 | | type Output = VAddr; |
872 | | |
873 | 0 | fn rem(self, rhs: VAddr) -> Self::Output { |
874 | 0 | VAddr(self.0 % rhs.0) |
875 | 0 | } |
876 | | } |
877 | | |
878 | | impl ops::Rem<u32> for VAddr { |
879 | | type Output = u32; |
880 | | |
881 | 0 | fn rem(self, rhs: Self::Output) -> Self::Output { |
882 | 0 | self.0 % rhs |
883 | 0 | } |
884 | | } |
885 | | |
886 | | impl ops::Rem<usize> for VAddr { |
887 | | type Output = usize; |
888 | | |
889 | 0 | fn rem(self, rhs: Self::Output) -> Self::Output { |
890 | 0 | self.as_usize() % rhs |
891 | 0 | } |
892 | | } |
893 | | |
894 | | impl ops::BitAnd for VAddr { |
895 | | type Output = Self; |
896 | | |
897 | 0 | fn bitand(self, rhs: Self) -> Self::Output { |
898 | 0 | VAddr(self.0 & rhs.0) |
899 | 0 | } |
900 | | } |
901 | | |
902 | | impl ops::BitAnd<u32> for VAddr { |
903 | | type Output = VAddr; |
904 | | |
905 | 0 | fn bitand(self, rhs: u32) -> Self::Output { |
906 | 0 | VAddr(self.0 & rhs) |
907 | 0 | } |
908 | | } |
909 | | |
910 | | impl ops::BitAnd<usize> for VAddr { |
911 | | type Output = VAddr; |
912 | | |
913 | 0 | fn bitand(self, rhs: usize) -> Self::Output { |
914 | 0 | VAddr(self.0 & rhs as u32) |
915 | 0 | } |
916 | | } |
917 | | |
918 | | impl ops::BitAnd<i32> for VAddr { |
919 | | type Output = VAddr; |
920 | | |
921 | 0 | fn bitand(self, rhs: i32) -> Self::Output { |
922 | 0 | VAddr(self.0 & rhs as u32) |
923 | 0 | } |
924 | | } |
925 | | |
926 | | impl ops::BitOr for VAddr { |
927 | | type Output = VAddr; |
928 | | |
929 | 0 | fn bitor(self, rhs: VAddr) -> VAddr { |
930 | 0 | VAddr(self.0 | rhs.0) |
931 | 0 | } |
932 | | } |
933 | | |
934 | | impl ops::BitOr<u32> for VAddr { |
935 | | type Output = VAddr; |
936 | | |
937 | 0 | fn bitor(self, rhs: u32) -> Self::Output { |
938 | 0 | VAddr(self.0 | rhs) |
939 | 0 | } |
940 | | } |
941 | | |
942 | | impl ops::BitOr<usize> for VAddr { |
943 | | type Output = VAddr; |
944 | | |
945 | 0 | fn bitor(self, rhs: usize) -> Self::Output { |
946 | 0 | VAddr(self.0 | rhs as u32) |
947 | 0 | } |
948 | | } |
949 | | |
950 | | impl ops::Shr<u32> for VAddr { |
951 | | type Output = u32; |
952 | | |
953 | 0 | fn shr(self, rhs: u32) -> Self::Output { |
954 | 0 | self.0 >> rhs as u32 |
955 | 0 | } |
956 | | } |
957 | | |
958 | | impl ops::Shr<usize> for VAddr { |
959 | | type Output = u32; |
960 | | |
961 | 0 | fn shr(self, rhs: usize) -> Self::Output { |
962 | 0 | self.0 >> rhs as u32 |
963 | 0 | } |
964 | | } |
965 | | |
966 | | impl ops::Shr<i32> for VAddr { |
967 | | type Output = u32; |
968 | | |
969 | 0 | fn shr(self, rhs: i32) -> Self::Output { |
970 | 0 | self.0 >> rhs as u32 |
971 | 0 | } |
972 | | } |
973 | | |
974 | | impl fmt::Binary for VAddr { |
975 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
976 | 0 | self.0.fmt(f) |
977 | 0 | } |
978 | | } |
979 | | |
980 | | impl fmt::Display for VAddr { |
981 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
982 | 0 | write!(f, "{:#x}", self.0) |
983 | 0 | } |
984 | | } |
985 | | |
986 | | impl fmt::Debug for VAddr { |
987 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
988 | 0 | write!(f, "{:#x}", self.0) |
989 | 0 | } |
990 | | } |
991 | | |
992 | | impl fmt::LowerHex for VAddr { |
993 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
994 | 0 | self.0.fmt(f) |
995 | 0 | } |
996 | | } |
997 | | |
998 | | impl fmt::Octal for VAddr { |
999 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1000 | 0 | self.0.fmt(f) |
1001 | 0 | } |
1002 | | } |
1003 | | |
1004 | | impl fmt::UpperHex for VAddr { |
1005 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1006 | 0 | self.0.fmt(f) |
1007 | 0 | } |
1008 | | } |
1009 | | |
1010 | | impl fmt::Pointer for VAddr { |
1011 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1012 | | use core::fmt::LowerHex; |
1013 | 0 | self.0.fmt(f) |
1014 | 0 | } |
1015 | | } |
1016 | | |
1017 | | #[allow(clippy::derive_hash_xor_eq)] |
1018 | | impl Hash for VAddr { |
1019 | 0 | fn hash<H: Hasher>(&self, state: &mut H) { |
1020 | 0 | self.0.hash(state); |
1021 | 0 | } |
1022 | | } |
1023 | | |
1024 | | /// Log2 of base page size (12 bits). |
1025 | | pub const BASE_PAGE_SHIFT: usize = 12; |
1026 | | |
1027 | | /// Size of a base page (4 KiB) |
1028 | | pub const BASE_PAGE_SIZE: usize = 4096; |
1029 | | |
1030 | | /// Size of a large page (4 MiB) |
1031 | | pub const LARGE_PAGE_SIZE: usize = 1024 * 1024 * 4; |
1032 | | |
1033 | | /// Size of a cache-line |
1034 | | pub const CACHE_LINE_SIZE: usize = 64; |
1035 | | |
1036 | | /// A type wrapping a base page with a 4 KiB buffer. |
1037 | | pub struct Page([u8; BASE_PAGE_SIZE]); |
1038 | | |
1039 | | /// A type wrapping a large page with a 4 MiB buffer. |
1040 | | pub struct LargePage([u8; LARGE_PAGE_SIZE]); |
1041 | | |
1042 | | /// Mask to find the physical address of an entry in a page-table. |
1043 | | const ADDRESS_MASK: u32 = !0xfff; |
1044 | | const ADDRESS_MASK_PSE: u32 = !0x3fffff; |
1045 | | |
1046 | | /// Page directories and page tables have 1024 = 4096 / 4 entries (each entry is 4 bytes). |
1047 | | pub const PAGE_SIZE_ENTRIES: usize = 1024; |
1048 | | |
1049 | | /// A page directory. |
1050 | | pub type PD = [PDEntry; PAGE_SIZE_ENTRIES]; |
1051 | | |
1052 | | /// A page table. |
1053 | | pub type PT = [PTEntry; PAGE_SIZE_ENTRIES]; |
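// Size sanity checks (an illustrative sketch, not in the original file; module name is
// ours): with 1024 four-byte entries, a page directory or page table occupies exactly one
// 4 KiB page, and one page table covers one large page (4 MiB) of virtual addresses.
#[cfg(all(test, feature = "utest"))]
mod table_geometry_examples {
    use super::*;
    use core::mem::size_of;

    #[test]
    fn tables_fill_one_base_page() {
        assert_eq!(size_of::<PTEntry>(), 4);
        assert_eq!(size_of::<PT>(), BASE_PAGE_SIZE);
        assert_eq!(size_of::<PD>(), BASE_PAGE_SIZE);
        assert_eq!(PAGE_SIZE_ENTRIES * BASE_PAGE_SIZE, LARGE_PAGE_SIZE);
    }
}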
1054 | | |
1055 | | /// Given a virtual address, compute the index of the corresponding PD entry. |
1056 | | #[inline] |
1057 | 0 | pub fn pd_index(addr: VAddr) -> usize { |
1058 | 0 | ((addr >> 22usize) & 0b1111111111) as usize |
1059 | 0 | } |
1060 | | |
1061 | | /// Given a virtual address, compute the index of the corresponding PT entry. |
1062 | | #[inline] |
1063 | 0 | pub fn pt_index(addr: VAddr) -> usize { |
1064 | 0 | ((addr >> 12usize) & 0b1111111111) as usize |
1065 | 0 | } |
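// A short sketch (not part of the crate; module name is ours) of how a 32-bit virtual
// address decomposes: bits 31..22 index the PD, bits 21..12 index the PT, and bits 11..0
// are the offset within the 4 KiB page.
#[cfg(all(test, feature = "utest"))]
mod index_examples {
    use super::*;

    #[test]
    fn split_virtual_address() {
        let v = VAddr::from(0xC030_1ABCu32);
        assert_eq!(pd_index(v), 0x300);
        assert_eq!(pt_index(v), 0x301);
        assert_eq!(v.base_page_offset(), 0xABC);
    }
}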
1066 | | |
1067 | | bitflags! { |
1068 | | /// PD configuration bits description. |
1069 | | #[repr(transparent)] |
1070 | | pub struct PDFlags: u32 { |
1071 | | /// Present; must be 1 to map a 4-MByte page. |
1072 | | const P = bit!(0); |
1073 | | /// Read/write; if 0, writes may not be allowed to the 4-MByte page referenced by this entry. |
1074 | | const RW = bit!(1); |
1075 | | /// User/supervisor; if 0, user-mode accesses are not allowed to the 4-MByte page referenced by this entry. |
1076 | | const US = bit!(2); |
1077 | | /// Page-level write-through. |
1078 | | const PWT = bit!(3); |
1079 | | /// Page-level cache disable. |
1080 | | const PCD = bit!(4); |
1081 | | /// Accessed; indicates whether software has accessed the 4-MByte page referenced by this entry. |
1082 | | const A = bit!(5); |
1083 | | /// Dirty; indicates whether software has written to the 4-MByte page referenced by this entry. |
1084 | | const D = bit!(6); |
1085 | | /// Page size; if set this entry maps a 4-MByte page; otherwise, this entry references a page table. |
1086 | | const PS = bit!(7); |
1087 | | /// Global; if CR4.PGE = 1, determines whether the translation is global; ignored otherwise. |
1088 | | const G = bit!(8); |
1089 | | /// If the PAT is supported, indirectly determines the memory type used to access the 4-MByte page referenced by this entry; |
1090 | | /// otherwise, reserved (must be 0) |
1091 | | const PAT = bit!(12); |
1092 | | } |
1093 | | } |
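// Illustrative sketch (not in the original file; module name is ours): `PDFlags` values
// combine with the usual bitflags operators, and `from_bits_truncate` drops bits that are
// not flag bits (such as address bits).
#[cfg(all(test, feature = "utest"))]
mod pdflags_examples {
    use super::*;

    #[test]
    fn combine_and_query_flags() {
        let f = PDFlags::P | PDFlags::RW | PDFlags::PS;
        assert!(f.contains(PDFlags::P));
        assert!(!f.contains(PDFlags::US));
        // Bit 22 is an address bit for a 4-MByte mapping, not a flag, so it is stripped.
        let raw = 0x0040_0000u32 | PDFlags::P.bits();
        assert_eq!(PDFlags::from_bits_truncate(raw), PDFlags::P);
    }
}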
1094 | | |
1095 | | /// A PD Entry consists of an address and a bunch of flags. |
1096 | | #[repr(transparent)] |
1097 | | #[derive(Clone, Copy)] |
1098 | | pub struct PDEntry(pub u32); |
1099 | | |
1100 | | impl fmt::Debug for PDEntry { |
1101 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1102 | 0 | write!(f, "PDEntry {{ {:#x}, {:?} }}", self.address(), self.flags()) |
1103 | 0 | } |
1104 | | } |
1105 | | |
1106 | | impl PDEntry { |
1107 | | /// Creates a new PDEntry. |
1108 | | /// |
1109 | | /// # Arguments |
1110 | | /// |
1111 | | /// * `pt` - The physical address of the page table. |
1112 | | /// * `flags`- Additional flags for the entry. |
1113 | | /// |
1114 | | /// # Implementation notes |
1115 | | /// |
1116 | | /// This doesn't support PSE-36 or PSE-40. |
1117 | 0 | pub fn new(pt: PAddr, flags: PDFlags) -> PDEntry { |
1118 | 0 | let mask = if flags.contains(PDFlags::PS) { |
1119 | 0 | ADDRESS_MASK_PSE |
1120 | | } else { |
1121 | 0 | ADDRESS_MASK |
1122 | | }; |
1123 | 0 | let pt_val = pt & mask; |
1124 | 0 | assert!(pt_val == pt.into()); |
1125 | 0 | assert!(pt % BASE_PAGE_SIZE == 0); |
1126 | 0 | PDEntry(pt_val | flags.bits) |
1127 | 0 | } |
1128 | | |
1129 | | /// Retrieves the physical address in this entry. |
1130 | 0 | pub fn address(self) -> PAddr { |
1131 | 0 | if self.flags().contains(PDFlags::PS) { |
1132 | 0 | PAddr::from(self.0 & ADDRESS_MASK_PSE) |
1133 | | } else { |
1134 | 0 | PAddr::from(self.0 & ADDRESS_MASK) |
1135 | | } |
1136 | 0 | } |
1137 | | |
1138 | | /// Returns the flags corresponding to this entry. |
1139 | 0 | pub fn flags(self) -> PDFlags { |
1140 | 0 | PDFlags::from_bits_truncate(self.0) |
1141 | 0 | } |
1142 | | |
1143 | | check_flag!( |
1144 | | doc = "Present; must be 1 to map a 4-MByte page.", |
1145 | | is_present, |
1146 | | PDFlags::P |
1147 | | ); |
1148 | | check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 4-MByte page referenced by this entry.", |
1149 | | is_writeable, PDFlags::RW); |
1150 | | check_flag!(doc = "User/supervisor; if 0, user-mode accesses are not allowed to the 4-MByte page referenced by this entry.", |
1151 | | is_user_mode_allowed, PDFlags::US); |
1152 | | check_flag!( |
1153 | | doc = "Page-level write-through.", |
1154 | | is_page_write_through, |
1155 | | PDFlags::PWT |
1156 | | ); |
1157 | | check_flag!( |
1158 | | doc = "Page-level cache disable.", |
1159 | | is_page_level_cache_disabled, |
1160 | | PDFlags::PCD |
1161 | | ); |
1162 | | check_flag!(doc = "Accessed; indicates whether software has accessed the 4-MByte page referenced by this entry.", |
1163 | | is_accessed, PDFlags::A); |
1164 | | check_flag!(doc = "Dirty; indicates whether software has written to the 4-MByte page referenced by this entry.", |
1165 | | is_dirty, PDFlags::D); |
1166 | | check_flag!(doc = "Page size; if set this entry maps a 4-MByte page; otherwise, this entry references a page table.", |
1167 | | is_page, PDFlags::PS); |
1168 | | check_flag!(doc = "Global; if CR4.PGE = 1, determines whether the translation is global; ignored otherwise.", |
1169 | | is_global, PDFlags::G); |
1170 | | check_flag!(doc = "If the PAT is supported, indirectly determines the memory type used to access the 4-MByte page referenced by this entry; otherwise, reserved (must be 0)", |
1171 | | is_pat, PDFlags::PAT); |
1172 | | } |
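// A minimal usage sketch (not part of the crate; module name is ours): build one PD entry
// that maps a 4 MiB page and one that references a page table, then read both back.
#[cfg(all(test, feature = "utest"))]
mod pdentry_examples {
    use super::*;

    #[test]
    fn build_pd_entries() {
        // 4 MiB mapping at physical 8 MiB; the address must be 4 MiB aligned when PS is set.
        let flags = PDFlags::P | PDFlags::RW | PDFlags::PS;
        let large = PDEntry::new(PAddr::from(0x0080_0000u32), flags);
        assert!(large.is_present() && large.is_page());
        assert_eq!(large.address(), PAddr::from(0x0080_0000u32));

        // Entry referencing a page table at a 4 KiB aligned physical address.
        let table = PDEntry::new(PAddr::from(0x0001_2000u32), PDFlags::P | PDFlags::RW);
        assert!(!table.is_page());
        assert_eq!(table.address(), PAddr::from(0x0001_2000u32));
    }
}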
1173 | | |
1174 | | bitflags! { |
1175 | | /// PT Entry bits description. |
1176 | | #[repr(transparent)] |
1177 | | pub struct PTFlags: u32 { |
1178 | | /// Present; must be 1 to map a 4-KByte page. |
1179 | | const P = bit!(0); |
1180 | | /// Read/write; if 0, writes may not be allowed to the 4-KByte page referenced by this entry. |
1181 | | const RW = bit!(1); |
1182 | | /// User/supervisor; if 0, user-mode accesses are not allowed to the 4-KByte page referenced by this entry. |
1183 | | const US = bit!(2); |
1184 | | /// Page-level write-through. |
1185 | | const PWT = bit!(3); |
1186 | | /// Page-level cache disable. |
1187 | | const PCD = bit!(4); |
1188 | | /// Accessed; indicates whether software has accessed the 4-KByte page referenced by this entry. |
1189 | | const A = bit!(5); |
1190 | | /// Dirty; indicates whether software has written to the 4-KByte page referenced by this entry. |
1191 | | const D = bit!(6); |
1192 | | /// If the PAT is supported, indirectly determines the memory type used to access the 4-KByte page referenced by this entry; |
1193 | | /// otherwise, reserved (must be 0) |
1194 | | const PAT = bit!(7); |
1195 | | /// Global; if CR4.PGE = 1, determines whether the translation is global; ignored otherwise. |
1196 | | const G = bit!(8); |
1197 | | } |
1198 | | } |
1199 | | |
1200 | | /// A PT Entry consists of an address and a bunch of flags. |
1201 | | #[repr(transparent)] |
1202 | | #[derive(Clone, Copy)] |
1203 | | pub struct PTEntry(pub u32); |
1204 | | |
1205 | | impl fmt::Debug for PTEntry { |
1206 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
1207 | 0 | write!(f, "PTEntry {{ {:#x}, {:?} }}", self.address(), self.flags()) |
1208 | 0 | } |
1209 | | } |
1210 | | |
1211 | | impl PTEntry { |
1212 | | /// Creates a new PTEntry. |
1213 | | /// |
1214 | | /// # Arguments |
1215 | | /// |
1216 | | /// * `page` - The physical address of the backing 4 KiB page. |
1217 | | /// * `flags`- Additional flags for the entry. |
1218 | 0 | pub fn new(page: PAddr, flags: PTFlags) -> PTEntry { |
1219 | 0 | let page_val = page & ADDRESS_MASK; |
1220 | 0 | assert!(page_val == page.into()); |
1221 | 0 | assert!(page % BASE_PAGE_SIZE == 0); |
1222 | 0 | PTEntry(page_val | flags.bits) |
1223 | 0 | } |
1224 | | |
1225 | | /// Retrieves the physical address in this entry. |
1226 | 0 | pub fn address(self) -> PAddr { |
1227 | 0 | PAddr::from(self.0 & ADDRESS_MASK) |
1228 | 0 | } |
1229 | | |
1230 | | /// Returns the flags corresponding to this entry. |
1231 | 0 | pub fn flags(self) -> PTFlags { |
1232 | 0 | PTFlags::from_bits_truncate(self.0) |
1233 | 0 | } |
1234 | | |
1235 | | check_flag!( |
1236 | | doc = "Present; must be 1 to map a 4-KByte page.", |
1237 | | is_present, |
1238 | | PTFlags::P |
1239 | | ); |
1240 | | check_flag!(doc = "Read/write; if 0, writes may not be allowed to the 4-KByte page referenced by this entry.", |
1241 | | is_writeable, PTFlags::RW); |
1242 | | check_flag!(doc = "User/supervisor; if 0, user-mode accesses are not allowed to the 4-KByte page referenced by this entry.", |
1243 | | is_user_mode_allowed, PTFlags::US); |
1244 | | check_flag!( |
1245 | | doc = "Page-level write-through.", |
1246 | | is_page_write_through, |
1247 | | PTFlags::PWT |
1248 | | ); |
1249 | | check_flag!( |
1250 | | doc = "Page-level cache disable.", |
1251 | | is_page_level_cache_disabled, |
1252 | | PTFlags::PCD |
1253 | | ); |
1254 | | check_flag!(doc = "Accessed; indicates whether software has accessed the 4-KByte page referenced by this entry.", |
1255 | | is_accessed, PTFlags::A); |
1256 | | check_flag!(doc = "Dirty; indicates whether software has written to the 4-KByte page referenced by this entry.", |
1257 | | is_dirty, PTFlags::D); |
1258 | | check_flag!(doc = "If the PAT is supported, indirectly determines the memory type used to access the 4-KByte page referenced by this entry; otherwise, reserved (must be 0)", |
1259 | | is_pat, PTFlags::PAT); |
1260 | | check_flag!(doc = "Global; if CR4.PGE = 1, determines whether the translation is global; ignored otherwise.", |
1261 | | is_global, PTFlags::G); |
1262 | | } |
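// A minimal usage sketch (not part of the crate; module name is ours): map a single 4 KiB
// frame with a writable, user-accessible PT entry and inspect it.
#[cfg(all(test, feature = "utest"))]
mod ptentry_examples {
    use super::*;

    #[test]
    fn build_pt_entry() {
        let frame = PAddr::from(0x0003_4000u32); // must be 4 KiB aligned
        let e = PTEntry::new(frame, PTFlags::P | PTFlags::RW | PTFlags::US);
        assert!(e.is_present() && e.is_writeable() && e.is_user_mode_allowed());
        assert!(!e.is_global());
        assert_eq!(e.address(), frame);
    }
}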
1263 | | |
1264 | | #[cfg(all(test, feature = "utest"))] |
1265 | | mod test { |
1266 | | use super::*; |
1267 | | |
1268 | | #[test] |
1269 | | fn paddr_align() { |
1270 | | let base = PAddr::from(0x1000); |
1271 | | assert_eq!(base.base_page_offset(), 0x0); |
1272 | | assert_eq!(base.large_page_offset(), 0x1000); |
1273 | | assert_eq!(base.align_down_to_base_page(), PAddr::from(0x1000)); |
1274 | | assert_eq!(base.align_down_to_large_page(), PAddr::from(0x0)); |
1275 | | assert_eq!(base.align_up_to_base_page(), PAddr::from(0x1000)); |
1276 | | assert_eq!(base.align_up_to_large_page(), PAddr::from(0x400000)); |
1277 | | assert!(base.is_base_page_aligned()); |
1278 | | assert!(!base.is_large_page_aligned()); |
1279 | | assert!(base.is_aligned(0x1u32)); |
1280 | | assert!(base.is_aligned(0x2u32)); |
1281 | | assert!(!base.is_aligned(0x3u32)); |
1282 | | assert!(base.is_aligned(0x4u32)); |
1283 | | |
1284 | | let base = PAddr::from(0x1001); |
1285 | | assert_eq!(base.base_page_offset(), 0x1); |
1286 | | assert_eq!(base.large_page_offset(), 0x1001); |
1287 | | assert_eq!(base.align_down_to_base_page(), PAddr::from(0x1000)); |
1288 | | assert_eq!(base.align_down_to_large_page(), PAddr::from(0x0)); |
1289 | | assert_eq!(base.align_up_to_base_page(), PAddr::from(0x2000)); |
1290 | | assert_eq!(base.align_up_to_large_page(), PAddr::from(0x400000)); |
1291 | | assert!(!base.is_base_page_aligned()); |
1292 | | assert!(!base.is_large_page_aligned()); |
1293 | | assert!(base.is_aligned(0x1u32)); |
1294 | | assert!(!base.is_aligned(0x2u32)); |
1295 | | assert!(!base.is_aligned(0x3u32)); |
1296 | | assert!(!base.is_aligned(0x4u32)); |
1297 | | |
1298 | | let base = PAddr::from(0x400000); |
1299 | | assert_eq!(base.base_page_offset(), 0x0); |
1300 | | assert_eq!(base.large_page_offset(), 0x0); |
1301 | | assert_eq!(base.align_down_to_base_page(), PAddr::from(0x400000)); |
1302 | | assert_eq!(base.align_down_to_large_page(), PAddr::from(0x400000)); |
1303 | | assert_eq!(base.align_up_to_base_page(), PAddr::from(0x400000)); |
1304 | | assert_eq!(base.align_up_to_large_page(), PAddr::from(0x400000)); |
1305 | | assert!(base.is_base_page_aligned()); |
1306 | | assert!(base.is_large_page_aligned()); |
1307 | | assert!(base.is_aligned(0x1u32)); |
1308 | | assert!(base.is_aligned(0x2u32)); |
1309 | | assert!(!base.is_aligned(0x3u32)); |
1310 | | assert!(base.is_aligned(0x4u32)); |
1311 | | |
1312 | | let base = PAddr::from(0x400002); |
1313 | | assert_eq!(base.base_page_offset(), 0x2); |
1314 | | assert_eq!(base.large_page_offset(), 0x2); |
1315 | | assert_eq!(base.align_down_to_base_page(), PAddr::from(0x400000)); |
1316 | | assert_eq!(base.align_down_to_large_page(), PAddr::from(0x400000)); |
1317 | | assert_eq!(base.align_up_to_base_page(), PAddr::from(0x401000)); |
1318 | | assert_eq!(base.align_up_to_large_page(), PAddr::from(0x800000)); |
1319 | | assert!(!base.is_base_page_aligned()); |
1320 | | assert!(!base.is_large_page_aligned()); |
1321 | | assert!(base.is_aligned(0x1u32)); |
1322 | | assert!(base.is_aligned(0x2u32)); |
1323 | | assert!(!base.is_aligned(0x3u32)); |
1324 | | assert!(!base.is_aligned(0x4u32)); |
1325 | | } |
1326 | | |
1327 | | #[test] |
1328 | | fn ioaddr_align() { |
1329 | | let base = IOAddr::from(0x1000); |
1330 | | assert_eq!(base.base_page_offset(), 0x0); |
1331 | | assert_eq!(base.large_page_offset(), 0x1000); |
1332 | | assert_eq!(base.align_down_to_base_page(), IOAddr::from(0x1000)); |
1333 | | assert_eq!(base.align_down_to_large_page(), IOAddr::from(0x0)); |
1334 | | assert_eq!(base.align_up_to_base_page(), IOAddr::from(0x1000)); |
1335 | | assert_eq!(base.align_up_to_large_page(), IOAddr::from(0x400000)); |
1336 | | assert!(base.is_base_page_aligned()); |
1337 | | assert!(!base.is_large_page_aligned()); |
1338 | | assert!(base.is_aligned(0x1u32)); |
1339 | | assert!(base.is_aligned(0x2u32)); |
1340 | | assert!(!base.is_aligned(0x3u32)); |
1341 | | assert!(base.is_aligned(0x4u32)); |
1342 | | |
1343 | | let base = IOAddr::from(0x1001); |
1344 | | assert_eq!(base.base_page_offset(), 0x1); |
1345 | | assert_eq!(base.large_page_offset(), 0x1001); |
1346 | | assert_eq!(base.align_down_to_base_page(), IOAddr::from(0x1000)); |
1347 | | assert_eq!(base.align_down_to_large_page(), IOAddr::from(0x0)); |
1348 | | assert_eq!(base.align_up_to_base_page(), IOAddr::from(0x2000)); |
1349 | | assert_eq!(base.align_up_to_large_page(), IOAddr::from(0x400000)); |
1350 | | assert!(!base.is_base_page_aligned()); |
1351 | | assert!(!base.is_large_page_aligned()); |
1352 | | assert!(base.is_aligned(0x1u32)); |
1353 | | assert!(!base.is_aligned(0x2u32)); |
1354 | | assert!(!base.is_aligned(0x3u32)); |
1355 | | assert!(!base.is_aligned(0x4u32)); |
1356 | | |
1357 | | let base = IOAddr::from(0x400000); |
1358 | | assert_eq!(base.base_page_offset(), 0x0); |
1359 | | assert_eq!(base.large_page_offset(), 0x0); |
1360 | | assert_eq!(base.align_down_to_base_page(), IOAddr::from(0x400000)); |
1361 | | assert_eq!(base.align_down_to_large_page(), IOAddr::from(0x400000)); |
1362 | | assert_eq!(base.align_up_to_base_page(), IOAddr::from(0x400000)); |
1363 | | assert_eq!(base.align_up_to_large_page(), IOAddr::from(0x400000)); |
1364 | | assert!(base.is_base_page_aligned()); |
1365 | | assert!(base.is_large_page_aligned()); |
1366 | | assert!(base.is_aligned(0x1u32)); |
1367 | | assert!(base.is_aligned(0x2u32)); |
1368 | | assert!(!base.is_aligned(0x3u32)); |
1369 | | assert!(base.is_aligned(0x4u32)); |
1370 | | |
1371 | | let base = IOAddr::from(0x400002); |
1372 | | assert_eq!(base.base_page_offset(), 0x2); |
1373 | | assert_eq!(base.large_page_offset(), 0x2); |
1374 | | assert_eq!(base.align_down_to_base_page(), IOAddr::from(0x400000)); |
1375 | | assert_eq!(base.align_down_to_large_page(), IOAddr::from(0x400000)); |
1376 | | assert_eq!(base.align_up_to_base_page(), IOAddr::from(0x401000)); |
1377 | | assert_eq!(base.align_up_to_large_page(), IOAddr::from(0x800000)); |
1378 | | assert!(!base.is_base_page_aligned()); |
1379 | | assert!(!base.is_large_page_aligned()); |
1380 | | assert!(base.is_aligned(0x1u32)); |
1381 | | assert!(base.is_aligned(0x2u32)); |
1382 | | assert!(!base.is_aligned(0x3u32)); |
1383 | | assert!(!base.is_aligned(0x4u32)); |
1384 | | } |
1385 | | |
1386 | | #[test] |
1387 | | fn vaddr_align() { |
1388 | | let base = VAddr::from(0x1000); |
1389 | | assert_eq!(base.base_page_offset(), 0x0); |
1390 | | assert_eq!(base.large_page_offset(), 0x1000); |
1391 | | assert_eq!(base.align_down_to_base_page(), VAddr::from(0x1000)); |
1392 | | assert_eq!(base.align_down_to_large_page(), VAddr::from(0x0)); |
1393 | | assert_eq!(base.align_up_to_base_page(), VAddr::from(0x1000)); |
1394 | | assert_eq!(base.align_up_to_large_page(), VAddr::from(0x400000)); |
1395 | | assert!(base.is_base_page_aligned()); |
1396 | | assert!(!base.is_large_page_aligned()); |
1397 | | assert!(base.is_aligned(0x1u32)); |
1398 | | assert!(base.is_aligned(0x2u32)); |
1399 | | assert!(!base.is_aligned(0x3u32)); |
1400 | | assert!(base.is_aligned(0x4u32)); |
1401 | | |
1402 | | let base = VAddr::from(0x1001); |
1403 | | assert_eq!(base.base_page_offset(), 0x1); |
1404 | | assert_eq!(base.large_page_offset(), 0x1001); |
1405 | | assert_eq!(base.align_down_to_base_page(), VAddr::from(0x1000)); |
1406 | | assert_eq!(base.align_down_to_large_page(), VAddr::from(0x0)); |
1407 | | assert_eq!(base.align_up_to_base_page(), VAddr::from(0x2000)); |
1408 | | assert_eq!(base.align_up_to_large_page(), VAddr::from(0x400000)); |
1409 | | assert!(!base.is_base_page_aligned()); |
1410 | | assert!(!base.is_large_page_aligned()); |
1411 | | assert!(base.is_aligned(0x1u32)); |
1412 | | assert!(!base.is_aligned(0x2u32)); |
1413 | | assert!(!base.is_aligned(0x3u32)); |
1414 | | assert!(!base.is_aligned(0x4u32)); |
1415 | | |
1416 | | let base = VAddr::from(0x400000); |
1417 | | assert_eq!(base.base_page_offset(), 0x0); |
1418 | | assert_eq!(base.large_page_offset(), 0x0); |
1419 | | assert_eq!(base.align_down_to_base_page(), VAddr::from(0x400000)); |
1420 | | assert_eq!(base.align_down_to_large_page(), VAddr::from(0x400000)); |
1421 | | assert_eq!(base.align_up_to_base_page(), VAddr::from(0x400000)); |
1422 | | assert_eq!(base.align_up_to_large_page(), VAddr::from(0x400000)); |
1423 | | assert!(base.is_base_page_aligned()); |
1424 | | assert!(base.is_large_page_aligned()); |
1425 | | assert!(base.is_aligned(0x1u32)); |
1426 | | assert!(base.is_aligned(0x2u32)); |
1427 | | assert!(!base.is_aligned(0x3u32)); |
1428 | | assert!(base.is_aligned(0x4u32)); |
1429 | | |
1430 | | let base = VAddr::from(0x400002); |
1431 | | assert_eq!(base.base_page_offset(), 0x2); |
1432 | | assert_eq!(base.large_page_offset(), 0x2); |
1433 | | assert_eq!(base.align_down_to_base_page(), VAddr::from(0x400000)); |
1434 | | assert_eq!(base.align_down_to_large_page(), VAddr::from(0x400000)); |
1435 | | assert_eq!(base.align_up_to_base_page(), VAddr::from(0x401000)); |
1436 | | assert_eq!(base.align_up_to_large_page(), VAddr::from(0x800000)); |
1437 | | assert!(!base.is_base_page_aligned()); |
1438 | | assert!(!base.is_large_page_aligned()); |
1439 | | assert!(base.is_aligned(0x1u32)); |
1440 | | assert!(base.is_aligned(0x2u32)); |
1441 | | assert!(!base.is_aligned(0x3u32)); |
1442 | | assert!(!base.is_aligned(0x4u32)); |
1443 | | } |
1444 | | } |