/rust/registry/src/index.crates.io-1949cf8c6b5b557f/x86_64-0.14.13/src/addr.rs
Line | Count | Source |
1 | | //! Physical and virtual addresses manipulation |
2 | | |
3 | | use core::convert::TryFrom; |
4 | | use core::fmt; |
5 | | #[cfg(feature = "step_trait")] |
6 | | use core::iter::Step; |
7 | | use core::ops::{Add, AddAssign, Sub, SubAssign}; |
8 | | |
9 | | use crate::structures::paging::page_table::PageTableLevel; |
10 | | use crate::structures::paging::{PageOffset, PageTableIndex}; |
11 | | use bit_field::BitField; |
12 | | |
13 | | const ADDRESS_SPACE_SIZE: u64 = 0x1_0000_0000_0000; |
14 | | |
15 | | /// A canonical 64-bit virtual memory address. |
16 | | /// |
17 | | /// This is a wrapper type around an `u64`, so it is always 8 bytes, even when compiled |
18 | | /// on non 64-bit systems. The |
19 | | /// [`TryFrom`](https://doc.rust-lang.org/std/convert/trait.TryFrom.html) trait can be used for performing conversions |
20 | | /// between `u64` and `usize`. |
21 | | /// |
22 | | /// On `x86_64`, only the 48 lower bits of a virtual address can be used. The top 16 bits need |
23 | | /// to be copies of bit 47, i.e. the most significant bit. Addresses that fulfil this criterium |
24 | | /// are called “canonical”. This type guarantees that it always represents a canonical address. |
25 | | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] |
26 | | #[repr(transparent)] |
27 | | pub struct VirtAddr(u64); |
28 | | |
29 | | /// A 64-bit physical memory address. |
30 | | /// |
31 | | /// This is a wrapper type around an `u64`, so it is always 8 bytes, even when compiled |
32 | | /// on non 64-bit systems. The |
33 | | /// [`TryFrom`](https://doc.rust-lang.org/std/convert/trait.TryFrom.html) trait can be used for performing conversions |
34 | | /// between `u64` and `usize`. |
35 | | /// |
36 | | /// On `x86_64`, only the 52 lower bits of a physical address can be used. The top 12 bits need |
37 | | /// to be zero. This type guarantees that it always represents a valid physical address. |
38 | | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] |
39 | | #[repr(transparent)] |
40 | | pub struct PhysAddr(u64); |
41 | | |
/// A passed `u64` was not a valid virtual address.
///
/// Bits 48 to 64 were neither a correct sign extension of bit 47 nor all zero,
/// so automatic sign extension would have clobbered possibly meaningful bits.
/// This usually points at a bug such as a broken address calculation.
///
/// Contains the invalid address.
pub struct VirtAddrNotValid(pub u64);

impl core::fmt::Debug for VirtAddrNotValid {
    // Renders as `VirtAddrNotValid(0x…)` with the offending address in hex.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut tuple = f.debug_tuple("VirtAddrNotValid");
        tuple.field(&format_args!("{:#x}", self.0));
        tuple.finish()
    }
}
59 | | |
impl VirtAddr {
    /// Creates a new canonical virtual address.
    ///
    /// This function performs sign extension of bit 47 to make the address canonical.
    ///
    /// ## Panics
    ///
    /// This function panics if the bits in the range 48 to 64 contain data (i.e. are not null and no sign extension).
    #[inline]
    pub fn new(addr: u64) -> VirtAddr {
        Self::try_new(addr).expect(
            "address passed to VirtAddr::new must not contain any data \
            in bits 48 to 64",
        )
    }

    /// Tries to create a new canonical virtual address.
    ///
    /// This function tries to perform sign
    /// extension of bit 47 to make the address canonical. It succeeds if bits 48 to 64 are
    /// either a correct sign extension (i.e. copies of bit 47) or all null. Else, an error
    /// is returned.
    #[inline]
    pub fn try_new(addr: u64) -> Result<VirtAddr, VirtAddrNotValid> {
        // Bits 47..64 form a 17-bit field: 0 means bit 47 and all upper bits
        // are clear (canonical lower half); 0x1ffff means they are all set
        // (canonical upper half); 1 means only bit 47 is set with zero upper
        // bits, which sign extension can fix up. Anything else loses data.
        match addr.get_bits(47..64) {
            0 | 0x1ffff => Ok(VirtAddr(addr)),     // address is canonical
            1 => Ok(VirtAddr::new_truncate(addr)), // address needs sign extension
            _ => Err(VirtAddrNotValid(addr)),
        }
    }

    /// Creates a new canonical virtual address, throwing out bits 48..64.
    ///
    /// This function performs sign extension of bit 47 to make the address canonical, so
    /// bits 48 to 64 are overwritten. If you want to check that these bits contain no data,
    /// use `new` or `try_new`.
    #[inline]
    pub const fn new_truncate(addr: u64) -> VirtAddr {
        // By doing the right shift as a signed operation (on a i64), it will
        // sign extend the value, repeating the leftmost bit.
        VirtAddr(((addr << 16) as i64 >> 16) as u64)
    }

    /// Creates a new virtual address, without any checks.
    ///
    /// ## Safety
    ///
    /// You must make sure bits 48..64 are equal to bit 47. This is not checked.
    #[inline]
    pub const unsafe fn new_unsafe(addr: u64) -> VirtAddr {
        VirtAddr(addr)
    }

    /// Creates a virtual address that points to `0`.
    #[inline]
    pub const fn zero() -> VirtAddr {
        VirtAddr(0)
    }

    /// Converts the address to an `u64`.
    #[inline]
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// Creates a virtual address from the given pointer
    // cfg(target_pointer_width = "32") is only here for backwards
    // compatibility: Earlier versions of this crate did not have any `cfg()`
    // on this function. At least for 32- and 64-bit we know the `as u64` cast
    // doesn't truncate.
    #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))]
    #[inline]
    pub fn from_ptr<T: ?Sized>(ptr: *const T) -> Self {
        // Cast to a thin `*const ()` first so this also accepts fat (slice /
        // trait-object) pointers; only the address part is kept.
        Self::new(ptr as *const () as u64)
    }

    /// Converts the address to a raw pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub const fn as_ptr<T>(self) -> *const T {
        self.as_u64() as *const T
    }

    /// Converts the address to a mutable raw pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub const fn as_mut_ptr<T>(self) -> *mut T {
        self.as_ptr::<T>() as *mut T
    }

    /// Convenience method for checking if a virtual address is null.
    #[inline]
    pub const fn is_null(self) -> bool {
        self.0 == 0
    }

    /// Aligns the virtual address upwards to the given alignment.
    ///
    /// See the `align_up` function for more information.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting address is higher than
    /// `0xffff_ffff_ffff_ffff`.
    #[inline]
    pub fn align_up<U>(self, align: U) -> Self
    where
        U: Into<u64>,
    {
        // `new_truncate` re-canonicalizes the result in case rounding up
        // carried into bit 47.
        VirtAddr::new_truncate(align_up(self.0, align.into()))
    }

    /// Aligns the virtual address downwards to the given alignment.
    ///
    /// See the `align_down` function for more information.
    #[inline]
    pub fn align_down<U>(self, align: U) -> Self
    where
        U: Into<u64>,
    {
        VirtAddr::new_truncate(align_down(self.0, align.into()))
    }

    /// Checks whether the virtual address has the demanded alignment.
    #[inline]
    pub fn is_aligned<U>(self, align: U) -> bool
    where
        U: Into<u64>,
    {
        self.align_down(align) == self
    }

    /// Returns the 12-bit page offset of this virtual address.
    #[inline]
    pub const fn page_offset(self) -> PageOffset {
        PageOffset::new_truncate(self.0 as u16)
    }

    /// Returns the 9-bit level 1 page table index.
    #[inline]
    pub const fn p1_index(self) -> PageTableIndex {
        // Skip the 12 page-offset bits.
        PageTableIndex::new_truncate((self.0 >> 12) as u16)
    }

    /// Returns the 9-bit level 2 page table index.
    #[inline]
    pub const fn p2_index(self) -> PageTableIndex {
        // Skip 12 offset bits + 9 level-1 index bits.
        PageTableIndex::new_truncate((self.0 >> 12 >> 9) as u16)
    }

    /// Returns the 9-bit level 3 page table index.
    #[inline]
    pub const fn p3_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9) as u16)
    }

    /// Returns the 9-bit level 4 page table index.
    #[inline]
    pub const fn p4_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9 >> 9) as u16)
    }

    /// Returns the 9-bit level page table index.
    #[inline]
    pub const fn page_table_index(self, level: PageTableLevel) -> PageTableIndex {
        // Skip the 12 offset bits, then 9 index bits for each level below
        // `level`.
        PageTableIndex::new_truncate((self.0 >> 12 >> ((level as u8 - 1) * 9)) as u16)
    }

    // FIXME: Move this into the `Step` impl, once `Step` is stabilized.
    pub(crate) fn steps_between_impl(start: &Self, end: &Self) -> (usize, Option<usize>) {
        let mut steps = if let Some(steps) = end.0.checked_sub(start.0) {
            steps
        } else {
            // `end` is below `start`: no forward steps exist between them.
            return (0, None);
        };

        // Check if we jumped the gap.
        if end.0.get_bit(47) && !start.0.get_bit(47) {
            // The range crosses the non-canonical hole between the two
            // canonical halves; subtract its size (0xffff_8000_0000_0000 -
            // 0x0000_8000_0000_0000 = 0xffff_0000_0000_0000) so only
            // canonical addresses are counted.
            steps = steps.checked_sub(0xffff_0000_0000_0000).unwrap();
        }

        // On targets where `usize` is narrower than `u64` the count may not
        // fit; then report `usize::MAX` as the lower bound and `None` as the
        // exact count.
        let steps = usize::try_from(steps).ok();
        (steps.unwrap_or(usize::MAX), steps)
    }

    // FIXME: Move this into the `Step` impl, once `Step` is stabilized.
    pub(crate) fn forward_checked_impl(start: Self, count: usize) -> Option<Self> {
        let offset = u64::try_from(count).ok()?;
        // More steps than there are addresses in the 48-bit space can never
        // succeed.
        if offset > ADDRESS_SPACE_SIZE {
            return None;
        }

        let mut addr = start.0.checked_add(offset)?;

        // Inspect bits 47..64 of the raw sum.
        match addr.get_bits(47..) {
            0x1 => {
                // Jump the gap by sign extending the 47th bit.
                addr.set_bits(47.., 0x1ffff);
            }
            0x2 => {
                // Address overflow
                return None;
            }
            _ => {}
        }

        Some(Self::new(addr))
    }
}
269 | | |
270 | | impl fmt::Debug for VirtAddr { |
271 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
272 | 0 | f.debug_tuple("VirtAddr") |
273 | 0 | .field(&format_args!("{:#x}", self.0)) |
274 | 0 | .finish() |
275 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::Debug>::fmt Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::Debug>::fmt |
276 | | } |
277 | | |
278 | | impl fmt::Binary for VirtAddr { |
279 | | #[inline] |
280 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
281 | 0 | fmt::Binary::fmt(&self.0, f) |
282 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::Binary>::fmt Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::Binary>::fmt |
283 | | } |
284 | | |
285 | | impl fmt::LowerHex for VirtAddr { |
286 | | #[inline] |
287 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
288 | 0 | fmt::LowerHex::fmt(&self.0, f) |
289 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::LowerHex>::fmt Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::LowerHex>::fmt |
290 | | } |
291 | | |
292 | | impl fmt::Octal for VirtAddr { |
293 | | #[inline] |
294 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
295 | 0 | fmt::Octal::fmt(&self.0, f) |
296 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::Octal>::fmt Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::Octal>::fmt |
297 | | } |
298 | | |
299 | | impl fmt::UpperHex for VirtAddr { |
300 | | #[inline] |
301 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
302 | 0 | fmt::UpperHex::fmt(&self.0, f) |
303 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::UpperHex>::fmt Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::UpperHex>::fmt |
304 | | } |
305 | | |
306 | | impl fmt::Pointer for VirtAddr { |
307 | | #[inline] |
308 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
309 | 0 | fmt::Pointer::fmt(&(self.0 as *const ()), f) |
310 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::Pointer>::fmt Unexecuted instantiation: <x86_64::addr::VirtAddr as core::fmt::Pointer>::fmt |
311 | | } |
312 | | |
313 | | impl Add<u64> for VirtAddr { |
314 | | type Output = Self; |
315 | | #[inline] |
316 | 0 | fn add(self, rhs: u64) -> Self::Output { |
317 | 0 | VirtAddr::new(self.0 + rhs) |
318 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Add<u64>>::add Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Add<u64>>::add Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Add<u64>>::add |
319 | | } |
320 | | |
321 | | impl AddAssign<u64> for VirtAddr { |
322 | | #[inline] |
323 | 0 | fn add_assign(&mut self, rhs: u64) { |
324 | 0 | *self = *self + rhs; |
325 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::AddAssign<u64>>::add_assign Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::AddAssign<u64>>::add_assign Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::AddAssign<u64>>::add_assign |
326 | | } |
327 | | |
328 | | #[cfg(target_pointer_width = "64")] |
329 | | impl Add<usize> for VirtAddr { |
330 | | type Output = Self; |
331 | | #[inline] |
332 | 0 | fn add(self, rhs: usize) -> Self::Output { |
333 | 0 | self + rhs as u64 |
334 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Add<usize>>::add Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Add<usize>>::add |
335 | | } |
336 | | |
337 | | #[cfg(target_pointer_width = "64")] |
338 | | impl AddAssign<usize> for VirtAddr { |
339 | | #[inline] |
340 | 0 | fn add_assign(&mut self, rhs: usize) { |
341 | 0 | self.add_assign(rhs as u64) |
342 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::AddAssign<usize>>::add_assign Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::AddAssign<usize>>::add_assign |
343 | | } |
344 | | |
345 | | impl Sub<u64> for VirtAddr { |
346 | | type Output = Self; |
347 | | #[inline] |
348 | 0 | fn sub(self, rhs: u64) -> Self::Output { |
349 | 0 | VirtAddr::new(self.0.checked_sub(rhs).unwrap()) |
350 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Sub<u64>>::sub Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Sub<u64>>::sub |
351 | | } |
352 | | |
353 | | impl SubAssign<u64> for VirtAddr { |
354 | | #[inline] |
355 | 0 | fn sub_assign(&mut self, rhs: u64) { |
356 | 0 | *self = *self - rhs; |
357 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::SubAssign<u64>>::sub_assign Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::SubAssign<u64>>::sub_assign |
358 | | } |
359 | | |
360 | | #[cfg(target_pointer_width = "64")] |
361 | | impl Sub<usize> for VirtAddr { |
362 | | type Output = Self; |
363 | | #[inline] |
364 | 0 | fn sub(self, rhs: usize) -> Self::Output { |
365 | 0 | self - rhs as u64 |
366 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Sub<usize>>::sub Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Sub<usize>>::sub |
367 | | } |
368 | | |
369 | | #[cfg(target_pointer_width = "64")] |
370 | | impl SubAssign<usize> for VirtAddr { |
371 | | #[inline] |
372 | 0 | fn sub_assign(&mut self, rhs: usize) { |
373 | 0 | self.sub_assign(rhs as u64) |
374 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::SubAssign<usize>>::sub_assign Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::SubAssign<usize>>::sub_assign |
375 | | } |
376 | | |
377 | | impl Sub<VirtAddr> for VirtAddr { |
378 | | type Output = u64; |
379 | | #[inline] |
380 | 0 | fn sub(self, rhs: VirtAddr) -> Self::Output { |
381 | 0 | self.as_u64().checked_sub(rhs.as_u64()).unwrap() |
382 | 0 | } Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Sub>::sub Unexecuted instantiation: <x86_64::addr::VirtAddr as core::ops::arith::Sub>::sub |
383 | | } |
384 | | |
#[cfg(feature = "step_trait")]
impl Step for VirtAddr {
    fn fmt_placeholder_never_used() {} // (no such line in original — see below)
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        Self::steps_between_impl(start, end)
    }

    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        Self::forward_checked_impl(start, count)
    }

    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        let offset = u64::try_from(count).ok()?;
        // Stepping back farther than the whole 48-bit space can never succeed.
        if offset > ADDRESS_SPACE_SIZE {
            return None;
        }

        let mut addr = start.0.checked_sub(offset)?;

        // Inspect bits 47..64 of the raw difference.
        match addr.get_bits(47..) {
            0x1fffe => {
                // Bits 48..64 are all ones but bit 47 is clear: the result
                // fell just below the upper canonical half, into the gap.
                // Jump the gap by sign extending the 47th bit.
                addr.set_bits(47.., 0);
            }
            0x1fffd => {
                // Address underflow
                return None;
            }
            _ => {}
        }

        Some(Self::new(addr))
    }
}
418 | | |
/// A passed `u64` was not a valid physical address.
///
/// This means that bits 52 to 64 were not all null.
///
/// Contains the invalid address.
pub struct PhysAddrNotValid(pub u64);

impl core::fmt::Debug for PhysAddrNotValid {
    // Renders as `PhysAddrNotValid(0x…)` with the offending address in hex.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut tuple = f.debug_tuple("PhysAddrNotValid");
        tuple.field(&format_args!("{:#x}", self.0));
        tuple.finish()
    }
}
433 | | |
434 | | impl PhysAddr { |
435 | | /// Creates a new physical address. |
436 | | /// |
437 | | /// ## Panics |
438 | | /// |
439 | | /// This function panics if a bit in the range 52 to 64 is set. |
440 | | #[inline] |
441 | 0 | pub const fn new(addr: u64) -> Self { |
442 | | // TODO: Replace with .ok().expect(msg) when that works on stable. |
443 | 0 | match Self::try_new(addr) { |
444 | 0 | Ok(p) => p, |
445 | 0 | Err(_) => panic!("physical addresses must not have any bits in the range 52 to 64 set"), |
446 | | } |
447 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr>::new Unexecuted instantiation: <x86_64::addr::PhysAddr>::new Unexecuted instantiation: <x86_64::addr::PhysAddr>::new Unexecuted instantiation: <x86_64::addr::PhysAddr>::new |
448 | | |
449 | | /// Creates a new physical address, throwing bits 52..64 away. |
450 | | #[inline] |
451 | 0 | pub const fn new_truncate(addr: u64) -> PhysAddr { |
452 | 0 | PhysAddr(addr % (1 << 52)) |
453 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr>::new_truncate Unexecuted instantiation: <x86_64::addr::PhysAddr>::new_truncate Unexecuted instantiation: <x86_64::addr::PhysAddr>::new_truncate Unexecuted instantiation: <x86_64::addr::PhysAddr>::new_truncate |
454 | | |
455 | | /// Creates a new physical address, without any checks. |
456 | | /// |
457 | | /// ## Safety |
458 | | /// |
459 | | /// You must make sure bits 52..64 are zero. This is not checked. |
460 | | #[inline] |
461 | 0 | pub const unsafe fn new_unsafe(addr: u64) -> PhysAddr { |
462 | 0 | PhysAddr(addr) |
463 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr>::new_unsafe Unexecuted instantiation: <x86_64::addr::PhysAddr>::new_unsafe |
464 | | |
465 | | /// Tries to create a new physical address. |
466 | | /// |
467 | | /// Fails if any bits in the range 52 to 64 are set. |
468 | | #[inline] |
469 | 0 | pub const fn try_new(addr: u64) -> Result<Self, PhysAddrNotValid> { |
470 | 0 | let p = Self::new_truncate(addr); |
471 | 0 | if p.0 == addr { |
472 | 0 | Ok(p) |
473 | | } else { |
474 | 0 | Err(PhysAddrNotValid(addr)) |
475 | | } |
476 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr>::try_new Unexecuted instantiation: <x86_64::addr::PhysAddr>::try_new Unexecuted instantiation: <x86_64::addr::PhysAddr>::try_new Unexecuted instantiation: <x86_64::addr::PhysAddr>::try_new |
477 | | |
478 | | /// Creates a physical address that points to `0`. |
479 | | #[inline] |
480 | 0 | pub const fn zero() -> PhysAddr { |
481 | 0 | PhysAddr(0) |
482 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr>::zero Unexecuted instantiation: <x86_64::addr::PhysAddr>::zero |
483 | | |
484 | | /// Converts the address to an `u64`. |
485 | | #[inline] |
486 | 0 | pub const fn as_u64(self) -> u64 { |
487 | 0 | self.0 |
488 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr>::as_u64 Unexecuted instantiation: <x86_64::addr::PhysAddr>::as_u64 Unexecuted instantiation: <x86_64::addr::PhysAddr>::as_u64 Unexecuted instantiation: <x86_64::addr::PhysAddr>::as_u64 |
489 | | |
490 | | /// Convenience method for checking if a physical address is null. |
491 | | #[inline] |
492 | 0 | pub const fn is_null(self) -> bool { |
493 | 0 | self.0 == 0 |
494 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr>::is_null Unexecuted instantiation: <x86_64::addr::PhysAddr>::is_null |
495 | | |
496 | | /// Aligns the physical address upwards to the given alignment. |
497 | | /// |
498 | | /// See the `align_up` function for more information. |
499 | | /// |
500 | | /// # Panics |
501 | | /// |
502 | | /// This function panics if the resulting address has a bit in the range 52 |
503 | | /// to 64 set. |
504 | | #[inline] |
505 | 0 | pub fn align_up<U>(self, align: U) -> Self |
506 | 0 | where |
507 | 0 | U: Into<u64>, |
508 | | { |
509 | 0 | PhysAddr::new(align_up(self.0, align.into())) |
510 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr>::align_up::<_> Unexecuted instantiation: <x86_64::addr::PhysAddr>::align_up::<_> |
511 | | |
512 | | /// Aligns the physical address downwards to the given alignment. |
513 | | /// |
514 | | /// See the `align_down` function for more information. |
515 | | #[inline] |
516 | 0 | pub fn align_down<U>(self, align: U) -> Self |
517 | 0 | where |
518 | 0 | U: Into<u64>, |
519 | | { |
520 | 0 | PhysAddr(align_down(self.0, align.into())) |
521 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr>::align_down::<u64> Unexecuted instantiation: <x86_64::addr::PhysAddr>::align_down::<u64> |
522 | | |
523 | | /// Checks whether the physical address has the demanded alignment. |
524 | | #[inline] |
525 | 0 | pub fn is_aligned<U>(self, align: U) -> bool |
526 | 0 | where |
527 | 0 | U: Into<u64>, |
528 | | { |
529 | 0 | self.align_down(align) == self |
530 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr>::is_aligned::<u64> Unexecuted instantiation: <x86_64::addr::PhysAddr>::is_aligned::<u64> |
531 | | } |
532 | | |
533 | | impl fmt::Debug for PhysAddr { |
534 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
535 | 0 | f.debug_tuple("PhysAddr") |
536 | 0 | .field(&format_args!("{:#x}", self.0)) |
537 | 0 | .finish() |
538 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::Debug>::fmt Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::Debug>::fmt |
539 | | } |
540 | | |
541 | | impl fmt::Binary for PhysAddr { |
542 | | #[inline] |
543 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
544 | 0 | fmt::Binary::fmt(&self.0, f) |
545 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::Binary>::fmt Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::Binary>::fmt |
546 | | } |
547 | | |
548 | | impl fmt::LowerHex for PhysAddr { |
549 | | #[inline] |
550 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
551 | 0 | fmt::LowerHex::fmt(&self.0, f) |
552 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::LowerHex>::fmt Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::LowerHex>::fmt |
553 | | } |
554 | | |
555 | | impl fmt::Octal for PhysAddr { |
556 | | #[inline] |
557 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
558 | 0 | fmt::Octal::fmt(&self.0, f) |
559 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::Octal>::fmt Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::Octal>::fmt |
560 | | } |
561 | | |
562 | | impl fmt::UpperHex for PhysAddr { |
563 | | #[inline] |
564 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
565 | 0 | fmt::UpperHex::fmt(&self.0, f) |
566 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::UpperHex>::fmt Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::UpperHex>::fmt |
567 | | } |
568 | | |
569 | | impl fmt::Pointer for PhysAddr { |
570 | | #[inline] |
571 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
572 | 0 | fmt::Pointer::fmt(&(self.0 as *const ()), f) |
573 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::Pointer>::fmt Unexecuted instantiation: <x86_64::addr::PhysAddr as core::fmt::Pointer>::fmt |
574 | | } |
575 | | |
576 | | impl Add<u64> for PhysAddr { |
577 | | type Output = Self; |
578 | | #[inline] |
579 | 0 | fn add(self, rhs: u64) -> Self::Output { |
580 | 0 | PhysAddr::new(self.0 + rhs) |
581 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Add<u64>>::add Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Add<u64>>::add Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Add<u64>>::add |
582 | | } |
583 | | |
584 | | impl AddAssign<u64> for PhysAddr { |
585 | | #[inline] |
586 | 0 | fn add_assign(&mut self, rhs: u64) { |
587 | 0 | *self = *self + rhs; |
588 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::AddAssign<u64>>::add_assign Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::AddAssign<u64>>::add_assign Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::AddAssign<u64>>::add_assign |
589 | | } |
590 | | |
591 | | #[cfg(target_pointer_width = "64")] |
592 | | impl Add<usize> for PhysAddr { |
593 | | type Output = Self; |
594 | | #[inline] |
595 | 0 | fn add(self, rhs: usize) -> Self::Output { |
596 | 0 | self + rhs as u64 |
597 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Add<usize>>::add Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Add<usize>>::add |
598 | | } |
599 | | |
600 | | #[cfg(target_pointer_width = "64")] |
601 | | impl AddAssign<usize> for PhysAddr { |
602 | | #[inline] |
603 | 0 | fn add_assign(&mut self, rhs: usize) { |
604 | 0 | self.add_assign(rhs as u64) |
605 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::AddAssign<usize>>::add_assign Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::AddAssign<usize>>::add_assign |
606 | | } |
607 | | |
608 | | impl Sub<u64> for PhysAddr { |
609 | | type Output = Self; |
610 | | #[inline] |
611 | 0 | fn sub(self, rhs: u64) -> Self::Output { |
612 | 0 | PhysAddr::new(self.0.checked_sub(rhs).unwrap()) |
613 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Sub<u64>>::sub Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Sub<u64>>::sub |
614 | | } |
615 | | |
616 | | impl SubAssign<u64> for PhysAddr { |
617 | | #[inline] |
618 | 0 | fn sub_assign(&mut self, rhs: u64) { |
619 | 0 | *self = *self - rhs; |
620 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::SubAssign<u64>>::sub_assign Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::SubAssign<u64>>::sub_assign |
621 | | } |
622 | | |
623 | | #[cfg(target_pointer_width = "64")] |
624 | | impl Sub<usize> for PhysAddr { |
625 | | type Output = Self; |
626 | | #[inline] |
627 | 0 | fn sub(self, rhs: usize) -> Self::Output { |
628 | 0 | self - rhs as u64 |
629 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Sub<usize>>::sub Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Sub<usize>>::sub |
630 | | } |
631 | | |
632 | | #[cfg(target_pointer_width = "64")] |
633 | | impl SubAssign<usize> for PhysAddr { |
634 | | #[inline] |
635 | 0 | fn sub_assign(&mut self, rhs: usize) { |
636 | 0 | self.sub_assign(rhs as u64) |
637 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::SubAssign<usize>>::sub_assign Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::SubAssign<usize>>::sub_assign |
638 | | } |
639 | | |
640 | | impl Sub<PhysAddr> for PhysAddr { |
641 | | type Output = u64; |
642 | | #[inline] |
643 | 0 | fn sub(self, rhs: PhysAddr) -> Self::Output { |
644 | 0 | self.as_u64().checked_sub(rhs.as_u64()).unwrap() |
645 | 0 | } Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Sub>::sub Unexecuted instantiation: <x86_64::addr::PhysAddr as core::ops::arith::Sub>::sub |
646 | | } |
647 | | |
/// Align address downwards.
///
/// Returns the greatest `x` with alignment `align` so that `x <= addr`.
///
/// Panics if the alignment is not a power of two.
#[inline]
pub const fn align_down(addr: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two(), "`align` must be a power of two");
    // Clearing the low `log2(align)` bits rounds the address down to the
    // previous multiple of `align` (or leaves it unchanged if aligned).
    let align_mask = align - 1;
    addr & !align_mask
}
658 | | |
/// Align address upwards.
///
/// Returns the smallest `x` with alignment `align` so that `x >= addr`.
///
/// Panics if the alignment is not a power of two or if an overflow occurs.
#[inline]
pub const fn align_up(addr: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two(), "`align` must be a power of two");
    let align_mask = align - 1;
    if addr & align_mask == 0 {
        // Already a multiple of `align`; nothing to do.
        addr
    } else {
        // Setting all bits below the alignment and adding one steps to the
        // next multiple of `align`. `checked_add` catches the case where the
        // rounded-up value does not fit in a `u64`.
        // FIXME: Replace with .expect, once `Option::expect` is const.
        match (addr | align_mask).checked_add(1) {
            Some(aligned) => aligned,
            None => panic!("attempt to add with overflow"),
        }
    }
}
679 | | |
#[cfg(test)]
mod tests {
    //! Unit tests for address canonicalization, `Step` arithmetic on
    //! `VirtAddr`, and the free `align_up`/`align_down` helpers.
    use super::*;

    // `new_truncate` must sign-extend bit 47 into the upper 16 bits so the
    // result is always canonical, regardless of what the input carried there.
    #[test]
    pub fn virtaddr_new_truncate() {
        assert_eq!(VirtAddr::new_truncate(0), VirtAddr(0));
        assert_eq!(VirtAddr::new_truncate(1 << 47), VirtAddr(0xfffff << 47));
        assert_eq!(VirtAddr::new_truncate(123), VirtAddr(123));
        assert_eq!(VirtAddr::new_truncate(123 << 47), VirtAddr(0xfffff << 47));
    }

    // Stepping forward must skip the non-canonical "hole" between
    // 0x0000_7fff_ffff_ffff and 0xffff_8000_0000_0000 as if it were a
    // single step, and report overflow past the top of the address space.
    #[test]
    #[cfg(feature = "step_trait")]
    fn virtaddr_step_forward() {
        assert_eq!(Step::forward(VirtAddr(0), 0), VirtAddr(0));
        assert_eq!(Step::forward(VirtAddr(0), 1), VirtAddr(1));
        // One step across the canonical-address gap.
        assert_eq!(
            Step::forward(VirtAddr(0x7fff_ffff_ffff), 1),
            VirtAddr(0xffff_8000_0000_0000)
        );
        assert_eq!(
            Step::forward(VirtAddr(0xffff_8000_0000_0000), 1),
            VirtAddr(0xffff_8000_0000_0001)
        );
        // Stepping past the highest canonical address must fail.
        assert_eq!(
            Step::forward_checked(VirtAddr(0xffff_ffff_ffff_ffff), 1),
            None
        );
        // Large steps that span the gap still land on the right address.
        assert_eq!(
            Step::forward(VirtAddr(0x7fff_ffff_ffff), 0x1234_5678_9abd),
            VirtAddr(0xffff_9234_5678_9abc)
        );
        assert_eq!(
            Step::forward(VirtAddr(0x7fff_ffff_ffff), 0x8000_0000_0000),
            VirtAddr(0xffff_ffff_ffff_ffff)
        );
        assert_eq!(
            Step::forward(VirtAddr(0x7fff_ffff_ff00), 0x8000_0000_00ff),
            VirtAddr(0xffff_ffff_ffff_ffff)
        );
        // Exactly one step too far: overflow must be detected.
        assert_eq!(
            Step::forward_checked(VirtAddr(0x7fff_ffff_ff00), 0x8000_0000_0100),
            None
        );
        assert_eq!(
            Step::forward_checked(VirtAddr(0x7fff_ffff_ffff), 0x8000_0000_0001),
            None
        );
    }

    // Mirror of `virtaddr_step_forward`: stepping backward must cross the
    // canonical gap downward and report underflow below address 0.
    #[test]
    #[cfg(feature = "step_trait")]
    fn virtaddr_step_backward() {
        assert_eq!(Step::backward(VirtAddr(0), 0), VirtAddr(0));
        // Stepping below zero must fail.
        assert_eq!(Step::backward_checked(VirtAddr(0), 1), None);
        assert_eq!(Step::backward(VirtAddr(1), 1), VirtAddr(0));
        // One step across the canonical-address gap, downward.
        assert_eq!(
            Step::backward(VirtAddr(0xffff_8000_0000_0000), 1),
            VirtAddr(0x7fff_ffff_ffff)
        );
        assert_eq!(
            Step::backward(VirtAddr(0xffff_8000_0000_0001), 1),
            VirtAddr(0xffff_8000_0000_0000)
        );
        // Large steps that span the gap still land on the right address.
        assert_eq!(
            Step::backward(VirtAddr(0xffff_9234_5678_9abc), 0x1234_5678_9abd),
            VirtAddr(0x7fff_ffff_ffff)
        );
        assert_eq!(
            Step::backward(VirtAddr(0xffff_8000_0000_0000), 0x8000_0000_0000),
            VirtAddr(0)
        );
        assert_eq!(
            Step::backward(VirtAddr(0xffff_8000_0000_0000), 0x7fff_ffff_ff01),
            VirtAddr(0xff)
        );
        // Exactly one step too far: underflow must be detected.
        assert_eq!(
            Step::backward_checked(VirtAddr(0xffff_8000_0000_0000), 0x8000_0000_0001),
            None
        );
    }

    // `steps_between` must count the canonical gap as a single step and
    // return (0, None) when `start > end`.
    #[test]
    #[cfg(feature = "step_trait")]
    fn virtaddr_steps_between() {
        assert_eq!(
            Step::steps_between(&VirtAddr(0), &VirtAddr(0)),
            (0, Some(0))
        );
        assert_eq!(
            Step::steps_between(&VirtAddr(0), &VirtAddr(1)),
            (1, Some(1))
        );
        // Reversed order: unreachable, so the upper bound is None.
        assert_eq!(Step::steps_between(&VirtAddr(1), &VirtAddr(0)), (0, None));
        // The two addresses adjacent across the canonical gap are one step apart.
        assert_eq!(
            Step::steps_between(
                &VirtAddr(0x7fff_ffff_ffff),
                &VirtAddr(0xffff_8000_0000_0000)
            ),
            (1, Some(1))
        );
        assert_eq!(
            Step::steps_between(
                &VirtAddr(0xffff_8000_0000_0000),
                &VirtAddr(0x7fff_ffff_ffff)
            ),
            (0, None)
        );
        assert_eq!(
            Step::steps_between(
                &VirtAddr(0xffff_8000_0000_0000),
                &VirtAddr(0xffff_8000_0000_0000)
            ),
            (0, Some(0))
        );
        assert_eq!(
            Step::steps_between(
                &VirtAddr(0xffff_8000_0000_0000),
                &VirtAddr(0xffff_8000_0000_0001)
            ),
            (1, Some(1))
        );
        assert_eq!(
            Step::steps_between(
                &VirtAddr(0xffff_8000_0000_0001),
                &VirtAddr(0xffff_8000_0000_0000)
            ),
            (0, None)
        );
    }

    #[test]
    pub fn test_align_up() {
        // align 1
        assert_eq!(align_up(0, 1), 0);
        assert_eq!(align_up(1234, 1), 1234);
        assert_eq!(align_up(0xffff_ffff_ffff_ffff, 1), 0xffff_ffff_ffff_ffff);
        // align 2
        assert_eq!(align_up(0, 2), 0);
        assert_eq!(align_up(1233, 2), 1234);
        assert_eq!(align_up(0xffff_ffff_ffff_fffe, 2), 0xffff_ffff_ffff_fffe);
        // address 0
        assert_eq!(align_up(0, 128), 0);
        assert_eq!(align_up(0, 1), 0);
        assert_eq!(align_up(0, 2), 0);
        assert_eq!(align_up(0, 0x8000_0000_0000_0000), 0);
    }

    #[test]
    fn test_virt_addr_align_up() {
        // Make sure the 47th bit is extended.
        assert_eq!(
            VirtAddr::new(0x7fff_ffff_ffff).align_up(2u64),
            VirtAddr::new(0xffff_8000_0000_0000)
        );
    }

    #[test]
    fn test_virt_addr_align_down() {
        // Make sure the 47th bit is extended.
        assert_eq!(
            VirtAddr::new(0xffff_8000_0000_0000).align_down(1u64 << 48),
            VirtAddr::new(0)
        );
    }

    // Rounding the maximum address up must panic rather than wrap.
    #[test]
    #[should_panic]
    fn test_virt_addr_align_up_overflow() {
        VirtAddr::new(0xffff_ffff_ffff_ffff).align_up(2u64);
    }

    // Same overflow check for the top of the 52-bit physical address space.
    #[test]
    #[should_panic]
    fn test_phys_addr_align_up_overflow() {
        PhysAddr::new(0x000f_ffff_ffff_ffff).align_up(2u64);
    }

    #[test]
    fn test_from_ptr_array() {
        let slice = &[1, 2, 3, 4, 5];
        // Make sure that from_ptr(slice) is the address of the first element
        assert_eq!(VirtAddr::from_ptr(slice), VirtAddr::from_ptr(&slice[0]));
    }
}