/rust/registry/src/index.crates.io-6f17d22bba15001f/region-2.2.0/src/protect.rs
Line | Count | Source (jump to first uncovered line) |
1 | | // TODO: Remove this for the next major release |
2 | | #![allow(non_upper_case_globals)] |
3 | | |
4 | | use {os, page, query_range, Error, Region, Result}; |
5 | | |
6 | | /// Changes the memory protection of one or more pages. |
7 | | /// |
8 | | /// The address range may overlap one or more pages, and if so, all pages within |
9 | | /// the range will be modified. The previous protection flags are not preserved |
10 | | /// (if reset of protection flags is desired, use `protect_with_handle`). |
11 | | /// |
12 | | /// - The range is `[address, address + size)` |
13 | | /// - The address may not be null. |
14 | | /// - The address is rounded down to the closest page boundary. |
15 | | /// - The size may not be zero. |
16 | | /// - The size is rounded up to the closest page boundary, relative to the |
17 | | /// address. |
18 | | /// |
19 | | /// # Safety |
20 | | /// |
21 | | /// This is unsafe since it can change read-only properties of constants and/or |
22 | | /// modify the executable properties of any code segments. |
23 | | /// |
24 | | /// # Examples |
25 | | /// |
26 | | /// ``` |
27 | | /// # if cfg!(any(target_arch = "x86", target_arch = "x86_64")) { |
28 | | /// use region::{Protection}; |
29 | | /// |
30 | | /// let ret5 = [0xB8, 0x05, 0x00, 0x00, 0x00, 0xC3]; |
31 | | /// let x: extern "C" fn() -> i32 = unsafe { |
32 | | /// region::protect(ret5.as_ptr(), ret5.len(), Protection::READ_WRITE_EXECUTE).unwrap(); |
33 | | /// std::mem::transmute(ret5.as_ptr()) |
34 | | /// }; |
35 | | /// assert_eq!(x(), 5); |
36 | | /// # } |
37 | | /// ``` |
38 | 6.75k | pub unsafe fn protect(address: *const u8, size: usize, protection: Protection) -> Result<()> { |
39 | 6.75k | if address.is_null() { |
40 | 0 | return Err(Error::NullAddress); |
41 | 6.75k | } |
42 | 6.75k | |
43 | 6.75k | if size == 0 { |
44 | 0 | return Err(Error::EmptyRange); |
45 | 6.75k | } |
46 | 6.75k | |
47 | 6.75k | // Ignore the preservation of previous protection flags |
48 | 6.75k | os::set_protection( |
49 | 6.75k | page::floor(address as usize) as *const u8, |
50 | 6.75k | page::size_from_range(address, size), |
51 | 6.75k | protection, |
52 | 6.75k | ) |
53 | 6.75k | } |
54 | | |
55 | | /// Changes the memory protection of one or more pages temporarily. |
56 | | /// |
57 | | /// The address range may overlap one or more pages, and if so, all pages within |
58 | | /// the range will be modified. The protection flags will be reset when the |
59 | | /// handle is dropped. |
60 | | /// |
61 | | /// This function uses `query_range` internally and is therefore less performant |
62 | | /// than `protect`. Prefer this function only if a memory protection reset is |
63 | | /// desired. |
64 | | /// |
65 | | /// - The range is `[address, address + size)` |
66 | | /// - The address may not be null. |
67 | | /// - The address is rounded down to the closest page boundary. |
68 | | /// - The size may not be zero. |
69 | | /// - The size is rounded up to the closest page boundary, relative to the |
70 | | /// address. |
71 | | /// |
72 | | /// # Safety |
73 | | /// |
74 | | /// This is unsafe since it can change read-only properties of constants and/or |
75 | | /// modify the executable properties of any code segments. |
76 | 0 | pub unsafe fn protect_with_handle( |
77 | 0 | address: *const u8, |
78 | 0 | size: usize, |
79 | 0 | protection: Protection, |
80 | 0 | ) -> Result<ProtectGuard> { |
81 | | // Determine the current region flags |
82 | 0 | let mut regions = query_range(address, size)?; |
83 | | |
84 | | // Change the region to the desired protection |
85 | 0 | protect(address, size, protection)?; |
86 | | |
87 | 0 | let lower = page::floor(address as usize); |
88 | 0 | let upper = page::ceil(address as usize + size); |
89 | | |
90 | 0 | if let Some(ref mut region) = regions.first_mut() { |
91 | 0 | // Offset the lower region to the smallest page boundary |
92 | 0 | let delta = lower - region.base as usize; |
93 | 0 | region.base = (region.base as usize + delta) as *mut u8; |
94 | 0 | region.size -= delta; |
95 | 0 | } |
96 | | |
97 | 0 | if let Some(ref mut region) = regions.last_mut() { |
98 | 0 | // Truncate the upper region to the smallest page boundary |
99 | 0 | let delta = region.upper() - upper; |
100 | 0 | region.size -= delta; |
101 | 0 | } |
102 | | |
103 | 0 | Ok(ProtectGuard::new(regions)) |
104 | 0 | } |
105 | | |
/// An RAII implementation of "scoped protection". When this structure is dropped
/// (falls out of scope), the memory region protection will be reset.
#[must_use]
pub struct ProtectGuard {
  // Snapshot of the regions (base, size, original protection) captured by
  // `protect_with_handle`, restored one by one in `Drop`.
  regions: Vec<Region>,
}
112 | | |
113 | | impl ProtectGuard { |
114 | 0 | fn new(regions: Vec<Region>) -> Self { |
115 | 0 | ProtectGuard { regions } |
116 | 0 | } |
117 | | |
118 | | /// Releases the guards ownership of the memory protection. |
119 | | #[deprecated(since = "2.2.0", note = "Use std::mem::forget instead")] |
120 | 0 | pub fn release(self) { |
121 | 0 | ::std::mem::forget(self); |
122 | 0 | } |
123 | | } |
124 | | |
125 | | impl Drop for ProtectGuard { |
126 | 0 | fn drop(&mut self) { |
127 | 0 | let result = unsafe { |
128 | 0 | self |
129 | 0 | .regions |
130 | 0 | .iter() |
131 | 0 | .try_for_each(|region| protect(region.base, region.size, region.protection)) |
132 | | }; |
133 | 0 | debug_assert!(result.is_ok(), "restoring region protection"); |
134 | 0 | } |
135 | | } |
136 | | |
// SAFETY: the guard only stores region descriptors (raw base pointers plus
// sizes and flags) and never dereferences them except in `drop`.
// NOTE(review): this presumes `Region` carries no thread-affine state and that
// concurrent protection resets are acceptable — confirm against `Region`'s
// definition before relying on cross-thread use.
unsafe impl Send for ProtectGuard {}
unsafe impl Sync for ProtectGuard {}
139 | | |
bitflags! {
  /// Memory page protection constants.
  ///
  /// Determines the access rights for a specific page and/or region. Some
  /// combination of flags may not work depending on the OS (e.g macOS
  /// enforces pages to be readable).
  ///
  /// # Examples
  ///
  /// ```
  /// use region::Protection;
  ///
  /// let combine = Protection::READ | Protection::WRITE;
  /// let shorthand = Protection::READ_WRITE;
  /// ```
  pub struct Protection: usize {
    /// No access allowed at all.
    const NONE = 0;
    /// Read access; writing and/or executing data will panic.
    const READ = (1 << 1);
    /// Write access; this flag alone may not be supported on all OSs.
    const WRITE = (1 << 2);
    /// Execute access; this may not be allowed depending on DEP.
    const EXECUTE = (1 << 3);
    /// Read and execute shorthand.
    const READ_EXECUTE = (Self::READ.bits | Self::EXECUTE.bits);
    /// Read and write shorthand.
    const READ_WRITE = (Self::READ.bits | Self::WRITE.bits);
    /// Read, write and execute shorthand.
    const READ_WRITE_EXECUTE = (Self::READ.bits | Self::WRITE.bits | Self::EXECUTE.bits);
    /// Write and execute shorthand.
    const WRITE_EXECUTE = (Self::WRITE.bits | Self::EXECUTE.bits);

    // The CamelCase aliases below exist only for backward compatibility with
    // pre-2.2 code (see the file-level TODO and `allow(non_upper_case_globals)`);
    // each one mirrors the SCREAMING_SNAKE_CASE constant above it.

    /// No access allowed at all.
    #[deprecated(since = "2.2.0", note = "Use Protection::NONE instead")]
    const None = Self::NONE.bits;
    /// Read access; writing and/or executing data will panic.
    #[deprecated(since = "2.2.0", note = "Use Protection::READ instead")]
    const Read = Self::READ.bits;
    /// Write access; this flag alone may not be supported on all OSs.
    #[deprecated(since = "2.2.0", note = "Use Protection::WRITE instead")]
    const Write = Self::WRITE.bits;
    /// Execute access; this may not be allowed depending on DEP.
    #[deprecated(since = "2.2.0", note = "Use Protection::EXECUTE instead")]
    const Execute = Self::EXECUTE.bits;
    /// Read and execute shorthand.
    #[deprecated(since = "2.2.0", note = "Use Protection::READ_EXECUTE instead")]
    const ReadExecute = Self::READ_EXECUTE.bits;
    /// Read and write shorthand.
    #[deprecated(since = "2.2.0", note = "Use Protection::READ_WRITE instead")]
    const ReadWrite = Self::READ_WRITE.bits;
    /// Read, write and execute shorthand.
    #[deprecated(since = "2.2.0", note = "Use Protection::READ_WRITE_EXECUTE instead")]
    const ReadWriteExecute = Self::READ_WRITE_EXECUTE.bits;
    /// Write and execute shorthand.
    #[deprecated(since = "2.2.0", note = "Use Protection::WRITE_EXECUTE instead")]
    const WriteExecute = Self::WRITE_EXECUTE.bits;
  }
}
199 | | |
#[cfg(test)]
mod tests {
  use super::*;
  use tests::alloc_pages;

  #[test]
  fn protect_null() {
    // A null address must be rejected before any other validation.
    let outcome = unsafe { protect(::std::ptr::null(), 0, Protection::NONE) };
    assert!(outcome.is_err());
  }

  #[test]
  fn protect_code() {
    let code_ptr = &mut protect_code as *mut _ as *mut u8;
    unsafe {
      protect(code_ptr, 0x10, Protection::READ_WRITE_EXECUTE).unwrap();
      // Writing to the (normally read-only) code page proves it took effect.
      *code_ptr = 0x90;
    }
  }

  #[test]
  fn protect_alloc() {
    let mut mapping = alloc_pages(&[Protection::READ]);
    unsafe {
      protect(mapping.as_ptr(), page::size(), Protection::READ_WRITE).unwrap();
      // The page started read-only; this write would fault without the change.
      *mapping.as_mut_ptr() = 0x1;
    }
  }

  #[test]
  fn protect_overlap() {
    let page_size = page::size();

    // Create a page boundary with different protection flags in the
    // upper and lower span, so the intermediate page sizes are fixed.
    let layout = [
      Protection::READ,
      Protection::READ_EXECUTE,
      Protection::READ_WRITE,
      Protection::READ,
    ];

    let mapping = alloc_pages(&layout);
    let exec_base = unsafe { mapping.as_ptr().offset(page_size as isize) };
    let boundary = unsafe { exec_base.offset(page_size as isize - 1) };

    // Change the protection over two page boundaries
    unsafe { protect(boundary, 2, Protection::READ_WRITE_EXECUTE).unwrap() };

    // Ensure that the pages have merged into one region
    let merged = query_range(exec_base, page_size * 2).unwrap();
    assert_eq!(merged.len(), 1);
    assert_eq!(merged[0].protection, Protection::READ_WRITE_EXECUTE);
    assert_eq!(merged[0].size, page_size * 2);
  }

  #[test]
  fn protect_handle() {
    let mapping = alloc_pages(&[Protection::READ]);
    unsafe {
      let _guard =
        protect_with_handle(mapping.as_ptr(), page::size(), Protection::READ_WRITE).unwrap();
      assert_eq!(
        ::query(mapping.as_ptr()).unwrap().protection,
        Protection::READ_WRITE
      );
    };
    // Dropping the guard must have restored the original protection.
    assert_eq!(::query(mapping.as_ptr()).unwrap().protection, Protection::READ);
  }
}