Coverage Report

Created: 2025-12-28 06:31

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/rust/registry/src/index.crates.io-1949cf8c6b5b557f/memmap2-0.9.5/src/unix.rs
Line
Count
Source
1
extern crate libc;
2
3
use std::fs::File;
4
use std::mem::ManuallyDrop;
5
use std::os::unix::io::{FromRawFd, RawFd};
6
use std::sync::atomic::{AtomicUsize, Ordering};
7
use std::{io, ptr};
8
9
// The libc `MAP_STACK` flag where the target defines it (note the
// deliberate exclusion of MIPS Linux in the cfg), and 0 elsewhere so it
// can be unconditionally ORed into mmap flags by `map_anon`.
#[cfg(any(
    all(target_os = "linux", not(target_arch = "mips")),
    target_os = "freebsd",
    target_os = "android"
))]
const MAP_STACK: libc::c_int = libc::MAP_STACK;

// Fallback: ORing 0 into the flags is a harmless no-op.
#[cfg(not(any(
    all(target_os = "linux", not(target_arch = "mips")),
    target_os = "freebsd",
    target_os = "android"
)))]
const MAP_STACK: libc::c_int = 0;
22
23
// `MAP_POPULATE` (prefault the mapping) exists only on Linux/Android;
// elsewhere it degrades to 0 so callers can OR it in unconditionally.
#[cfg(any(target_os = "linux", target_os = "android"))]
const MAP_POPULATE: libc::c_int = libc::MAP_POPULATE;

#[cfg(not(any(target_os = "linux", target_os = "android")))]
const MAP_POPULATE: libc::c_int = 0;

// Huge-page flags, used by `map_anon` when a huge-page size is requested.
// As above, each degrades to 0 on targets where libc does not define it.
#[cfg(any(target_os = "linux", target_os = "android"))]
const MAP_HUGETLB: libc::c_int = libc::MAP_HUGETLB;

// NOTE: `MAP_HUGE_MASK` is exposed by libc only for Linux proper, not
// Android — hence the narrower cfg than its siblings.
#[cfg(target_os = "linux")]
const MAP_HUGE_MASK: libc::c_int = libc::MAP_HUGE_MASK;

#[cfg(any(target_os = "linux", target_os = "android"))]
const MAP_HUGE_SHIFT: libc::c_int = libc::MAP_HUGE_SHIFT;

#[cfg(not(any(target_os = "linux", target_os = "android")))]
const MAP_HUGETLB: libc::c_int = 0;

#[cfg(not(target_os = "linux"))]
const MAP_HUGE_MASK: libc::c_int = 0;

#[cfg(not(any(target_os = "linux", target_os = "android")))]
const MAP_HUGE_SHIFT: libc::c_int = 0;
46
47
// On Linux/Android with a non-musl libc, use the explicit 64-bit-offset
// variants so large file offsets work even on 32-bit targets. The rest
// of this file refers to them under the plain `mmap`/`off_t` names.
#[cfg(any(
    target_os = "android",
    all(target_os = "linux", not(target_env = "musl"))
))]
use libc::{mmap64 as mmap, off64_t as off_t};

// Other platforms (and musl, whose `off_t` is already 64-bit) just use
// the standard symbols.
#[cfg(not(any(
    target_os = "android",
    all(target_os = "linux", not(target_env = "musl"))
)))]
use libc::{mmap, off_t};
58
59
/// Raw `mmap(2)`-backed memory region for Unix targets.
pub struct MmapInner {
    // Start of the user-visible data: the page-aligned mapping base
    // plus any sub-page offset (see `from_raw_parts`).
    ptr: *mut libc::c_void,
    // Length in bytes of the user-visible region; may be 0 even though
    // the underlying kernel mapping is never empty.
    len: usize,
}
63
64
impl MmapInner {
65
    /// Creates a new `MmapInner`.
66
    ///
67
    /// This is a thin wrapper around the `mmap` system call.
68
0
    fn new(
69
0
        len: usize,
70
0
        prot: libc::c_int,
71
0
        flags: libc::c_int,
72
0
        file: RawFd,
73
0
        offset: u64,
74
0
    ) -> io::Result<MmapInner> {
75
0
        let alignment = offset % page_size() as u64;
76
0
        let aligned_offset = offset - alignment;
77
78
0
        let (map_len, map_offset) = Self::adjust_mmap_params(len, alignment as usize)?;
79
80
        unsafe {
81
0
            let ptr = mmap(
82
0
                ptr::null_mut(),
83
0
                map_len as libc::size_t,
84
0
                prot,
85
0
                flags,
86
0
                file,
87
0
                aligned_offset as off_t,
88
            );
89
90
0
            if ptr == libc::MAP_FAILED {
91
0
                Err(io::Error::last_os_error())
92
            } else {
93
0
                Ok(Self::from_raw_parts(ptr, len, map_offset))
94
            }
95
        }
96
0
    }
97
98
0
    /// Computes the kernel-level mapping length and the sub-page offset
    /// for a user request of `len` bytes starting `alignment` bytes past
    /// a page boundary. Returns `(map_len, map_offset)`.
    fn adjust_mmap_params(len: usize, alignment: usize) -> io::Result<(usize, usize)> {
        use std::isize;

        // A Rust slice may not exceed isize::MAX bytes
        // (https://doc.rust-lang.org/std/slice/fn.from_raw_parts.html).
        // That is unreachable on 64-bit targets, but files/anonymous maps
        // over 2 GiB are perfectly normal on 32-bit ones, so reject them
        // here. Mirrors the check in std's RawVec:
        // https://github.com/rust-lang/rust/blob/db78ab70a88a0a5e89031d7ee4eccec835dcdbde/library/alloc/src/raw_vec.rs#L495
        if std::mem::size_of::<usize>() < 8 && len > isize::MAX as usize {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "memory map length overflows isize",
            ));
        }

        // The kernel mapping covers the sub-page alignment bytes plus the
        // user-requested length; the user's view starts `alignment` bytes
        // into it.
        let map_offset = alignment;

        // POSIX forbids zero-length mappings:
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html
        // > If `len` is zero, `mmap()` shall fail and no mapping shall be
        // > established.
        // So create a one-byte mapping instead. `MmapInner::len` stays 0
        // and `Mmap` still dereferences to an empty slice, so the extra
        // byte is never exposed.
        //
        // If backed by an empty file this maps past the end of the file,
        // which POSIX explicitly allows ("can be used to map a region of
        // memory that is larger than the current size of the object");
        // accesses beyond the object may raise SIGBUS, but since the
        // public length is not incremented no such access can happen here.
        // (SIGBUS remains possible if another process truncates a mapped
        // non-empty file — unrelated to this empty-file handling.)
        let map_len = (len + alignment).max(1);

        Ok((map_len, map_offset))
    }
153
154
    /// Get the current memory mapping as a `(ptr, map_len, offset)` tuple.
155
    ///
156
    /// Note that `map_len` is the length of the memory mapping itself and
157
    /// _not_ the one that would be passed to `from_raw_parts`.
158
0
    fn as_mmap_params(&self) -> (*mut libc::c_void, usize, usize) {
159
0
        let offset = self.ptr as usize % page_size();
160
0
        let len = self.len + offset;
161
162
        // There are two possible memory layouts we could have, depending on
163
        // the length and offset passed when constructing this instance:
164
        //
165
        // 1. The "normal" memory layout looks like this:
166
        //
167
        //         |<------------------>|<---------------------->|
168
        //     mmap ptr    offset      ptr     public slice
169
        //
170
        //    That is, we have
171
        //    - The start of the page-aligned memory mapping returned by mmap,
172
        //      followed by,
173
        //    - Some number of bytes that are memory mapped but ignored since
174
        //      they are before the byte offset requested by the user, followed
175
        //      by,
176
        //    - The actual memory mapped slice requested by the user.
177
        //
178
        //    This maps cleanly to a (ptr, len, offset) tuple.
179
        //
180
        // 2. Then, we have the case where the user requested a zero-length
181
        //    memory mapping. mmap(2) does not support zero-length mappings so
182
        //    this crate works around that by actually making a mapping of
183
        //    length one. This means that we have
184
        //    - A length zero slice, followed by,
185
        //    - A single memory mapped byte
186
        //
187
        //    Note that this only happens if the offset within the page is also
188
        //    zero. Otherwise, we have a memory map of offset bytes and not a
189
        //    zero-length memory map.
190
        //
191
        //    This doesn't fit cleanly into a (ptr, len, offset) tuple. Instead,
192
        //    we fudge it slightly: a zero-length memory map turns into a
193
        //    mapping of length one and can't be told apart outside of this
194
        //    method without knowing the original length.
195
0
        if len == 0 {
196
0
            (self.ptr, 1, 0)
197
        } else {
198
0
            (unsafe { self.ptr.offset(-(offset as isize)) }, len, offset)
199
        }
200
0
    }
201
202
    /// Construct this `MmapInner` from its raw components
203
    ///
204
    /// # Safety
205
    ///
206
    /// - `ptr` must point to the start of memory mapping that can be freed
207
    ///   using `munmap(2)` (i.e. returned by `mmap(2)` or `mremap(2)`)
208
    /// - The memory mapping at `ptr` must have a length of `len + offset`.
209
    /// - If `len + offset == 0` then the memory mapping must be of length 1.
210
    /// - `offset` must be less than the current page size.
211
0
    unsafe fn from_raw_parts(ptr: *mut libc::c_void, len: usize, offset: usize) -> Self {
212
0
        debug_assert_eq!(ptr as usize % page_size(), 0, "ptr not page-aligned");
213
0
        debug_assert!(offset < page_size(), "offset larger than page size");
214
215
0
        Self {
216
0
            ptr: ptr.add(offset),
217
0
            len,
218
0
        }
219
0
    }
220
221
0
    pub fn map(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
222
0
        let populate = if populate { MAP_POPULATE } else { 0 };
223
0
        MmapInner::new(
224
0
            len,
225
            libc::PROT_READ,
226
0
            libc::MAP_SHARED | populate,
227
0
            file,
228
0
            offset,
229
        )
230
0
    }
231
232
0
    pub fn map_exec(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
233
0
        let populate = if populate { MAP_POPULATE } else { 0 };
234
0
        MmapInner::new(
235
0
            len,
236
0
            libc::PROT_READ | libc::PROT_EXEC,
237
0
            libc::MAP_SHARED | populate,
238
0
            file,
239
0
            offset,
240
        )
241
0
    }
242
243
0
    pub fn map_mut(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
244
0
        let populate = if populate { MAP_POPULATE } else { 0 };
245
0
        MmapInner::new(
246
0
            len,
247
0
            libc::PROT_READ | libc::PROT_WRITE,
248
0
            libc::MAP_SHARED | populate,
249
0
            file,
250
0
            offset,
251
        )
252
0
    }
253
254
0
    pub fn map_copy(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
255
0
        let populate = if populate { MAP_POPULATE } else { 0 };
256
0
        MmapInner::new(
257
0
            len,
258
0
            libc::PROT_READ | libc::PROT_WRITE,
259
0
            libc::MAP_PRIVATE | populate,
260
0
            file,
261
0
            offset,
262
        )
263
0
    }
264
265
0
    pub fn map_copy_read_only(
266
0
        len: usize,
267
0
        file: RawFd,
268
0
        offset: u64,
269
0
        populate: bool,
270
0
    ) -> io::Result<MmapInner> {
271
0
        let populate = if populate { MAP_POPULATE } else { 0 };
272
0
        MmapInner::new(
273
0
            len,
274
            libc::PROT_READ,
275
0
            libc::MAP_PRIVATE | populate,
276
0
            file,
277
0
            offset,
278
        )
279
0
    }
280
281
    /// Open an anonymous memory map.
282
0
    pub fn map_anon(
283
0
        len: usize,
284
0
        stack: bool,
285
0
        populate: bool,
286
0
        huge: Option<u8>,
287
0
    ) -> io::Result<MmapInner> {
288
0
        let stack = if stack { MAP_STACK } else { 0 };
289
0
        let populate = if populate { MAP_POPULATE } else { 0 };
290
0
        let hugetlb = if huge.is_some() { MAP_HUGETLB } else { 0 };
291
0
        let offset = huge
292
0
            .map(|mask| ((mask as u64) & (MAP_HUGE_MASK as u64)) << MAP_HUGE_SHIFT)
293
0
            .unwrap_or(0);
294
0
        MmapInner::new(
295
0
            len,
296
0
            libc::PROT_READ | libc::PROT_WRITE,
297
0
            libc::MAP_PRIVATE | libc::MAP_ANON | stack | populate | hugetlb,
298
            -1,
299
0
            offset,
300
        )
301
0
    }
302
303
0
    pub fn flush(&self, offset: usize, len: usize) -> io::Result<()> {
304
0
        let alignment = (self.ptr as usize + offset) % page_size();
305
0
        let offset = offset as isize - alignment as isize;
306
0
        let len = len + alignment;
307
0
        let result =
308
0
            unsafe { libc::msync(self.ptr.offset(offset), len as libc::size_t, libc::MS_SYNC) };
309
0
        if result == 0 {
310
0
            Ok(())
311
        } else {
312
0
            Err(io::Error::last_os_error())
313
        }
314
0
    }
315
316
0
    pub fn flush_async(&self, offset: usize, len: usize) -> io::Result<()> {
317
0
        let alignment = (self.ptr as usize + offset) % page_size();
318
0
        let offset = offset as isize - alignment as isize;
319
0
        let len = len + alignment;
320
0
        let result =
321
0
            unsafe { libc::msync(self.ptr.offset(offset), len as libc::size_t, libc::MS_ASYNC) };
322
0
        if result == 0 {
323
0
            Ok(())
324
        } else {
325
0
            Err(io::Error::last_os_error())
326
        }
327
0
    }
328
329
0
    fn mprotect(&mut self, prot: libc::c_int) -> io::Result<()> {
330
        unsafe {
331
0
            let alignment = self.ptr as usize % page_size();
332
0
            let ptr = self.ptr.offset(-(alignment as isize));
333
0
            let len = self.len + alignment;
334
0
            let len = len.max(1);
335
0
            if libc::mprotect(ptr, len, prot) == 0 {
336
0
                Ok(())
337
            } else {
338
0
                Err(io::Error::last_os_error())
339
            }
340
        }
341
0
    }
342
343
0
    /// Remaps the region as read-only (`PROT_READ`).
    pub fn make_read_only(&mut self) -> io::Result<()> {
        self.mprotect(libc::PROT_READ)
    }
346
347
0
    /// Remaps the region as readable and executable (`PROT_READ | PROT_EXEC`).
    pub fn make_exec(&mut self) -> io::Result<()> {
        self.mprotect(libc::PROT_READ | libc::PROT_EXEC)
    }
350
351
0
    /// Remaps the region as readable and writable (`PROT_READ | PROT_WRITE`).
    pub fn make_mut(&mut self) -> io::Result<()> {
        self.mprotect(libc::PROT_READ | libc::PROT_WRITE)
    }
354
355
    /// Returns a raw const pointer to the start of the user-visible data.
    #[inline]
    pub fn ptr(&self) -> *const u8 {
        self.ptr as *const u8
    }
359
360
    /// Returns a raw mutable pointer to the start of the user-visible data.
    #[inline]
    pub fn mut_ptr(&mut self) -> *mut u8 {
        self.ptr as *mut u8
    }
364
365
    /// Returns the user-visible length of the mapping in bytes (may be 0).
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }
369
370
0
    pub fn advise(&self, advice: libc::c_int, offset: usize, len: usize) -> io::Result<()> {
371
0
        let alignment = (self.ptr as usize + offset) % page_size();
372
0
        let offset = offset as isize - alignment as isize;
373
0
        let len = len + alignment;
374
        unsafe {
375
0
            if libc::madvise(self.ptr.offset(offset), len, advice) != 0 {
376
0
                Err(io::Error::last_os_error())
377
            } else {
378
0
                Ok(())
379
            }
380
        }
381
0
    }
382
383
    /// Resizes the mapping in place to `new_len` bytes using `mremap(2)`
    /// (Linux-only). On success `self` is overwritten with the new
    /// mapping; the previous pointer is invalid afterwards.
    #[cfg(target_os = "linux")]
    pub fn remap(&mut self, new_len: usize, options: crate::RemapOptions) -> io::Result<()> {
        // Recover the kernel-level (page-aligned, possibly one-byte) view
        // of the current mapping.
        let (old_ptr, old_len, offset) = self.as_mmap_params();
        // Re-derive the kernel length for the requested new length, with
        // the same sub-page offset.
        let (map_len, offset) = Self::adjust_mmap_params(new_len, offset)?;

        unsafe {
            let new_ptr = libc::mremap(old_ptr, old_len, map_len, options.into_flags());

            if new_ptr == libc::MAP_FAILED {
                Err(io::Error::last_os_error())
            } else {
                // We explicitly don't drop self since the pointer within is no longer valid.
                ptr::write(self, Self::from_raw_parts(new_ptr, new_len, offset));
                Ok(())
            }
        }
    }
400
401
0
    pub fn lock(&self) -> io::Result<()> {
402
        unsafe {
403
0
            if libc::mlock(self.ptr, self.len) != 0 {
404
0
                Err(io::Error::last_os_error())
405
            } else {
406
0
                Ok(())
407
            }
408
        }
409
0
    }
410
411
0
    pub fn unlock(&self) -> io::Result<()> {
412
        unsafe {
413
0
            if libc::munlock(self.ptr, self.len) != 0 {
414
0
                Err(io::Error::last_os_error())
415
            } else {
416
0
                Ok(())
417
            }
418
        }
419
0
    }
420
}
421
422
impl Drop for MmapInner {
423
0
    fn drop(&mut self) {
424
0
        let (ptr, len, _) = self.as_mmap_params();
425
426
        // Any errors during unmapping/closing are ignored as the only way
427
        // to report them would be through panicking which is highly discouraged
428
        // in Drop impls, c.f. https://github.com/rust-lang/lang-team/issues/97
429
0
        unsafe { libc::munmap(ptr, len as libc::size_t) };
430
0
    }
431
}
432
433
// SAFETY(review): the raw `*mut c_void` field suppresses the auto traits;
// they are re-asserted here on the assumption that the mapping is plain
// process memory with no thread affinity — synchronization of concurrent
// access is left to the safe wrappers above this type.
unsafe impl Sync for MmapInner {}
unsafe impl Send for MmapInner {}
435
436
0
fn page_size() -> usize {
437
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
438
439
0
    match PAGE_SIZE.load(Ordering::Relaxed) {
440
        0 => {
441
0
            let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
442
443
0
            PAGE_SIZE.store(page_size, Ordering::Relaxed);
444
445
0
            page_size
446
        }
447
0
        page_size => page_size,
448
    }
449
0
}
450
451
0
/// Returns the length in bytes of the file behind `file`, without taking
/// ownership of the descriptor.
pub fn file_len(file: RawFd) -> io::Result<u64> {
    // SAFETY: we must not close the caller's fd. Wrapping the File in
    // ManuallyDrop immediately guarantees its Drop (which would close
    // the fd) never runs.
    let file = ManuallyDrop::new(unsafe { File::from_raw_fd(file) });
    Ok(file.metadata()?.len())
}