Coverage Report

Created: 2025-06-13 06:29

/src/gdal/port/cpl_virtualmem.cpp
All instrumented lines below have an execution count of 0: the file is entirely uncovered in this run.
/**********************************************************************
 *
 * Name:     cpl_virtualmem.cpp
 * Project:  CPL - Common Portability Library
 * Purpose:  Virtual memory
 * Author:   Even Rouault, <even dot rouault at spatialys.com>
 *
 **********************************************************************
 * Copyright (c) 2014, Even Rouault <even dot rouault at spatialys.com>
 *
 * SPDX-License-Identifier: MIT
 ****************************************************************************/

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

// To have a 64-bit off_t, where possible.
#ifndef _FILE_OFFSET_BITS
#define _FILE_OFFSET_BITS 64
#endif

#include "cpl_virtualmem.h"

#include <algorithm>
#include <cassert>

#include "cpl_atomic_ops.h"
#include "cpl_config.h"
#include "cpl_conv.h"
#include "cpl_error.h"
#include "cpl_multiproc.h"

#ifdef NDEBUG
// NDEBUG (release build): ignore the result.
#define IGNORE_OR_ASSERT_IN_DEBUG(expr) CPL_IGNORE_RET_VAL((expr))
#else
// Debug: assert.
#define IGNORE_OR_ASSERT_IN_DEBUG(expr) assert((expr))
#endif

#if defined(__linux) && defined(CPL_MULTIPROC_PTHREAD)
#ifndef HAVE_5ARGS_MREMAP
// FIXME? gcore/virtualmem.py tests fail/crash when HAVE_5ARGS_MREMAP
// is not defined.
#warning "HAVE_5ARGS_MREMAP not found. Disabling HAVE_VIRTUAL_MEM_VMA"
#else
#define HAVE_VIRTUAL_MEM_VMA
#endif
#endif

#if defined(HAVE_MMAP) || defined(HAVE_VIRTUAL_MEM_VMA)
#include <unistd.h>    // read, write, close, pipe, sysconf
#include <sys/mman.h>  // mmap, munmap, mremap
#endif

typedef enum
{
    VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED,
    VIRTUAL_MEM_TYPE_VMA
} CPLVirtualMemType;

struct CPLVirtualMem
{
    CPLVirtualMemType eType;

    struct CPLVirtualMem *pVMemBase;
    int nRefCount;

    CPLVirtualMemAccessMode eAccessMode;

    size_t nPageSize;
    // Aligned on nPageSize.
    void *pData;
    // Returned by mmap(), potentially lower than pData.
    void *pDataToFree;
    // Requested size (unrounded).
    size_t nSize;

    bool bSingleThreadUsage;

    void *pCbkUserData;
    CPLVirtualMemFreeUserData pfnFreeUserData;
};

#ifdef HAVE_VIRTUAL_MEM_VMA

#include <sys/select.h>  // select
#include <sys/stat.h>    // open()
#include <sys/types.h>   // open()
#include <errno.h>
#include <fcntl.h>   // open()
#include <signal.h>  // sigaction
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#ifndef HAVE_5ARGS_MREMAP
#include "cpl_atomic_ops.h"
#endif

/* Linux specific (i.e. non POSIX compliant) features used:
   - Returning from a SIGSEGV handler is clearly a POSIX violation, but in
     practice most POSIX systems should be happy.
   - mremap() with 5 args is Linux specific. It is used when the user
     callback is invited to fill a page: we currently mmap() a
     writable page, let the callback fill it, and afterwards mremap() that
     temporary page onto the location where the fault occurred.
     If we have no mremap(), the workaround is to pause the other threads that
     consume the current view while we are updating the faulted page, otherwise
     a non-paused thread could access a page that is in the middle of being
     filled. The way we pause those threads is quite original: we send them
     a SIGUSR1 and wait until they are stuck in the temporary SIGUSR1 handler.
   - MAP_ANONYMOUS isn't documented in POSIX, but is very commonly found
     (sometimes called MAP_ANON).
   - Dealing with the limitation on the number of memory mapping regions,
     and the 65536 limit.
   - Other things I've not identified.
*/
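
/* A minimal, self-contained sketch of the 5-argument mremap() technique
   described above; an added illustration, not part of cpl_virtualmem.cpp,
   assuming Linux (where _GNU_SOURCE exposes MREMAP_FIXED).  A temporary
   anonymous page is filled while the target page stays PROT_NONE, then
   atomically substituted at the target address: */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <cstdio>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    const size_t nPageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    // Reserve an inaccessible target page, as CPLVirtualMemNew() does.
    void *target = mmap(nullptr, nPageSize, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    // Fill a temporary writable page (the "user callback" step).
    void *tmp = mmap(nullptr, nPageSize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (target == MAP_FAILED || tmp == MAP_FAILED)
        return 1;
    memset(tmp, 0xAB, nPageSize);
    // Atomically move the filled page onto the (faulted) target address.
    void *ret = mremap(tmp, nPageSize, nPageSize,
                       MREMAP_MAYMOVE | MREMAP_FIXED, target);
    printf("%s\n", ret == target ? "remapped" : "mremap failed");
    return 0;
}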

#define ALIGN_DOWN(p, pagesize)                                                \
    reinterpret_cast<void *>((reinterpret_cast<GUIntptr_t>(p)) / (pagesize) *  \
                             (pagesize))
#define ALIGN_UP(p, pagesize)                                                  \
    reinterpret_cast<void *>(                                                  \
        cpl::div_round_up(reinterpret_cast<GUIntptr_t>(p), (pagesize)) *       \
        (pagesize))

#define DEFAULT_PAGE_SIZE (256 * 256)
#define MAXIMUM_PAGE_SIZE (32 * 1024 * 1024)

// Linux kernel limit.
#define MAXIMUM_COUNT_OF_MAPPINGS 65536

#define BYEBYE_ADDR (reinterpret_cast<void *>(~static_cast<size_t>(0)))

#define MAPPING_FOUND "yeah"
#define MAPPING_NOT_FOUND "doh!"

#define SET_BIT(ar, bitnumber) ar[(bitnumber) / 8] |= 1 << ((bitnumber) % 8)
#define UNSET_BIT(ar, bitnumber)                                               \
    ar[(bitnumber) / 8] &= ~(1 << ((bitnumber) % 8))
#define TEST_BIT(ar, bitnumber) (ar[(bitnumber) / 8] & (1 << ((bitnumber) % 8)))
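
/* A short usage sketch of the bitmap macros above (an added illustration,
   not part of the original file): page i of the view maps to bit (i % 8)
   of byte (i / 8) in an array sized (nPages + 7) / 8, which is exactly
   how CPLVirtualMemNew() sizes pabitMappedPages. */
static void DemoPageBitmap()
{
    GByte abyBitmap[2] = {0, 0};         // Tracks 16 pages.
    SET_BIT(abyBitmap, 10);              // Page 10 becomes mapped.
    CPLAssert(TEST_BIT(abyBitmap, 10));  // Bit 2 of byte 1 is now set.
    UNSET_BIT(abyBitmap, 10);            // Page 10 is evicted.
    CPLAssert(!TEST_BIT(abyBitmap, 10));
}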

typedef enum
{
    OP_LOAD,
    OP_STORE,
    OP_MOVS_RSI_RDI,
    OP_UNKNOWN
} OpType;

typedef struct
{
    CPLVirtualMem sBase;

    GByte *pabitMappedPages;
    GByte *pabitRWMappedPages;

    int nCacheMaxSizeInPages;  // Maximum size of page array.
    int *panLRUPageIndices;    // Array with indices of cached pages.
    int iLRUStart;             // Index in array where to
                               // write next page index.
    int nLRUSize;              // Current size of the array.

    int iLastPage;  // Last page accessed.
    int nRetry;     // Number of consecutive
                    // retries to that last page.

    CPLVirtualMemCachePageCbk pfnCachePage;      // Called when a page is
                                                 // mapped.
    CPLVirtualMemUnCachePageCbk pfnUnCachePage;  // Called when a (writable)
                                                 // page is unmapped.

#ifndef HAVE_5ARGS_MREMAP
    CPLMutex *hMutexThreadArray;
    int nThreads;
    pthread_t *pahThreads;
#endif
} CPLVirtualMemVMA;

typedef struct
{
    // hVirtualMemManagerMutex protects the 2 following variables.
    CPLVirtualMemVMA **pasVirtualMem;
    int nVirtualMemCount;

    int pipefd_to_thread[2];
    int pipefd_from_thread[2];
    int pipefd_wait_thread[2];
    CPLJoinableThread *hHelperThread;

    // Using sigaction without testing HAVE_SIGACTION since we are in a
    // Linux-specific code path.
    struct sigaction oldact;
} CPLVirtualMemManager;

typedef struct
{
    void *pFaultAddr;
    OpType opType;
    pthread_t hRequesterThread;
} CPLVirtualMemMsgToWorkerThread;

// TODO: Singletons.
static CPLVirtualMemManager *pVirtualMemManager = nullptr;
static CPLMutex *hVirtualMemManagerMutex = nullptr;

static bool CPLVirtualMemManagerInit();

#ifdef DEBUG_VIRTUALMEM

/************************************************************************/
/*                           fprintfstderr()                            */
/************************************************************************/

// This function may be called from signal handlers where most functions
// from the C library are unsafe to call. fprintf() is clearly one
// of those functions (see
// http://stackoverflow.com/questions/4554129/linux-glibc-can-i-use-fprintf-in-signal-handler)
// vsnprintf() is *probably* safer in that respect (but there is no
// guarantee).
// write() is async-signal-safe.
static void fprintfstderr(const char *fmt, ...)
{
    char buffer[80] = {};
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(buffer, sizeof(buffer), fmt, ap);
    va_end(ap);
    int offset = 0;
    while (true)
    {
        const size_t nSizeToWrite = strlen(buffer + offset);
        int ret = static_cast<int>(write(2, buffer + offset, nSizeToWrite));
        if (ret < 0 && errno == EINTR)
        {
            // Interrupted by a signal before anything was written: retry.
        }
        else
        {
            if (ret == static_cast<int>(nSizeToWrite))
                break;
            offset += ret;
        }
    }
}

#endif

/************************************************************************/
/*              CPLVirtualMemManagerRegisterVirtualMem()                */
/************************************************************************/

static bool CPLVirtualMemManagerRegisterVirtualMem(CPLVirtualMemVMA *ctxt)
{
    if (!CPLVirtualMemManagerInit())
        return false;

    bool bSuccess = true;
    IGNORE_OR_ASSERT_IN_DEBUG(ctxt);
    CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
    CPLVirtualMemVMA **pasVirtualMemNew = static_cast<CPLVirtualMemVMA **>(
        VSI_REALLOC_VERBOSE(pVirtualMemManager->pasVirtualMem,
                            sizeof(CPLVirtualMemVMA *) *
                                (pVirtualMemManager->nVirtualMemCount + 1)));
    if (pasVirtualMemNew == nullptr)
    {
        bSuccess = false;
    }
    else
    {
        pVirtualMemManager->pasVirtualMem = pasVirtualMemNew;
        pVirtualMemManager
            ->pasVirtualMem[pVirtualMemManager->nVirtualMemCount] = ctxt;
        pVirtualMemManager->nVirtualMemCount++;
    }
    CPLReleaseMutex(hVirtualMemManagerMutex);
    return bSuccess;
}

/************************************************************************/
/*               CPLVirtualMemManagerUnregisterVirtualMem()             */
/************************************************************************/

static void CPLVirtualMemManagerUnregisterVirtualMem(CPLVirtualMemVMA *ctxt)
{
    CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
    for (int i = 0; i < pVirtualMemManager->nVirtualMemCount; i++)
    {
        if (pVirtualMemManager->pasVirtualMem[i] == ctxt)
        {
            if (i < pVirtualMemManager->nVirtualMemCount - 1)
            {
                memmove(pVirtualMemManager->pasVirtualMem + i,
                        pVirtualMemManager->pasVirtualMem + i + 1,
                        sizeof(CPLVirtualMem *) *
                            (pVirtualMemManager->nVirtualMemCount - i - 1));
            }
            pVirtualMemManager->nVirtualMemCount--;
            break;
        }
    }
    CPLReleaseMutex(hVirtualMemManagerMutex);
}

/************************************************************************/
/*                           CPLVirtualMemNew()                         */
/************************************************************************/

static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMemVMA *ctxt);

CPLVirtualMem *CPLVirtualMemNew(size_t nSize, size_t nCacheSize,
                                size_t nPageSizeHint, int bSingleThreadUsage,
                                CPLVirtualMemAccessMode eAccessMode,
                                CPLVirtualMemCachePageCbk pfnCachePage,
                                CPLVirtualMemUnCachePageCbk pfnUnCachePage,
                                CPLVirtualMemFreeUserData pfnFreeUserData,
                                void *pCbkUserData)
{
    size_t nMinPageSize = CPLGetPageSize();
    size_t nPageSize = DEFAULT_PAGE_SIZE;

    IGNORE_OR_ASSERT_IN_DEBUG(nSize > 0);
    IGNORE_OR_ASSERT_IN_DEBUG(pfnCachePage != nullptr);

    if (nPageSizeHint >= nMinPageSize && nPageSizeHint <= MAXIMUM_PAGE_SIZE)
    {
        if ((nPageSizeHint % nMinPageSize) == 0)
            nPageSize = nPageSizeHint;
        else
        {
            int nbits = 0;
            nPageSize = static_cast<size_t>(nPageSizeHint);
            do
            {
                nPageSize >>= 1;
                nbits++;
            } while (nPageSize > 0);
            nPageSize = static_cast<size_t>(1) << (nbits - 1);
            if (nPageSize < static_cast<size_t>(nPageSizeHint))
                nPageSize <<= 1;
        }
    }

    if ((nPageSize % nMinPageSize) != 0)
        nPageSize = nMinPageSize;

    if (nCacheSize > nSize)
        nCacheSize = nSize;
    else if (nCacheSize == 0)
        nCacheSize = 1;

    int nMappings = 0;

    // Linux specific:
    // Count the number of existing memory mappings.
    FILE *f = fopen("/proc/self/maps", "rb");
    if (f != nullptr)
    {
        char buffer[80] = {};
        while (fgets(buffer, sizeof(buffer), f) != nullptr)
            nMappings++;
        fclose(f);
    }

    size_t nCacheMaxSizeInPages = 0;
    while (true)
    {
        // /proc/self/maps must not have more than 65K lines.
        nCacheMaxSizeInPages = (nCacheSize + 2 * nPageSize - 1) / nPageSize;
        if (nCacheMaxSizeInPages >
            static_cast<size_t>((MAXIMUM_COUNT_OF_MAPPINGS * 9 / 10) -
                                nMappings))
            nPageSize <<= 1;
        else
            break;
    }
    size_t nRoundedMappingSize =
        ((nSize + 2 * nPageSize - 1) / nPageSize) * nPageSize;
    void *pData = mmap(nullptr, nRoundedMappingSize, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (pData == MAP_FAILED)
    {
        perror("mmap");
        return nullptr;
    }
    CPLVirtualMemVMA *ctxt = static_cast<CPLVirtualMemVMA *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMemVMA)));
    if (ctxt == nullptr)
    {
        munmap(pData, nRoundedMappingSize);
        return nullptr;
    }
    ctxt->sBase.nRefCount = 1;
    ctxt->sBase.eType = VIRTUAL_MEM_TYPE_VMA;
    ctxt->sBase.eAccessMode = eAccessMode;
    ctxt->sBase.pDataToFree = pData;
    ctxt->sBase.pData = ALIGN_UP(pData, nPageSize);
    ctxt->sBase.nPageSize = nPageSize;
    ctxt->sBase.nSize = nSize;
    ctxt->sBase.bSingleThreadUsage = CPL_TO_BOOL(bSingleThreadUsage);
    ctxt->sBase.pfnFreeUserData = pfnFreeUserData;
    ctxt->sBase.pCbkUserData = pCbkUserData;

    ctxt->pabitMappedPages = static_cast<GByte *>(
        VSI_CALLOC_VERBOSE(1, (nRoundedMappingSize / nPageSize + 7) / 8));
    if (ctxt->pabitMappedPages == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    ctxt->pabitRWMappedPages = static_cast<GByte *>(
        VSI_CALLOC_VERBOSE(1, (nRoundedMappingSize / nPageSize + 7) / 8));
    if (ctxt->pabitRWMappedPages == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    // Need at least 2 pages in case a rep movs instruction
    // operates in the view.
    ctxt->nCacheMaxSizeInPages = static_cast<int>(nCacheMaxSizeInPages);
    ctxt->panLRUPageIndices = static_cast<int *>(
        VSI_MALLOC_VERBOSE(ctxt->nCacheMaxSizeInPages * sizeof(int)));
    if (ctxt->panLRUPageIndices == nullptr)
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }
    ctxt->iLRUStart = 0;
    ctxt->nLRUSize = 0;
    ctxt->iLastPage = -1;
    ctxt->nRetry = 0;
    ctxt->pfnCachePage = pfnCachePage;
    ctxt->pfnUnCachePage = pfnUnCachePage;

#ifndef HAVE_5ARGS_MREMAP
    if (!ctxt->sBase.bSingleThreadUsage)
    {
        ctxt->hMutexThreadArray = CPLCreateMutex();
        IGNORE_OR_ASSERT_IN_DEBUG(ctxt->hMutexThreadArray != nullptr);
        CPLReleaseMutex(ctxt->hMutexThreadArray);
        ctxt->nThreads = 0;
        ctxt->pahThreads = nullptr;
    }
#endif

    if (!CPLVirtualMemManagerRegisterVirtualMem(ctxt))
    {
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
        CPLFree(ctxt);
        return nullptr;
    }

    return reinterpret_cast<CPLVirtualMem *>(ctxt);
}
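
/* A minimal usage sketch of CPLVirtualMemNew() (an added illustration, not
   part of the original file), assuming the callback signature declared in
   cpl_virtualmem.h: a 1 MB read-only view whose pages are synthesized on
   demand by the cache-page callback. */
static void DemoFillPage(CPLVirtualMem * /*ctxt*/, size_t nOffset,
                         void *pPageToFill, size_t nToFill,
                         void * /*pUserData*/)
{
    // Derive the page content from its offset in the view.
    memset(pPageToFill, static_cast<int>(nOffset & 0xFF), nToFill);
}

static void DemoCPLVirtualMemNew()
{
    CPLVirtualMem *psMem = CPLVirtualMemNew(
        1024 * 1024,          // nSize: size of the virtual view.
        64 * 1024,            // nCacheSize: max bytes kept resident.
        0,                    // nPageSizeHint: let the implementation pick.
        false,                // bSingleThreadUsage.
        VIRTUALMEM_READONLY,  // Stores into the view are not allowed.
        DemoFillPage, nullptr, nullptr, nullptr);
    if (psMem != nullptr)
    {
        const GByte *pabyData =
            static_cast<const GByte *>(CPLVirtualMemGetAddr(psMem));
        const GByte byVal = pabyData[123456];  // Faults; DemoFillPage() runs.
        (void)byVal;
        CPLVirtualMemFree(psMem);
    }
}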

/************************************************************************/
/*                  CPLVirtualMemFreeFileMemoryMapped()                 */
/************************************************************************/

static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMemVMA *ctxt)
{
    CPLVirtualMemManagerUnregisterVirtualMem(ctxt);

    size_t nRoundedMappingSize =
        ((ctxt->sBase.nSize + 2 * ctxt->sBase.nPageSize - 1) /
         ctxt->sBase.nPageSize) *
        ctxt->sBase.nPageSize;
    if (ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
        ctxt->pabitRWMappedPages != nullptr && ctxt->pfnUnCachePage != nullptr)
    {
        for (size_t i = 0; i < nRoundedMappingSize / ctxt->sBase.nPageSize; i++)
        {
            if (TEST_BIT(ctxt->pabitRWMappedPages, i))
            {
                void *addr = static_cast<char *>(ctxt->sBase.pData) +
                             i * ctxt->sBase.nPageSize;
                ctxt->pfnUnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                     i * ctxt->sBase.nPageSize, addr,
                                     ctxt->sBase.nPageSize,
                                     ctxt->sBase.pCbkUserData);
            }
        }
    }
    int nRet = munmap(ctxt->sBase.pDataToFree, nRoundedMappingSize);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    CPLFree(ctxt->pabitMappedPages);
    CPLFree(ctxt->pabitRWMappedPages);
    CPLFree(ctxt->panLRUPageIndices);
#ifndef HAVE_5ARGS_MREMAP
    if (!ctxt->sBase.bSingleThreadUsage)
    {
        CPLFree(ctxt->pahThreads);
        CPLDestroyMutex(ctxt->hMutexThreadArray);
    }
#endif
}

#ifndef HAVE_5ARGS_MREMAP

static volatile int nCountThreadsInSigUSR1 = 0;
static volatile int nWaitHelperThread = 0;

/************************************************************************/
/*                   CPLVirtualMemSIGUSR1Handler()                      */
/************************************************************************/

static void CPLVirtualMemSIGUSR1Handler(int /* signum_unused */,
                                        siginfo_t * /* the_info_unused */,
                                        void * /* the_ctxt_unused */)
{
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("entering CPLVirtualMemSIGUSR1Handler %X\n", pthread_self());
#endif
    // Rouault guesses this is only POSIX correct if it is implemented by an
    // intrinsic.
    CPLAtomicInc(&nCountThreadsInSigUSR1);
    while (nWaitHelperThread)
        // Not explicitly documented as async-signal-safe, but hopefully OK.
        usleep(1);
    CPLAtomicDec(&nCountThreadsInSigUSR1);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("leaving CPLVirtualMemSIGUSR1Handler %X\n", pthread_self());
#endif
}
#endif

/************************************************************************/
/*                      CPLVirtualMemDeclareThread()                    */
/************************************************************************/

void CPLVirtualMemDeclareThread(CPLVirtualMem *ctxt)
{
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        return;
#ifndef HAVE_5ARGS_MREMAP
    CPLVirtualMemVMA *ctxtVMA = reinterpret_cast<CPLVirtualMemVMA *>(ctxt);
    IGNORE_OR_ASSERT_IN_DEBUG(!ctxt->bSingleThreadUsage);
    CPLAcquireMutex(ctxtVMA->hMutexThreadArray, 1000.0);
    ctxtVMA->pahThreads = static_cast<pthread_t *>(CPLRealloc(
        ctxtVMA->pahThreads, (ctxtVMA->nThreads + 1) * sizeof(pthread_t)));
    ctxtVMA->pahThreads[ctxtVMA->nThreads] = pthread_self();
    ctxtVMA->nThreads++;

    CPLReleaseMutex(ctxtVMA->hMutexThreadArray);
#endif
}

/************************************************************************/
/*                     CPLVirtualMemUnDeclareThread()                   */
/************************************************************************/

void CPLVirtualMemUnDeclareThread(CPLVirtualMem *ctxt)
{
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        return;
#ifndef HAVE_5ARGS_MREMAP
    CPLVirtualMemVMA *ctxtVMA = reinterpret_cast<CPLVirtualMemVMA *>(ctxt);
    pthread_t self = pthread_self();
    IGNORE_OR_ASSERT_IN_DEBUG(!ctxt->bSingleThreadUsage);
    CPLAcquireMutex(ctxtVMA->hMutexThreadArray, 1000.0);
    for (int i = 0; i < ctxtVMA->nThreads; i++)
    {
        if (ctxtVMA->pahThreads[i] == self)
        {
            // Shift the following entries down over the removed slot.
            if (i < ctxtVMA->nThreads - 1)
                memmove(ctxtVMA->pahThreads + i, ctxtVMA->pahThreads + i + 1,
                        (ctxtVMA->nThreads - 1 - i) * sizeof(pthread_t));
            ctxtVMA->nThreads--;
            break;
        }
    }

    CPLReleaseMutex(ctxtVMA->hMutexThreadArray);
#endif
}

/************************************************************************/
/*                     CPLVirtualMemGetPageToFill()                     */
/************************************************************************/

// Must be paired with CPLVirtualMemAddPage.
static void *CPLVirtualMemGetPageToFill(CPLVirtualMemVMA *ctxt,
                                        void *start_page_addr)
{
    void *pPageToFill = nullptr;

    if (ctxt->sBase.bSingleThreadUsage)
    {
        pPageToFill = start_page_addr;
        const int nRet = mprotect(pPageToFill, ctxt->sBase.nPageSize,
                                  PROT_READ | PROT_WRITE);
        IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    }
    else
    {
#ifndef HAVE_5ARGS_MREMAP
        CPLAcquireMutex(ctxt->hMutexThreadArray, 1000.0);
        if (ctxt->nThreads == 1)
        {
            pPageToFill = start_page_addr;
            const int nRet = mprotect(pPageToFill, ctxt->sBase.nPageSize,
                                      PROT_READ | PROT_WRITE);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
        else
#endif
        {
            // Allocate a temporary writable page that the user
            // callback can fill.
            pPageToFill =
                mmap(nullptr, ctxt->sBase.nPageSize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            IGNORE_OR_ASSERT_IN_DEBUG(pPageToFill != MAP_FAILED);
        }
    }
    return pPageToFill;
}

/************************************************************************/
/*                        CPLVirtualMemAddPage()                        */
/************************************************************************/

static void CPLVirtualMemAddPage(CPLVirtualMemVMA *ctxt, void *target_addr,
                                 void *pPageToFill, OpType opType,
                                 pthread_t hRequesterThread)
{
    const int iPage =
        static_cast<int>((static_cast<char *>(target_addr) -
                          static_cast<char *>(ctxt->sBase.pData)) /
                         ctxt->sBase.nPageSize);
    if (ctxt->nLRUSize == ctxt->nCacheMaxSizeInPages)
    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
        fprintfstderr("uncaching page %d\n", iPage);
#endif
        int nOldPage = ctxt->panLRUPageIndices[ctxt->iLRUStart];
        void *addr = static_cast<char *>(ctxt->sBase.pData) +
                     nOldPage * ctxt->sBase.nPageSize;
        if (ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
            ctxt->pfnUnCachePage != nullptr &&
            TEST_BIT(ctxt->pabitRWMappedPages, nOldPage))
        {
            size_t nToBeEvicted = ctxt->sBase.nPageSize;
            if (static_cast<char *>(addr) + nToBeEvicted >=
                static_cast<char *>(ctxt->sBase.pData) + ctxt->sBase.nSize)
                nToBeEvicted = static_cast<char *>(ctxt->sBase.pData) +
                               ctxt->sBase.nSize - static_cast<char *>(addr);

            ctxt->pfnUnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                 nOldPage * ctxt->sBase.nPageSize, addr,
                                 nToBeEvicted, ctxt->sBase.pCbkUserData);
        }
        // "Free" the least recently used page.
        UNSET_BIT(ctxt->pabitMappedPages, nOldPage);
        UNSET_BIT(ctxt->pabitRWMappedPages, nOldPage);
        // Free the old page.
        // Not sure how portable it is to do it that way.
        const void *const pRet =
            mmap(addr, ctxt->sBase.nPageSize, PROT_NONE,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        IGNORE_OR_ASSERT_IN_DEBUG(pRet == addr);
        // cppcheck-suppress memleak
    }
    ctxt->panLRUPageIndices[ctxt->iLRUStart] = iPage;
    ctxt->iLRUStart = (ctxt->iLRUStart + 1) % ctxt->nCacheMaxSizeInPages;
    if (ctxt->nLRUSize < ctxt->nCacheMaxSizeInPages)
    {
        ctxt->nLRUSize++;
    }
    SET_BIT(ctxt->pabitMappedPages, iPage);

    if (ctxt->sBase.bSingleThreadUsage)
    {
        if (opType == OP_STORE &&
            ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
        {
            // Let (and mark) the page writable since the instruction that
            // triggered the fault is a store.
            SET_BIT(ctxt->pabitRWMappedPages, iPage);
        }
        else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
        {
            const int nRet =
                mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
    }
    else
    {
#ifdef HAVE_5ARGS_MREMAP
        (void)hRequesterThread;

        if (opType == OP_STORE &&
            ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
        {
            // Let (and mark) the page writable since the instruction that
            // triggered the fault is a store.
            SET_BIT(ctxt->pabitRWMappedPages, iPage);
        }
        else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
        {
            // Turn the temporary page read-only before remapping it.
            // Only turn it writable when a new fault occurs (and the
            // mapping is writable).
            const int nRet =
                mprotect(pPageToFill, ctxt->sBase.nPageSize, PROT_READ);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
        }
        /* Can now remap the pPageToFill onto the target page. */
        const void *const pRet =
            mremap(pPageToFill, ctxt->sBase.nPageSize, ctxt->sBase.nPageSize,
                   MREMAP_MAYMOVE | MREMAP_FIXED, target_addr);
        IGNORE_OR_ASSERT_IN_DEBUG(pRet == target_addr);

#else
        if (ctxt->nThreads > 1)
        {
            /* Pause threads that share this mem view. */
            CPLAtomicInc(&nWaitHelperThread);

            /* Install temporary SIGUSR1 signal handler. */
            struct sigaction act, oldact;
            act.sa_sigaction = CPLVirtualMemSIGUSR1Handler;
            sigemptyset(&act.sa_mask);
            /* We don't want the sigsegv handler to be called when we are */
            /* running the sigusr1 handler. */
            IGNORE_OR_ASSERT_IN_DEBUG(sigaddset(&act.sa_mask, SIGSEGV) == 0);
            act.sa_flags = 0;
            IGNORE_OR_ASSERT_IN_DEBUG(sigaction(SIGUSR1, &act, &oldact) == 0);

            for (int i = 0; i < ctxt->nThreads; i++)
            {
                if (ctxt->pahThreads[i] != hRequesterThread)
                {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                    fprintfstderr("stopping thread %X\n", ctxt->pahThreads[i]);
#endif
                    IGNORE_OR_ASSERT_IN_DEBUG(
                        pthread_kill(ctxt->pahThreads[i], SIGUSR1) == 0);
                }
            }

            /* Wait until they are all paused. */
            while (nCountThreadsInSigUSR1 != ctxt->nThreads - 1)
                usleep(1);

            /* Restore old SIGUSR1 signal handler. */
            IGNORE_OR_ASSERT_IN_DEBUG(sigaction(SIGUSR1, &oldact, nullptr) ==
                                      0);

            int nRet = mprotect(target_addr, ctxt->sBase.nPageSize,
                                PROT_READ | PROT_WRITE);
            IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("memcpying page %d\n", iPage);
#endif
            memcpy(target_addr, pPageToFill, ctxt->sBase.nPageSize);

            if (opType == OP_STORE &&
                ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
            {
                // Let (and mark) the page writable since the instruction that
                // triggered the fault is a store.
                SET_BIT(ctxt->pabitRWMappedPages, iPage);
            }
            else
            {
                nRet = mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
                IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
            }

            /* Wake up sleeping threads. */
            CPLAtomicDec(&nWaitHelperThread);
            while (nCountThreadsInSigUSR1 != 0)
                usleep(1);

            IGNORE_OR_ASSERT_IN_DEBUG(
                munmap(pPageToFill, ctxt->sBase.nPageSize) == 0);
        }
        else
        {
            if (opType == OP_STORE &&
                ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE)
            {
                // Let (and mark) the page writable since the instruction that
                // triggered the fault is a store.
                SET_BIT(ctxt->pabitRWMappedPages, iPage);
            }
            else if (ctxt->sBase.eAccessMode != VIRTUALMEM_READONLY)
            {
                const int nRet2 =
                    mprotect(target_addr, ctxt->sBase.nPageSize, PROT_READ);
                IGNORE_OR_ASSERT_IN_DEBUG(nRet2 == 0);
            }
        }

        CPLReleaseMutex(ctxt->hMutexThreadArray);
#endif
    }
    // cppcheck-suppress memleak
}
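
/* A minimal sketch (an added illustration, not part of the original file)
   of how the SIGSEGV machinery pairs CPLVirtualMemGetPageToFill() with
   CPLVirtualMemAddPage(), assuming start_page_addr is already rounded down
   to a page boundary and that pfnCachePage has the signature declared in
   cpl_virtualmem.h: */
static void DemoFaultPath(CPLVirtualMemVMA *ctxt, void *start_page_addr,
                          OpType opType)
{
    // 1. Obtain a writable page: the target itself when safe, otherwise a
    //    temporary anonymous page.
    void *pPageToFill = CPLVirtualMemGetPageToFill(ctxt, start_page_addr);
    // 2. Let the user callback generate the page content.
    const size_t nOffset =
        static_cast<size_t>(static_cast<char *>(start_page_addr) -
                            static_cast<char *>(ctxt->sBase.pData));
    ctxt->pfnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt), nOffset,
                       pPageToFill, ctxt->sBase.nPageSize,
                       ctxt->sBase.pCbkUserData);
    // 3. Publish the page into the view, evicting the LRU page if the
    //    cache is full.
    CPLVirtualMemAddPage(ctxt, start_page_addr, pPageToFill, opType,
                         pthread_self());
}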

/************************************************************************/
/*                    CPLVirtualMemGetOpTypeImm()                       */
/************************************************************************/

#if defined(__x86_64__) || defined(__i386__)
static OpType CPLVirtualMemGetOpTypeImm(GByte val_rip)
{
    OpType opType = OP_UNKNOWN;
    if ((/*val_rip >= 0x00 &&*/ val_rip <= 0x07) ||
        (val_rip >= 0x40 && val_rip <= 0x47))  // add $, (X)
        opType = OP_STORE;
    if ((val_rip >= 0x08 && val_rip <= 0x0f) ||
        (val_rip >= 0x48 && val_rip <= 0x4f))  // or $, (X)
        opType = OP_STORE;
    if ((val_rip >= 0x20 && val_rip <= 0x27) ||
        (val_rip >= 0x60 && val_rip <= 0x67))  // and $, (X)
        opType = OP_STORE;
    if ((val_rip >= 0x28 && val_rip <= 0x2f) ||
        (val_rip >= 0x68 && val_rip <= 0x6f))  // sub $, (X)
        opType = OP_STORE;
    if ((val_rip >= 0x30 && val_rip <= 0x37) ||
        (val_rip >= 0x70 && val_rip <= 0x77))  // xor $, (X)
        opType = OP_STORE;
    if ((val_rip >= 0x38 && val_rip <= 0x3f) ||
        (val_rip >= 0x78 && val_rip <= 0x7f))  // cmp $, (X)
        opType = OP_LOAD;
    return opType;
}
#endif
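
/* A minimal sketch (an added illustration, not part of the original file)
   of how the immediate-form table above classifies a faulting access.
   For "cmpb $0x5,(%rdi)" the bytes are 0x80 0x3f 0x05: the ModRM byte
   0x3f falls in the 0x38-0x3f "cmp" row, so the access is a load. */
#if defined(__x86_64__) || defined(__i386__)
static void DemoOpTypeImm()
{
    CPLAssert(CPLVirtualMemGetOpTypeImm(0x3f) == OP_LOAD);   // cmp $,(X)
    CPLAssert(CPLVirtualMemGetOpTypeImm(0x07) == OP_STORE);  // add $,(X)
}
#endif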

/************************************************************************/
/*                      CPLVirtualMemGetOpType()                        */
/************************************************************************/

// Don't need exhaustivity. It is just a hint for an optimization:
// If the fault occurs on a store operation, then we can directly put
// the page in writable mode if the mapping allows it.

#if defined(__x86_64__) || defined(__i386__)
static OpType CPLVirtualMemGetOpType(const GByte *rip)
{
    OpType opType = OP_UNKNOWN;

#if defined(__x86_64__) || defined(__i386__)
    switch (rip[0])
    {
        case 0x00: /* add %al,(%rax) */
        case 0x01: /* add %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x02: /* add (%rax),%al */
        case 0x03: /* add (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x08: /* or %al,(%rax) */
        case 0x09: /* or %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x0a: /* or (%rax),%al */
        case 0x0b: /* or (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x0f:
        {
            switch (rip[1])
            {
                case 0xb6: /* movzbl (%rax),%eax */
                case 0xb7: /* movzwl (%rax),%eax */
                case 0xbe: /* movsbl (%rax),%eax */
                case 0xbf: /* movswl (%rax),%eax */
                    opType = OP_LOAD;
                    break;
                default:
                    break;
            }
            break;
        }
        case 0xc6: /* movb $,(%rax) */
        case 0xc7: /* movl $,(%rax) */
            opType = OP_STORE;
            break;

        case 0x20: /* and %al,(%rax) */
        case 0x21: /* and %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x22: /* and (%rax),%al */
        case 0x23: /* and (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x28: /* sub %al,(%rax) */
        case 0x29: /* sub %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x2a: /* sub (%rax),%al */
        case 0x2b: /* sub (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x30: /* xor %al,(%rax) */
        case 0x31: /* xor %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x32: /* xor (%rax),%al */
        case 0x33: /* xor (%rax),%eax */
            opType = OP_LOAD;
            break;

        case 0x38: /* cmp %al,(%rax) */
        case 0x39: /* cmp %eax,(%rax) */
            opType = OP_LOAD;
            break;
        case 0x40:
        {
            switch (rip[1])
            {
                case 0x00: /* add %spl,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x02: /* add (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x28: /* sub %spl,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x2a: /* sub (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x3a: /* cmp (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                case 0x8a: /* mov (%rax),%spl */
                    opType = OP_LOAD;
                    break;
                default:
                    break;
            }
            break;
        }
#if defined(__x86_64__)
        case 0x41: /* reg=%al/%eax, X=%r8 */
        case 0x42: /* reg=%al/%eax, X=%rax,%r8,1 */
        case 0x43: /* reg=%al/%eax, X=%r8,%r8,1 */
        case 0x44: /* reg=%r8b/%r8w, X = %rax */
        case 0x45: /* reg=%r8b/%r8w, X = %r8 */
        case 0x46: /* reg=%r8b/%r8w, X = %rax,%r8,1 */
        case 0x47: /* reg=%r8b/%r8w, X = %r8,%r8,1 */
        {
            switch (rip[1])
            {
                case 0x00: /* add regb,(X) */
                case 0x01: /* add regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x02: /* add (X),regb */
                case 0x03: /* add (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0xb6: /* movzbl (X),regl */
                        case 0xb7: /* movzwl (X),regl */
                        case 0xbe: /* movsbl (X),regl */
                        case 0xbf: /* movswl (X),regl */
                            opType = OP_LOAD;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x28: /* sub regb,(X) */
                case 0x29: /* sub regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x2a: /* sub (X),regb */
                case 0x2b: /* sub (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0x38: /* cmp regb,(X) */
                case 0x39: /* cmp regl,(X) */
                    opType = OP_LOAD;
                    break;
                case 0x80: /* cmpb,... $,(X) */
                case 0x81: /* cmpl,... $,(X) */
                case 0x83: /* cmpl,... $,(X) */
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;
                case 0x88: /* mov regb,(X) */
                case 0x89: /* mov regl,(X) */
                    opType = OP_STORE;
                    break;
                case 0x8a: /* mov (X),regb */
                case 0x8b: /* mov (X),regl */
                    opType = OP_LOAD;
                    break;
                case 0xc6: /* movb $,(X) */
                case 0xc7: /* movl $,(X) */
                    opType = OP_STORE;
                    break;
                case 0x84: /* test %al,(X) */
                    opType = OP_LOAD;
                    break;
                case 0xf6: /* testb $,(X) or notb (X) */
                case 0xf7: /* testl $,(X) or notl (X) */
                {
                    if (rip[2] < 0x10) /* test (X) */
                        opType = OP_LOAD;
                    else /* not (X) */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
        case 0x48: /* reg=%rax, X=%rax or %rax,%rax,1 */
        case 0x49: /* reg=%rax, X=%r8 or %r8,%rax,1 */
        case 0x4a: /* reg=%rax, X=%rax,%r8,1 */
        case 0x4b: /* reg=%rax, X=%r8,%r8,1 */
        case 0x4c: /* reg=%r8, X=%rax or %rax,%rax,1 */
        case 0x4d: /* reg=%r8, X=%r8 or %r8,%rax,1 */
        case 0x4e: /* reg=%r8, X=%rax,%r8,1 */
        case 0x4f: /* reg=%r8, X=%r8,%r8,1 */
        {
            switch (rip[1])
            {
                case 0x01: /* add reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x03: /* add (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x09: /* or reg,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x0b: /* or (%rax),reg */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0xc3: /* movnti reg,(X) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x21: /* and reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x23: /* and (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x29: /* sub reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x2b: /* sub (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x31: /* xor reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x33: /* xor (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0x39: /* cmp reg,(X) */
                    opType = OP_LOAD;
                    break;

                case 0x81:
                case 0x83:
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;

                case 0x85: /* test reg,(X) */
                    opType = OP_LOAD;
                    break;

                case 0x89: /* mov reg,(X) */
                    opType = OP_STORE;
                    break;
                case 0x8b: /* mov (X),reg */
                    opType = OP_LOAD;
                    break;

                case 0xc7: /* movq $,(X) */
                    opType = OP_STORE;
                    break;

                case 0xf7:
                {
                    if (rip[2] < 0x10) /* testq $,(X) */
                        opType = OP_LOAD;
                    else /* notq (X) */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
#endif
        case 0x66:
        {
            switch (rip[1])
            {
                case 0x01: /* add %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x03: /* add (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0x2e: /* ucomisd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x6f: /* movdqa (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x7f: /* movdqa %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0xb6: /* movzbw (%rax),%ax */
                            opType = OP_LOAD;
                            break;
                        case 0xe7: /* movntdq %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0x29: /* sub %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x2b: /* sub (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0x39: /* cmp %ax,(%rax) */
                    opType = OP_LOAD;
                    break;
#if defined(__x86_64__)
                case 0x41: /* reg = %ax (or %xmm0), X = %r8 */
                case 0x42: /* reg = %ax (or %xmm0), X = %rax,%r8,1 */
                case 0x43: /* reg = %ax (or %xmm0), X = %r8,%r8,1 */
                case 0x44: /* reg = %r8w (or %xmm8), X = %rax */
                case 0x45: /* reg = %r8w (or %xmm8), X = %r8 */
                case 0x46: /* reg = %r8w (or %xmm8), X = %rax,%r8,1 */
                case 0x47: /* reg = %r8w (or %xmm8), X = %r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x01: /* add reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x03: /* add (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0x0f:
                        {
                            switch (rip[3])
                            {
                                case 0x2e: /* ucomisd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x6f: /* movdqa (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x7f: /* movdqa reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0xb6: /* movzbw (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0xe7: /* movntdq reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        case 0x29: /* sub reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x2b: /* sub (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0x39: /* cmp reg,(X) */
                            opType = OP_LOAD;
                            break;
                        case 0x81: /* cmpw,... $,(X) */
                        case 0x83: /* cmpw,... $,(X) */
                            opType = CPLVirtualMemGetOpTypeImm(rip[3]);
                            break;
                        case 0x85: /* test reg,(X) */
                            opType = OP_LOAD;
                            break;
                        case 0x89: /* mov reg,(X) */
                            opType = OP_STORE;
                            break;
                        case 0x8b: /* mov (X),reg */
                            opType = OP_LOAD;
                            break;
                        case 0xc7: /* movw $,(X) */
                            opType = OP_STORE;
                            break;
                        case 0xf7:
                        {
                            if (rip[3] < 0x10) /* testw $,(X) */
                                opType = OP_LOAD;
                            else /* notw (X) */
                                opType = OP_STORE;
                            break;
                        }
                        default:
                            break;
                    }
                    break;
                }
#endif
                case 0x81: /* cmpw,... $,(%rax) */
                case 0x83: /* cmpw,... $,(%rax) */
                    opType = CPLVirtualMemGetOpTypeImm(rip[2]);
                    break;

                case 0x85: /* test %ax,(%rax) */
                    opType = OP_LOAD;
                    break;
                case 0x89: /* mov %ax,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0x8b: /* mov (%rax),%ax */
                    opType = OP_LOAD;
                    break;
                case 0xc7: /* movw $,(%rax) */
                    opType = OP_STORE;
                    break;
                case 0xf3:
                {
                    switch (rip[2])
                    {
                        case 0xa5: /* rep movsw %ds:(%rsi),%es:(%rdi) */
                            opType = OP_MOVS_RSI_RDI;
                            break;
                        default:
                            break;
                    }
                    break;
                }
                case 0xf7: /* testw $,(%rax) or notw (%rax) */
                {
                    if (rip[2] < 0x10) /* test */
                        opType = OP_LOAD;
                    else /* not */
                        opType = OP_STORE;
                    break;
                }
                default:
                    break;
            }
            break;
        }
        case 0x80: /* cmpb,... $,(%rax) */
        case 0x81: /* cmpl,... $,(%rax) */
        case 0x83: /* cmpl,... $,(%rax) */
            opType = CPLVirtualMemGetOpTypeImm(rip[1]);
            break;
        case 0x84: /* test %al,(%rax) */
        case 0x85: /* test %eax,(%rax) */
            opType = OP_LOAD;
            break;
        case 0x88: /* mov %al,(%rax) */
            opType = OP_STORE;
            break;
        case 0x89: /* mov %eax,(%rax) */
            opType = OP_STORE;
            break;
        case 0x8a: /* mov (%rax),%al */
            opType = OP_LOAD;
            break;
        case 0x8b: /* mov (%rax),%eax */
            opType = OP_LOAD;
            break;
        case 0xd9: /* 387 float */
        {
            if (rip[1] < 0x08) /* flds (%eax) */
                opType = OP_LOAD;
            else if (rip[1] >= 0x18 && rip[1] <= 0x20) /* fstps (%eax) */
                opType = OP_STORE;
            break;
        }
        case 0xf2: /* SSE 2 */
        {
            switch (rip[1])
            {
                case 0x0f:
                {
                    switch (rip[2])
                    {
                        case 0x10: /* movsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x11: /* movsd %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0x58: /* addsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x59: /* mulsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x5c: /* subsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x5e: /* divsd (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        default:
                            break;
                    }
                    break;
                }
#if defined(__x86_64__)
                case 0x41: /* reg=%xmm0, X=%r8 or %r8,%rax,1 */
                case 0x42: /* reg=%xmm0, X=%rax,%r8,1 */
                case 0x43: /* reg=%xmm0, X=%r8,%r8,1 */
                case 0x44: /* reg=%xmm8, X=%rax or %rax,%rax,1 */
                case 0x45: /* reg=%xmm8, X=%r8 or %r8,%rax,1 */
                case 0x46: /* reg=%xmm8, X=%rax,%r8,1 */
                case 0x47: /* reg=%xmm8, X=%r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x0f:
                        {
                            switch (rip[3])
                            {
                                case 0x10: /* movsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x11: /* movsd reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0x58: /* addsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x59: /* mulsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x5c: /* subsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x5e: /* divsd (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        default:
                            break;
                    }
                    break;
                }
#endif
                default:
                    break;
            }
            break;
        }
        case 0xf3:
        {
            switch (rip[1])
            {
                case 0x0f: /* SSE 2 */
                {
                    switch (rip[2])
                    {
                        case 0x10: /* movss (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x11: /* movss %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        case 0x6f: /* movdqu (%rax),%xmm0 */
                            opType = OP_LOAD;
                            break;
                        case 0x7f: /* movdqu %xmm0,(%rax) */
                            opType = OP_STORE;
                            break;
                        default:
                            break;
                    }
                    break;
                }
#if defined(__x86_64__)
                case 0x41: /* reg=%xmm0, X=%r8 */
                case 0x42: /* reg=%xmm0, X=%rax,%r8,1 */
                case 0x43: /* reg=%xmm0, X=%r8,%r8,1 */
                case 0x44: /* reg=%xmm8, X = %rax */
                case 0x45: /* reg=%xmm8, X = %r8 */
                case 0x46: /* reg=%xmm8, X = %rax,%r8,1 */
                case 0x47: /* reg=%xmm8, X = %r8,%r8,1 */
                {
                    switch (rip[2])
                    {
                        case 0x0f: /* SSE 2 */
                        {
                            switch (rip[3])
                            {
                                case 0x10: /* movss (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x11: /* movss reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                case 0x6f: /* movdqu (X),reg */
                                    opType = OP_LOAD;
                                    break;
                                case 0x7f: /* movdqu reg,(X) */
                                    opType = OP_STORE;
                                    break;
                                default:
                                    break;
                            }
                            break;
                        }
                        default:
                            break;
                    }
1459
0
                    break;
1460
0
                }
1461
0
                case 0x48:
1462
0
                {
1463
0
                    switch (rip[2])
1464
0
                    {
1465
0
                        case 0xa5: /* rep movsq %ds:(%rsi),%es:(%rdi) */
1466
0
                            opType = OP_MOVS_RSI_RDI;
1467
0
                            break;
1468
0
                        default:
1469
0
                            break;
1470
0
                    }
1471
0
                    break;
1472
0
                }
1473
0
#endif
1474
0
                case 0xa4: /* rep movsb %ds:(%rsi),%es:(%rdi) */
1475
0
                case 0xa5: /* rep movsl %ds:(%rsi),%es:(%rdi) */
1476
0
                    opType = OP_MOVS_RSI_RDI;
1477
0
                    break;
1478
0
                case 0xa6: /* repz cmpsb %es:(%rdi),%ds:(%rsi) */
1479
0
                    opType = OP_LOAD;
1480
0
                    break;
1481
0
                default:
1482
0
                    break;
1483
0
            }
1484
0
            break;
1485
0
        }
1486
0
        case 0xf6: /* testb $,(%rax) or notb (%rax) */
1487
0
        case 0xf7: /* testl $,(%rax) or notl (%rax) */
1488
0
        {
1489
0
            if (rip[1] < 0x10) /* test */
1490
0
                opType = OP_LOAD;
1491
0
            else /* not */
1492
0
                opType = OP_STORE;
1493
0
            break;
1494
0
        }
1495
0
        default:
1496
0
            break;
1497
0
    }
1498
0
#endif
1499
0
    return opType;
1500
0
}
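
// Illustrative self-check (not part of the original source) of the decoder
// above: the byte sequences are assumptions spelled out from the commented
// opcodes ("mov (%rax),%eax" is a load, "movsd %xmm0,(%rax)" a store).
// Deliberately unreferenced; it only documents the expected classification.
#if defined(__x86_64__) || defined(__i386__)
static void CPLVirtualMemCheckOpTypeExamples()
{
    const GByte abyMovLoad[] = {0x8b, 0x00, 0x00, 0x00};
    const GByte abyMovsdStore[] = {0xf2, 0x0f, 0x11, 0x00};
    IGNORE_OR_ASSERT_IN_DEBUG(CPLVirtualMemGetOpType(abyMovLoad) == OP_LOAD);
    IGNORE_OR_ASSERT_IN_DEBUG(CPLVirtualMemGetOpType(abyMovsdStore) ==
                              OP_STORE);
}
#endif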
#endif

/************************************************************************/
/*                    CPLVirtualMemManagerPinAddrInternal()             */
/************************************************************************/

static int
CPLVirtualMemManagerPinAddrInternal(CPLVirtualMemMsgToWorkerThread *msg)
{
    char wait_ready = '\0';
    char response_buf[4] = {};

    // Wait for the helper thread to be ready to process another request.
    while (true)
    {
        const int ret = static_cast<int>(
            read(pVirtualMemManager->pipefd_wait_thread[0], &wait_ready, 1));
        if (ret < 0 && errno == EINTR)
        {
            // Interrupted by a signal: retry the read.
        }
        else
        {
            IGNORE_OR_ASSERT_IN_DEBUG(ret == 1);
            break;
        }
    }

    // Pass the address that caused the fault to the helper thread.
    const ssize_t nRetWrite =
        write(pVirtualMemManager->pipefd_to_thread[1], msg, sizeof(*msg));
    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == sizeof(*msg));

    // Wait until the helper thread has fixed the fault.
    while (true)
    {
        const int ret = static_cast<int>(
            read(pVirtualMemManager->pipefd_from_thread[0], response_buf, 4));
        if (ret < 0 && errno == EINTR)
        {
            // Interrupted by a signal: retry the read.
        }
        else
        {
            IGNORE_OR_ASSERT_IN_DEBUG(ret == 4);
            break;
        }
    }

    // If the helper thread did not recognize the address as one that it
    // should take care of, the caller will fall back on the previous
    // SIGSEGV handler (which might abort the process).
    return (memcmp(response_buf, MAPPING_FOUND, 4) == 0);
}
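
// Minimal sketch (not part of the original source) of the EINTR-safe read
// idiom used twice above, factored into a hypothetical helper that also
// copes with short reads on the pipe.
static bool CPLVirtualMemReadPipeFully(int fd, void *pBuf, size_t nBytes)
{
    GByte *pabyDest = static_cast<GByte *>(pBuf);
    size_t nRemaining = nBytes;
    while (nRemaining > 0)
    {
        const ssize_t nRead = read(fd, pabyDest, nRemaining);
        if (nRead < 0 && errno == EINTR)
            continue;  // Interrupted by a signal: retry.
        if (nRead <= 0)
            return false;  // Error or unexpected end-of-pipe.
        pabyDest += nRead;
        nRemaining -= static_cast<size_t>(nRead);
    }
    return true;
}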

/************************************************************************/
/*                      CPLVirtualMemPin()                              */
/************************************************************************/

void CPLVirtualMemPin(CPLVirtualMem *ctxt, void *pAddr, size_t nSize,
                      int bWriteOp)
{
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        return;

    CPLVirtualMemMsgToWorkerThread msg;

    memset(&msg, 0, sizeof(msg));
    msg.hRequesterThread = pthread_self();
    msg.opType = (bWriteOp) ? OP_STORE : OP_LOAD;

    char *pBase = reinterpret_cast<char *>(ALIGN_DOWN(pAddr, ctxt->nPageSize));
    const size_t n = (reinterpret_cast<char *>(pAddr) - pBase + nSize +
                      ctxt->nPageSize - 1) /
                     ctxt->nPageSize;
    for (size_t i = 0; i < n; i++)
    {
        msg.pFaultAddr = reinterpret_cast<char *>(pBase) + i * ctxt->nPageSize;
        CPLVirtualMemManagerPinAddrInternal(&msg);
    }
}
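
// Illustrative compile-time check (not part of the original source) of the
// page-count arithmetic above: a range starting 100 bytes into a 4096-byte
// page and spanning 10000 bytes must touch exactly 3 pages.
static_assert((100 + 10000 + 4096 - 1) / 4096 == 3,
              "pinning such a range faults in pages 0, 1 and 2");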

/************************************************************************/
/*                   CPLVirtualMemManagerSIGSEGVHandler()               */
/************************************************************************/

#if defined(__x86_64__)
#define REG_IP REG_RIP
#define REG_SI REG_RSI
#define REG_DI REG_RDI
#elif defined(__i386__)
#define REG_IP REG_EIP
#define REG_SI REG_ESI
#define REG_DI REG_EDI
#endif

// Must take care of only using "asynchronous-signal-safe" functions in a
// signal handler. pthread_self(), read() and write() are such.  See:
// https://www.securecoding.cert.org/confluence/display/seccode/SIG30-C.+Call+only+asynchronous-safe+functions+within+signal+handlers
static void CPLVirtualMemManagerSIGSEGVHandler(int the_signal,
                                               siginfo_t *the_info,
                                               void *the_ctxt)
{
    CPLVirtualMemMsgToWorkerThread msg;

    memset(&msg, 0, sizeof(msg));
    msg.pFaultAddr = the_info->si_addr;
    msg.hRequesterThread = pthread_self();

#if defined(__x86_64__) || defined(__i386__)
    ucontext_t *the_ucontext = static_cast<ucontext_t *>(the_ctxt);
    const GByte *rip = reinterpret_cast<const GByte *>(
        the_ucontext->uc_mcontext.gregs[REG_IP]);
    msg.opType = CPLVirtualMemGetOpType(rip);
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("at rip %p, bytes: %02x %02x %02x %02x\n", rip, rip[0],
                  rip[1], rip[2], rip[3]);
#endif
    if (msg.opType == OP_MOVS_RSI_RDI)
    {
        void *rsi =
            reinterpret_cast<void *>(the_ucontext->uc_mcontext.gregs[REG_SI]);
        void *rdi =
            reinterpret_cast<void *>(the_ucontext->uc_mcontext.gregs[REG_DI]);

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
        fprintfstderr("fault=%p rsi=%p rdi=%p\n", msg.pFaultAddr, rsi, rdi);
#endif
        if (msg.pFaultAddr == rsi)
        {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("load\n");
#endif
            msg.opType = OP_LOAD;
        }
        else if (msg.pFaultAddr == rdi)
        {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
            fprintfstderr("store\n");
#endif
            msg.opType = OP_STORE;
        }
    }
#ifdef DEBUG_VIRTUALMEM
    else if (msg.opType == OP_UNKNOWN)
    {
        static bool bHasWarned = false;
        if (!bHasWarned)
        {
            bHasWarned = true;
            fprintfstderr("at rip %p, unknown bytes: %02x %02x %02x %02x\n",
                          rip, rip[0], rip[1], rip[2], rip[3]);
        }
    }
#endif
#else
    msg.opType = OP_UNKNOWN;
#endif

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("entering handler for %X (addr=%p)\n", pthread_self(),
                  the_info->si_addr);
#endif

    if (the_info->si_code != SEGV_ACCERR)
    {
        pVirtualMemManager->oldact.sa_sigaction(the_signal, the_info, the_ctxt);
        return;
    }

    if (!CPLVirtualMemManagerPinAddrInternal(&msg))
    {
        // The helper thread did not recognize the address as one that it
        // should take care of, so fall back on the previous SIGSEGV
        // handler (which might abort the process).
        pVirtualMemManager->oldact.sa_sigaction(the_signal, the_info, the_ctxt);
    }

#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
    fprintfstderr("leaving handler for %X (addr=%p)\n", pthread_self(),
                  the_info->si_addr);
#endif
}

/************************************************************************/
/*                      CPLVirtualMemManagerThread()                    */
/************************************************************************/

static void CPLVirtualMemManagerThread(void * /* unused_param */)
{
    while (true)
    {
        char i_m_ready = 1;
        CPLVirtualMemVMA *ctxt = nullptr;
        bool bMappingFound = false;
        CPLVirtualMemMsgToWorkerThread msg;

        // Signal that we are ready to process a new request.
        ssize_t nRetWrite =
            write(pVirtualMemManager->pipefd_wait_thread[1], &i_m_ready, 1);
        IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 1);

        // Fetch the address to process.
        const ssize_t nRetRead =
            read(pVirtualMemManager->pipefd_to_thread[0], &msg, sizeof(msg));
        IGNORE_OR_ASSERT_IN_DEBUG(nRetRead == sizeof(msg));

        // If CPLVirtualMemManagerTerminate() is called, it will use
        // BYEBYE_ADDR as a means to ask for our termination.
        if (msg.pFaultAddr == BYEBYE_ADDR)
            break;

        /* Look up a mapping that contains addr */
        CPLAcquireMutex(hVirtualMemManagerMutex, 1000.0);
        for (int i = 0; i < pVirtualMemManager->nVirtualMemCount; i++)
        {
            ctxt = pVirtualMemManager->pasVirtualMem[i];
            if (static_cast<char *>(msg.pFaultAddr) >=
                    static_cast<char *>(ctxt->sBase.pData) &&
                static_cast<char *>(msg.pFaultAddr) <
                    static_cast<char *>(ctxt->sBase.pData) + ctxt->sBase.nSize)
            {
                bMappingFound = true;
                break;
            }
        }
        CPLReleaseMutex(hVirtualMemManagerMutex);

        if (bMappingFound)
        {
            char *const start_page_addr = static_cast<char *>(
                ALIGN_DOWN(msg.pFaultAddr, ctxt->sBase.nPageSize));
            const int iPage =
                static_cast<int>((static_cast<char *>(start_page_addr) -
                                  static_cast<char *>(ctxt->sBase.pData)) /
                                 ctxt->sBase.nPageSize);

            if (iPage == ctxt->iLastPage)
            {
                // If two threads try to access the same page concurrently,
                // it is possible that we are asked to map the page again
                // even though it is already mapped. However, if the number
                // of successive retries is too high, it is certainly a sign
                // that something else is happening, such as trying to
                // write-access a read-only page. 100 is a bit of a magic
                // number. Rouault believes it must be at least the number
                // of concurrent threads. 100 seems to be really safe!
                ctxt->nRetry++;
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                fprintfstderr("retry on page %d : %d\n", iPage, ctxt->nRetry);
#endif
                if (ctxt->nRetry >= 100)
                {
                    CPLError(CE_Failure, CPLE_AppDefined,
                             "CPLVirtualMemManagerThread: trying to "
                             "write into read-only mapping");
                    nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                                      MAPPING_NOT_FOUND, 4);
                    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
                    break;
                }
                else if (msg.opType != OP_LOAD &&
                         ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
                         !TEST_BIT(ctxt->pabitRWMappedPages, iPage))
                {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                    fprintfstderr("switching page %d to write mode\n", iPage);
#endif
                    SET_BIT(ctxt->pabitRWMappedPages, iPage);
                    const int nRet =
                        mprotect(start_page_addr, ctxt->sBase.nPageSize,
                                 PROT_READ | PROT_WRITE);
                    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
                }
            }
            else
            {
                ctxt->iLastPage = iPage;
                ctxt->nRetry = 0;

                if (TEST_BIT(ctxt->pabitMappedPages, iPage))
                {
                    if (msg.opType != OP_LOAD &&
                        ctxt->sBase.eAccessMode == VIRTUALMEM_READWRITE &&
                        !TEST_BIT(ctxt->pabitRWMappedPages, iPage))
                    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                        fprintfstderr("switching page %d to write mode\n",
                                      iPage);
#endif
                        SET_BIT(ctxt->pabitRWMappedPages, iPage);
                        const int nRet =
                            mprotect(start_page_addr, ctxt->sBase.nPageSize,
                                     PROT_READ | PROT_WRITE);
                        IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
                    }
                    else
                    {
#if defined DEBUG_VIRTUALMEM && defined DEBUG_VERBOSE
                        fprintfstderr("unexpected case for page %d\n", iPage);
#endif
                    }
                }
                else
                {
                    void *const pPageToFill =
                        CPLVirtualMemGetPageToFill(ctxt, start_page_addr);

                    size_t nToFill = ctxt->sBase.nPageSize;
                    if (start_page_addr + nToFill >=
                        static_cast<char *>(ctxt->sBase.pData) +
                            ctxt->sBase.nSize)
                    {
                        nToFill = static_cast<char *>(ctxt->sBase.pData) +
                                  ctxt->sBase.nSize - start_page_addr;
                    }

                    ctxt->pfnCachePage(reinterpret_cast<CPLVirtualMem *>(ctxt),
                                       start_page_addr - static_cast<char *>(
                                                             ctxt->sBase.pData),
                                       pPageToFill, nToFill,
                                       ctxt->sBase.pCbkUserData);

                    // Now remap this page to its target address and
                    // register it in the LRU.
                    CPLVirtualMemAddPage(ctxt, start_page_addr, pPageToFill,
                                         msg.opType, msg.hRequesterThread);
                }
            }

            // Notify the segfault handler that we have finished our job.
            nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                              MAPPING_FOUND, 4);
            IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
        }
        else
        {
            // Notify the segfault handler that we have finished our job
            // but that the fault didn't occur in a memory range that
            // is under our responsibility.
            CPLError(CE_Failure, CPLE_AppDefined,
                     "CPLVirtualMemManagerThread: no mapping found");
            nRetWrite = write(pVirtualMemManager->pipefd_from_thread[1],
                              MAPPING_NOT_FOUND, 4);
            IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == 4);
        }
    }
}
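
// Illustrative sketch (not part of the original source) of a user-supplied
// pfnCachePage callback as invoked above: it must fill pPageToFill with
// nToFill bytes corresponding to offset nOffset of the virtual region. A
// real callback would typically read from a dataset or file; this one just
// synthesizes a byte ramp.
static void ExampleCachePageCbk(CPLVirtualMem * /* ctxt */, size_t nOffset,
                                void *pPageToFill, size_t nToFill,
                                void * /* pUserData */)
{
    GByte *pabyDest = static_cast<GByte *>(pPageToFill);
    for (size_t i = 0; i < nToFill; i++)
        pabyDest[i] = static_cast<GByte>((nOffset + i) & 0xFF);
}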

/************************************************************************/
/*                       CPLVirtualMemManagerInit()                     */
/************************************************************************/

static bool CPLVirtualMemManagerInit()
{
    CPLMutexHolderD(&hVirtualMemManagerMutex);
    if (pVirtualMemManager != nullptr)
        return true;

    struct sigaction act;
    pVirtualMemManager = static_cast<CPLVirtualMemManager *>(
        VSI_MALLOC_VERBOSE(sizeof(CPLVirtualMemManager)));
    if (pVirtualMemManager == nullptr)
        return false;
    pVirtualMemManager->pasVirtualMem = nullptr;
    pVirtualMemManager->nVirtualMemCount = 0;
    int nRet = pipe(pVirtualMemManager->pipefd_to_thread);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    nRet = pipe(pVirtualMemManager->pipefd_from_thread);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
    nRet = pipe(pVirtualMemManager->pipefd_wait_thread);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);

    // Install our custom SIGSEGV handler.
    act.sa_sigaction = CPLVirtualMemManagerSIGSEGVHandler;
    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    nRet = sigaction(SIGSEGV, &act, &pVirtualMemManager->oldact);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);

    // Start the helper thread.
    pVirtualMemManager->hHelperThread =
        CPLCreateJoinableThread(CPLVirtualMemManagerThread, nullptr);
    if (pVirtualMemManager->hHelperThread == nullptr)
    {
        VSIFree(pVirtualMemManager);
        pVirtualMemManager = nullptr;
        return false;
    }
    return true;
}

/************************************************************************/
/*                      CPLVirtualMemManagerTerminate()                 */
/************************************************************************/

void CPLVirtualMemManagerTerminate(void)
{
    if (pVirtualMemManager == nullptr)
        return;

    CPLVirtualMemMsgToWorkerThread msg;
    msg.pFaultAddr = BYEBYE_ADDR;
    msg.opType = OP_UNKNOWN;
    memset(&msg.hRequesterThread, 0, sizeof(msg.hRequesterThread));

    // Wait for the helper thread to be ready.
    char wait_ready;
    const ssize_t nRetRead =
        read(pVirtualMemManager->pipefd_wait_thread[0], &wait_ready, 1);
    IGNORE_OR_ASSERT_IN_DEBUG(nRetRead == 1);

    // Ask it to terminate.
    const ssize_t nRetWrite =
        write(pVirtualMemManager->pipefd_to_thread[1], &msg, sizeof(msg));
    IGNORE_OR_ASSERT_IN_DEBUG(nRetWrite == sizeof(msg));

    // Wait for its termination.
    CPLJoinThread(pVirtualMemManager->hHelperThread);

    // Clean up everything.
    while (pVirtualMemManager->nVirtualMemCount > 0)
        CPLVirtualMemFree(reinterpret_cast<CPLVirtualMem *>(
            pVirtualMemManager
                ->pasVirtualMem[pVirtualMemManager->nVirtualMemCount - 1]));
    CPLFree(pVirtualMemManager->pasVirtualMem);

    close(pVirtualMemManager->pipefd_to_thread[0]);
    close(pVirtualMemManager->pipefd_to_thread[1]);
    close(pVirtualMemManager->pipefd_from_thread[0]);
    close(pVirtualMemManager->pipefd_from_thread[1]);
    close(pVirtualMemManager->pipefd_wait_thread[0]);
    close(pVirtualMemManager->pipefd_wait_thread[1]);

    // Restore previous handler.
    sigaction(SIGSEGV, &pVirtualMemManager->oldact, nullptr);

    CPLFree(pVirtualMemManager);
    pVirtualMemManager = nullptr;

    CPLDestroyMutex(hVirtualMemManagerMutex);
    hVirtualMemManagerMutex = nullptr;
}

#else  // HAVE_VIRTUAL_MEM_VMA

CPLVirtualMem *CPLVirtualMemNew(
    size_t /* nSize */, size_t /* nCacheSize */, size_t /* nPageSizeHint */,
    int /* bSingleThreadUsage */, CPLVirtualMemAccessMode /* eAccessMode */,
    CPLVirtualMemCachePageCbk /* pfnCachePage */,
    CPLVirtualMemUnCachePageCbk /* pfnUnCachePage */,
    CPLVirtualMemFreeUserData /* pfnFreeUserData */, void * /* pCbkUserData */)
{
    CPLError(CE_Failure, CPLE_NotSupported,
             "CPLVirtualMemNew() unsupported on "
             "this operating system / configuration");
    return nullptr;
}

void CPLVirtualMemDeclareThread(CPLVirtualMem * /* ctxt */)
{
}

void CPLVirtualMemUnDeclareThread(CPLVirtualMem * /* ctxt */)
{
}

void CPLVirtualMemPin(CPLVirtualMem * /* ctxt */, void * /* pAddr */,
                      size_t /* nSize */, int /* bWriteOp */)
{
}

void CPLVirtualMemManagerTerminate(void)
{
}

#endif  // HAVE_VIRTUAL_MEM_VMA

#ifdef HAVE_MMAP

/************************************************************************/
/*                     CPLVirtualMemFreeFileMemoryMapped()              */
/************************************************************************/

static void CPLVirtualMemFreeFileMemoryMapped(CPLVirtualMem *ctxt)
{
    const size_t nMappingSize = ctxt->nSize +
                                static_cast<GByte *>(ctxt->pData) -
                                static_cast<GByte *>(ctxt->pDataToFree);
    const int nRet = munmap(ctxt->pDataToFree, nMappingSize);
    IGNORE_OR_ASSERT_IN_DEBUG(nRet == 0);
}

/************************************************************************/
/*                       CPLVirtualMemFileMapNew()                      */
/************************************************************************/

CPLVirtualMem *CPLVirtualMemFileMapNew(
    VSILFILE *fp, vsi_l_offset nOffset, vsi_l_offset nLength,
    CPLVirtualMemAccessMode eAccessMode,
    CPLVirtualMemFreeUserData pfnFreeUserData, void *pCbkUserData)
{
#if SIZEOF_VOIDP == 4
    if (nLength != static_cast<size_t>(nLength))
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "nLength = " CPL_FRMT_GUIB
                 " incompatible with 32 bit architecture",
                 nLength);
        return nullptr;
    }
    if (nOffset + CPLGetPageSize() !=
        static_cast<vsi_l_offset>(
            static_cast<off_t>(nOffset + CPLGetPageSize())))
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "nOffset = " CPL_FRMT_GUIB
                 " incompatible with 32 bit architecture",
                 nOffset);
        return nullptr;
    }
#endif

    int fd = static_cast<int>(
        reinterpret_cast<GUIntptr_t>(VSIFGetNativeFileDescriptorL(fp)));
    if (fd == 0)
    {
        CPLError(CE_Failure, CPLE_AppDefined,
                 "Cannot operate on a virtual file");
        return nullptr;
    }

    const off_t nAlignedOffset =
        static_cast<off_t>((nOffset / CPLGetPageSize()) * CPLGetPageSize());
    size_t nAlignment = static_cast<size_t>(nOffset - nAlignedOffset);
    size_t nMappingSize = static_cast<size_t>(nLength + nAlignment);

    // Need to ensure that the requested extent fits into the file size,
    // otherwise SIGBUS errors will occur when using the mapping.
    vsi_l_offset nCurPos = VSIFTellL(fp);
    if (VSIFSeekL(fp, 0, SEEK_END) != 0)
        return nullptr;
    vsi_l_offset nFileSize = VSIFTellL(fp);
    if (nFileSize < nOffset + nLength)
    {
        if (eAccessMode != VIRTUALMEM_READWRITE)
        {
            CPLError(CE_Failure, CPLE_AppDefined,
                     "Trying to map an extent outside of the file");
            CPL_IGNORE_RET_VAL(VSIFSeekL(fp, nCurPos, SEEK_SET));
            return nullptr;
        }
        else
        {
            char ch = 0;
            if (VSIFSeekL(fp, nOffset + nLength - 1, SEEK_SET) != 0 ||
                VSIFWriteL(&ch, 1, 1, fp) != 1)
            {
                CPLError(CE_Failure, CPLE_AppDefined,
                         "Cannot extend file to mapping size");
                CPL_IGNORE_RET_VAL(VSIFSeekL(fp, nCurPos, SEEK_SET));
                return nullptr;
            }
        }
    }
    if (VSIFSeekL(fp, nCurPos, SEEK_SET) != 0)
        return nullptr;

    CPLVirtualMem *ctxt = static_cast<CPLVirtualMem *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMem)));
    if (ctxt == nullptr)
        return nullptr;

    void *addr =
        mmap(nullptr, nMappingSize,
             eAccessMode == VIRTUALMEM_READWRITE ? PROT_READ | PROT_WRITE
                                                 : PROT_READ,
             MAP_SHARED, fd, nAlignedOffset);
    if (addr == MAP_FAILED)
    {
        int myerrno = errno;
        CPLError(CE_Failure, CPLE_AppDefined, "mmap() failed : %s",
                 strerror(myerrno));
        VSIFree(ctxt);
        // cppcheck thinks we are leaking addr.
        // cppcheck-suppress memleak
        return nullptr;
    }

    ctxt->eType = VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED;
    ctxt->nRefCount = 1;
    ctxt->eAccessMode = eAccessMode;
    ctxt->pData = static_cast<GByte *>(addr) + nAlignment;
    ctxt->pDataToFree = addr;
    ctxt->nSize = static_cast<size_t>(nLength);
    ctxt->nPageSize = CPLGetPageSize();
    ctxt->bSingleThreadUsage = false;
    ctxt->pfnFreeUserData = pfnFreeUserData;
    ctxt->pCbkUserData = pCbkUserData;

    return ctxt;
}
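
// Illustrative usage sketch (not part of the original source): map the
// first page of a real (non-virtual) file read-only; the file name is an
// assumption and the file must be at least 4096 bytes long.
static void ExampleFileMapUsage()
{
    VSILFILE *fp = VSIFOpenL("/tmp/example.bin", "rb");
    if (fp == nullptr)
        return;
    CPLVirtualMem *psMem = CPLVirtualMemFileMapNew(
        fp, 0, 4096, VIRTUALMEM_READONLY, nullptr, nullptr);
    if (psMem != nullptr)
    {
        const GByte *pabyData =
            static_cast<const GByte *>(CPLVirtualMemGetAddr(psMem));
        // Reads of pabyData[0..4095] now go through the mmap'ed view.
        CPLDebug("EXAMPLE", "first byte: %d", pabyData[0]);
        CPLVirtualMemFree(psMem);
    }
    CPL_IGNORE_RET_VAL(VSIFCloseL(fp));
}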

#else  // HAVE_MMAP

CPLVirtualMem *CPLVirtualMemFileMapNew(
    VSILFILE * /* fp */, vsi_l_offset /* nOffset */, vsi_l_offset /* nLength */,
    CPLVirtualMemAccessMode /* eAccessMode */,
    CPLVirtualMemFreeUserData /* pfnFreeUserData */, void * /* pCbkUserData */)
{
    CPLError(CE_Failure, CPLE_NotSupported,
             "CPLVirtualMemFileMapNew() unsupported on this "
             "operating system / configuration");
    return nullptr;
}

#endif  // HAVE_MMAP

/************************************************************************/
/*                         CPLGetPageSize()                             */
/************************************************************************/

size_t CPLGetPageSize(void)
{
#if defined(HAVE_MMAP) || defined(HAVE_VIRTUAL_MEM_VMA)
    return static_cast<size_t>(std::max(0L, sysconf(_SC_PAGESIZE)));
#else
    return 0;
#endif
}

/************************************************************************/
/*                   CPLIsVirtualMemFileMapAvailable()                  */
/************************************************************************/

int CPLIsVirtualMemFileMapAvailable(void)
{
#ifdef HAVE_MMAP
    return TRUE;
#else
    return FALSE;
#endif
}

/************************************************************************/
/*                        CPLVirtualMemFree()                           */
/************************************************************************/

void CPLVirtualMemFree(CPLVirtualMem *ctxt)
{
    if (ctxt == nullptr || --(ctxt->nRefCount) > 0)
        return;

    if (ctxt->pVMemBase != nullptr)
    {
        CPLVirtualMemFree(ctxt->pVMemBase);
        if (ctxt->pfnFreeUserData != nullptr)
            ctxt->pfnFreeUserData(ctxt->pCbkUserData);
        CPLFree(ctxt);
        return;
    }

#ifdef HAVE_MMAP
    if (ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED)
        CPLVirtualMemFreeFileMemoryMapped(ctxt);
#endif
#ifdef HAVE_VIRTUAL_MEM_VMA
    if (ctxt->eType == VIRTUAL_MEM_TYPE_VMA)
        CPLVirtualMemFreeFileMemoryMapped(
            reinterpret_cast<CPLVirtualMemVMA *>(ctxt));
#endif

    if (ctxt->pfnFreeUserData != nullptr)
        ctxt->pfnFreeUserData(ctxt->pCbkUserData);
    CPLFree(ctxt);
}

/************************************************************************/
/*                      CPLVirtualMemGetAddr()                          */
/************************************************************************/

void *CPLVirtualMemGetAddr(CPLVirtualMem *ctxt)
{
    return ctxt->pData;
}

/************************************************************************/
/*                     CPLVirtualMemIsFileMapping()                     */
/************************************************************************/

int CPLVirtualMemIsFileMapping(CPLVirtualMem *ctxt)
{
    return ctxt->eType == VIRTUAL_MEM_TYPE_FILE_MEMORY_MAPPED;
}

/************************************************************************/
/*                     CPLVirtualMemGetAccessMode()                     */
/************************************************************************/

CPLVirtualMemAccessMode CPLVirtualMemGetAccessMode(CPLVirtualMem *ctxt)
{
    return ctxt->eAccessMode;
}

/************************************************************************/
/*                      CPLVirtualMemGetPageSize()                      */
/************************************************************************/

size_t CPLVirtualMemGetPageSize(CPLVirtualMem *ctxt)
{
    return ctxt->nPageSize;
}

/************************************************************************/
/*                        CPLVirtualMemGetSize()                        */
/************************************************************************/

size_t CPLVirtualMemGetSize(CPLVirtualMem *ctxt)
{
    return ctxt->nSize;
}

/************************************************************************/
/*                   CPLVirtualMemIsAccessThreadSafe()                  */
/************************************************************************/

int CPLVirtualMemIsAccessThreadSafe(CPLVirtualMem *ctxt)
{
    return !ctxt->bSingleThreadUsage;
}

/************************************************************************/
/*                       CPLVirtualMemDerivedNew()                      */
/************************************************************************/

CPLVirtualMem *CPLVirtualMemDerivedNew(
    CPLVirtualMem *pVMemBase, vsi_l_offset nOffset, vsi_l_offset nSize,
    CPLVirtualMemFreeUserData pfnFreeUserData, void *pCbkUserData)
{
    if (nOffset + nSize > pVMemBase->nSize)
        return nullptr;

    CPLVirtualMem *ctxt = static_cast<CPLVirtualMem *>(
        VSI_CALLOC_VERBOSE(1, sizeof(CPLVirtualMem)));
    if (ctxt == nullptr)
        return nullptr;

    ctxt->eType = pVMemBase->eType;
    ctxt->nRefCount = 1;
    ctxt->pVMemBase = pVMemBase;
    pVMemBase->nRefCount++;
    ctxt->eAccessMode = pVMemBase->eAccessMode;
    ctxt->pData = static_cast<GByte *>(pVMemBase->pData) + nOffset;
    ctxt->pDataToFree = nullptr;
    ctxt->nSize = static_cast<size_t>(nSize);
    ctxt->nPageSize = pVMemBase->nPageSize;
    ctxt->bSingleThreadUsage = CPL_TO_BOOL(pVMemBase->bSingleThreadUsage);
    ctxt->pfnFreeUserData = pfnFreeUserData;
    ctxt->pCbkUserData = pCbkUserData;

    return ctxt;
}
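
// Illustrative usage sketch (not part of the original source): derive a
// window into an existing mapping. The derived view shares the parent's
// pages (the parent's nRefCount is incremented above) and must itself be
// released with CPLVirtualMemFree(), which drops the parent reference.
static void ExampleDerivedViewUsage(CPLVirtualMem *psMem)
{
    // psSub covers bytes [1024, 3072) of psMem's region.
    CPLVirtualMem *psSub =
        CPLVirtualMemDerivedNew(psMem, 1024, 2048, nullptr, nullptr);
    if (psSub != nullptr)
    {
        GByte *pabyWindow = static_cast<GByte *>(CPLVirtualMemGetAddr(psSub));
        CPLDebug("EXAMPLE", "window starts at %p", pabyWindow);
        CPLVirtualMemFree(psSub);
    }
}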