/src/libgcrypt/src/secmem.c
Line | Count | Source |
1 | | /* secmem.c - memory allocation from a secure heap |
2 | | * Copyright (C) 1998, 1999, 2000, 2001, 2002, |
3 | | * 2003, 2007 Free Software Foundation, Inc. |
4 | | * Copyright (C) 2013, 2016 g10 Code GmbH |
5 | | * |
6 | | * This file is part of Libgcrypt. |
7 | | * |
8 | | * Libgcrypt is free software; you can redistribute it and/or modify |
9 | | * it under the terms of the GNU Lesser General Public License as |
10 | | * published by the Free Software Foundation; either version 2.1 of |
11 | | * the License, or (at your option) any later version. |
12 | | * |
13 | | * Libgcrypt is distributed in the hope that it will be useful, |
14 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | | * GNU Lesser General Public License for more details. |
17 | | * |
18 | | * You should have received a copy of the GNU Lesser General Public |
19 | | * License along with this program; if not, see <http://www.gnu.org/licenses/>. |
20 | | */ |
21 | | |
22 | | #include <config.h> |
23 | | #include <stdio.h> |
24 | | #include <stdlib.h> |
25 | | #include <string.h> |
26 | | #include <errno.h> |
27 | | #include <stdarg.h> |
28 | | #include <unistd.h> |
29 | | #include <stddef.h> |
30 | | |
31 | | #if defined(HAVE_MLOCK) || defined(HAVE_MMAP) |
32 | | #include <sys/mman.h> |
33 | | #include <sys/types.h> |
34 | | #include <fcntl.h> |
35 | | #ifdef USE_CAPABILITIES |
36 | | #include <sys/capability.h> |
37 | | #endif |
38 | | #endif |
39 | | |
40 | | #include "g10lib.h" |
41 | | #include "secmem.h" |
42 | | |
43 | | #if defined (MAP_ANON) && ! defined (MAP_ANONYMOUS) |
44 | | #define MAP_ANONYMOUS MAP_ANON |
45 | | #endif |
46 | | |
47 | 2 | #define MINIMUM_POOL_SIZE 16384 |
48 | 2 | #define STANDARD_POOL_SIZE 32768 |
49 | 2 | #define DEFAULT_PAGE_SIZE 4096 |
50 | | |
51 | | typedef struct memblock |
52 | | { |
53 | | unsigned size; /* Size of the memory available to the |
54 | | user. */ |
55 | | int flags; /* See below. */ |
56 | | PROPERLY_ALIGNED_TYPE aligned; |
57 | | } memblock_t; |
58 | | |
59 | | /* This flag specifies that the memory block is in use. */ |
60 | 310k | #define MB_FLAG_ACTIVE (1 << 0) |
61 | | |
62 | | /* An object describing a memory pool. */ |
63 | | typedef struct pooldesc_s |
64 | | { |
65 | | /* A link to the next pool. This is used to connect the overflow |
66 | | * pools. */ |
67 | | struct pooldesc_s * volatile next; |
68 | | |
69 | | /* A memory buffer used as allocation pool. */ |
70 | | void *mem; |
71 | | |
72 | | /* The allocated size of MEM. */ |
73 | | size_t size; |
74 | | |
75 | | /* Flag indicating that this memory pool is ready for use. May be |
76 | | * checked in an atexit function. */ |
77 | | volatile int okay; |
78 | | |
79 | | /* Flag indicating whether MEM is mmapped. */ |
80 | | volatile int is_mmapped; |
81 | | |
82 | | /* The number of allocated bytes and the number of used blocks in |
83 | | * this pool. */ |
84 | | unsigned int cur_alloced, cur_blocks; |
85 | | } pooldesc_t; |
86 | | |
87 | | |
88 | | /* The pool of secure memory. This is the head of a linked list with |
89 | | * the first element being the standard mlock-ed pool and the |
90 | | * following elements being the overflow pools. */ |
91 | | static pooldesc_t mainpool; |
92 | | |
93 | | |
94 | | /* A couple of flags with some being set early. */ |
95 | | static int disable_secmem; |
96 | | static int show_warning; |
97 | | static int not_locked; |
98 | | static int no_warning; |
99 | | static int suspend_warning; |
100 | | static int no_mlock; |
101 | | static int no_priv_drop; |
102 | | static unsigned int auto_expand; |
103 | | |
104 | | |
105 | | /* Lock protecting accesses to the memory pools. */ |
106 | | GPGRT_LOCK_DEFINE (secmem_lock); |
107 | | |
108 | | /* Convenient macros. */ |
109 | 124k | #define SECMEM_LOCK gpgrt_lock_lock (&secmem_lock) |
110 | 124k | #define SECMEM_UNLOCK gpgrt_lock_unlock (&secmem_lock) |
111 | | |
112 | | /* The size of the memblock structure; this does not include the |
113 | | memory that is available to the user. */ |
114 | | #define BLOCK_HEAD_SIZE \ |
115 | 62.1k | offsetof (memblock_t, aligned) |
116 | | |
117 | | /* Convert an address into the according memory block structure. */ |
118 | | #define ADDR_TO_BLOCK(addr) \ |
119 | 62.1k | (memblock_t *) (void *) ((char *) addr - BLOCK_HEAD_SIZE) |
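The pointer returned to the user is the address of the `aligned` member, so subtracting its offset recovers the memblock_t header; that is all BLOCK_HEAD_SIZE and ADDR_TO_BLOCK do. A minimal standalone sketch of the arithmetic, with hypothetical stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    typedef union { long l; double d; void *p; } aligned_u;  /* stand-in for PROPERLY_ALIGNED_TYPE */
    typedef struct { unsigned size; int flags; aligned_u aligned; } block_t;

    int main (void)
    {
      block_t b = { 32, 1, { 0 } };
      void *user = &b.aligned;      /* what the allocator hands out */
      block_t *hdr = (block_t *) (void *) ((char *) user - offsetof (block_t, aligned));
      printf ("size=%u flags=%d\n", hdr->size, hdr->flags);  /* prints: size=32 flags=1 */
      return 0;
    }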
120 | | |
121 | | /* Prototypes. */ |
122 | | static void secmem_dump_stats_internal (int extended); |
123 | | |
124 | | |
125 | | /* |
126 | | * Functions |
127 | | */ |
128 | | |
129 | | /* Memory barrier */ |
130 | | static inline void |
131 | | memory_barrier(void) |
132 | 0 | { |
133 | 0 | #ifdef HAVE_SYNC_SYNCHRONIZE |
134 | 0 | #ifdef HAVE_GCC_ASM_VOLATILE_MEMORY |
135 | 0 | asm volatile ("":::"memory"); |
136 | 0 | #endif |
137 | | /* Use GCC / clang intrinsic for memory barrier. */ |
138 | 0 | __sync_synchronize(); |
139 | | #else |
140 | | /* Slow portable alternative: implement the memory barrier using a mutex. */ |
141 | | gpgrt_lock_t tmp; |
142 | | memset (&tmp, 0, sizeof(tmp)); |
143 | | gpgrt_lock_init (&tmp); |
144 | | gpgrt_lock_lock (&tmp); |
145 | | gpgrt_lock_unlock (&tmp); |
146 | | gpgrt_lock_destroy (&tmp); |
147 | | #endif |
148 | 0 | } |
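The barrier exists for one lockless publish in _gcry_secmem_malloc_internal below: an overflow pool must be fully initialized before it becomes reachable. The pattern, sketched with the names from this file:

    pool->next = mainpool.next;   /* 1: finish initializing the new node   */
    memory_barrier ();            /* 2: order those stores before publish  */
    mainpool.next = pool;         /* 3: publish; lockless readers in       */
                                  /*    _gcry_private_is_secure now see a  */
                                  /*    complete pool                      */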
149 | | |
150 | | |
151 | | /* Check whether P points into POOL. */ |
152 | | static inline int |
153 | | ptr_into_pool_p (pooldesc_t *pool, const void *p) |
154 | 45.1M | { |
155 | | /* We need to convert pointers to addresses. This is required by |
156 | | C-99 6.5.8 to avoid undefined behaviour. See also |
157 | | http://lists.gnupg.org/pipermail/gcrypt-devel/2007-February/001102.html |
158 | | */ |
159 | 45.1M | uintptr_t p_addr = (uintptr_t)p; |
160 | 45.1M | uintptr_t pool_addr = (uintptr_t)pool->mem; |
161 | | |
162 | 45.1M | return p_addr >= pool_addr && p_addr < pool_addr + pool->size; |
163 | 45.1M | } |
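Comparing pointers into different objects with relational operators is undefined behaviour (C99 6.5.8), hence the detour through integer addresses. The same technique as a standalone helper:

    #include <stddef.h>
    #include <stdint.h>

    /* Return non-zero if P lies within the SIZE bytes starting at BASE. */
    static int
    in_range (const void *base, size_t size, const void *p)
    {
      uintptr_t a = (uintptr_t) p;
      uintptr_t b = (uintptr_t) base;
      return a >= b && a < b + size;
    }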
164 | | |
165 | | /* Update the stats. */ |
166 | | static void |
167 | | stats_update (pooldesc_t *pool, size_t add, size_t sub) |
168 | 124k | { |
169 | 124k | if (add) |
170 | 62.1k | { |
171 | 62.1k | pool->cur_alloced += add; |
172 | 62.1k | pool->cur_blocks++; |
173 | 62.1k | } |
174 | 124k | if (sub) |
175 | 62.1k | { |
176 | 62.1k | pool->cur_alloced -= sub; |
177 | 62.1k | pool->cur_blocks--; |
178 | 62.1k | } |
179 | 124k | } |
180 | | |
181 | | /* Return the block following MB or NULL, if MB is the last block. */ |
182 | | static memblock_t * |
183 | | mb_get_next (pooldesc_t *pool, memblock_t *mb) |
184 | 186k | { |
185 | 186k | memblock_t *mb_next; |
186 | | |
187 | 186k | mb_next = (memblock_t *) (void *) ((char *) mb + BLOCK_HEAD_SIZE + mb->size); |
188 | | |
189 | 186k | if (! ptr_into_pool_p (pool, mb_next)) |
190 | 62.1k | mb_next = NULL; |
191 | | |
192 | 186k | return mb_next; |
193 | 186k | } |
194 | | |
195 | | /* Return the block preceding MB or NULL, if MB is the first |
196 | | block. */ |
197 | | static memblock_t * |
198 | | mb_get_prev (pooldesc_t *pool, memblock_t *mb) |
199 | 124k | { |
200 | 124k | memblock_t *mb_prev, *mb_next; |
201 | | |
202 | 124k | if (mb == pool->mem) |
203 | 62.1k | mb_prev = NULL; |
204 | 62.1k | else |
205 | 62.1k | { |
206 | 62.1k | mb_prev = (memblock_t *) pool->mem; |
207 | 62.1k | while (1) |
208 | 62.1k | { |
209 | 62.1k | mb_next = mb_get_next (pool, mb_prev); |
210 | 62.1k | if (mb_next == mb) |
211 | 62.1k | break; |
212 | 0 | else |
213 | 0 | mb_prev = mb_next; |
214 | 62.1k | } |
215 | 62.1k | } |
216 | | |
217 | 124k | return mb_prev; |
218 | 124k | } |
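Blocks carry no back pointer, so mb_get_prev must rescan from the pool start; the pool is a single implicit list traversed by address arithmetic. A sketch of a full walk using the definitions in this file (essentially what secmem_dump_stats_internal does in its extended mode):

    memblock_t *mb;

    for (mb = (memblock_t *) pool->mem;
         ptr_into_pool_p (pool, mb);
         mb = mb_get_next (pool, mb))
      log_info ("block %p: %u bytes, %s\n", (void *) mb, mb->size,
                (mb->flags & MB_FLAG_ACTIVE) ? "active" : "free");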
219 | | |
220 | | /* If the preceding block of MB and/or the following block of MB |
221 | | exist and are not active, merge them to form a bigger block. */ |
222 | | static void |
223 | | mb_merge (pooldesc_t *pool, memblock_t *mb) |
224 | 124k | { |
225 | 124k | memblock_t *mb_prev, *mb_next; |
226 | | |
227 | 124k | mb_prev = mb_get_prev (pool, mb); |
228 | 124k | mb_next = mb_get_next (pool, mb); |
229 | | |
230 | 124k | if (mb_prev && (! (mb_prev->flags & MB_FLAG_ACTIVE))) |
231 | 0 | { |
232 | 0 | mb_prev->size += BLOCK_HEAD_SIZE + mb->size; |
233 | 0 | mb = mb_prev; |
234 | 0 | } |
235 | 124k | if (mb_next && (! (mb_next->flags & MB_FLAG_ACTIVE))) |
236 | 62.1k | mb->size += BLOCK_HEAD_SIZE + mb_next->size; |
237 | 124k | } |
238 | | |
239 | | /* Return a new block, which can hold SIZE bytes. */ |
240 | | static memblock_t * |
241 | | mb_get_new (pooldesc_t *pool, memblock_t *block, size_t size) |
242 | 62.1k | { |
243 | 62.1k | memblock_t *mb, *mb_split; |
244 | | |
245 | 62.1k | for (mb = block; ptr_into_pool_p (pool, mb); mb = mb_get_next (pool, mb)) |
246 | 62.1k | if (! (mb->flags & MB_FLAG_ACTIVE) && mb->size >= size) |
247 | 62.1k | { |
248 | | /* Found a free block. */ |
249 | 62.1k | mb->flags |= MB_FLAG_ACTIVE; |
250 | | |
251 | 62.1k | if (mb->size - size > BLOCK_HEAD_SIZE) |
252 | 62.1k | { |
253 | | /* Split block. */ |
254 | | |
255 | 62.1k | mb_split = (memblock_t *) (void *) (((char *) mb) + BLOCK_HEAD_SIZE |
256 | 62.1k | + size); |
257 | 62.1k | mb_split->size = mb->size - size - BLOCK_HEAD_SIZE; |
258 | 62.1k | mb_split->flags = 0; |
259 | | |
260 | 62.1k | mb->size = size; |
261 | | |
262 | 62.1k | mb_merge (pool, mb_split); |
263 | | |
264 | 62.1k | } |
265 | | |
266 | 62.1k | break; |
267 | 62.1k | } |
268 | | |
269 | 62.1k | if (! ptr_into_pool_p (pool, mb)) |
270 | 0 | { |
271 | 0 | gpg_err_set_errno (ENOMEM); |
272 | 0 | mb = NULL; |
273 | 0 | } |
274 | | |
275 | 62.1k | return mb; |
276 | 62.1k | } |
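A worked example of the first-fit split, assuming BLOCK_HEAD_SIZE is 16 on this platform (the real value is offsetof (memblock_t, aligned) and depends on alignment): a request for 64 bytes that finds a free 512-byte block marks it active; since 512 - 64 = 448 exceeds BLOCK_HEAD_SIZE, the block is split, mb->size drops to 64, and a new free block of 512 - 64 - 16 = 432 bytes is created 16 + 64 bytes past mb, which mb_merge then coalesces with a free successor if one exists.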
277 | | |
278 | | /* Print a warning message. */ |
279 | | static void |
280 | | print_warn (void) |
281 | 0 | { |
282 | 0 | if (!no_warning) |
283 | 0 | log_info (_("Warning: using insecure memory!\n")); |
284 | 0 | } |
285 | | |
286 | | |
287 | | /* Lock the memory pages of pool P of size N into core and drop |
288 | | * privileges. */ |
289 | | static void |
290 | | lock_pool_pages (void *p, size_t n) |
291 | 2 | { |
292 | 2 | #if defined(HAVE_MLOCK) |
293 | 2 | uid_t uid; |
294 | 2 | int err; |
295 | | |
296 | 2 | uid = getuid (); |
297 | | |
298 | | #ifdef HAVE_BROKEN_MLOCK |
299 | | /* Under HP/UX mlock segfaults if called by non-root. Note that we |
300 | | have not yet checked whether mlock really works under AIX, where we |
301 | | also detected a broken mlock. Note further that using plock () |
302 | | is not a good idea under AIX. */ |
303 | | if (uid) |
304 | | { |
305 | | errno = EPERM; |
306 | | err = -1; |
307 | | } |
308 | | else |
309 | | { |
310 | | err = no_mlock? 0 : mlock (p, n); |
311 | | } |
312 | | #else /* !HAVE_BROKEN_MLOCK */ |
313 | 2 | err = no_mlock? 0 : mlock (p, n); |
314 | 2 | #endif /* !HAVE_BROKEN_MLOCK */ |
315 | | |
316 | | /* Test whether we are running setuid(0). */ |
317 | 2 | if (uid && ! geteuid ()) |
318 | 0 | { |
319 | | /* Yes, we are. */ |
320 | 0 | if (!no_priv_drop) |
321 | 0 | { |
322 | | /* Check that we really dropped the privs. |
323 | | * Note: setuid(0) should always fail */ |
324 | 0 | if (setuid (uid) || getuid () != geteuid () || !setuid (0)) |
325 | 0 | log_fatal ("failed to reset uid: %s\n", strerror (errno)); |
326 | 0 | } |
327 | 0 | } |
328 | | |
329 | 2 | if (err) |
330 | 0 | { |
331 | 0 | if (errno != EPERM |
332 | 0 | #ifdef EAGAIN /* BSD and also Linux may return this. */ |
333 | 0 | && errno != EAGAIN |
334 | 0 | #endif |
335 | 0 | #ifdef ENOSYS /* Some SCOs return this (function not implemented). */ |
336 | 0 | && errno != ENOSYS |
337 | 0 | #endif |
338 | 0 | #ifdef ENOMEM /* Linux might return this. */ |
339 | 0 | && errno != ENOMEM |
340 | 0 | #endif |
341 | 0 | ) |
342 | 0 | log_error ("can't lock memory: %s\n", strerror (errno)); |
343 | 0 | show_warning = 1; |
344 | 0 | not_locked = 1; |
345 | 0 | } |
346 | | |
347 | | #elif defined ( __QNX__ ) |
348 | | /* QNX does not page at all, so the whole secure memory stuff does |
349 | | * not make much sense. However it is still of use because it |
350 | | * wipes out the memory on a free(). |
351 | | * Therefore it is sufficient to suppress the warning. */ |
352 | | (void)p; |
353 | | (void)n; |
354 | | #elif defined (HAVE_DOSISH_SYSTEM) || defined (__CYGWIN__) |
355 | | /* It does not make sense to print such a warning, given the fact that |
356 | | * this whole Windows !@#$% and their user base are inherently insecure. */ |
357 | | (void)p; |
358 | | (void)n; |
359 | | #else |
360 | | (void)p; |
361 | | (void)n; |
362 | | if (!no_mlock) |
363 | | log_info ("Please note that you don't have secure memory on this system\n"); |
364 | | #endif |
365 | 2 | } |
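The core technique here is mlock(2): pinning the pool's pages so key material can never be written to swap. A minimal standalone sketch with a hypothetical buffer (real deployments must also mind the RLIMIT_MEMLOCK quota, which is why EPERM, EAGAIN and ENOMEM are tolerated above):

    #include <sys/mman.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main (void)
    {
      static char secret[4096];

      if (mlock (secret, sizeof secret))
        fprintf (stderr, "mlock failed: %s\n", strerror (errno));
      /* ... keep key material in SECRET ... */
      munlock (secret, sizeof secret);
      return 0;
    }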
366 | | |
367 | | /* Initialize POOL. */ |
368 | | static void |
369 | | init_pool (pooldesc_t *pool, size_t n) |
370 | 2 | { |
371 | 2 | memblock_t *mb; |
372 | | |
373 | 2 | pool->size = n; |
374 | | |
375 | 2 | if (disable_secmem) |
376 | 0 | log_bug ("secure memory is disabled"); |
377 | | |
378 | | |
379 | 2 | #if HAVE_MMAP |
380 | 2 | { |
381 | 2 | size_t pgsize; |
382 | 2 | long int pgsize_val; |
383 | | |
384 | 2 | # if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE) |
385 | 2 | pgsize_val = sysconf (_SC_PAGESIZE); |
386 | | # elif defined(HAVE_GETPAGESIZE) |
387 | | pgsize_val = getpagesize (); |
388 | | # else |
389 | | pgsize_val = -1; |
390 | | # endif |
391 | 2 | pgsize = (pgsize_val > 0)? pgsize_val:DEFAULT_PAGE_SIZE; |
392 | | |
393 | 2 | pool->size = (pool->size + pgsize - 1) & ~(pgsize - 1); |
394 | 2 | # ifdef MAP_ANONYMOUS |
395 | 2 | pool->mem = mmap (0, pool->size, PROT_READ | PROT_WRITE, |
396 | 2 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
397 | | # else /* map /dev/zero instead */ |
398 | | { |
399 | | int fd; |
400 | | |
401 | | fd = open ("/dev/zero", O_RDWR); |
402 | | if (fd == -1) |
403 | | { |
404 | | log_error ("can't open /dev/zero: %s\n", strerror (errno)); |
405 | | pool->mem = (void *) -1; |
406 | | } |
407 | | else |
408 | | { |
409 | | pool->mem = mmap (0, pool->size, |
410 | | (PROT_READ | PROT_WRITE), MAP_PRIVATE, fd, 0); |
411 | | close (fd); |
412 | | } |
413 | | } |
414 | | # endif |
415 | 2 | if (pool->mem == (void *) -1) |
416 | 0 | log_info ("can't mmap pool of %u bytes: %s - using malloc\n", |
417 | 0 | (unsigned) pool->size, strerror (errno)); |
418 | 2 | else |
419 | 2 | { |
420 | 2 | pool->is_mmapped = 1; |
421 | 2 | pool->okay = 1; |
422 | 2 | } |
423 | 2 | } |
424 | 2 | #endif /*HAVE_MMAP*/ |
425 | | |
426 | 2 | if (!pool->okay) |
427 | 0 | { |
428 | 0 | pool->mem = malloc (pool->size); |
429 | 0 | if (!pool->mem) |
430 | 0 | log_fatal ("can't allocate memory pool of %u bytes\n", |
431 | 0 | (unsigned) pool->size); |
432 | 0 | else |
433 | 0 | pool->okay = 1; |
434 | 0 | } |
435 | | |
436 | | /* Initialize first memory block. */ |
437 | 2 | mb = (memblock_t *) pool->mem; |
438 | 2 | mb->size = pool->size - BLOCK_HEAD_SIZE; |
439 | 2 | mb->flags = 0; |
440 | 2 | } |
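Two details are worth noting: (size + pgsize - 1) & ~(pgsize - 1) rounds the pool up to whole pages and relies on the page size being a power of two, and the mapping is anonymous, so no file backs it. A standalone sketch of the same setup (MAP_ANONYMOUS may require a feature-test macro on some systems, which is what the MAP_ANON fallback at the top of this file works around):

    #include <sys/mman.h>
    #include <stdio.h>
    #include <unistd.h>

    int main (void)
    {
      size_t pgsize = (size_t) sysconf (_SC_PAGESIZE);
      size_t size = (32768 + pgsize - 1) & ~(pgsize - 1);  /* round up to pages */
      void *mem = mmap (0, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      if (mem == MAP_FAILED)
        return 1;
      printf ("pool: %zu bytes at %p\n", size, mem);
      munmap (mem, size);
      return 0;
    }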
441 | | |
442 | | |
443 | | /* Enable overflow pool allocation in all cases. CHUNKSIZE is a hint |
444 | | * on how large to allocate overflow pools. */ |
445 | | void |
446 | | _gcry_secmem_set_auto_expand (unsigned int chunksize) |
447 | 0 | { |
448 | | /* Round up to a multiple of the STANDARD_POOL_SIZE. */ |
449 | 0 | chunksize = ((chunksize + (2*STANDARD_POOL_SIZE) - 1) |
450 | 0 | / STANDARD_POOL_SIZE ) * STANDARD_POOL_SIZE; |
451 | 0 | if (chunksize < STANDARD_POOL_SIZE) /* In case of overflow. */ |
452 | 0 | chunksize = STANDARD_POOL_SIZE; |
453 | |
454 | 0 | SECMEM_LOCK; |
455 | 0 | auto_expand = chunksize; |
456 | 0 | SECMEM_UNLOCK; |
457 | 0 | } |
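Note the bias in the round-up: adding 2*STANDARD_POOL_SIZE - 1 before dividing always yields at least one pool size more than a plain round-up would. With STANDARD_POOL_SIZE of 32768, chunksize = 1 gives (1 + 65535) / 32768 * 32768 = 65536, and chunksize = 40000 gives (40000 + 65535) / 32768 * 32768 = 98304.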
458 | | |
459 | | |
460 | | void |
461 | | _gcry_secmem_set_flags (unsigned flags) |
462 | 0 | { |
463 | 0 | int was_susp; |
464 | |
465 | 0 | SECMEM_LOCK; |
466 | |
467 | 0 | was_susp = suspend_warning; |
468 | 0 | no_warning = flags & GCRY_SECMEM_FLAG_NO_WARNING; |
469 | 0 | suspend_warning = flags & GCRY_SECMEM_FLAG_SUSPEND_WARNING; |
470 | 0 | no_mlock = flags & GCRY_SECMEM_FLAG_NO_MLOCK; |
471 | 0 | no_priv_drop = flags & GCRY_SECMEM_FLAG_NO_PRIV_DROP; |
472 | | |
473 | | /* and now issue the warning if it is no longer suspended */ |
474 | 0 | if (was_susp && !suspend_warning && show_warning) |
475 | 0 | { |
476 | 0 | show_warning = 0; |
477 | 0 | print_warn (); |
478 | 0 | } |
479 | |
480 | 0 | SECMEM_UNLOCK; |
481 | 0 | } |
482 | | |
483 | | unsigned int |
484 | | _gcry_secmem_get_flags (void) |
485 | 0 | { |
486 | 0 | unsigned flags; |
487 | |
488 | 0 | SECMEM_LOCK; |
489 | |
490 | 0 | flags = no_warning ? GCRY_SECMEM_FLAG_NO_WARNING : 0; |
491 | 0 | flags |= suspend_warning ? GCRY_SECMEM_FLAG_SUSPEND_WARNING : 0; |
492 | 0 | flags |= not_locked ? GCRY_SECMEM_FLAG_NOT_LOCKED : 0; |
493 | 0 | flags |= no_mlock ? GCRY_SECMEM_FLAG_NO_MLOCK : 0; |
494 | 0 | flags |= no_priv_drop ? GCRY_SECMEM_FLAG_NO_PRIV_DROP : 0; |
495 | |
496 | 0 | SECMEM_UNLOCK; |
497 | |
498 | 0 | return flags; |
499 | 0 | } |
500 | | |
501 | | |
502 | | /* This function initializes the main memory pool MAINPOOL. It is |
503 | | * expected to be called with the secmem lock held. */ |
504 | | static void |
505 | | _gcry_secmem_init_internal (size_t n) |
506 | 2 | { |
507 | 2 | pooldesc_t *pool; |
508 | | |
509 | 2 | pool = &mainpool; |
510 | 2 | if (!n) |
511 | 0 | { |
512 | | #ifdef USE_CAPABILITIES |
513 | | /* drop all capabilities */ |
514 | | if (!no_priv_drop) |
515 | | { |
516 | | cap_t cap; |
517 | | |
518 | | cap = cap_from_text ("all-eip"); |
519 | | cap_set_proc (cap); |
520 | | cap_free (cap); |
521 | | } |
522 | | |
523 | | #elif !defined(HAVE_DOSISH_SYSTEM) |
524 | 0 | uid_t uid; |
525 | |
526 | 0 | disable_secmem = 1; |
527 | 0 | uid = getuid (); |
528 | 0 | if (uid != geteuid ()) |
529 | 0 | { |
530 | 0 | if (setuid (uid) || getuid () != geteuid () || !setuid (0)) |
531 | 0 | log_fatal ("failed to drop setuid\n"); |
532 | 0 | } |
533 | 0 | #endif |
534 | 0 | } |
535 | 2 | else |
536 | 2 | { |
537 | 2 | if (n < MINIMUM_POOL_SIZE) |
538 | 0 | n = MINIMUM_POOL_SIZE; |
539 | 2 | if (! pool->okay) |
540 | 2 | { |
541 | 2 | init_pool (pool, n); |
542 | 2 | lock_pool_pages (pool->mem, n); |
543 | 2 | } |
544 | 0 | else |
545 | 0 | log_error ("Oops, secure memory pool already initialized\n"); |
546 | 2 | } |
547 | 2 | } |
548 | | |
549 | | |
550 | | |
551 | | /* Initialize the secure memory system. If running with the necessary |
552 | | privileges, the secure memory pool will be locked into the core in |
553 | | order to prevent page-outs of the data. Furthermore allocated |
554 | | secure memory will be wiped out when released. */ |
555 | | void |
556 | | _gcry_secmem_init (size_t n) |
557 | 0 | { |
558 | 0 | SECMEM_LOCK; |
559 | |
560 | 0 | _gcry_secmem_init_internal (n); |
561 | |
562 | 0 | SECMEM_UNLOCK; |
563 | 0 | } |
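Applications reach this function through the public API rather than calling it directly. A sketch of the initialization sequence described in the Libgcrypt manual:

    #include <gcrypt.h>

    int main (void)
    {
      if (!gcry_check_version (GCRYPT_VERSION))
        return 1;
      gcry_control (GCRYCTL_SUSPEND_SECMEM_WARN);
      gcry_control (GCRYCTL_INIT_SECMEM, 32768, 0);  /* ends up in _gcry_secmem_init */
      gcry_control (GCRYCTL_RESUME_SECMEM_WARN);
      gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);
      return 0;
    }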
564 | | |
565 | | |
566 | | gcry_err_code_t |
567 | | _gcry_secmem_module_init (void) |
568 | 4 | { |
569 | | /* Not anymore needed. */ |
570 | 4 | return 0; |
571 | 4 | } |
572 | | |
573 | | |
574 | | static void * |
575 | | _gcry_secmem_malloc_internal (size_t size, int xhint) |
576 | 62.1k | { |
577 | 62.1k | pooldesc_t *pool; |
578 | 62.1k | memblock_t *mb; |
579 | | |
580 | 62.1k | pool = &mainpool; |
581 | | |
582 | 62.1k | if (!pool->okay) |
583 | 2 | { |
584 | | /* Try to initialize the pool if the user forgot about it. */ |
585 | 2 | _gcry_secmem_init_internal (STANDARD_POOL_SIZE); |
586 | 2 | if (!pool->okay) |
587 | 0 | { |
588 | 0 | log_info (_("operation is not possible without " |
589 | 0 | "initialized secure memory\n")); |
590 | 0 | gpg_err_set_errno (ENOMEM); |
591 | 0 | return NULL; |
592 | 0 | } |
593 | 2 | } |
594 | 62.1k | if (not_locked && fips_mode ()) |
595 | 0 | { |
596 | 0 | log_info (_("secure memory pool is not locked while in FIPS mode\n")); |
597 | 0 | gpg_err_set_errno (ENOMEM); |
598 | 0 | return NULL; |
599 | 0 | } |
600 | 62.1k | if (show_warning && !suspend_warning) |
601 | 0 | { |
602 | 0 | show_warning = 0; |
603 | 0 | print_warn (); |
604 | 0 | } |
605 | | |
606 | | /* Blocks are always a multiple of 32. */ |
607 | 62.1k | size = ((size + 31) / 32) * 32; |
608 | | |
609 | 62.1k | mb = mb_get_new (pool, (memblock_t *) pool->mem, size); |
610 | 62.1k | if (mb) |
611 | 62.1k | { |
612 | 62.1k | stats_update (pool, mb->size, 0); |
613 | 62.1k | return &mb->aligned.c; |
614 | 62.1k | } |
615 | | |
616 | | /* If we are called from xmalloc style functions, resort to the |
617 | | * overflow pools to return memory. We don't do this in FIPS mode, |
618 | | * though. If the auto-expand option is active we do the expanding |
619 | | * also for the standard malloc functions. |
620 | | * |
621 | | * The idea of using them by default only for the xmalloc function |
622 | | * is so that a user can control whether memory will be allocated in |
623 | | * the initial created mlock protected secmem area or may also be |
624 | | * allocated from the overflow pools. */ |
625 | 0 | if ((xhint || auto_expand) && !fips_mode ()) |
626 | 0 | { |
627 | | /* Check whether we can allocate from the overflow pools. */ |
628 | 0 | for (pool = pool->next; pool; pool = pool->next) |
629 | 0 | { |
630 | 0 | mb = mb_get_new (pool, (memblock_t *) pool->mem, size); |
631 | 0 | if (mb) |
632 | 0 | { |
633 | 0 | stats_update (pool, mb->size, 0); |
634 | 0 | return &mb->aligned.c; |
635 | 0 | } |
636 | 0 | } |
637 | | /* Allocate a new overflow pool. We put a new pool right after |
638 | | * the mainpool so that the next allocation will happen in that |
639 | | * pool and not in one of the older pools. When this new pool |
640 | | * gets full we will try to find space in the older pools. */ |
641 | 0 | pool = calloc (1, sizeof *pool); |
642 | 0 | if (!pool) |
643 | 0 | return NULL; /* Not enough memory for a new pool descriptor. */ |
644 | 0 | pool->size = auto_expand? auto_expand : STANDARD_POOL_SIZE; |
645 | 0 | pool->mem = malloc (pool->size); |
646 | 0 | if (!pool->mem) |
647 | 0 | { |
648 | 0 | free (pool); |
649 | 0 | return NULL; /* Not enough memory available for a new pool. */ |
650 | 0 | } |
651 | | /* Initialize first memory block. */ |
652 | 0 | mb = (memblock_t *) pool->mem; |
653 | 0 | mb->size = pool->size - BLOCK_HEAD_SIZE; |
654 | 0 | mb->flags = 0; |
655 | |
656 | 0 | pool->okay = 1; |
657 | | |
658 | | /* Take care: in _gcry_private_is_secure we do not lock and thus |
659 | | * we assume that the second assignment below is atomic. Memory |
660 | | * barrier prevents reordering of stores to the new pool structure after |
661 | | * the MAINPOOL.NEXT assignment and prevents _gcry_private_is_secure |
662 | | * from seeing uninitialized POOL->NEXT pointers. */ |
663 | 0 | pool->next = mainpool.next; |
664 | 0 | memory_barrier(); |
665 | 0 | mainpool.next = pool; |
666 | | |
667 | | /* After the first time we allocated an overflow pool, print a |
668 | | * warning. */ |
669 | 0 | if (!pool->next) |
670 | 0 | print_warn (); |
671 | | |
672 | | /* Allocate. */ |
673 | 0 | mb = mb_get_new (pool, (memblock_t *) pool->mem, size); |
674 | 0 | if (mb) |
675 | 0 | { |
676 | 0 | stats_update (pool, mb->size, 0); |
677 | 0 | return &mb->aligned.c; |
678 | 0 | } |
679 | 0 | } |
680 | | |
681 | 0 | return NULL; |
682 | 0 | } |
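Requests are padded to the 32-byte block granularity before the first-fit search. The rounding in isolation:

    #include <stddef.h>

    /* Same rounding as at the top of _gcry_secmem_malloc_internal:
     * pad (1) == 32, pad (32) == 32, pad (33) == 64. */
    static size_t
    pad (size_t size)
    {
      return ((size + 31) / 32) * 32;
    }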
683 | | |
684 | | |
685 | | /* Allocate a block from the secmem of SIZE. With XHINT set assume |
686 | | * that the caller is a xmalloc style function. */ |
687 | | void * |
688 | | _gcry_secmem_malloc (size_t size, int xhint) |
689 | 62.1k | { |
690 | 62.1k | void *p; |
691 | | |
692 | 62.1k | SECMEM_LOCK; |
693 | 62.1k | p = _gcry_secmem_malloc_internal (size, xhint); |
694 | 62.1k | SECMEM_UNLOCK; |
695 | | |
696 | 62.1k | return p; |
697 | 62.1k | } |
698 | | |
699 | | static int |
700 | | _gcry_secmem_free_internal (void *a) |
701 | 62.1k | { |
702 | 62.1k | pooldesc_t *pool; |
703 | 62.1k | memblock_t *mb; |
704 | 62.1k | int size; |
705 | | |
706 | 62.1k | for (pool = &mainpool; pool; pool = pool->next) |
707 | 62.1k | if (pool->okay && ptr_into_pool_p (pool, a)) |
708 | 62.1k | break; |
709 | 62.1k | if (!pool) |
710 | 0 | return 0; /* A does not belong to us. */ |
711 | | |
712 | 62.1k | mb = ADDR_TO_BLOCK (a); |
713 | 62.1k | size = mb->size; |
714 | | |
715 | | /* This does not make much sense: probably this memory is held in the |
716 | | * cache. We do it anyway: */ |
717 | 62.1k | #define MB_WIPE_OUT(byte) \ |
718 | 248k | wipememory2 (((char *) mb + BLOCK_HEAD_SIZE), (byte), size) |
719 | | |
720 | 62.1k | MB_WIPE_OUT (0xff); |
721 | 62.1k | MB_WIPE_OUT (0xaa); |
722 | 62.1k | MB_WIPE_OUT (0x55); |
723 | 62.1k | MB_WIPE_OUT (0x00); |
724 | | |
725 | | /* Update stats. */ |
726 | 62.1k | stats_update (pool, 0, size); |
727 | | |
728 | 62.1k | mb->flags &= ~MB_FLAG_ACTIVE; |
729 | | |
730 | 62.1k | mb_merge (pool, mb); |
731 | | |
732 | 62.1k | return 1; /* Freed. */ |
733 | 62.1k | } |
734 | | |
735 | | |
736 | | /* Wipe out and release memory. Returns true if this function |
737 | | * actually released A. */ |
738 | | int |
739 | | _gcry_secmem_free (void *a) |
740 | 62.1k | { |
741 | 62.1k | int mine; |
742 | | |
743 | 62.1k | if (!a) |
744 | 0 | return 1; /* Tell caller that we handled it. */ |
745 | | |
746 | 62.1k | SECMEM_LOCK; |
747 | 62.1k | mine = _gcry_secmem_free_internal (a); |
748 | 62.1k | SECMEM_UNLOCK; |
749 | 62.1k | return mine; |
750 | 62.1k | } |
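Freed blocks are overwritten four times (0xff, 0xaa, 0x55, 0x00) before rejoining the free list. A plain memset could be elided as a dead store; libgcrypt's wipememory2 is written to defeat that, and a standalone approximation of the idea uses volatile accesses:

    #include <stddef.h>

    /* Rough stand-in for the wipe-on-free above; the volatile
     * qualifier resists dead-store elimination by the optimizer. */
    static void
    wipe (volatile unsigned char *p, size_t n, unsigned char pat)
    {
      while (n--)
        *p++ = pat;
    }

    static void
    wipe_block (void *p, size_t n)
    {
      wipe (p, n, 0xff);
      wipe (p, n, 0xaa);
      wipe (p, n, 0x55);
      wipe (p, n, 0x00);
    }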
751 | | |
752 | | |
753 | | static void * |
754 | | _gcry_secmem_realloc_internal (void *p, size_t newsize, int xhint) |
755 | 0 | { |
756 | 0 | memblock_t *mb; |
757 | 0 | size_t size; |
758 | 0 | void *a; |
759 | |
760 | 0 | mb = (memblock_t *) (void *) ((char *) p |
761 | 0 | - offsetof (memblock_t, aligned.c)); |
762 | 0 | size = mb->size; |
763 | 0 | if (newsize < size) |
764 | 0 | { |
765 | | /* It is easier to not shrink the memory. */ |
766 | 0 | a = p; |
767 | 0 | } |
768 | 0 | else |
769 | 0 | { |
770 | 0 | a = _gcry_secmem_malloc_internal (newsize, xhint); |
771 | 0 | if (a) |
772 | 0 | { |
773 | 0 | memcpy (a, p, size); |
774 | 0 | memset ((char *) a + size, 0, newsize - size); |
775 | 0 | _gcry_secmem_free_internal (p); |
776 | 0 | } |
777 | 0 | } |
778 | |
779 | 0 | return a; |
780 | 0 | } |
781 | | |
782 | | |
783 | | /* Realloc memory. With XHINT set assume that the caller is a xmalloc |
784 | | * style function. */ |
785 | | void * |
786 | | _gcry_secmem_realloc (void *p, size_t newsize, int xhint) |
787 | 0 | { |
788 | 0 | void *a; |
789 | |
790 | 0 | SECMEM_LOCK; |
791 | 0 | a = _gcry_secmem_realloc_internal (p, newsize, xhint); |
792 | 0 | SECMEM_UNLOCK; |
793 | |
794 | 0 | return a; |
795 | 0 | } |
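The semantics are deliberately asymmetric: shrinking is a no-op, so reallocating a 64-byte block down to 32 bytes returns P unchanged and the block keeps its 64 bytes, while growing allocates a fresh block, copies the old SIZE bytes, zero-fills the tail, and wipes and frees the original via _gcry_secmem_free_internal, so no unwiped copy of the secret lingers.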
796 | | |
797 | | |
798 | | /* Return true if P points into the secure memory areas. */ |
799 | | int |
800 | | _gcry_private_is_secure (const void *p) |
801 | 48.5M | { |
802 | 48.5M | pooldesc_t *pool; |
803 | | |
804 | | /* We do not lock here because once a pool is allocated it will not |
805 | | * be removed anymore (except by gcry_secmem_term). Further, as the |
806 | | * assignment of POOL->NEXT in the new pool structure is visible in |
807 | | * this thread before the assignment of MAINPOOL.NEXT, the pool list |
808 | | * can be iterated locklessly. This visibility is ensured by the |
809 | | * memory barrier between the POOL->NEXT and MAINPOOL.NEXT assignments |
810 | | * in _gcry_secmem_malloc_internal. */ |
811 | 97.0M | for (pool = &mainpool; pool; pool = pool->next) |
812 | 48.5M | if (pool->okay && ptr_into_pool_p (pool, p)) |
813 | 62.1k | return 1; |
814 | | |
815 | 48.5M | return 0; |
816 | 48.5M | } |
817 | | |
818 | | |
819 | | /**************** |
820 | | * Warning: This code might be called by an interrupt handler |
821 | | * and frankly, there should really be such a handler, |
822 | | * to make sure that the memory is wiped out. |
823 | | * We hope that the OS wipes out mlocked memory after |
824 | | * receiving a SIGKILL - it really should do so, otherwise |
825 | | * there is no chance to get the secure memory cleaned. |
826 | | */ |
827 | | void |
828 | | _gcry_secmem_term (void) |
829 | 0 | { |
830 | 0 | pooldesc_t *pool, *next; |
831 | |
832 | 0 | for (pool = &mainpool; pool; pool = next) |
833 | 0 | { |
834 | 0 | next = pool->next; |
835 | 0 | if (!pool->okay) |
836 | 0 | continue; |
837 | | |
838 | 0 | wipememory2 (pool->mem, 0xff, pool->size); |
839 | 0 | wipememory2 (pool->mem, 0xaa, pool->size); |
840 | 0 | wipememory2 (pool->mem, 0x55, pool->size); |
841 | 0 | wipememory2 (pool->mem, 0x00, pool->size); |
842 | 0 | if (0) |
843 | 0 | ; |
844 | 0 | #if HAVE_MMAP |
845 | 0 | else if (pool->is_mmapped) |
846 | 0 | munmap (pool->mem, pool->size); |
847 | 0 | #endif |
848 | 0 | else |
849 | 0 | free (pool->mem); |
850 | 0 | pool->mem = NULL; |
851 | 0 | pool->okay = 0; |
852 | 0 | pool->size = 0; |
853 | 0 | if (pool != &mainpool) |
854 | 0 | free (pool); |
855 | 0 | } |
856 | 0 | mainpool.next = NULL; |
857 | 0 | not_locked = 0; |
858 | 0 | } |
859 | | |
860 | | |
861 | | /* Print stats of the secmem allocator. With EXTENDED passed as true |
862 | | * a detailed listing is returned (used for testing). */ |
863 | | void |
864 | | _gcry_secmem_dump_stats (int extended) |
865 | 0 | { |
866 | 0 | SECMEM_LOCK; |
867 | 0 | secmem_dump_stats_internal (extended); |
868 | 0 | SECMEM_UNLOCK; |
869 | 0 | } |
870 | | |
871 | | |
872 | | static void |
873 | | secmem_dump_stats_internal (int extended) |
874 | 0 | { |
875 | 0 | pooldesc_t *pool; |
876 | 0 | memblock_t *mb; |
877 | 0 | int i, poolno; |
878 | |
879 | 0 | for (pool = &mainpool, poolno = 0; pool; pool = pool->next, poolno++) |
880 | 0 | { |
881 | 0 | if (!extended) |
882 | 0 | { |
883 | 0 | if (pool->okay) |
884 | 0 | log_info ("%-13s %u/%lu bytes in %u blocks\n", |
885 | 0 | pool == &mainpool? "secmem usage:":"", |
886 | 0 | pool->cur_alloced, (unsigned long)pool->size, |
887 | 0 | pool->cur_blocks); |
888 | 0 | } |
889 | 0 | else |
890 | 0 | { |
891 | 0 | for (i = 0, mb = (memblock_t *) pool->mem; |
892 | 0 | ptr_into_pool_p (pool, mb); |
893 | 0 | mb = mb_get_next (pool, mb), i++) |
894 | 0 | log_info ("SECMEM: pool %d %s block %i size %i\n", |
895 | 0 | poolno, |
896 | 0 | (mb->flags & MB_FLAG_ACTIVE) ? "used" : "free", |
897 | 0 | i, |
898 | 0 | mb->size); |
899 | 0 | } |
900 | 0 | } |
901 | 0 | } |