| 1 | /* Copyright (C) 2002-2016 Free Software Foundation, Inc. |
| 2 | This file is part of the GNU C Library. |
| 3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. |
| 4 | |
| 5 | The GNU C Library is free software; you can redistribute it and/or |
| 6 | modify it under the terms of the GNU Lesser General Public |
| 7 | License as published by the Free Software Foundation; either |
| 8 | version 2.1 of the License, or (at your option) any later version. |
| 9 | |
| 10 | The GNU C Library is distributed in the hope that it will be useful, |
| 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 13 | Lesser General Public License for more details. |
| 14 | |
| 15 | You should have received a copy of the GNU Lesser General Public |
| 16 | License along with the GNU C Library; if not, see |
| 17 | <http://www.gnu.org/licenses/>. */ |
| 18 | |
| 19 | #include <errno.h> |
| 20 | #include <stdlib.h> |
| 21 | #include <string.h> |
| 22 | #include <fork.h> |
| 23 | #include <atomic.h> |
| 24 | |
| 25 | |
/* Singly-linked list of all registered atfork handlers; pushed onto
   lock-free by __linkin_atfork and walked by fork.  */
struct fork_handler *__fork_handlers;

/* Lock to protect allocation and deallocation of fork handlers. */
int __fork_lock = LLL_LOCK_INITIALIZER;


/* Number of pre-allocated handler entries. */
#define NHANDLER 48

/* Memory pool for fork handler structures. */
static struct fork_handler_pool
{
  struct fork_handler_pool *next;	/* Next dynamically allocated pool.  */
  struct fork_handler mem[NHANDLER];	/* Slots; refcntr == 0 means free.  */
} fork_handler_pool;
| 41 | |
| 42 | |
| 43 | static struct fork_handler * |
| 44 | fork_handler_alloc (void) |
| 45 | { |
| 46 | struct fork_handler_pool *runp = &fork_handler_pool; |
| 47 | struct fork_handler *result = NULL; |
| 48 | unsigned int i; |
| 49 | |
| 50 | do |
| 51 | { |
| 52 | /* Search for an empty entry. */ |
| 53 | for (i = 0; i < NHANDLER; ++i) |
| 54 | if (runp->mem[i].refcntr == 0) |
| 55 | goto found; |
| 56 | } |
| 57 | while ((runp = runp->next) != NULL); |
| 58 | |
| 59 | /* We have to allocate a new entry. */ |
| 60 | runp = (struct fork_handler_pool *) calloc (1, sizeof (*runp)); |
| 61 | if (runp != NULL) |
| 62 | { |
| 63 | /* Enqueue the new memory pool into the list. */ |
| 64 | runp->next = fork_handler_pool.next; |
| 65 | fork_handler_pool.next = runp; |
| 66 | |
| 67 | /* We use the last entry on the page. This means when we start |
| 68 | searching from the front the next time we will find the first |
| 69 | entry unused. */ |
| 70 | i = NHANDLER - 1; |
| 71 | |
| 72 | found: |
| 73 | result = &runp->mem[i]; |
| 74 | result->refcntr = 1; |
| 75 | result->need_signal = 0; |
| 76 | } |
| 77 | |
| 78 | return result; |
| 79 | } |
| 80 | |
| 81 | |
| 82 | int |
| 83 | __register_atfork (void (*prepare) (void), void (*parent) (void), |
| 84 | void (*child) (void), void *dso_handle) |
| 85 | { |
| 86 | /* Get the lock to not conflict with other allocations. */ |
| 87 | lll_lock (__fork_lock, LLL_PRIVATE); |
| 88 | |
| 89 | struct fork_handler *newp = fork_handler_alloc (); |
| 90 | |
| 91 | if (newp != NULL) |
| 92 | { |
| 93 | /* Initialize the new record. */ |
| 94 | newp->prepare_handler = prepare; |
| 95 | newp->parent_handler = parent; |
| 96 | newp->child_handler = child; |
| 97 | newp->dso_handle = dso_handle; |
| 98 | |
| 99 | __linkin_atfork (newp); |
| 100 | } |
| 101 | |
| 102 | /* Release the lock. */ |
| 103 | lll_unlock (__fork_lock, LLL_PRIVATE); |
| 104 | |
| 105 | return newp == NULL ? ENOMEM : 0; |
| 106 | } |
| 107 | libc_hidden_def (__register_atfork) |
| 108 | |
| 109 | |
/* Atomically push NEWP onto the front of the __fork_handlers list.
   No lock is taken here: the CAS loop retries when another thread
   pushed an entry concurrently.  NOTE(review): the expected value
   passed to the CAS is a fresh read of newp->next — only this thread
   writes newp->next, so this is equivalent to using a saved local.  */
void
attribute_hidden
__linkin_atfork (struct fork_handler *newp)
{
  do
    newp->next = __fork_handlers;
  while (catomic_compare_and_exchange_bool_acq (&__fork_handlers,
						newp, newp->next) != 0);
}
| 119 | |
| 120 | |
| 121 | libc_freeres_fn (free_mem) |
| 122 | { |
| 123 | /* Get the lock to not conflict with running forks. */ |
| 124 | lll_lock (__fork_lock, LLL_PRIVATE); |
| 125 | |
| 126 | /* No more fork handlers. */ |
| 127 | __fork_handlers = NULL; |
| 128 | |
| 129 | /* Free eventually allocated memory blocks for the object pool. */ |
| 130 | struct fork_handler_pool *runp = fork_handler_pool.next; |
| 131 | |
| 132 | memset (&fork_handler_pool, '\0', sizeof (fork_handler_pool)); |
| 133 | |
| 134 | /* Release the lock. */ |
| 135 | lll_unlock (__fork_lock, LLL_PRIVATE); |
| 136 | |
| 137 | /* We can free the memory after releasing the lock. */ |
| 138 | while (runp != NULL) |
| 139 | { |
| 140 | struct fork_handler_pool *oldp = runp; |
| 141 | runp = runp->next; |
| 142 | free (oldp); |
| 143 | } |
| 144 | } |
| 145 | |