| 1 | /* Copyright (C) 2002-2016 Free Software Foundation, Inc. |
| 2 | This file is part of the GNU C Library. |
| 3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. |
| 4 | |
| 5 | The GNU C Library is free software; you can redistribute it and/or |
| 6 | modify it under the terms of the GNU Lesser General Public |
| 7 | License as published by the Free Software Foundation; either |
| 8 | version 2.1 of the License, or (at your option) any later version. |
| 9 | |
| 10 | The GNU C Library is distributed in the hope that it will be useful, |
| 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 13 | Lesser General Public License for more details. |
| 14 | |
| 15 | You should have received a copy of the GNU Lesser General Public |
| 16 | License along with the GNU C Library; if not, see |
| 17 | <http://www.gnu.org/licenses/>. */ |
| 18 | |
| 19 | #include <assert.h> |
| 20 | #include <stdlib.h> |
| 21 | #include <unistd.h> |
| 22 | #include <sys/types.h> |
| 23 | #include <sysdep.h> |
| 24 | #include <libio/libioP.h> |
| 25 | #include <tls.h> |
| 26 | #include <hp-timing.h> |
| 27 | #include <ldsodefs.h> |
| 28 | #include <stdio-lock.h> |
| 29 | #include <atomic.h> |
| 30 | #include <nptl/pthreadP.h> |
| 31 | #include <fork.h> |
| 32 | #include <arch-fork.h> |
| 33 | #include <futex-internal.h> |
| 34 | |
| 35 | |
| 36 | static void |
| 37 | fresetlockfiles (void) |
| 38 | { |
| 39 | _IO_ITER i; |
| 40 | |
| 41 | for (i = _IO_iter_begin(); i != _IO_iter_end(); i = _IO_iter_next(i)) |
| 42 | if ((_IO_iter_file (i)->_flags & _IO_USER_LOCK) == 0) |
| 43 | _IO_lock_init (*((_IO_lock_t *) _IO_iter_file(i)->_lock)); |
| 44 | } |
| 45 | |
| 46 | |
/* Fork the process, wrapping the raw clone/fork system call with the
   pthread_atfork machinery and repairing libc-internal state in the
   child (stdio locks, dynamic loader lock, TID/PID fields, robust
   mutex list).  Returns the child's PID in the parent, 0 in the
   child, and -1 (with errno set by the system call) on failure, just
   like fork.  */
pid_t
__libc_fork (void)
{
  pid_t pid;
  /* Stack-allocated bookkeeping node: remembers every fork handler
     whose prepare handler we ran, so the matching parent/child
     handlers can be run after the fork.  Lives on this frame via
     alloca below.  */
  struct used_handler
  {
    struct fork_handler *handler;
    struct used_handler *next;
  } *allp = NULL;

  /* Run all the registered preparation handlers.  In reverse order.
     While doing this we build up a list of all the entries.  */
  struct fork_handler *runp;
  while ((runp = __fork_handlers) != NULL)
    {
      /* Make sure we read from the current RUNP pointer.  */
      atomic_full_barrier ();

      unsigned int oldval = runp->refcntr;

      if (oldval == 0)
	/* This means some other thread removed the list just after
	   the pointer has been loaded.  Try again.  Either the list
	   is empty or we can retry it.  */
	continue;

      /* Bump the reference counter.  NOTE(review): the CAS re-reads
	 the global __fork_handlers rather than using RUNP; if the
	 list head changed in between, the counter of the new head
	 will not match OLDVAL and we simply retry from the top.  */
      if (atomic_compare_and_exchange_bool_acq (&__fork_handlers->refcntr,
						oldval + 1, oldval))
	/* The value changed, try again.  */
	continue;

      /* We bumped the reference counter for the first entry in the
	 list.  That means that none of the following entries will
	 just go away.  The unloading code works in the order of the
	 list.

	 While executing the registered handlers we are building a
	 list of all the entries so that we can go backward later on.  */
      while (1)
	{
	  /* Execute the handler if there is one.  */
	  if (runp->prepare_handler != NULL)
	    runp->prepare_handler ();

	  /* Create a new element for the list.  alloca is fine here:
	     the node only needs to survive until the parent/child
	     handlers run later in this same invocation.  */
	  struct used_handler *newp
	    = (struct used_handler *) alloca (sizeof (*newp));
	  newp->handler = runp;
	  newp->next = allp;
	  allp = newp;

	  /* Advance to the next handler.  */
	  runp = runp->next;
	  if (runp == NULL)
	    break;

	  /* Bump the reference counter for the next entry.  */
	  atomic_increment (&runp->refcntr);
	}

      /* We are done.  */
      break;
    }

  /* Acquire the stdio stream-list lock so the list is in a consistent
     state across the fork.  It is released in the parent below and
     reinitialized in the child.  */
  _IO_list_lock ();

#ifndef NDEBUG
  /* Parent's TID, kept only for the sanity asserts below.  */
  pid_t ppid = THREAD_GETMEM (THREAD_SELF, tid);
#endif

  /* We need to prevent the getpid() code to update the PID field so
     that, if a signal arrives in the child very early and the signal
     handler uses getpid(), the value returned is correct.  Storing
     the negated PID is the sentinel getpid() recognizes.  */
  pid_t parentpid = THREAD_GETMEM (THREAD_SELF, pid);
  THREAD_SETMEM (THREAD_SELF, pid, -parentpid);

#ifdef ARCH_FORK
  /* Architecture-specific clone() invocation; it must pass
     CLONE_SETTID-style flags so the kernel writes the child's TID
     into the TCB before the child runs any user code.  */
  pid = ARCH_FORK ();
#else
# error "ARCH_FORK must be defined so that the CLONE_SETTID flag is used"
  pid = INLINE_SYSCALL (fork, 0);
#endif


  if (pid == 0)
    {
      /* In the child.  */
      struct pthread *self = THREAD_SELF;

      /* The kernel already stored the child's TID; it must differ
	 from the parent's.  */
      assert (THREAD_GETMEM (self, tid) != ppid);

      /* See __pthread_once.  */
      if (__fork_generation_pointer != NULL)
	*__fork_generation_pointer += __PTHREAD_ONCE_FORK_GEN_INCR;

      /* Adjust the PID field for the new process.  */
      THREAD_SETMEM (self, pid, THREAD_GETMEM (self, tid));

#if HP_TIMING_AVAIL
      /* The CPU clock of the thread and process have to be set to zero.  */
      hp_timing_t now;
      HP_TIMING_NOW (now);
      THREAD_SETMEM (self, cpuclock_offset, now);
      GL(dl_cpuclock_offset) = now;
#endif

#ifdef __NR_set_robust_list
      /* Initialize the robust mutex list which has been reset during
	 the fork.  We do not check for errors since if it fails here
	 it failed at process start as well and noone could have used
	 robust mutexes.  We also do not have to set
	 self->robust_head.futex_offset since we inherit the correct
	 value from the parent.  */
# ifdef SHARED
      if (__builtin_expect (__libc_pthread_functions_init, 0))
	PTHFCT_CALL (ptr_set_robust, (self));
# else
      extern __typeof (__nptl_set_robust) __nptl_set_robust
	__attribute__((weak));
      if (__builtin_expect (__nptl_set_robust != NULL, 0))
	__nptl_set_robust (self);
# endif
#endif

      /* Reset the file list.  These are recursive mutexes.  */
      fresetlockfiles ();

      /* Reset locks in the I/O code.  */
      _IO_list_resetlock ();

      /* Reset the lock the dynamic loader uses to protect its data.  */
      __rtld_lock_initialize (GL(dl_load_lock));

      /* Run the handlers registered for the child.  */
      while (allp != NULL)
	{
	  if (allp->handler->child_handler != NULL)
	    allp->handler->child_handler ();

	  /* Note that we do not have to wake any possible waiter.
	     This is the only thread in the new process.  The count
	     may have been bumped up by other threads doing a fork.
	     We reset it to 1, to avoid waiting for non-existing
	     thread(s) to release the count.  */
	  allp->handler->refcntr = 1;

	  /* XXX We could at this point look through the object pool
	     and mark all objects not on the __fork_handlers list as
	     unused.  This is necessary in case the fork() happened
	     while another thread called dlclose() and that call had
	     to create a new list.  */

	  allp = allp->next;
	}

      /* Initialize the fork lock.  */
      __fork_lock = LLL_LOCK_INITIALIZER;
    }
  else
    {
      /* In the parent — also reached when the fork system call
	 failed (pid == -1).  */
      assert (THREAD_GETMEM (THREAD_SELF, tid) == ppid);

      /* Restore the PID value.  */
      THREAD_SETMEM (THREAD_SELF, pid, parentpid);

      /* We execute this even if the 'fork' call failed.  */
      _IO_list_unlock ();

      /* Run the handlers registered for the parent.  */
      while (allp != NULL)
	{
	  if (allp->handler->parent_handler != NULL)
	    allp->handler->parent_handler ();

	  /* Drop our reference; if we held the last one and a
	     dlclose is waiting to unload the handler's object,
	     wake it.  */
	  if (atomic_decrement_and_test (&allp->handler->refcntr)
	      && allp->handler->need_signal)
	    futex_wake (&allp->handler->refcntr, 1, FUTEX_PRIVATE);

	  allp = allp->next;
	}
    }

  return pid;
}
/* Export the implementation under its public names: __fork is the
   libc-internal (hidden) entry point, fork is the POSIX name.  */
weak_alias (__libc_fork, __fork)
libc_hidden_def (__fork)
weak_alias (__libc_fork, fork)
| 234 | |