/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sysdep.h>
#include <libio/libioP.h>
#include <tls.h>
#include <hp-timing.h>
#include <ldsodefs.h>
#include <stdio-lock.h>
#include <atomic.h>
#include <nptl/pthreadP.h>
#include <fork.h>
#include <arch-fork.h>
#include <futex-internal.h>
#include <malloc/malloc-internal.h>

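/* Reinitialize the lock of every stdio stream that does not use a
   user-supplied lock.  Called in the child after fork, where a stream
   lock may still appear to be held by a parent thread that no longer
   exists in the new process.  */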
static void
fresetlockfiles (void)
{
  _IO_ITER i;

  for (i = _IO_iter_begin (); i != _IO_iter_end (); i = _IO_iter_next (i))
    if ((_IO_iter_file (i)->_flags & _IO_USER_LOCK) == 0)
      _IO_lock_init (*((_IO_lock_t *) _IO_iter_file (i)->_lock));
}


pid_t
__libc_fork (void)
{
  pid_t pid;
  struct used_handler
  {
    struct fork_handler *handler;
    struct used_handler *next;
  } *allp = NULL;

  /* Determine if we are running multiple threads.  We skip some fork
     handlers in the single-thread case, to make fork safer to use in
     signal handlers.  POSIX requires that fork is async-signal-safe,
     but our current fork implementation is not.  */
  bool multiple_threads = THREAD_GETMEM (THREAD_SELF, header.multiple_threads);

  /* Run all the registered preparation handlers, in the reverse of
     their registration order (__register_atfork prepends new entries
     to __fork_handlers, so walking the list forward yields exactly
     that order).  While doing this we build up a list of all the
     entries so that the parent and child handlers can be run later.  */
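  /* Illustrative only, not part of the implementation: entries end up
     on __fork_handlers when a program calls the documented
     pthread_atfork interface, for example

        static void prepare (void) { pthread_mutex_lock (&some_lock); }
        static void parent (void) { pthread_mutex_unlock (&some_lock); }
        static void child (void) { pthread_mutex_unlock (&some_lock); }
        ...
        pthread_atfork (prepare, parent, child);

     The handler bodies and SOME_LOCK above are hypothetical; only the
     registration call and the callback ordering are prescribed.  */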
  struct fork_handler *runp;
  while ((runp = __fork_handlers) != NULL)
    {
      /* Make sure we read from the current RUNP pointer.  */
      atomic_full_barrier ();

      unsigned int oldval = runp->refcntr;

      if (oldval == 0)
        /* Some other thread removed this entry just after we loaded
           the pointer.  Start over; by now the list is either empty
           or has a usable head again.  */
        continue;

      /* Bump the reference counter.  */
      if (atomic_compare_and_exchange_bool_acq (&__fork_handlers->refcntr,
                                                oldval + 1, oldval))
        /* The value changed, try again.  */
        continue;

      /* We bumped the reference counter for the first entry in the
         list.  That means that none of the following entries will
         just go away.  The unloading code works in the order of the
         list.

         While executing the registered handlers we are building a
         list of all the entries so that we can go backward later on.  */
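
      /* For reference (the authoritative definition lives in <fork.h>),
         each list entry carries roughly the following fields, which the
         code below relies on:

            struct fork_handler
            {
              struct fork_handler *next;
              void (*prepare_handler) (void);
              void (*parent_handler) (void);
              void (*child_handler) (void);
              void *dso_handle;
              unsigned int refcntr;
              int need_signal;
            };  */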
      while (1)
        {
          /* Execute the handler if there is one.  */
          if (runp->prepare_handler != NULL)
            runp->prepare_handler ();

          /* Create a new element for the list.  */
          struct used_handler *newp
            = (struct used_handler *) alloca (sizeof (*newp));
          newp->handler = runp;
          newp->next = allp;
          allp = newp;

          /* Advance to the next handler.  */
          runp = runp->next;
          if (runp == NULL)
            break;

          /* Bump the reference counter for the next entry.  */
          atomic_increment (&runp->refcntr);
        }

      /* We are done.  */
      break;
    }

  /* If we are not running multiple threads, we do not have to
     preserve lock state.  If fork runs from a signal handler, only
     async-signal-safe functions can be used in the child.  These data
     structures are only used by unsafe functions, so their state does
     not matter if fork was called from a signal handler.  */
  if (multiple_threads)
    {
      _IO_list_lock ();

      /* Acquire malloc locks.  This needs to come last because fork
         handlers may use malloc, and the libio list lock has an
         indirect malloc dependency as well (via the getdelim
         function).  */
      call_function_static_weak (__malloc_fork_lock_parent);
    }

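  /* Remember the TID of the thread calling fork.  It is used only by
     the assertions below, which check that the child really ends up
     with a different TID while the parent keeps its own.  */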
#ifndef NDEBUG
  pid_t ppid = THREAD_GETMEM (THREAD_SELF, tid);
#endif

#ifdef ARCH_FORK
  pid = ARCH_FORK ();
#else
# error "ARCH_FORK must be defined so that the CLONE_SETTID flag is used"
  pid = INLINE_SYSCALL (fork, 0);
#endif
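
  /* Illustrative only: on Linux, ARCH_FORK is provided by the
     architecture's <arch-fork.h> and typically expands to a clone call
     along the lines of

        INLINE_SYSCALL (clone, 4,
                        CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD,
                        0, NULL, &THREAD_SELF->tid)

     so that the kernel stores the new TID in the child's thread
     descriptor.  The exact flags and argument order are architecture
     specific.  */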


  if (pid == 0)
    {
      struct pthread *self = THREAD_SELF;

      assert (THREAD_GETMEM (self, tid) != ppid);

      /* Bump the fork generation so that a pthread_once initialization
         that was in progress in the parent is restarted in the child
         instead of waiting for a thread that no longer exists; see
         __pthread_once.  */
      if (__fork_generation_pointer != NULL)
        *__fork_generation_pointer += __PTHREAD_ONCE_FORK_GEN_INCR;

#if HP_TIMING_AVAIL
      /* The CPU clock of the thread and process have to be set to zero.  */
      hp_timing_t now;
      HP_TIMING_NOW (now);
      THREAD_SETMEM (self, cpuclock_offset, now);
      GL(dl_cpuclock_offset) = now;
#endif

#ifdef __NR_set_robust_list
      /* Initialize the robust mutex list setting in the kernel which has
         been reset during the fork.  We do not check for errors because if
         it fails here, it must have failed at process startup as well and
         nobody could have used robust mutexes.
         Before we do that, we have to clear the list of robust mutexes
         because we do not inherit ownership of mutexes from the parent.
         We do not have to set self->robust_head.futex_offset since we do
         inherit the correct value from the parent.  We do not need to clear
         the pending operation because it must have been zero when fork was
         called.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
      self->robust_prev = &self->robust_head;
# endif
      self->robust_head.list = &self->robust_head;
# ifdef SHARED
      if (__builtin_expect (__libc_pthread_functions_init, 0))
        PTHFCT_CALL (ptr_set_robust, (self));
# else
      extern __typeof (__nptl_set_robust) __nptl_set_robust
        __attribute__ ((weak));
      if (__builtin_expect (__nptl_set_robust != NULL, 0))
        __nptl_set_robust (self);
# endif
#endif
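
      /* Illustrative only: both branches above end up issuing the
         set_robust_list system call, roughly

            INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                              sizeof (struct robust_list_head));

         and the return value is deliberately ignored, as explained in
         the comment above.  */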

      /* Reset the lock state in the multi-threaded case.  */
      if (multiple_threads)
        {
          /* Release malloc locks.  */
          call_function_static_weak (__malloc_fork_unlock_child);

          /* Reset the file list.  These are recursive mutexes.  */
          fresetlockfiles ();

          /* Reset locks in the I/O code.  */
          _IO_list_resetlock ();
        }

      /* Reset the lock the dynamic loader uses to protect its data.  */
      __rtld_lock_initialize (GL(dl_load_lock));

      /* Run the handlers registered for the child.  */
      while (allp != NULL)
        {
          if (allp->handler->child_handler != NULL)
            allp->handler->child_handler ();

          /* Note that we do not have to wake any possible waiter.
             This is the only thread in the new process.  The count
             may have been bumped up by other threads doing a fork.
             We reset it to 1, to avoid waiting for non-existing
             thread(s) to release the count.  */
          allp->handler->refcntr = 1;

          /* XXX We could at this point look through the object pool
             and mark all objects not on the __fork_handlers list as
             unused.  This is necessary in case the fork() happened
             while another thread called dlclose() and that call had
             to create a new list.  */

          allp = allp->next;
        }

      /* Reset the lock protecting the atfork handler list.  Another
         thread may have held it in the parent at the time of the
         fork.  */
      __fork_lock = LLL_LOCK_INITIALIZER;
    }
  else
    {
      assert (THREAD_GETMEM (THREAD_SELF, tid) == ppid);

      /* Release acquired locks in the multi-threaded case.  */
      if (multiple_threads)
        {
          /* Release malloc locks, parent process variant.  */
          call_function_static_weak (__malloc_fork_unlock_parent);

          /* We execute this even if the 'fork' call failed.  */
          _IO_list_unlock ();
        }

      /* Run the handlers registered for the parent.  */
      while (allp != NULL)
        {
          if (allp->handler->parent_handler != NULL)
            allp->handler->parent_handler ();

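          /* Drop the reference taken before the fork.  If this was the
             last reference and __unregister_atfork is waiting for the
             count to drain (e.g. because of a dlclose), wake it up.  */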
          if (atomic_decrement_and_test (&allp->handler->refcntr)
              && allp->handler->need_signal)
            futex_wake (&allp->handler->refcntr, 1, FUTEX_PRIVATE);

          allp = allp->next;
        }
    }

  return pid;
}
weak_alias (__libc_fork, __fork)
libc_hidden_def (__fork)
weak_alias (__libc_fork, fork)