1 | /* Copyright (C) 2002-2016 Free Software Foundation, Inc. |
2 | This file is part of the GNU C Library. |
3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <http://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <ctype.h> |
20 | #include <errno.h> |
21 | #include <stdbool.h> |
22 | #include <stdlib.h> |
23 | #include <string.h> |
24 | #include <stdint.h> |
25 | #include "pthreadP.h" |
26 | #include <hp-timing.h> |
27 | #include <ldsodefs.h> |
28 | #include <atomic.h> |
29 | #include <libc-internal.h> |
30 | #include <resolv.h> |
31 | #include <kernel-features.h> |
32 | #include <exit-thread.h> |
33 | #include <default-sched.h> |
34 | #include <futex-internal.h> |
35 | |
36 | #include <shlib-compat.h> |
37 | |
38 | #include <stap-probe.h> |
39 | |
40 | |
/* Nonzero if debugging mode is enabled.  */
42 | int __pthread_debug; |
43 | |
44 | /* Globally enabled events. */ |
45 | static td_thr_events_t __nptl_threads_events __attribute_used__; |
46 | |
47 | /* Pointer to descriptor with the last event. */ |
48 | static struct pthread *__nptl_last_event __attribute_used__; |
49 | |
50 | /* Number of threads running. */ |
51 | unsigned int __nptl_nthreads = 1; |
52 | |
53 | |
54 | /* Code to allocate and deallocate a stack. */ |
55 | #include "allocatestack.c" |
56 | |
57 | /* createthread.c defines this function, and two macros: |
58 | START_THREAD_DEFN and START_THREAD_SELF (see below). |
59 | |
60 | create_thread is obliged to initialize PD->stopped_start. It |
61 | should be true if the STOPPED_START parameter is true, or if |
62 | create_thread needs the new thread to synchronize at startup for |
63 | some other implementation reason. If PD->stopped_start will be |
64 | true, then create_thread is obliged to perform the operation |
65 | "lll_lock (PD->lock, LLL_PRIVATE)" before starting the thread. |
66 | |
67 | The return value is zero for success or an errno code for failure. |
68 | If the return value is ENOMEM, that will be translated to EAGAIN, |
69 | so create_thread need not do that. On failure, *THREAD_RAN should |
70 | be set to true iff the thread actually started up and then got |
71 | cancelled before calling user code (*PD->start_routine), in which |
72 | case it is responsible for doing its own cleanup. */ |
73 | |
74 | static int create_thread (struct pthread *pd, const struct pthread_attr *attr, |
75 | bool stopped_start, STACK_VARIABLES_PARMS, |
76 | bool *thread_ran); |
77 | |
78 | #include <createthread.c> |
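
/* Illustrative sketch (not part of the build) of the stopped-start
   handshake implied by the contract above, as the code below implements
   it; the ordering is the interesting part:

     1. create_thread:        lll_lock (pd->lock, LLL_PRIVATE); then start
                              the kernel thread with pd->stopped_start set.
     2. new thread:           in START_THREAD_DEFN, sees pd->stopped_start
                              and blocks in lll_lock (pd->lock, LLL_PRIVATE).
     3. __pthread_create_2_1: finishes setup (e.g. reports TD_CREATE) and
                              calls lll_unlock (pd->lock, LLL_PRIVATE).
     4. new thread:           acquires and immediately releases the lock,
                              then runs pd->start_routine (pd->arg).

   The new thread therefore cannot reach user code before the creating
   thread drops PD->lock.  */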
79 | |
80 | |
81 | struct pthread * |
82 | internal_function |
83 | __find_in_stack_list (struct pthread *pd) |
84 | { |
85 | list_t *entry; |
86 | struct pthread *result = NULL; |
87 | |
88 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
89 | |
90 | list_for_each (entry, &stack_used) |
91 | { |
92 | struct pthread *curp; |
93 | |
94 | curp = list_entry (entry, struct pthread, list); |
95 | if (curp == pd) |
96 | { |
97 | result = curp; |
98 | break; |
99 | } |
100 | } |
101 | |
102 | if (result == NULL) |
103 | list_for_each (entry, &__stack_user) |
104 | { |
105 | struct pthread *curp; |
106 | |
107 | curp = list_entry (entry, struct pthread, list); |
108 | if (curp == pd) |
109 | { |
110 | result = curp; |
111 | break; |
112 | } |
113 | } |
114 | |
115 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
116 | |
117 | return result; |
118 | } |
119 | |
120 | |
121 | /* Deallocate POSIX thread-local-storage. */ |
122 | void |
123 | attribute_hidden |
124 | __nptl_deallocate_tsd (void) |
125 | { |
126 | struct pthread *self = THREAD_SELF; |
127 | |
128 | /* Maybe no data was ever allocated. This happens often so we have |
129 | a flag for this. */ |
130 | if (THREAD_GETMEM (self, specific_used)) |
131 | { |
132 | size_t round; |
133 | size_t cnt; |
134 | |
135 | round = 0; |
136 | do |
137 | { |
138 | size_t idx; |
139 | |
140 | /* So far no new nonzero data entry. */ |
141 | THREAD_SETMEM (self, specific_used, false); |
142 | |
143 | for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt) |
144 | { |
145 | struct pthread_key_data *level2; |
146 | |
147 | level2 = THREAD_GETMEM_NC (self, specific, cnt); |
148 | |
149 | if (level2 != NULL) |
150 | { |
151 | size_t inner; |
152 | |
153 | for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE; |
154 | ++inner, ++idx) |
155 | { |
156 | void *data = level2[inner].data; |
157 | |
158 | if (data != NULL) |
159 | { |
160 | /* Always clear the data. */ |
161 | level2[inner].data = NULL; |
162 | |
163 | /* Make sure the data corresponds to a valid |
164 | key. This test fails if the key was |
165 | deallocated and also if it was |
166 | re-allocated. It is the user's |
167 | responsibility to free the memory in this |
168 | case. */ |
169 | if (level2[inner].seq |
170 | == __pthread_keys[idx].seq |
171 | /* It is not necessary to register a destructor |
172 | function. */ |
173 | && __pthread_keys[idx].destr != NULL) |
174 | /* Call the user-provided destructor. */ |
175 | __pthread_keys[idx].destr (data); |
176 | } |
177 | } |
178 | } |
179 | else |
		idx += PTHREAD_KEY_2NDLEVEL_SIZE;
181 | } |
182 | |
183 | if (THREAD_GETMEM (self, specific_used) == 0) |
184 | /* No data has been modified. */ |
185 | goto just_free; |
186 | } |
187 | /* We only repeat the process a fixed number of times. */ |
188 | while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0)); |
189 | |
190 | /* Just clear the memory of the first block for reuse. */ |
191 | memset (&THREAD_SELF->specific_1stblock, '\0', |
192 | sizeof (self->specific_1stblock)); |
193 | |
194 | just_free: |
195 | /* Free the memory for the other blocks. */ |
196 | for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt) |
197 | { |
198 | struct pthread_key_data *level2; |
199 | |
200 | level2 = THREAD_GETMEM_NC (self, specific, cnt); |
201 | if (level2 != NULL) |
202 | { |
203 | /* The first block is allocated as part of the thread |
204 | descriptor. */ |
205 | free (level2); |
206 | THREAD_SETMEM_NC (self, specific, cnt, NULL); |
207 | } |
208 | } |
209 | |
210 | THREAD_SETMEM (self, specific_used, false); |
211 | } |
212 | } |
213 | |
214 | |
215 | /* Deallocate a thread's stack after optionally making sure the thread |
216 | descriptor is still valid. */ |
217 | void |
218 | internal_function |
219 | __free_tcb (struct pthread *pd) |
220 | { |
221 | /* The thread is exiting now. */ |
222 | if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling, |
223 | TERMINATED_BIT) == 0, 1)) |
224 | { |
225 | /* Remove the descriptor from the list. */ |
226 | if (DEBUGGING_P && __find_in_stack_list (pd) == NULL) |
227 | /* Something is really wrong. The descriptor for a still |
228 | running thread is gone. */ |
229 | abort (); |
230 | |
231 | /* Free TPP data. */ |
232 | if (__glibc_unlikely (pd->tpp != NULL)) |
233 | { |
234 | struct priority_protection_data *tpp = pd->tpp; |
235 | |
236 | pd->tpp = NULL; |
237 | free (tpp); |
238 | } |
239 | |
240 | /* Queue the stack memory block for reuse and exit the process. The |
241 | kernel will signal via writing to the address returned by |
242 | QUEUE-STACK when the stack is available. */ |
243 | __deallocate_stack (pd); |
244 | } |
245 | } |
246 | |
247 | |
248 | /* Local function to start thread and handle cleanup. |
249 | createthread.c defines the macro START_THREAD_DEFN to the |
250 | declaration that its create_thread function will refer to, and |
251 | START_THREAD_SELF to the expression to optimally deliver the new |
252 | thread's THREAD_SELF value. */ |
253 | START_THREAD_DEFN |
254 | { |
255 | struct pthread *pd = START_THREAD_SELF; |
256 | |
257 | #if HP_TIMING_AVAIL |
258 | /* Remember the time when the thread was started. */ |
259 | hp_timing_t now; |
260 | HP_TIMING_NOW (now); |
261 | THREAD_SETMEM (pd, cpuclock_offset, now); |
262 | #endif |
263 | |
264 | /* Initialize resolver state pointer. */ |
265 | __resp = &pd->res; |
266 | |
267 | /* Initialize pointers to locale data. */ |
268 | __ctype_init (); |
269 | |
270 | /* Allow setxid from now onwards. */ |
271 | if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2)) |
272 | futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE); |
273 | |
274 | #ifdef __NR_set_robust_list |
275 | # ifndef __ASSUME_SET_ROBUST_LIST |
276 | if (__set_robust_list_avail >= 0) |
277 | # endif |
278 | { |
279 | INTERNAL_SYSCALL_DECL (err); |
280 | /* This call should never fail because the initial call in init.c |
281 | succeeded. */ |
282 | INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head, |
283 | sizeof (struct robust_list_head)); |
284 | } |
285 | #endif |
286 | |
287 | #ifdef SIGCANCEL |
  /* If the parent was running cancellation handlers while creating
     the thread, the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
291 | if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK)) |
292 | { |
293 | INTERNAL_SYSCALL_DECL (err); |
294 | sigset_t mask; |
295 | __sigemptyset (&mask); |
296 | __sigaddset (&mask, SIGCANCEL); |
297 | (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask, |
298 | NULL, _NSIG / 8); |
299 | } |
300 | #endif |
301 | |
  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp instead.  */
304 | struct pthread_unwind_buf unwind_buf; |
305 | |
306 | /* No previous handlers. */ |
307 | unwind_buf.priv.data.prev = NULL; |
308 | unwind_buf.priv.data.cleanup = NULL; |
309 | |
310 | int not_first_call; |
311 | not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf); |
312 | if (__glibc_likely (! not_first_call)) |
313 | { |
314 | /* Store the new cleanup handler info. */ |
315 | THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf); |
316 | |
317 | if (__glibc_unlikely (pd->stopped_start)) |
318 | { |
319 | int oldtype = CANCEL_ASYNC (); |
320 | |
321 | /* Get the lock the parent locked to force synchronization. */ |
322 | lll_lock (pd->lock, LLL_PRIVATE); |
323 | /* And give it up right away. */ |
324 | lll_unlock (pd->lock, LLL_PRIVATE); |
325 | |
326 | CANCEL_RESET (oldtype); |
327 | } |
328 | |
329 | LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg); |
330 | |
331 | /* Run the code the user provided. */ |
332 | #ifdef CALL_THREAD_FCT |
333 | THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd)); |
334 | #else |
335 | THREAD_SETMEM (pd, result, pd->start_routine (pd->arg)); |
336 | #endif |
337 | } |
338 | |
339 | /* Call destructors for the thread_local TLS variables. */ |
340 | #ifndef SHARED |
341 | if (&__call_tls_dtors != NULL) |
342 | #endif |
343 | __call_tls_dtors (); |
344 | |
345 | /* Run the destructor for the thread-local data. */ |
346 | __nptl_deallocate_tsd (); |
347 | |
348 | /* Clean up any state libc stored in thread-local variables. */ |
349 | __libc_thread_freeres (); |
350 | |
351 | /* If this is the last thread we terminate the process now. We |
352 | do not notify the debugger, it might just irritate it if there |
353 | is no thread left. */ |
354 | if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads))) |
355 | /* This was the last thread. */ |
356 | exit (0); |
357 | |
358 | /* Report the death of the thread if this is wanted. */ |
359 | if (__glibc_unlikely (pd->report_events)) |
360 | { |
      /* See whether TD_DEATH is set in any of the masks.  */
362 | const int idx = __td_eventword (TD_DEATH); |
363 | const uint32_t mask = __td_eventmask (TD_DEATH); |
364 | |
365 | if ((mask & (__nptl_threads_events.event_bits[idx] |
366 | | pd->eventbuf.eventmask.event_bits[idx])) != 0) |
367 | { |
368 | /* Yep, we have to signal the death. Add the descriptor to |
369 | the list but only if it is not already on it. */ |
370 | if (pd->nextevent == NULL) |
371 | { |
372 | pd->eventbuf.eventnum = TD_DEATH; |
373 | pd->eventbuf.eventdata = pd; |
374 | |
375 | do |
376 | pd->nextevent = __nptl_last_event; |
377 | while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event, |
378 | pd, pd->nextevent)); |
379 | } |
380 | |
381 | /* Now call the function to signal the event. */ |
382 | __nptl_death_event (); |
383 | } |
384 | } |
385 | |
386 | /* The thread is exiting now. Don't set this bit until after we've hit |
387 | the event-reporting breakpoint, so that td_thr_get_info on us while at |
388 | the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */ |
389 | atomic_bit_set (&pd->cancelhandling, EXITING_BIT); |
390 | |
391 | #ifndef __ASSUME_SET_ROBUST_LIST |
392 | /* If this thread has any robust mutexes locked, handle them now. */ |
393 | # ifdef __PTHREAD_MUTEX_HAVE_PREV |
394 | void *robust = pd->robust_head.list; |
395 | # else |
396 | __pthread_slist_t *robust = pd->robust_list.__next; |
397 | # endif |
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since kernel support for them is even more recent.  */
401 | if (__set_robust_list_avail < 0 |
402 | && __builtin_expect (robust != (void *) &pd->robust_head, 0)) |
403 | { |
404 | do |
405 | { |
406 | struct __pthread_mutex_s *this = (struct __pthread_mutex_s *) |
407 | ((char *) robust - offsetof (struct __pthread_mutex_s, |
408 | __list.__next)); |
409 | robust = *((void **) robust); |
410 | |
411 | # ifdef __PTHREAD_MUTEX_HAVE_PREV |
412 | this->__list.__prev = NULL; |
413 | # endif |
414 | this->__list.__next = NULL; |
415 | |
416 | atomic_or (&this->__lock, FUTEX_OWNER_DIED); |
417 | futex_wake ((unsigned int *) &this->__lock, 1, |
418 | /* XYZ */ FUTEX_SHARED); |
419 | } |
420 | while (robust != (void *) &pd->robust_head); |
421 | } |
422 | #endif |
423 | |
424 | /* Mark the memory of the stack as usable to the kernel. We free |
425 | everything except for the space used for the TCB itself. */ |
426 | size_t pagesize_m1 = __getpagesize () - 1; |
427 | #ifdef _STACK_GROWS_DOWN |
428 | char *sp = CURRENT_STACK_FRAME; |
429 | size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1; |
430 | assert (freesize < pd->stackblock_size); |
431 | if (freesize > PTHREAD_STACK_MIN) |
432 | __madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED); |
433 | #else |
434 | /* Page aligned start of memory to free (higher than or equal |
435 | to current sp plus the minimum stack size). */ |
436 | void *freeblock = (void*)((size_t)(CURRENT_STACK_FRAME |
437 | + PTHREAD_STACK_MIN |
438 | + pagesize_m1) |
439 | & ~pagesize_m1); |
440 | char *free_end = (char *) (((uintptr_t) pd - pd->guardsize) & ~pagesize_m1); |
441 | /* Is there any space to free? */ |
442 | if (free_end > (char *)freeblock) |
443 | { |
444 | size_t freesize = (size_t)(free_end - (char *)freeblock); |
445 | assert (freesize < pd->stackblock_size); |
446 | __madvise (freeblock, freesize, MADV_DONTNEED); |
447 | } |
448 | #endif |
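
  /* Worked example for the _STACK_GROWS_DOWN branch above, with a made-up
     layout: assume a 4 KiB page size (pagesize_m1 == 0xfff),
     pd->stackblock == 0x7f0000000000 and sp == 0x7f00007f8430.  Then
     freesize == (0x7f8430 & ~0xfff) == 0x7f8000, and with a 16 KiB
     PTHREAD_STACK_MIN the __madvise call releases
     [stackblock, stackblock + 0x7f8000 - 0x4000), i.e. everything below
     the current frame except a PTHREAD_STACK_MIN safety margin.  */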
449 | |
450 | /* If the thread is detached free the TCB. */ |
451 | if (IS_DETACHED (pd)) |
452 | /* Free the TCB. */ |
453 | __free_tcb (pd); |
454 | else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK)) |
455 | { |
456 | /* Some other thread might call any of the setXid functions and expect |
457 | us to reply. In this case wait until we did that. */ |
458 | do |
459 | /* XXX This differs from the typical futex_wait_simple pattern in that |
460 | the futex_wait condition (setxid_futex) is different from the |
461 | condition used in the surrounding loop (cancelhandling). We need |
462 | to check and document why this is correct. */ |
463 | futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE); |
464 | while (pd->cancelhandling & SETXID_BITMASK); |
465 | |
466 | /* Reset the value so that the stack can be reused. */ |
467 | pd->setxid_futex = 0; |
468 | } |
469 | |
470 | /* We cannot call '_exit' here. '_exit' will terminate the process. |
471 | |
472 | The 'exit' implementation in the kernel will signal when the |
473 | process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID |
474 | flag. The 'tid' field in the TCB will be set to zero. |
475 | |
     The exit code is zero since, in case all threads exit by calling
     'pthread_exit', the exit status must be 0 (zero).  */
478 | __exit_thread (); |
479 | |
480 | /* NOTREACHED */ |
481 | } |
482 | |
483 | |
484 | /* Return true iff obliged to report TD_CREATE events. */ |
485 | static bool |
486 | report_thread_creation (struct pthread *pd) |
487 | { |
488 | if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events))) |
489 | { |
490 | /* The parent thread is supposed to report events. |
491 | Check whether the TD_CREATE event is needed, too. */ |
492 | const size_t idx = __td_eventword (TD_CREATE); |
493 | const uint32_t mask = __td_eventmask (TD_CREATE); |
494 | |
495 | return ((mask & (__nptl_threads_events.event_bits[idx] |
496 | | pd->eventbuf.eventmask.event_bits[idx])) != 0); |
497 | } |
498 | return false; |
499 | } |
500 | |
501 | |
502 | int |
503 | __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr, |
504 | void *(*start_routine) (void *), void *arg) |
505 | { |
506 | STACK_VARIABLES; |
507 | |
508 | const struct pthread_attr *iattr = (struct pthread_attr *) attr; |
509 | struct pthread_attr default_attr; |
510 | bool free_cpuset = false; |
511 | if (iattr == NULL) |
512 | { |
513 | lll_lock (__default_pthread_attr_lock, LLL_PRIVATE); |
514 | default_attr = __default_pthread_attr; |
515 | size_t cpusetsize = default_attr.cpusetsize; |
516 | if (cpusetsize > 0) |
517 | { |
518 | cpu_set_t *cpuset; |
519 | if (__glibc_likely (__libc_use_alloca (cpusetsize))) |
520 | cpuset = __alloca (cpusetsize); |
521 | else |
522 | { |
523 | cpuset = malloc (cpusetsize); |
524 | if (cpuset == NULL) |
525 | { |
526 | lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE); |
527 | return ENOMEM; |
528 | } |
529 | free_cpuset = true; |
530 | } |
531 | memcpy (cpuset, default_attr.cpuset, cpusetsize); |
532 | default_attr.cpuset = cpuset; |
533 | } |
534 | lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE); |
535 | iattr = &default_attr; |
536 | } |
537 | |
538 | struct pthread *pd = NULL; |
539 | int err = ALLOCATE_STACK (iattr, &pd); |
540 | int retval = 0; |
541 | |
542 | if (__glibc_unlikely (err != 0)) |
543 | /* Something went wrong. Maybe a parameter of the attributes is |
544 | invalid or we could not allocate memory. Note we have to |
545 | translate error codes. */ |
546 | { |
547 | retval = err == ENOMEM ? EAGAIN : err; |
548 | goto out; |
549 | } |
550 | |
551 | |
552 | /* Initialize the TCB. All initializations with zero should be |
553 | performed in 'get_cached_stack'. This way we avoid doing this if |
     the stack was freshly allocated with 'mmap'.  */
555 | |
556 | #if TLS_TCB_AT_TP |
557 | /* Reference to the TCB itself. */ |
558 | pd->header.self = pd; |
559 | |
560 | /* Self-reference for TLS. */ |
561 | pd->header.tcb = pd; |
562 | #endif |
563 | |
564 | /* Store the address of the start routine and the parameter. Since |
565 | we do not start the function directly the stillborn thread will |
566 | get the information from its thread descriptor. */ |
567 | pd->start_routine = start_routine; |
568 | pd->arg = arg; |
569 | |
570 | /* Copy the thread attribute flags. */ |
571 | struct pthread *self = THREAD_SELF; |
572 | pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) |
573 | | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))); |
574 | |
575 | /* Initialize the field for the ID of the thread which is waiting |
576 | for us. This is a self-reference in case the thread is created |
577 | detached. */ |
578 | pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL; |
579 | |
580 | /* The debug events are inherited from the parent. */ |
581 | pd->eventbuf = self->eventbuf; |
582 | |
583 | |
584 | /* Copy the parent's scheduling parameters. The flags will say what |
585 | is valid and what is not. */ |
586 | pd->schedpolicy = self->schedpolicy; |
587 | pd->schedparam = self->schedparam; |
588 | |
589 | /* Copy the stack guard canary. */ |
590 | #ifdef THREAD_COPY_STACK_GUARD |
591 | THREAD_COPY_STACK_GUARD (pd); |
592 | #endif |
593 | |
594 | /* Copy the pointer guard value. */ |
595 | #ifdef THREAD_COPY_POINTER_GUARD |
596 | THREAD_COPY_POINTER_GUARD (pd); |
597 | #endif |
598 | |
599 | /* Verify the sysinfo bits were copied in allocate_stack if needed. */ |
600 | #ifdef NEED_DL_SYSINFO |
601 | CHECK_THREAD_SYSINFO (pd); |
602 | #endif |
603 | |
604 | /* Inform start_thread (above) about cancellation state that might |
605 | translate into inherited signal state. */ |
606 | pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling); |
607 | |
608 | /* Determine scheduling parameters for the thread. */ |
609 | if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0) |
610 | && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0) |
611 | { |
612 | /* Use the scheduling parameters the user provided. */ |
613 | if (iattr->flags & ATTR_FLAG_POLICY_SET) |
614 | { |
615 | pd->schedpolicy = iattr->schedpolicy; |
616 | pd->flags |= ATTR_FLAG_POLICY_SET; |
617 | } |
618 | if (iattr->flags & ATTR_FLAG_SCHED_SET) |
619 | { |
620 | /* The values were validated in pthread_attr_setschedparam. */ |
621 | pd->schedparam = iattr->schedparam; |
622 | pd->flags |= ATTR_FLAG_SCHED_SET; |
623 | } |
624 | |
625 | if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) |
626 | != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) |
627 | collect_default_sched (pd); |
628 | } |
629 | |
630 | /* Pass the descriptor to the caller. */ |
631 | *newthread = (pthread_t) pd; |
632 | |
633 | LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg); |
634 | |
635 | /* One more thread. We cannot have the thread do this itself, since it |
636 | might exist but not have been scheduled yet by the time we've returned |
637 | and need to check the value to behave correctly. We must do it before |
638 | creating the thread, in case it does get scheduled first and then |
639 | might mistakenly think it was the only thread. In the failure case, |
640 | we momentarily store a false value; this doesn't matter because there |
641 | is no kosher thing a signal handler interrupting us right here can do |
642 | that cares whether the thread count is correct. */ |
643 | atomic_increment (&__nptl_nthreads); |
644 | |
645 | bool thread_ran = false; |
646 | |
647 | /* Start the thread. */ |
648 | if (__glibc_unlikely (report_thread_creation (pd))) |
649 | { |
650 | /* Create the thread. We always create the thread stopped |
651 | so that it does not get far before we tell the debugger. */ |
652 | retval = create_thread (pd, iattr, true, STACK_VARIABLES_ARGS, |
653 | &thread_ran); |
654 | if (retval == 0) |
655 | { |
656 | /* create_thread should have set this so that the logic below can |
657 | test it. */ |
658 | assert (pd->stopped_start); |
659 | |
660 | /* Now fill in the information about the new thread in |
661 | the newly created thread's data structure. We cannot let |
662 | the new thread do this since we don't know whether it was |
663 | already scheduled when we send the event. */ |
664 | pd->eventbuf.eventnum = TD_CREATE; |
665 | pd->eventbuf.eventdata = pd; |
666 | |
667 | /* Enqueue the descriptor. */ |
668 | do |
669 | pd->nextevent = __nptl_last_event; |
670 | while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event, |
671 | pd, pd->nextevent) |
672 | != 0); |
673 | |
674 | /* Now call the function which signals the event. */ |
675 | __nptl_create_event (); |
676 | } |
677 | } |
678 | else |
679 | retval = create_thread (pd, iattr, false, STACK_VARIABLES_ARGS, |
680 | &thread_ran); |
681 | |
682 | if (__glibc_unlikely (retval != 0)) |
683 | { |
684 | /* If thread creation "failed", that might mean that the thread got |
685 | created and ran a little--short of running user code--but then |
686 | create_thread cancelled it. In that case, the thread will do all |
687 | its own cleanup just like a normal thread exit after a successful |
688 | creation would do. */ |
689 | |
690 | if (thread_ran) |
691 | assert (pd->stopped_start); |
692 | else |
693 | { |
694 | /* Oops, we lied for a second. */ |
695 | atomic_decrement (&__nptl_nthreads); |
696 | |
697 | /* Perhaps a thread wants to change the IDs and is waiting for this |
698 | stillborn thread. */ |
699 | if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) |
700 | == -2)) |
701 | futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE); |
702 | |
703 | /* Free the resources. */ |
704 | __deallocate_stack (pd); |
705 | } |
706 | |
707 | /* We have to translate error codes. */ |
708 | if (retval == ENOMEM) |
709 | retval = EAGAIN; |
710 | } |
711 | else |
712 | { |
713 | if (pd->stopped_start) |
714 | /* The thread blocked on this lock either because we're doing TD_CREATE |
715 | event reporting, or for some other reason that create_thread chose. |
716 | Now let it run free. */ |
717 | lll_unlock (pd->lock, LLL_PRIVATE); |
718 | |
719 | /* We now have for sure more than one thread. The main thread might |
720 | not yet have the flag set. No need to set the global variable |
721 | again if this is what we use. */ |
722 | THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1); |
723 | } |
724 | |
725 | out: |
726 | if (__glibc_unlikely (free_cpuset)) |
727 | free (default_attr.cpuset); |
728 | |
729 | return retval; |
730 | } |
731 | versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1); |
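
/* Caller-side example of the interface implemented above (plain
   application code, shown for illustration only; the worker function and
   its argument are invented).  Note that pthread_create returns 0 or an
   errno value such as EAGAIN, never -1/errno:

     #include <pthread.h>
     #include <stdio.h>

     static void *worker (void *arg)
     {
       printf ("hello from %s\n", (const char *) arg);
       return NULL;
     }

     int main (void)
     {
       pthread_t t;
       int err = pthread_create (&t, NULL, worker, (void *) "worker");
       if (err != 0)
         return 1;
       pthread_join (t, NULL);
       return 0;
     }
   */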
732 | |
733 | |
734 | #if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1) |
735 | int |
736 | __pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr, |
737 | void *(*start_routine) (void *), void *arg) |
738 | { |
739 | /* The ATTR attribute is not really of type `pthread_attr_t *'. It has |
740 | the old size and access to the new members might crash the program. |
741 | We convert the struct now. */ |
742 | struct pthread_attr new_attr; |
743 | |
744 | if (attr != NULL) |
745 | { |
746 | struct pthread_attr *iattr = (struct pthread_attr *) attr; |
747 | size_t ps = __getpagesize (); |
748 | |
749 | /* Copy values from the user-provided attributes. */ |
750 | new_attr.schedparam = iattr->schedparam; |
751 | new_attr.schedpolicy = iattr->schedpolicy; |
752 | new_attr.flags = iattr->flags; |
753 | |
754 | /* Fill in default values for the fields not present in the old |
755 | implementation. */ |
756 | new_attr.guardsize = ps; |
757 | new_attr.stackaddr = NULL; |
758 | new_attr.stacksize = 0; |
759 | new_attr.cpuset = NULL; |
760 | |
761 | /* We will pass this value on to the real implementation. */ |
762 | attr = (pthread_attr_t *) &new_attr; |
763 | } |
764 | |
765 | return __pthread_create_2_1 (newthread, attr, start_routine, arg); |
766 | } |
767 | compat_symbol (libpthread, __pthread_create_2_0, pthread_create, |
768 | GLIBC_2_0); |
769 | #endif |
770 | |
771 | /* Information for libthread_db. */ |
772 | |
773 | #include "../nptl_db/db_info.c" |
774 | |
/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
777 | PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock) |
778 | PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock) |
779 | PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock) |
780 | |
781 | PTHREAD_STATIC_FN_REQUIRE (pthread_once) |
782 | PTHREAD_STATIC_FN_REQUIRE (pthread_cancel) |
783 | |
784 | PTHREAD_STATIC_FN_REQUIRE (pthread_key_create) |
785 | PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete) |
786 | PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific) |
787 | PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific) |
788 | |