/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-diag.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <default-sched.h>
#include <futex-internal.h>
#include <tls-setup.h>
#include "libioP.h"
#include <sys/single_threaded.h>
#include <version.h>
#include <clone_internal.h>

#include <shlib-compat.h>

#include <stap-probe.h>


/* Globally enabled events.  */
td_thr_events_t __nptl_threads_events;
libc_hidden_proto (__nptl_threads_events)
libc_hidden_data_def (__nptl_threads_events)

/* Pointer to descriptor with the last event.  */
struct pthread *__nptl_last_event;
libc_hidden_proto (__nptl_last_event)
libc_hidden_data_def (__nptl_last_event)

#ifdef SHARED
/* This variable is used to access _rtld_global from libthread_db.  If
   GDB loads libpthread before ld.so, it is not possible to resolve
   _rtld_global directly during libpthread initialization.  */
struct rtld_global *__nptl_rtld_global = &_rtld_global;
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
const char __nptl_version[] = VERSION;

/* This performs the initialization necessary when going from
   single-threaded to multi-threaded mode for the first time.  */
static void
late_init (void)
{
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

  /* Install the handler to change the threads' uid/gid.  Use
     SA_ONSTACK because the signal may be sent to threads that are
     running with custom stacks.  (This is less likely for
     SIGCANCEL.)  */
  sa.sa_sigaction = __nptl_setxid_sighandler;
  sa.sa_flags = SA_ONSTACK | SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  INTERNAL_SYSCALL_CALL (rt_sigprocmask, SIG_UNBLOCK, &sa.sa_mask,
                         NULL, __NSIG_BYTES);
}

/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* CONCURRENCY NOTES:

   Understanding who is the owner of the 'struct pthread' or 'PD'
   (refers to the value of the 'struct pthread *pd' function argument)
   is critically important in determining exactly which operations are
   allowed and which are not and when, particularly when it comes to the
   implementation of pthread_create, pthread_join, pthread_detach, and
   other functions which all operate on PD.

   The owner of PD is responsible for freeing the final resources
   associated with PD, and may examine the memory underlying PD at any
   point in time until it frees it back to the OS or releases it for
   reuse by the runtime.

   The thread which calls pthread_create is called the creating thread.
   The creating thread begins as the owner of PD.

   During startup the new thread may examine PD in coordination with the
   owner thread (which may be itself).

   The four cases of ownership transfer are:

   (1) Ownership of PD is released to the process (all threads may use it)
       after the new thread starts in a joinable state
       i.e. pthread_create returns a usable pthread_t.

   (2) Ownership of PD is released to the new thread starting in a detached
       state.

   (3) Ownership of PD is dynamically released to a running thread via
       pthread_detach.

   (4) Ownership of PD is acquired by the thread which calls pthread_join.

   Implementation notes:

   The PD->stopped_start and thread_ran variables are used to determine
   exactly which of the four ownership states we are in and therefore
   what actions can be taken.  For example after (2) we cannot read from
   or write to PD anymore since the thread may no longer exist and the
   memory may be unmapped.

   It is important to point out that PD->lock is used first much like a
   one-shot semaphore and subsequently as a mutex.  The lock is taken in
   the parent to force the child to wait, and then the child releases
   the lock.  However, this semaphore-like effect is used only for
   synchronizing the parent and child.  After startup the lock is used
   like a mutex to create a critical section during which a single owner
   modifies the thread parameters.

   The most complicated cases happen during thread startup:

   (a) If the created thread is in a detached (PTHREAD_CREATE_DETACHED),
       or joinable (default PTHREAD_CREATE_JOINABLE) state and
       STOPPED_START is true, then the creating thread has ownership of
       PD until the PD->lock is released by pthread_create.  If any
       errors occur we are in states (c) or (d) below.

   (b) If the created thread is in a detached state
       (PTHREAD_CREATE_DETACHED), and STOPPED_START is false, then the
       creating thread has ownership of PD until it invokes the OS
       kernel's thread creation routine.  If this routine returns
       without error, then the created thread owns PD; otherwise, see
       (c) or (d) below.

   (c) If either a joinable or detached thread setup failed and THREAD_RAN
       is true, then the creating thread releases ownership to the new
       thread, the created thread sees the failed setup through the
       PD->setup_failed member, releases the PD ownership, and exits.  The
       creating thread is then responsible for cleaning up the allocated
       resources.  THREAD_RAN is local to the creating thread and indicates
       whether thread creation or setup has failed.

   (d) If the thread creation failed and THREAD_RAN is false (meaning
       ARCH_CLONE has failed), then the creating thread retains ownership
       of PD and must clean up the allocated resources.  No waiting for the
       new thread is required because it never started.

   The nptl_db interface:

   The interface with nptl_db requires that we enqueue PD into a linked
   list and then call a function which the debugger will trap.  The PD
   will then be dequeued and control returned to the thread.  The caller
   at the time must have ownership of PD and such ownership remains
   after control returns to the thread.  The enqueued PD is removed from
   the linked list by the nptl_db callback td_thr_event_getmsg.  The
   debugger must ensure that the thread does not resume execution,
   otherwise ownership of PD may be lost and examining PD will not be
   possible.

   Note that the GNU Debugger, as of commit
   c2c2a31fdb228d41ce3db62b268efea04bd39c18 (December 10th 2015), no
   longer uses td_thr_event_getmsg and several other related nptl_db
   interfaces.  The principal reason for this is that nptl_db does not
   support non-stop mode, where other threads can run concurrently and
   modify runtime structures currently in use by the debugger and the
   nptl_db interface.

   Axioms:

   * The create_thread function can never set stopped_start to false.
   * The created thread can read stopped_start but never write to it.
   * The variable thread_ran is set some time after the OS thread
     creation routine returns; how much later is unspecified, but it
     should be as soon as possible.

*/
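
/* Illustrative sketch (not part of the implementation): the ownership
   cases above map onto ordinary application-level call sequences.  For
   a joinable thread, case (1) applies once pthread_create returns and
   case (4) applies when some thread joins it:

     pthread_t t;
     if (pthread_create (&t, NULL, worker, arg) == 0)   <- case (1)
       pthread_join (t, NULL);                          <- case (4)

   For a detached thread (attribute set to PTHREAD_CREATE_DETACHED, or a
   later pthread_detach), cases (2) or (3) apply and the created thread
   itself frees its resources on exit.  'worker' and 'arg' are
   hypothetical application names used only for this sketch.  */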

/* CREATE THREAD NOTES:

   create_thread must initialize PD->stopped_start.  It should be true
   if the STOPPED_START parameter is true, or if create_thread needs the
   new thread to synchronize at startup for some other implementation
   reason.  If STOPPED_START will be true, then create_thread is obliged
   to lock PD->lock before starting the thread.  Then pthread_create
   unlocks PD->lock which synchronizes-with create_thread in the
   child thread which does an acquire/release of PD->lock as the last
   action before calling the user entry point.  The goal of all of this
   is to ensure that the required initial thread attributes are applied
   (by the creating thread) before the new thread runs user code.  Note
   that the functions pthread_getschedparam, pthread_setschedparam,
   pthread_setschedprio, __pthread_tpp_change_priority, and
   __pthread_current_priority reuse the same lock, PD->lock, for a
   similar purpose, e.g. synchronizing the setting of similar thread
   attributes.  These functions are never called before the thread is
   created, so they do not participate in startup synchronization, but
   given that the lock is already present and in the unlocked state,
   reusing it saves space.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up but before calling
   the user code (*PD->start_routine).  */

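/* A rough timeline of the stopped-start handshake described above (a
   sketch for orientation only; the authoritative steps are in
   create_thread, start_thread and __pthread_create_2_1 below):

     creating thread                       new thread
     ---------------                       ----------
     lll_lock (pd->lock)
     clone / __clone_internal      ----->  start_thread
     apply affinity/scheduling             lll_lock (pd->lock)  [blocks]
     lll_unlock (pd->lock)         ----->  lock acquired, released again
                                           run user start routine

   If applying the attributes fails, the creating thread instead sets
   pd->setup_failed before unlocking, and the new thread exits without
   running user code (case (c) above).  */
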
static int _Noreturn start_thread (void *arg);

static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool *stopped_start, void *stackaddr,
                          size_t stacksize, bool *thread_ran)
{
  /* Determine whether the newly created thread has to be started
     stopped since we have to set the scheduling parameters or set the
     affinity.  */
  bool need_setaffinity = (attr != NULL && attr->extension != NULL
                           && attr->extension->cpuset != 0);
  if (attr != NULL
      && (__glibc_unlikely (need_setaffinity)
          || __glibc_unlikely ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)))
    *stopped_start = true;

  pd->stopped_start = *stopped_start;
  if (__glibc_unlikely (*stopped_start))
    lll_lock (pd->lock, LLL_PRIVATE);

  /* We rely heavily on various flags the CLONE function understands:

     CLONE_VM, CLONE_FS, CLONE_FILES
        These flags select semantics with shared address space and
        file descriptors according to what POSIX requires.

     CLONE_SIGHAND, CLONE_THREAD
        These flags select the POSIX signal semantics and various
        other kinds of sharing (itimers, POSIX timers, etc.).

     CLONE_SETTLS
        The sixth parameter to CLONE determines the TLS area for the
        new thread.

     CLONE_PARENT_SETTID
        The kernel writes the thread ID of the newly created thread
        into the location pointed to by the fifth parameter to CLONE.

        Note that it would be semantically equivalent to use
        CLONE_CHILD_SETTID but it would be more expensive in the kernel.

     CLONE_CHILD_CLEARTID
        The kernel clears the thread ID of a thread that has called
        sys_exit() in the location pointed to by the seventh parameter
        to CLONE.

     The termination signal is chosen to be zero which means no signal
     is sent.  */
  const int clone_flags = (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SYSVSEM
                           | CLONE_SIGHAND | CLONE_THREAD
                           | CLONE_SETTLS | CLONE_PARENT_SETTID
                           | CLONE_CHILD_CLEARTID
                           | 0);

  TLS_DEFINE_INIT_TP (tp, pd);

  struct clone_args args =
    {
      .flags = clone_flags,
      .pidfd = (uintptr_t) &pd->tid,
      .parent_tid = (uintptr_t) &pd->tid,
      .child_tid = (uintptr_t) &pd->tid,
      .stack = (uintptr_t) stackaddr,
      .stack_size = stacksize,
      .tls = (uintptr_t) tp,
    };
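
  /* Descriptive note: parent_tid and child_tid both point at pd->tid.
     With CLONE_PARENT_SETTID the kernel stores the new thread's TID
     there before clone returns, and with CLONE_CHILD_CLEARTID it
     clears the field and performs a futex wake when the thread exits.
     The failure path in __pthread_create_2_1 below and pthread_join
     rely on this TID futex to wait until the thread is really gone.  */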
  int ret = __clone_internal (&args, &start_thread, pd);
  if (__glibc_unlikely (ret == -1))
    return errno;

  /* It's started now, so if we fail below, we'll have to let it clean itself
     up.  */
  *thread_ran = true;

  /* Now we can set the scheduling parameters, etc.  */
  if (attr != NULL)
    {
      /* Set the affinity mask if necessary.  */
      if (need_setaffinity)
        {
          assert (*stopped_start);

          int res = INTERNAL_SYSCALL_CALL (sched_setaffinity, pd->tid,
                                           attr->extension->cpusetsize,
                                           attr->extension->cpuset);
          if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res)))
            return INTERNAL_SYSCALL_ERRNO (res);
        }

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
        {
          assert (*stopped_start);

          int res = INTERNAL_SYSCALL_CALL (sched_setscheduler, pd->tid,
                                           pd->schedpolicy, &pd->schedparam);
          if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res)))
            return INTERNAL_SYSCALL_ERRNO (res);
        }
    }

  return 0;
}

/* Local function to start thread and handle cleanup.  */
static int _Noreturn
start_thread (void *arg)
{
  struct pthread *pd = arg;

  /* We are either in (a) or (b), and in either case we either own PD already
     (2) or are about to own PD (1), and so our only restriction would be that
     we can't free PD until we know we have ownership (see CONCURRENCY NOTES
     above).  */
  if (pd->stopped_start)
    {
      bool setup_failed = false;

      /* Get the lock the parent locked to force synchronization.  */
      lll_lock (pd->lock, LLL_PRIVATE);

      /* We have ownership of PD now.  For detached threads with a setup
         failure we mark PD as joinable so the creating thread can join it
         synchronously and free any resources before returning to the
         pthread_create caller.  */
      setup_failed = pd->setup_failed == 1;
      if (setup_failed)
        pd->joinid = NULL;

      /* And give it up right away.  */
      lll_unlock (pd->lock, LLL_PRIVATE);

      if (setup_failed)
        goto out;
    }

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

#ifndef __ASSUME_SET_ROBUST_LIST
  if (__nptl_set_robust_list_avail)
#endif
    {
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                             sizeof (struct robust_list_head));
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  int not_first_call;
  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* This call results in a -Wstringop-overflow warning because struct
     pthread_unwind_buf is smaller than jmp_buf.  setjmp and longjmp
     do not use anything beyond the common prefix (they never access
     the saved signal mask), so that is a false positive.  */
  DIAG_IGNORE_NEEDS_COMMENT (11, "-Wstringop-overflow=");
#endif
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  DIAG_POP_NEEDS_COMMENT;

  /* No previous handlers.  NB: This must be done after setjmp since the
     private space in the unwind jump buffer may overlap space used by
     setjmp to store extra architecture-specific information which is
     never used by the cancellation-specific __libc_unwind_longjmp.

     The private space is allowed to overlap because the unwinder never
     has to return through any of the jumped-to call frames, and thus
     only a minimum amount of saved data need be stored, and for example,
     need not include the process signal mask information.  This is all
     an optimization to reduce stack usage when pushing cancellation
     handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  __libc_signal_restore_set (&pd->sigmask);

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
      void *ret;
      if (pd->c11)
        {
          /* The function pointer of the c11 thread start is cast to an
             incorrect type on the __pthread_create_2_1 call; however, it is
             cast back to the correct one here, so the call behavior is
             well-defined (it is assumed that pointers to void are able to
             represent all values of int).  */
          int (*start)(void*) = (int (*) (void*)) pd->start_routine;
          ret = (void*) (uintptr_t) start (pd->arg);
        }
      else
        ret = pd->start_routine (pd->arg);
      THREAD_SETMEM (pd, result, ret);
    }

  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_death_event ();
        }
    }
478
479 /* The thread is exiting now. Don't set this bit until after we've hit
480 the event-reporting breakpoint, so that td_thr_get_info on us while at
481 the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */
482 atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
483
484 if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
485 /* This was the last thread. */
486 exit (0);
487
488#ifndef __ASSUME_SET_ROBUST_LIST
489 /* If this thread has any robust mutexes locked, handle them now. */
490# if __PTHREAD_MUTEX_HAVE_PREV
491 void *robust = pd->robust_head.list;
492# else
493 __pthread_slist_t *robust = pd->robust_list.__next;
494# endif
495 /* We let the kernel do the notification if it is able to do so.
496 If we have to do it here there for sure are no PI mutexes involved
497 since the kernel support for them is even more recent. */
498 if (!__nptl_set_robust_list_avail
499 && __builtin_expect (robust != (void *) &pd->robust_head, 0))
500 {
501 do
502 {
503 struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
504 ((char *) robust - offsetof (struct __pthread_mutex_s,
505 __list.__next));
506 robust = *((void **) robust);
507
508# if __PTHREAD_MUTEX_HAVE_PREV
509 this->__list.__prev = NULL;
510# endif
511 this->__list.__next = NULL;
512
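          /* Descriptive note: marking the lock word with FUTEX_OWNER_DIED
             and waking one waiter mirrors what the kernel's robust-list
             handling would do for us when set_robust_list is available.  */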
          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          futex_wake ((unsigned int *) &this->__lock, 1,
                      /* XYZ */ FUTEX_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  if (!pd->user_stack)
    advise_stack_range (pd->stackblock, pd->stackblock_size, (uintptr_t) pd,
                        pd->guardsize);

  if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we have done that.  */
      do
        /* XXX This differs from the typical futex_wait_simple pattern in that
           the futex_wait condition (setxid_futex) is different from the
           condition used in the surrounding loop (cancelhandling).  We need
           to check and document why this is correct.  */
        futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __nptl_free_tcb (pd);

out:
  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  while (1)
    INTERNAL_SYSCALL_CALL (exit, 0);

  /* NOTREACHED */
}


/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }
  return false;
}


int
__pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  void *stackaddr = NULL;
  size_t stacksize = 0;

  /* Avoid a data race in the multi-threaded case, and call the
     deferred initialization only once.  */
  if (__libc_single_threaded)
    {
      late_init ();
      __libc_single_threaded = 0;
    }

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  union pthread_attr_transparent default_attr;
  bool destroy_default_attr = false;
  bool c11 = (attr == ATTR_C11_THREAD);
  if (iattr == NULL || c11)
    {
      int ret = __pthread_getattr_default_np (&default_attr.external);
      if (ret != 0)
        return ret;
      destroy_default_attr = true;
      iattr = &default_attr.internal;
    }

  struct pthread *pd = NULL;
  int err = allocate_stack (iattr, &pd, &stackaddr, &stacksize);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;
  pd->c11 = c11;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Set up the tcbhead.  */
  tls_setup_tcbhead (pd);

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }

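  /* Descriptive note: stdio avoids stream locking while the process is
     single-threaded; with a second thread about to exist, _IO_enable_locks
     turns locking back on for streams created before the first
     pthread_create call.  */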
  if (__glibc_unlikely (__nptl_nthreads == 1))
    _IO_enable_locks ();

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  /* Our local values of stopped_start and thread_ran can be accessed at
     any time.  The PD->stopped_start may only be accessed if we have
     ownership of PD (see CONCURRENCY NOTES above).  */
  bool stopped_start = false; bool thread_ran = false;

  /* Block all signals, so that the new thread starts out with
     signals disabled.  This avoids race conditions in the thread
     startup.  */
  sigset_t original_sigmask;
  __libc_signal_block_all (&original_sigmask);

  if (iattr->extension != NULL && iattr->extension->sigmask_set)
    /* Use the signal mask in the attribute.  The internal signals
       have already been filtered by the public
       pthread_attr_setsigmask_np interface.  */
    pd->sigmask = iattr->extension->sigmask;
  else
    {
      /* Conceptually, the new thread needs to inherit the signal mask
         of this thread.  Therefore, it needs to restore the saved
         signal mask of this thread, so save it in the startup
         information.  */
      pd->sigmask = original_sigmask;
      /* Reset the cancellation signal mask in case this thread is
         running cancellation.  */
      __sigdelset (&pd->sigmask, SIGCANCEL);
    }

  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      stopped_start = true;

      /* We always create the thread stopped at startup so we can
         notify the debugger.  */
      retval = create_thread (pd, iattr, &stopped_start, stackaddr,
                              stacksize, &thread_ran);
      if (retval == 0)
        {
          /* We retain ownership of PD until (a) (see CONCURRENCY NOTES
             above).  */

          /* Assert stopped_start is true in both our local copy and the
             PD copy.  */
          assert (stopped_start);
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent)
                 != 0);

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, &stopped_start, stackaddr,
                            stacksize, &thread_ran);

  /* Return to the previous signal mask, after creating the new
     thread.  */
  __libc_signal_restore_set (&original_sigmask);

  if (__glibc_unlikely (retval != 0))
    {
      if (thread_ran)
        /* State (c): we do not have PD ownership (see CONCURRENCY NOTES
           above).  We can assert that STOPPED_START must have been true
           because thread creation didn't fail, but thread attribute setting
           did.  */
        {
          assert (stopped_start);
          /* Signal the created thread to release PD ownership and exit
             early so it can be joined.  */
          pd->setup_failed = 1;
          lll_unlock (pd->lock, LLL_PRIVATE);

          /* Similar to pthread_join, but since thread creation has failed at
             startup there is no need to handle all the steps.  */
          pid_t tid;
          while ((tid = atomic_load_acquire (&pd->tid)) != 0)
            __futex_abstimed_wait_cancelable64 ((unsigned int *) &pd->tid,
                                                tid, 0, NULL, LLL_SHARED);
        }

      /* State (c) or (d) and we have ownership of PD (see CONCURRENCY
         NOTES above).  */

      /* Oops, we lied for a second.  */
      atomic_decrement (&__nptl_nthreads);

      /* Free the resources.  */
      __nptl_deallocate_stack (pd);

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      /* We don't know if we have PD ownership.  Once we check the local
         stopped_start we'll know if we're in state (a) or (b) (see
         CONCURRENCY NOTES above).  */
      if (stopped_start)
        /* State (a), we own PD.  The thread blocked on this lock either
           because we're doing TD_CREATE event reporting, or for some
           other reason that create_thread chose.  Now let it run
           free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }

 out:
  if (destroy_default_attr)
    __pthread_attr_destroy (&default_attr.external);

  return retval;
}
versioned_symbol (libc, __pthread_create_2_1, pthread_create, GLIBC_2_34);
libc_hidden_ver (__pthread_create_2_1, __pthread_create)
#ifndef SHARED
strong_alias (__pthread_create_2_1, __pthread_create)
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_1, GLIBC_2_34)
compat_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.extension = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (__pthread_once)
PTHREAD_STATIC_FN_REQUIRE (__pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (__pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (__pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (__pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (__pthread_getspecific)