/* Copyright (C) 2009-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <tcb-offsets.h>
#include <kernel-features.h>
#include "lowlevellock.h"

#if IS_IN (libpthread)
# if defined SHARED && !defined NO_HIDDEN
#  define __pthread_unwind __GI___pthread_unwind
# endif
#else
# ifndef SHARED
	.weak	__pthread_unwind
# endif
#endif


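/* Load into REG the operation word used for the FUTEX_WAIT call below:
   FUTEX_WAIT, or'ed with FUTEX_PRIVATE_FLAG when private futexes are
   available -- either assumed at compile time (__ASSUME_PRIVATE_FUTEX)
   or read at run time from the TCB's PRIVATE_FUTEX field, which holds
   FUTEX_PRIVATE_FLAG or zero.  */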
#ifdef __ASSUME_PRIVATE_FUTEX
# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
#else
# if FUTEX_WAIT == 0
#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
	movl	%fs:PRIVATE_FUTEX, reg
# else
#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
	movl	%fs:PRIVATE_FUTEX, reg ; \
	orl	$FUTEX_WAIT, reg
# endif
#endif

/* It is crucial that the functions in this file don't modify registers
   other than %rax and %r11.  The syscall wrapper code depends on this
   because it doesn't explicitly save the other registers which hold
   relevant values.  */
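
/* For reference, a cancellable syscall stub (generated from
   sysdep-cancel.h) conceptually brackets the kernel call like the
   rough C-level sketch below.  The syscall arguments stay live in
   their argument registers across both helper calls, which is why
   only %rax and %r11 may be clobbered here:

	int oldtype = __pthread_enable_asynccancel ();
	result = <the actual syscall, arguments still in %rdi, %rsi, ...>;
	__pthread_disable_asynccancel (oldtype);  */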
	.text

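/* Atomically set the asynchronous-cancellation type bit in the TCB's
   cancelhandling word and return the previous value of that word in
   %eax; the caller later hands it back to __pthread_disable_asynccancel.
   If a cancellation request is already pending, cancellation is enabled
   and the thread is not exiting or terminated, act on the request right
   away: record PTHREAD_CANCELED as the thread result, mark the thread
   as exiting and unwind via __pthread_unwind, which does not return.  */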
	.hidden __pthread_enable_asynccancel
ENTRY(__pthread_enable_asynccancel)
	movl	%fs:CANCELHANDLING, %eax
2:	movl	%eax, %r11d
	orl	$TCB_CANCELTYPE_BITMASK, %r11d
	cmpl	%eax, %r11d
	je	1f

	lock
	cmpxchgl %r11d, %fs:CANCELHANDLING
	jnz	2b

	andl	$(TCB_CANCELSTATE_BITMASK|TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK|TCB_EXITING_BITMASK|TCB_CANCEL_RESTMASK|TCB_TERMINATED_BITMASK), %r11d
	cmpl	$(TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK), %r11d
	je	3f

1:	ret

3:	subq	$8, %rsp
	cfi_adjust_cfa_offset(8)
	LP_OP(mov) $TCB_PTHREAD_CANCELED, %fs:RESULT
	lock
	orl	$TCB_EXITING_BITMASK, %fs:CANCELHANDLING
	mov	%fs:CLEANUP_JMP_BUF, %RDI_LP
#ifdef SHARED
	call	__pthread_unwind@PLT
#else
	call	__pthread_unwind
#endif
	hlt
END(__pthread_enable_asynccancel)


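/* Undo __pthread_enable_asynccancel: %edi holds the cancelhandling
   value it returned.  If the cancellation type was asynchronous even
   before that call, there is nothing to restore.  Otherwise atomically
   clear the type bit again (back to deferred cancellation) and, if
   another thread is in the middle of canceling us (CANCELING set but
   CANCELED not yet), wait on the cancelhandling futex until that
   cancellation has completed.  */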
	.hidden __pthread_disable_asynccancel
ENTRY(__pthread_disable_asynccancel)
	testl	$TCB_CANCELTYPE_BITMASK, %edi
	jnz	1f

	movl	%fs:CANCELHANDLING, %eax
2:	movl	%eax, %r11d
	andl	$~TCB_CANCELTYPE_BITMASK, %r11d
	lock
	cmpxchgl %r11d, %fs:CANCELHANDLING
	jnz	2b

	movl	%r11d, %eax
3:	andl	$(TCB_CANCELING_BITMASK|TCB_CANCELED_BITMASK), %eax
	cmpl	$TCB_CANCELING_BITMASK, %eax
	je	4f
1:	ret

	/* Performance doesn't matter in this loop.  We will delay here
	   until the thread is canceled, and it is unlikely that we go
	   around the loop more than once.  */
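	/* In C terms this is roughly (sketch only; FUTEX_PRIVATE_FLAG is
	   added when private futexes are available, and the expected-value
	   argument (%edx) is not explicitly loaded here -- if it does not
	   match, FUTEX_WAIT simply returns at once and we re-check at
	   label 3):

		futex (&self->cancelhandling, FUTEX_WAIT, <val>, NULL);  */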
4:	mov	%fs:0, %RDI_LP
	movl	$__NR_futex, %eax
	xorq	%r10, %r10
	addq	$CANCELHANDLING, %rdi
	LOAD_PRIVATE_FUTEX_WAIT (%esi)
	syscall
	movl	%fs:CANCELHANDLING, %eax
	jmp	3b
END(__pthread_disable_asynccancel)