/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <shlib-compat.h>
#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <pthread-pi-defines.h>
#include <pthread-errnos.h>
#include <stap-probe.h>

#include <kernel-features.h>


	.text


/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
			       const struct timespec *abstime)  */
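
/* Roughly equivalent C-level logic for the code below.  This is an
   illustrative sketch only: PI/requeue handling, the private-vs-shared
   futex selection, cancellation bookkeeping and the condvar-destruction
   handshake are omitted, `cv' stands for the condvar, and
   futex_wait_bitset () is shorthand for the FUTEX_WAIT_BITSET syscall
   issued below, not a real helper in this file:

	if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
	  return EINVAL;
	lll_lock (cv->cond_lock);
	__pthread_mutex_unlock_usercnt (mutex, 0);
	++cv->total_seq; ++cv->cond_futex;
	cv->cond_nwaiters += 1 << nwaiters_shift;
	uint64_t seq = cv->wakeup_seq;
	unsigned int bc = cv->broadcast_seq;
	long err;
	for (;;)
	  {
	    unsigned int val = cv->cond_futex;
	    lll_unlock (cv->cond_lock);
	    err = futex_wait_bitset (&cv->cond_futex, val, abstime);
	    lll_lock (cv->cond_lock);
	    if (cv->broadcast_seq != bc)
	      { err = 0; break; }			    (broadcast)
	    if (cv->wakeup_seq > seq && cv->wakeup_seq > cv->woken_seq)
	      { err = 0; ++cv->woken_seq; break; }	    (got a signal)
	    if (err == -ETIMEDOUT)
	      { ++cv->wakeup_seq; ++cv->cond_futex;
		++cv->woken_seq; break; }		    (timed out)
	  }
	cv->cond_nwaiters -= 1 << nwaiters_shift;
	lll_unlock (cv->cond_lock);
	__pthread_mutex_cond_lock (mutex);
	return err == -ETIMEDOUT ? ETIMEDOUT : 0;  */
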
	.globl	__pthread_cond_timedwait
	.type	__pthread_cond_timedwait, @function
	.align	16
__pthread_cond_timedwait:
.LSTARTCODE:
	cfi_startproc
#ifdef SHARED
	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
			DW.ref.__gcc_personality_v0)
	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
#else
	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
#endif

	pushq	%r12
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%r12, 0)
	pushq	%r13
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%r13, 0)
	pushq	%r14
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%r14, 0)
	pushq	%r15
	cfi_adjust_cfa_offset(8)
	cfi_rel_offset(%r15, 0)
#define FRAME_SIZE (32+8)
	subq	$FRAME_SIZE, %rsp
	cfi_adjust_cfa_offset(FRAME_SIZE)
	cfi_remember_state

	LIBC_PROBE (cond_timedwait, 3, %rdi, %rsi, %rdx)

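	/* Reject an invalid timeout up front: tv_nsec must lie in
	   [0, 1000000000).  The unsigned comparison also catches negative
	   tv_nsec values, which wrap to huge unsigned ones.  */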
	cmpq	$1000000000, 8(%rdx)
	movl	$EINVAL, %eax
	jae	48f

	/* Stack frame:

	   rsp + 48
		    +--------------------------+
	   rsp + 32 | timeout value            |
		    +--------------------------+
	   rsp + 24 | old wakeup_seq value     |
		    +--------------------------+
	   rsp + 16 | mutex pointer            |
		    +--------------------------+
	   rsp +  8 | condvar pointer          |
		    +--------------------------+
	   rsp +  4 | old broadcast_seq value  |
		    +--------------------------+
	   rsp +  0 | old cancellation mode    |
		    +--------------------------+
	*/

	LP_OP(cmp) $-1, dep_mutex(%rdi)

	/* Prepare structure passed to cancellation handler.  */
	movq	%rdi, 8(%rsp)
	movq	%rsi, 16(%rsp)
	movq	%rdx, %r13

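	/* The flags from the dep_mutex comparison above are still valid
	   here; the intervening moves do not touch them.  dep_mutex == -1
	   denotes a process-shared condvar; otherwise bind the condvar to
	   this mutex.  */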
	je	22f
	mov	%RSI_LP, dep_mutex(%rdi)

22:	xorb	%r15b, %r15b
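	/* %r15b is the pi_flag; it is set once a FUTEX_WAIT_REQUEUE_PI
	   call has made the kernel lock the PI mutex on our behalf.  */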

	/* Get internal lock.  */
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jnz	31f

	/* Unlock the mutex.  */
32:	movq	16(%rsp), %rdi
	xorl	%esi, %esi
	callq	__pthread_mutex_unlock_usercnt

	testl	%eax, %eax
	jne	46f

	movq	8(%rsp), %rdi
	incq	total_seq(%rdi)
	incl	cond_futex(%rdi)
	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
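	/* The waiter count in cond_nwaiters is kept shifted left by
	   nwaiters_shift; the low bit encodes the clock chosen with
	   pthread_condattr_setclock and is tested before the futex
	   syscalls below.  */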

	/* Get and store current wakeup_seq value.  */
	movq	8(%rsp), %rdi
	movq	wakeup_seq(%rdi), %r9
	movl	broadcast_seq(%rdi), %edx
	movq	%r9, 24(%rsp)
	movl	%edx, 4(%rsp)

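	/* If tv_sec is already negative, the absolute timeout lies in the
	   past; preload the -ETIMEDOUT status and go straight to the
	   wakeup checks.  */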
	cmpq	$0, (%r13)
	movq	$-ETIMEDOUT, %r14
	js	36f

38:	movl	cond_futex(%rdi), %r12d

	/* Unlock.  */
	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	jne	33f

.LcleanupSTART1:
34:	callq	__pthread_enable_asynccancel
	movl	%eax, (%rsp)

	movq	%r13, %r10
	movl	$FUTEX_WAIT_BITSET, %esi
	LP_OP(cmp) $-1, dep_mutex(%rdi)
	je	60f

	mov	dep_mutex(%rdi), %R8_LP
	/* Requeue to a non-robust PI mutex if the PI bit is set and
	   the robust bit is not set.  */
	movl	MUTEX_KIND(%r8), %eax
	andl	$(ROBUST_BIT|PI_BIT), %eax
	cmpl	$PI_BIT, %eax
	jne	61f

	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
	xorl	%eax, %eax
	/* The following only works like this because we only support
	   two clocks, represented using a single bit.  */
	testl	$1, cond_nwaiters(%rdi)
	movl	$FUTEX_CLOCK_REALTIME, %edx
	cmove	%edx, %eax
	orl	%eax, %esi
	movq	%r12, %rdx
	addq	$cond_futex, %rdi
	movl	$SYS_futex, %eax
	syscall

	cmpl	$0, %eax
	sete	%r15b
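	/* A zero return means the kernel locked the mutex for us; record
	   that fact in the pi_flag.  */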

#ifdef __ASSUME_REQUEUE_PI
	jmp	62f
#else
	je	62f

	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
	   successfully, it has already locked the mutex for us and the
	   pi_flag (%r15b) is set to denote that fact.  However, if another
	   thread changed the futex value before we entered the wait, the
	   syscall may return EAGAIN without the mutex being locked.  We go
	   ahead as if we succeeded anyway, since later we look at the
	   pi_flag to decide whether we got the mutex.  The sequence
	   numbers then make sure that only one of the threads actually
	   wakes up.  We retry using normal FUTEX_WAIT only if the kernel
	   returned ENOSYS, since normal and PI futexes don't mix.

	   Note that we don't check for EAGAIN specifically; we assume that
	   the only other error the futex function could return is EAGAIN
	   (barring ETIMEDOUT of course, for the timeout case in futex)
	   since anything else would mean an error in our function.  It is
	   too expensive to do that check for every call (which is quite
	   common in case of a large number of threads), so it has been
	   skipped.  */
	cmpl	$-ENOSYS, %eax
	jne	62f

	subq	$cond_futex, %rdi
#endif

61:	movl	$(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi
60:	xorb	%r15b, %r15b
	xorl	%eax, %eax
	/* The following only works like this because we only support
	   two clocks, represented using a single bit.  */
	testl	$1, cond_nwaiters(%rdi)
	movl	$FUTEX_CLOCK_REALTIME, %edx
	movl	$0xffffffff, %r9d
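	/* %r9d is the futex bitset; 0xffffffff is FUTEX_BITSET_MATCH_ANY,
	   so any wakeup on this futex matches us.  */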
	cmove	%edx, %eax
	orl	%eax, %esi
	movq	%r12, %rdx
	addq	$cond_futex, %rdi
	movl	$SYS_futex, %eax
	syscall
62:	movq	%rax, %r14

	movl	(%rsp), %edi
	callq	__pthread_disable_asynccancel
.LcleanupEND1:

	/* Lock.  */
	movq	8(%rsp), %rdi
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jne	35f

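	/* Decide whether we may consume a wakeup: a broadcast (changed
	   broadcast_seq) always releases us; otherwise wakeup_seq must
	   have advanced past the value we recorded on entry and there
	   must be a wakeup not yet accounted for in woken_seq.  */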
36:	movl	broadcast_seq(%rdi), %edx

	movq	woken_seq(%rdi), %rax

	movq	wakeup_seq(%rdi), %r9

	cmpl	4(%rsp), %edx
	jne	53f

	cmpq	24(%rsp), %r9
	jbe	45f

	cmpq	%rax, %r9
	ja	39f

45:	cmpq	$-ETIMEDOUT, %r14
	je	99f

	/* We need to go back to futex_wait.  If we're using requeue_pi,
	   then release the mutex we had acquired and go back.  */
	test	%r15b, %r15b
	jz	38b

	/* Adjust the mutex values first and then unlock it.  The unlock
	   should always succeed or else the kernel did not lock the
	   mutex correctly.  */
	movq	%r8, %rdi
	callq	__pthread_mutex_cond_lock_adjust
	xorl	%esi, %esi
	callq	__pthread_mutex_unlock_usercnt
	/* Reload the condvar pointer.  */
	movq	8(%rsp), %rdi
	jmp	38b

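	/* Timed out.  Bump wakeup_seq and the futex value so that this
	   waiter is accounted as woken and the counters stay balanced,
	   then report ETIMEDOUT.  */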
99:	incq	wakeup_seq(%rdi)
	incl	cond_futex(%rdi)
	movl	$ETIMEDOUT, %r14d
	jmp	44f

53:	xorq	%r14, %r14
	jmp	54f

39:	xorq	%r14, %r14
44:	incq	woken_seq(%rdi)

54:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)

	/* Wake up a thread which wants to destroy the condvar object.  */
	cmpq	$0xffffffffffffffff, total_seq(%rdi)
	jne	55f
	movl	cond_nwaiters(%rdi), %eax
	andl	$~((1 << nwaiters_shift) - 1), %eax
	jne	55f

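	/* total_seq == -1 marks a condvar on which pthread_cond_destroy
	   is waiting; now that no waiters remain, wake it on the
	   cond_nwaiters futex.  */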
	addq	$cond_nwaiters, %rdi
	LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi)
	movl	$1, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAKE, %eax
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
#else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
	orl	$FUTEX_WAKE, %esi
#endif
	movl	$SYS_futex, %eax
	syscall
	subq	$cond_nwaiters, %rdi

55:	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	jne	40f

	/* If requeue_pi was used, the kernel has already locked the
	   mutex for us.  */
41:	movq	16(%rsp), %rdi
	testb	%r15b, %r15b
	jnz	64f

	callq	__pthread_mutex_cond_lock

63:	testq	%rax, %rax
	cmoveq	%r14, %rax
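	/* If the mutex was acquired (%rax == 0), return the wait status
	   from %r14: zero or ETIMEDOUT.  */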

48:	addq	$FRAME_SIZE, %rsp
	cfi_adjust_cfa_offset(-FRAME_SIZE)
	popq	%r15
	cfi_adjust_cfa_offset(-8)
	cfi_restore(%r15)
	popq	%r14
	cfi_adjust_cfa_offset(-8)
	cfi_restore(%r14)
	popq	%r13
	cfi_adjust_cfa_offset(-8)
	cfi_restore(%r13)
	popq	%r12
	cfi_adjust_cfa_offset(-8)
	cfi_restore(%r12)

	retq

	cfi_restore_state

64:	callq	__pthread_mutex_cond_lock_adjust
	movq	%r14, %rax
	jmp	48b

	/* Initial locking failed.  */
31:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
	jmp	32b

	/* Unlock in loop requires wakeup.  */
33:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake
	jmp	34b

	/* Locking in loop failed.  */
35:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
#if cond_lock != 0
	subq	$cond_lock, %rdi
#endif
	jmp	36b

	/* Unlock after loop requires wakeup.  */
40:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake
	jmp	41b

	/* The initial unlocking of the mutex failed.  */
46:	movq	8(%rsp), %rdi
	movq	%rax, (%rsp)
	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	jne	47f

#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake

47:	movq	(%rsp), %rax
	jmp	48b

	.size	__pthread_cond_timedwait, .-__pthread_cond_timedwait
versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
		  GLIBC_2_3_2)


	.align	16
	.type	__condvar_cleanup2, @function
__condvar_cleanup2:
	/* Stack frame:

	   rsp + 72
		    +--------------------------+
	   rsp + 64 | %r12                     |
		    +--------------------------+
	   rsp + 56 | %r13                     |
		    +--------------------------+
	   rsp + 48 | %r14                     |
		    +--------------------------+
	   rsp + 40 | %r15                     |
		    +--------------------------+
	   rsp + 32 | unused                   |
		    +--------------------------+
	   rsp + 24 | exception object (%rax)  |
		    +--------------------------+
	   rsp + 16 | mutex pointer            |
		    +--------------------------+
	   rsp +  8 | condvar pointer          |
		    +--------------------------+
	   rsp +  4 | old broadcast_seq value  |
		    +--------------------------+
	   rsp +  0 | old cancellation mode    |
		    +--------------------------+
	*/

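	/* Spill the _Unwind_Exception pointer passed in %rax; it is
	   reloaded into %rdi for the _Unwind_Resume call below.  */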
	movq	%rax, 24(%rsp)

	/* Get internal lock.  */
	movq	8(%rsp), %rdi
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jz	1f

#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
#if cond_lock != 0
	subq	$cond_lock, %rdi
#endif

1:	movl	broadcast_seq(%rdi), %edx
	cmpl	4(%rsp), %edx
	jne	3f

	/* We increment the wakeup_seq counter only if it is lower than
	   total_seq.  If this is not the case the thread was woken and
	   then canceled.  In this case we ignore the signal.  */
	movq	total_seq(%rdi), %rax
	cmpq	wakeup_seq(%rdi), %rax
	jbe	6f
	incq	wakeup_seq(%rdi)
	incl	cond_futex(%rdi)
6:	incq	woken_seq(%rdi)

3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)

	/* Wake up a thread which wants to destroy the condvar object.  */
	xorq	%r12, %r12
	cmpq	$0xffffffffffffffff, total_seq(%rdi)
	jne	4f
	movl	cond_nwaiters(%rdi), %eax
	andl	$~((1 << nwaiters_shift) - 1), %eax
	jne	4f

	LP_OP(cmp) $-1, dep_mutex(%rdi)
	leaq	cond_nwaiters(%rdi), %rdi
	movl	$1, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAKE, %eax
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
#else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
	orl	$FUTEX_WAKE, %esi
#endif
	movl	$SYS_futex, %eax
	syscall
	subq	$cond_nwaiters, %rdi
	movl	$1, %r12d

4:	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	je	2f
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake

	/* Wake up all waiters to make sure no signal gets lost.  */
2:	testq	%r12, %r12
	jnz	5f
	addq	$cond_futex, %rdi
	LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi)
	movl	$0x7fffffff, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAKE, %eax
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
#else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
	orl	$FUTEX_WAKE, %esi
#endif
	movl	$SYS_futex, %eax
	syscall

	/* Lock the mutex only if we don't own it already.  This only
	   happens in case of PI mutexes, if we got cancelled after a
	   successful return of the futex syscall and before disabling
	   async cancellation.  */
5:	movq	16(%rsp), %rdi
	movl	MUTEX_KIND(%rdi), %eax
	andl	$(ROBUST_BIT|PI_BIT), %eax
	cmpl	$PI_BIT, %eax
	jne	7f

	movl	(%rdi), %eax
	andl	$TID_MASK, %eax
	cmpl	%eax, %fs:TID
	jne	7f
	/* We managed to get the lock.  Fix it up before returning.  */
	callq	__pthread_mutex_cond_lock_adjust
	jmp	8f

7:	callq	__pthread_mutex_cond_lock

8:	movq	24(%rsp), %rdi
	movq	FRAME_SIZE(%rsp), %r15
	movq	FRAME_SIZE+8(%rsp), %r14
	movq	FRAME_SIZE+16(%rsp), %r13
	movq	FRAME_SIZE+24(%rsp), %r12
.LcallUR:
	call	_Unwind_Resume@PLT
	hlt
.LENDCODE:
	cfi_endproc
	.size	__condvar_cleanup2, .-__condvar_cleanup2


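/* LSDA call-site table: each record is start, length, landing pad and
   action, all uleb128-encoded and relative to the function start.  The
   first record routes unwinds out of the cancellable region
   .LcleanupSTART1 .. .LcleanupEND1 to __condvar_cleanup2; the second
   gives the _Unwind_Resume call site no landing pad of its own.  */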
	.section .gcc_except_table,"a",@progbits
.LexceptSTART:
	.byte	DW_EH_PE_omit			# @LPStart format
	.byte	DW_EH_PE_omit			# @TType format
	.byte	DW_EH_PE_uleb128		# call-site format
	.uleb128 .Lcstend-.Lcstbegin
.Lcstbegin:
	.uleb128 .LcleanupSTART1-.LSTARTCODE
	.uleb128 .LcleanupEND1-.LcleanupSTART1
	.uleb128 __condvar_cleanup2-.LSTARTCODE
	.uleb128 0
	.uleb128 .LcallUR-.LSTARTCODE
	.uleb128 .LENDCODE-.LcallUR
	.uleb128 0
	.uleb128 0
.Lcstend:


#ifdef SHARED
	.hidden	DW.ref.__gcc_personality_v0
	.weak	DW.ref.__gcc_personality_v0
	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
	.align	LP_SIZE
	.type	DW.ref.__gcc_personality_v0, @object
	.size	DW.ref.__gcc_personality_v0, LP_SIZE
DW.ref.__gcc_personality_v0:
	ASM_ADDR __gcc_personality_v0
#endif