/* Profiling of shared libraries.
   Copyright (C) 1997-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Based on the BSD mcount implementation.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <ldsodefs.h>
#include <sys/gmon.h>
#include <sys/gmon_out.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <atomic.h>
#include <not-cancel.h>

/* The LD_PROFILE feature has to be implemented differently from the
   normal profiling using the gmon/ functions.  The problem is that an
   arbitrary number of processes can run simultaneously with profiling
   enabled, all writing their results to the same file.  To support
   this one could implement a complicated mechanism to merge the
   content of two profiling runs, or one could extend the file format
   to allow more than one data set.  With the second solution the file
   could grow in size beyond any limit, and with both solutions the
   concurrency of writing the results is a big problem.

   Another, much simpler method is to use mmap to map the same file
   into all participating processes and to modify the data in the
   mmap'ed area, which thereby automatically ends up on disk as well.
   Using the MAP_SHARED option of mmap(2) this can be done without big
   problems for any number of processes.

   This approach is very different from normal profiling: we have to
   keep the profiling data in memory in exactly the form in which it
   is written to disk.  The normal format used by gprof is not usable
   for this since it is optimized for size: it writes the tags as
   single bytes, which leaves the following 32/64 bit values
   unaligned.

   Therefore we use a new format, which looks like this:

          0  1  2  3        <- byte offsets within each 32-bit word
   0000 g  m  o  n
   0004 *version*           <- GMON_SHOBJ_VERSION
   0008 00 00 00 00
   000c 00 00 00 00
   0010 00 00 00 00

   0014 *tag*               <- GMON_TAG_TIME_HIST
   0018 ?? ?? ?? ??
        ?? ?? ?? ??         <- 32/64 bit LowPC
   0018+A ?? ?? ?? ??
        ?? ?? ?? ??         <- 32/64 bit HighPC
   0018+2*A *histsize*
   001c+2*A *profrate*
   0020+2*A s  e  c  o
   0024+2*A n  d  s  \0
   0028+2*A \0 \0 \0 \0
   002c+2*A \0 \0 \0
   002f+2*A s

   0030+2*A ?? ?? ?? ??     <- Count data
   ...      ...
   0030+2*A+K ?? ?? ?? ??

   0030+2*A+K *tag*         <- GMON_TAG_CG_ARC
   0034+2*A+K *lastused*
   0038+2*A+K ?? ?? ?? ??
              ?? ?? ?? ??   <- FromPC#1
   0038+3*A+K ?? ?? ?? ??
              ?? ?? ?? ??   <- ToPC#1
   0038+4*A+K ?? ?? ?? ??   <- Count#1
   ...        ...           ...
   0038+(2*(CN-1)+2)*A+(CN-1)*4+K ?? ?? ?? ??
              ?? ?? ?? ??   <- FromPC#CN
   0038+(2*(CN-1)+3)*A+(CN-1)*4+K ?? ?? ?? ??
              ?? ?? ?? ??   <- ToPC#CN
   0038+(2*CN+2)*A+(CN-1)*4+K ?? ?? ?? ??   <- Count#CN

   We put (for now?) no basic block information in the file since this
   would introduce race conditions among all the processes that want
   to write them.

   `A' is the size in bytes of a PC value (32 or 64 bits) and `K' is
   the size in bytes of the count data, computed as

                textsize / HISTFRACTION

   `CN' in the above table is the number of call graph arcs.  Normally
   the table is sparse and the profiling code writes out only those
   entries which are really used in the program run.  But since we
   must not extend this table (the profiling file) we keep them all
   here.  So CN can be computed in advance as

        MINARCS <= textsize*(ARCDENSITY/100) <= MAXARCS

   Now the remaining question is how to build the data structures we
   can work with from this data.  We need the froms set and must
   associate each from with all the associated tos.  We do this by
   constructing these data structures at program start.  To do so we
   simply visit all entries in the call graph table and add each to
   the appropriate list.  */
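
/* For illustration only, a minimal standalone sketch (not part of this
   file) of the MAP_SHARED technique described above: every process
   maps the same file and atomically updates a counter in the mapping,
   and the result automatically lands in the file.  The file name and
   the single-counter layout are hypothetical.  */
#if 0
# include <fcntl.h>
# include <stdint.h>
# include <sys/mman.h>
# include <unistd.h>

static int
shared_counter_demo (void)
{
  /* All cooperating processes open and map the same file.  */
  int fd = open ("/tmp/demo.profile", O_RDWR | O_CREAT, 0644);
  if (fd == -1)
    return -1;
  if (ftruncate (fd, sizeof (uint32_t)) == -1)
    {
      close (fd);
      return -1;
    }
  void *m = mmap (NULL, sizeof (uint32_t), PROT_READ | PROT_WRITE,
                  MAP_SHARED, fd, 0);
  close (fd);
  if (m == MAP_FAILED)
    return -1;
  /* An atomic read-modify-write ensures concurrent increments from
     other processes are not lost; MAP_SHARED makes the store visible
     to them and to the file.  */
  __atomic_fetch_add ((uint32_t *) m, 1, __ATOMIC_RELAXED);
  munmap (m, sizeof (uint32_t));
  return 0;
}
#endif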

extern int __profile_frequency (void);
libc_hidden_proto (__profile_frequency)

/* We define a special type to address the elements of the arc table.
   This is basically the `gmon_cg_arc_record' format but it includes
   the room for the tag and it uses real types.  */
struct here_cg_arc_record
  {
    uintptr_t from_pc;
    uintptr_t self_pc;
    /* The count field is atomically incremented in _dl_mcount, which
       requires it to be properly aligned for its type, and for this
       alignment to be visible to the compiler.  The amount of data
       before an array of this structure is calculated as
       expected_size in _dl_start_profile.  Everything in that
       calculation is a multiple of 4 bytes (in the case of
       kcountsize, because it is derived from a subtraction of
       page-aligned values, and the corresponding calculation in
       __monstartup also ensures it is at least a multiple of the size
       of u_long), so all copies of this field do in fact have the
       appropriate alignment.  */
    uint32_t count __attribute__ ((aligned (__alignof__ (uint32_t))));
  } __attribute__ ((packed));
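
/* Illustrative compile-time checks, not built here: they spell out
   the layout facts claimed in the comment above.  The packed record
   has no padding, and both its size and the offset of its count field
   are multiples of 4, so count stays 4-byte aligned across an array
   of these records.  */
#if 0
# include <stddef.h>
_Static_assert (sizeof (struct here_cg_arc_record)
                == 2 * sizeof (uintptr_t) + sizeof (uint32_t),
                "packed record matches the on-disk arc record size");
_Static_assert (offsetof (struct here_cg_arc_record, count) % 4 == 0,
                "count is 4-byte aligned within the record");
_Static_assert (sizeof (struct here_cg_arc_record) % 4 == 0,
                "count stays 4-byte aligned across array elements");
#endif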

static struct here_cg_arc_record *data;

/* Nonzero if profiling is under way.  */
static int running;

/* This is the number of entries which have been incorporated into the
   TOS set so far.  */
static uint32_t narcs;
/* This is a pointer to the object representing the number of entries
   currently in the mmap'ed file.  This does not have to equal NARCS
   at every point in time; when the two are equal, all entries from
   the file are in our lists.  */
static volatile uint32_t *narcsp;
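
/* A note on the lookup structures below: TOS is a hash table indexed
   by a hash of the callee's PC; each slot heads a chain of FROMS
   entries linked through their LINK fields, and each FROMS entry
   points via HERE at one arc record in the mmap'ed file.  */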

struct here_fromstruct
  {
    struct here_cg_arc_record volatile *here;
    uint16_t link;
  };

static volatile uint16_t *tos;

static struct here_fromstruct *froms;
static uint32_t fromlimit;
static volatile uint32_t fromidx;

static uintptr_t lowpc;
static size_t textsize;
static unsigned int log_hashfraction;


/* Set up profiling data to profile the object described by the global
   GL(dl_profile_map).  The output file is found (or created) in the
   directory GLRO(dl_profile_output).  */
void
_dl_start_profile (void)
{
  char *filename;
  int fd;
  struct __stat64_t64 st;
  const ElfW(Phdr) *ph;
  ElfW(Addr) mapstart = ~((ElfW(Addr)) 0);
  ElfW(Addr) mapend = 0;
  char *hist, *cp;
  size_t idx;
  size_t tossize;
  size_t fromssize;
  uintptr_t highpc;
  uint16_t *kcount;
  size_t kcountsize;
  struct gmon_hdr *addr = NULL;
  off_t expected_size;
  /* See profil(2) where this is described.  */
  int s_scale;
#define SCALE_1_TO_1 0x10000L
  const char *errstr = NULL;

  /* Compute the size of the sections which contain program code.  */
  for (ph = GL(dl_profile_map)->l_phdr;
       ph < &GL(dl_profile_map)->l_phdr[GL(dl_profile_map)->l_phnum]; ++ph)
    if (ph->p_type == PT_LOAD && (ph->p_flags & PF_X))
      {
        ElfW(Addr) start = (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1));
        ElfW(Addr) end = ((ph->p_vaddr + ph->p_memsz + GLRO(dl_pagesize) - 1)
                          & ~(GLRO(dl_pagesize) - 1));

        if (start < mapstart)
          mapstart = start;
        if (end > mapend)
          mapend = end;
      }
  /* Now we can compute the size of the profiling data.  This is done
     with the same formulas as in `monstartup' (see gmon.c).  */
  running = 0;
  lowpc = ROUNDDOWN (mapstart + GL(dl_profile_map)->l_addr,
                     HISTFRACTION * sizeof (HISTCOUNTER));
  highpc = ROUNDUP (mapend + GL(dl_profile_map)->l_addr,
                    HISTFRACTION * sizeof (HISTCOUNTER));
  textsize = highpc - lowpc;
  kcountsize = textsize / HISTFRACTION;
  if ((HASHFRACTION & (HASHFRACTION - 1)) == 0)
    {
      /* If HASHFRACTION is a power of two, mcount can use shifting
         instead of integer division.  Precompute the shift amount.

         This is a constant but the compiler cannot compile the
         expression away since the __ffs implementation is not known
         to the compiler.  Help the compiler by precomputing the
         usual cases.  */
      assert (HASHFRACTION == 2);

      if (sizeof (*froms) == 8)
        log_hashfraction = 4;
      else if (sizeof (*froms) == 16)
        log_hashfraction = 5;
      else
        log_hashfraction = __ffs (HASHFRACTION * sizeof (*froms)) - 1;
    }
  else
    log_hashfraction = -1;
  tossize = textsize / HASHFRACTION;
  fromlimit = textsize * ARCDENSITY / 100;
  if (fromlimit < MINARCS)
    fromlimit = MINARCS;
  if (fromlimit > MAXARCS)
    fromlimit = MAXARCS;
  fromssize = fromlimit * sizeof (struct here_fromstruct);

  expected_size = (sizeof (struct gmon_hdr)
                   + 4 + sizeof (struct gmon_hist_hdr) + kcountsize
                   + 4 + 4 + fromssize * sizeof (struct here_cg_arc_record));
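  /* This corresponds to the on-disk layout pictured in the big comment
     at the top of this file: the header, then the histogram tag word,
     hist_hdr and K bytes of counters, then the call-graph tag word,
     the *lastused* word, and room for the arc records.  */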

  /* Create the gmon_hdr we expect or write.  */
  struct real_gmon_hdr
  {
    char cookie[4];
    int32_t version;
    char spare[3 * 4];
  } gmon_hdr;
  if (sizeof (gmon_hdr) != sizeof (struct gmon_hdr)
      || (offsetof (struct real_gmon_hdr, cookie)
          != offsetof (struct gmon_hdr, cookie))
      || (offsetof (struct real_gmon_hdr, version)
          != offsetof (struct gmon_hdr, version)))
    abort ();

  memcpy (&gmon_hdr.cookie[0], GMON_MAGIC, sizeof (gmon_hdr.cookie));
  gmon_hdr.version = GMON_SHOBJ_VERSION;
  memset (gmon_hdr.spare, '\0', sizeof (gmon_hdr.spare));

  /* Create the hist_hdr we expect or write.  */
  struct real_gmon_hist_hdr
  {
    char *low_pc;
    char *high_pc;
    int32_t hist_size;
    int32_t prof_rate;
    char dimen[15];
    char dimen_abbrev;
  } hist_hdr;
  if (sizeof (hist_hdr) != sizeof (struct gmon_hist_hdr)
      || (offsetof (struct real_gmon_hist_hdr, low_pc)
          != offsetof (struct gmon_hist_hdr, low_pc))
      || (offsetof (struct real_gmon_hist_hdr, high_pc)
          != offsetof (struct gmon_hist_hdr, high_pc))
      || (offsetof (struct real_gmon_hist_hdr, hist_size)
          != offsetof (struct gmon_hist_hdr, hist_size))
      || (offsetof (struct real_gmon_hist_hdr, prof_rate)
          != offsetof (struct gmon_hist_hdr, prof_rate))
      || (offsetof (struct real_gmon_hist_hdr, dimen)
          != offsetof (struct gmon_hist_hdr, dimen))
      || (offsetof (struct real_gmon_hist_hdr, dimen_abbrev)
          != offsetof (struct gmon_hist_hdr, dimen_abbrev)))
    abort ();

  hist_hdr.low_pc = (char *) mapstart;
  hist_hdr.high_pc = (char *) mapend;
  hist_hdr.hist_size = kcountsize / sizeof (HISTCOUNTER);
  hist_hdr.prof_rate = __profile_frequency ();
  if (sizeof (hist_hdr.dimen) >= sizeof ("seconds"))
    {
      memcpy (hist_hdr.dimen, "seconds", sizeof ("seconds"));
      memset (hist_hdr.dimen + sizeof ("seconds"), '\0',
              sizeof (hist_hdr.dimen) - sizeof ("seconds"));
    }
  else
    strncpy (hist_hdr.dimen, "seconds", sizeof (hist_hdr.dimen));
  hist_hdr.dimen_abbrev = 's';

  /* First determine the output name.  We write in the directory
     GLRO(dl_profile_output) and the name is composed from the shared
     object's soname (or the file name) and the suffix ".profile".  */
  filename = (char *) alloca (strlen (GLRO(dl_profile_output)) + 1
                              + strlen (GLRO(dl_profile))
                              + sizeof ".profile");
  cp = __stpcpy (filename, GLRO(dl_profile_output));
  *cp++ = '/';
  __stpcpy (__stpcpy (cp, GLRO(dl_profile)), ".profile");
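  /* For example (hypothetical names): with LD_PROFILE_OUTPUT=/tmp and
     LD_PROFILE=libfoo.so.1 this yields "/tmp/libfoo.so.1.profile".  */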

  fd = __open64_nocancel (filename, O_RDWR|O_CREAT|O_NOFOLLOW, DEFFILEMODE);
  if (fd == -1)
    {
      char buf[400];
      int errnum;

      /* We cannot write the profiling data so don't do anything.  */
      errstr = "%s: cannot open file: %s\n";
    print_error:
      errnum = errno;
      if (fd != -1)
        __close_nocancel (fd);
      _dl_error_printf (errstr, filename,
                        __strerror_r (errnum, buf, sizeof buf));
      return;
    }

  if (__fstat64_time64 (fd, &st) < 0 || !S_ISREG (st.st_mode))
    {
      /* Not stat'able or not a regular file => don't use it.  */
      errstr = "%s: cannot stat file: %s\n";
      goto print_error;
    }
  /* Test the size.  If it does not match the size we expect from the
     load map we don't use the file and warn the user.  */
  if (st.st_size == 0)
    {
      /* We have to create the file.  */
      char buf[GLRO(dl_pagesize)];

      memset (buf, '\0', GLRO(dl_pagesize));

      if (__lseek (fd, expected_size & ~(GLRO(dl_pagesize) - 1), SEEK_SET)
          == -1)
        {
        cannot_create:
          errstr = "%s: cannot create file: %s\n";
          goto print_error;
        }

      if (TEMP_FAILURE_RETRY
          (__write_nocancel (fd, buf, (expected_size
                                       & (GLRO(dl_pagesize) - 1)))) < 0)
        goto cannot_create;
    }
  else if (st.st_size != expected_size)
    {
      __close_nocancel (fd);
    wrong_format:

      if (addr != NULL)
        __munmap ((void *) addr, expected_size);

      _dl_error_printf ("%s: file is not a valid profile data file"
                        " for `%s'\n",
                        filename, GLRO(dl_profile));
      return;
    }
384 | |
385 | addr = (struct gmon_hdr *) __mmap (NULL, expected_size, PROT_READ|PROT_WRITE, |
386 | MAP_SHARED|MAP_FILE, fd, 0); |
387 | if (addr == (struct gmon_hdr *) MAP_FAILED) |
388 | { |
389 | errstr = "%s: cannot map file: %s\n" ; |
390 | goto print_error; |
391 | } |
392 | |
393 | /* We don't need the file descriptor anymore. */ |
394 | __close_nocancel (fd); |
395 | |
396 | /* Pointer to data after the header. */ |
397 | hist = (char *) (addr + 1); |
398 | kcount = (uint16_t *) ((char *) hist + sizeof (uint32_t) |
399 | + sizeof (struct gmon_hist_hdr)); |
400 | |
401 | /* Compute pointer to array of the arc information. */ |
402 | narcsp = (uint32_t *) ((char *) kcount + kcountsize + sizeof (uint32_t)); |
403 | data = (struct here_cg_arc_record *) ((char *) narcsp + sizeof (uint32_t)); |
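  /* Note the extra uint32_t words skipped above: HIST is followed by
     the GMON_TAG_TIME_HIST word before hist_hdr, and NARCSP skips the
     GMON_TAG_CG_ARC word so that it points at the *lastused* counter;
     the arc records follow immediately after that counter.  */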

  if (st.st_size == 0)
    {
      /* Create the signature.  */
      memcpy (addr, &gmon_hdr, sizeof (struct gmon_hdr));

      *(uint32_t *) hist = GMON_TAG_TIME_HIST;
      memcpy (hist + sizeof (uint32_t), &hist_hdr,
              sizeof (struct gmon_hist_hdr));

      narcsp[-1] = GMON_TAG_CG_ARC;
    }
  else
    {
      /* Test the signature in the file.  */
      if (memcmp (addr, &gmon_hdr, sizeof (struct gmon_hdr)) != 0
          || *(uint32_t *) hist != GMON_TAG_TIME_HIST
          || memcmp (hist + sizeof (uint32_t), &hist_hdr,
                     sizeof (struct gmon_hist_hdr)) != 0
          || narcsp[-1] != GMON_TAG_CG_ARC)
        goto wrong_format;
    }

  /* Allocate one block of memory for both the TOS hash table and the
     FROMS data; FROMS points into the same allocation.  */
  tos = (uint16_t *) calloc (tossize + fromssize, 1);
  if (tos == NULL)
    {
      __munmap ((void *) addr, expected_size);
      _dl_fatal_printf ("Out of memory while initializing profiler\n");
      /* NOTREACHED */
    }

  froms = (struct here_fromstruct *) ((char *) tos + tossize);
  fromidx = 0;

  /* Now we have to process all the arc count entries.  By the way:
     it is not critical whether the *NARCSP value changes in the
     meantime.  Before we enter a new entry into the TOS set we check
     that everything is available in TOS.  This happens in _dl_mcount.

     Loading the entries in reverse order should help to get the most
     frequently used entries to the front of the lists.  */
  for (idx = narcs = MIN (*narcsp, fromlimit); idx > 0; )
    {
      size_t to_index;
      size_t newfromidx;
      --idx;
      to_index = (data[idx].self_pc / (HASHFRACTION * sizeof (*tos)));
      newfromidx = fromidx++;
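      /* Link the new entry in at the head of the chain hanging off
         this callee's TOS slot.  */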
      froms[newfromidx].here = &data[idx];
      froms[newfromidx].link = tos[to_index];
      tos[to_index] = newfromidx;
    }

  /* Set up the counting data.  */
  if (kcountsize < highpc - lowpc)
    {
#if 0
      s_scale = ((double) kcountsize / (highpc - lowpc)) * SCALE_1_TO_1;
#else
      size_t range = highpc - lowpc;
      size_t quot = range / kcountsize;

      if (quot >= SCALE_1_TO_1)
        s_scale = 1;
      else if (quot >= SCALE_1_TO_1 / 256)
        s_scale = SCALE_1_TO_1 / quot;
      else if (range > ULONG_MAX / 256)
        s_scale = (SCALE_1_TO_1 * 256) / (range / (kcountsize / 256));
      else
        s_scale = (SCALE_1_TO_1 * 256) / ((range * 256) / kcountsize);
#endif
    }
  else
    s_scale = SCALE_1_TO_1;
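  /* Worked example (usual configuration): with HISTFRACTION == 2,
     KCOUNTSIZE is textsize / 2, so QUOT is 2 and the final branch
     above yields S_SCALE = (0x10000 * 256) / (256 * 2) = 0x8000,
     i.e. half of SCALE_1_TO_1.  */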

  /* Start the profiler.  */
  __profil ((void *) kcount, kcountsize, lowpc, s_scale);

  /* Turn on profiling.  */
  running = 1;
}


void
_dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
{
  volatile uint16_t *topcindex;
  size_t i, fromindex;
  struct here_fromstruct *fromp;

  if (! running)
    return;

  /* Compute relative addresses.  The shared object can be loaded at
     any address.  The value of frompc could be anything.  We cannot
     restrict it in any way, so we just set it to a fixed value (0) in
     case it is outside the allowed range.  These calls show up as
     calls from <external> in the gprof output.  */
  frompc -= lowpc;
  if (frompc >= textsize)
    frompc = 0;
  selfpc -= lowpc;
  if (selfpc >= textsize)
    goto done;

  /* Getting here we now have to find out whether the location was
     already used.  If so we are lucky and only have to increment a
     counter (this increment also has to be atomic).  If the entry is
     new, things get more complicated...  */

  /* Avoid the integer division if possible.  */
  if ((HASHFRACTION & (HASHFRACTION - 1)) == 0)
    i = selfpc >> log_hashfraction;
  else
    i = selfpc / (HASHFRACTION * sizeof (*tos));
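  /* With the usual HASHFRACTION of 2 this takes the shift path;
     LOG_HASHFRACTION was precomputed in _dl_start_profile.  */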

  topcindex = &tos[i];
  fromindex = *topcindex;

  if (fromindex == 0)
    goto check_new_or_add;

  fromp = &froms[fromindex];

  /* We have to look through the chain of arcs to see whether there is
     already an entry for our arc.  */
  while (fromp->here->from_pc != frompc)
    {
      if (fromp->link != 0)
        do
          fromp = &froms[fromp->link];
        while (fromp->link != 0 && fromp->here->from_pc != frompc);

      if (fromp->here->from_pc != frompc)
        {
          topcindex = &fromp->link;

        check_new_or_add:
          /* Our entry is not among the entries we read so far from the
             data file.  Now see whether we have to update the list.  */
          while (narcs != *narcsp && narcs < fromlimit)
            {
              size_t to_index;
              size_t newfromidx;
              to_index = (data[narcs].self_pc
                          / (HASHFRACTION * sizeof (*tos)));
              newfromidx = catomic_exchange_and_add (&fromidx, 1) + 1;
              froms[newfromidx].here = &data[narcs];
              froms[newfromidx].link = tos[to_index];
              tos[to_index] = newfromidx;
              catomic_increment (&narcs);
            }

          /* If we still have no entry stop searching and insert.  */
          if (*topcindex == 0)
            {
              unsigned int newarc = catomic_exchange_and_add (narcsp, 1);

              /* In rare cases it could happen that all entries in
                 FROMS are occupied.  In that case we cannot count
                 this arc anymore.  */
              if (newarc >= fromlimit)
                goto done;

              *topcindex = catomic_exchange_and_add (&fromidx, 1) + 1;
              fromp = &froms[*topcindex];

              fromp->here = &data[newarc];
              data[newarc].from_pc = frompc;
              data[newarc].self_pc = selfpc;
              data[newarc].count = 0;
              fromp->link = 0;
              catomic_increment (&narcs);

              break;
            }

          fromp = &froms[*topcindex];
        }
      else
        /* Found it.  */
        break;
    }

  /* Increment the counter.  */
  catomic_increment (&fromp->here->count);

 done:
  ;
}
rtld_hidden_def (_dl_mcount)
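
/* Usage sketch, with example names: run the program with the object
   to be profiled named in LD_PROFILE, e.g.

     LD_PROFILE=libfoo.so.1 LD_PROFILE_OUTPUT=/tmp ./prog

   and post-process the resulting /tmp/libfoo.so.1.profile with sprof:

     sprof -p libfoo.so.1 /tmp/libfoo.so.1.profile  */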