/* Cache handling for netgroup lookup.
   Copyright (C) 2011-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@gmail.com>, 2011.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.  */

#include <alloca.h>
#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

#include "../inet/netgroup.h"
#include "nscd.h"
#include "dbg_log.h"

#include <kernel-features.h>


/* This is the standard reply in case the service is disabled.  */
static const netgroup_response_header disabled =
{
  .version = NSCD_VERSION,
  .found = -1,
  .nresults = 0,
  .result_len = 0
};

/* This is the struct describing how to write this record.  */
const struct iovec netgroup_iov_disabled =
{
  .iov_base = (void *) &disabled,
  .iov_len = sizeof (disabled)
};

/* This is the standard reply in case we haven't found the dataset.  */
static const netgroup_response_header notfound =
{
  .version = NSCD_VERSION,
  .found = 0,
  .nresults = 0,
  .result_len = 0
};


struct dataset
{
  struct datahead head;
  netgroup_response_header resp;
  char strdata[0];
};
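
/* A positive record carries in STRDATA the NUL-terminated (host, user,
   domain) strings of every triplet in the netgroup, one after the other,
   followed by a copy of the request key.  A negative record carries only
   the copy of the key.  */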

/* Sends a notfound message and prepares a notfound dataset to write to the
   cache.  Returns true if there was enough memory to allocate the dataset and
   returns the dataset in DATASETP, total bytes to write in TOTALP and the
   timeout in TIMEOUTP.  KEY_COPY is set to point to the copy of the key in the
   dataset.  */
static bool
do_notfound (struct database_dyn *db, int fd, request_header *req,
             const char *key, struct dataset **datasetp, ssize_t *totalp,
             time_t *timeoutp, char **key_copy)
{
  struct dataset *dataset;
  ssize_t total;
  time_t timeout;
  bool cacheable = false;

  total = sizeof (notfound);
  timeout = time (NULL) + db->negtimeout;

  if (fd != -1)
    TEMP_FAILURE_RETRY (send (fd, &notfound, total, MSG_NOSIGNAL));

  dataset = mempool_alloc (db, sizeof (struct dataset) + req->key_len, 1);
  /* If we cannot permanently store the result, so be it.  */
  if (dataset != NULL)
    {
      timeout = datahead_init_neg (&dataset->head,
                                   sizeof (struct dataset) + req->key_len,
                                   total, db->negtimeout);

      /* This is the reply.  */
      memcpy (&dataset->resp, &notfound, total);

      /* Copy the key data.  */
      memcpy (dataset->strdata, key, req->key_len);
      *key_copy = dataset->strdata;

      cacheable = true;
    }
  *timeoutp = timeout;
  *totalp = total;
  *datasetp = dataset;
  return cacheable;
}
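
/* Build a GETNETGRENT reply for KEY (the netgroup name) by iterating over the
   NSS netgroup modules and expanding nested groups as they are encountered.
   The reply is sent on FD (unless FD is -1), added to the cache DB when
   possible, and returned in *RESULTP.  HE and DH are non-NULL when an
   existing cache entry is being refreshed.  Returns the timeout of the
   entry.  */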
static time_t
addgetnetgrentX (struct database_dyn *db, int fd, request_header *req,
                 const char *key, uid_t uid, struct hashentry *he,
                 struct datahead *dh, struct dataset **resultp)
{
  if (__glibc_unlikely (debug_level > 0))
    {
      if (he == NULL)
        dbg_log (_("Haven't found \"%s\" in netgroup cache!"), key);
      else
        dbg_log (_("Reloading \"%s\" in netgroup cache!"), key);
    }

  static service_user *netgroup_database;
  time_t timeout;
  struct dataset *dataset;
  bool cacheable = false;
  ssize_t total;
  bool found = false;

  char *key_copy = NULL;
  struct __netgrent data;
  size_t buflen = MAX (1024, sizeof (*dataset) + req->key_len);
  size_t buffilled = sizeof (*dataset);
  char *buffer = NULL;
  size_t nentries = 0;
  size_t group_len = strlen (key) + 1;
  struct name_list *first_needed
    = alloca (sizeof (struct name_list) + group_len);

  if (netgroup_database == NULL
      && __nss_database_lookup ("netgroup", NULL, NULL, &netgroup_database))
    {
      /* No such service.  */
      cacheable = do_notfound (db, fd, req, key, &dataset, &total, &timeout,
                               &key_copy);
      goto writeout;
    }

  memset (&data, '\0', sizeof (data));
  buffer = xmalloc (buflen);
  first_needed->next = first_needed;
  memcpy (first_needed->name, key, group_len);
  data.needed_groups = first_needed;
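
  /* DATA.NEEDED_GROUPS is a circular, singly-linked list whose head pointer
     refers to the most recently added element, so its ->next is the oldest
     pending group.  Each iteration moves one group from there onto the
     NULL-terminated DATA.KNOWN_GROUPS list of groups already looked up,
     which is also used to avoid expanding the same group twice and thus to
     stop recursive netgroup definitions from looping.  */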
  while (data.needed_groups != NULL)
    {
      /* Add the next group to the list of those which are known.  */
      struct name_list *this_group = data.needed_groups->next;
      if (this_group == data.needed_groups)
        data.needed_groups = NULL;
      else
        data.needed_groups->next = this_group->next;
      this_group->next = data.known_groups;
      data.known_groups = this_group;

      union
      {
        enum nss_status (*f) (const char *, struct __netgrent *);
        void *ptr;
      } setfct;

      service_user *nip = netgroup_database;
      int no_more = __nss_lookup (&nip, "setnetgrent", NULL, &setfct.ptr);
      while (!no_more)
        {
          enum nss_status status
            = DL_CALL_FCT (*setfct.f, (data.known_groups->name, &data));

          if (status == NSS_STATUS_SUCCESS)
            {
              found = true;
              union
              {
                enum nss_status (*f) (struct __netgrent *, char *, size_t,
                                      int *);
                void *ptr;
              } getfct;
              getfct.ptr = __nss_lookup_function (nip, "getnetgrent_r");
              if (getfct.f != NULL)
                while (1)
                  {
                    int e;
                    status = getfct.f (&data, buffer + buffilled,
                                       buflen - buffilled - req->key_len, &e);
                    if (status == NSS_STATUS_SUCCESS)
                      {
                        if (data.type == triple_val)
                          {
                            const char *nhost = data.val.triple.host;
                            const char *nuser = data.val.triple.user;
                            const char *ndomain = data.val.triple.domain;

                            size_t hostlen = strlen (nhost ?: "") + 1;
                            size_t userlen = strlen (nuser ?: "") + 1;
                            size_t domainlen = strlen (ndomain ?: "") + 1;

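                            /* The three strings should lie consecutively in
                               the buffer in host, user, domain order.  If one
                               of them is missing or they are not laid out
                               like that, make temporary copies in the unused
                               part of the buffer first so that the memmoves
                               below cannot clobber them.  */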
                            if (nhost == NULL || nuser == NULL || ndomain == NULL
                                || nhost > nuser || nuser > ndomain)
                              {
                                const char *last = nhost;
                                if (last == NULL
                                    || (nuser != NULL && nuser > last))
                                  last = nuser;
                                if (last == NULL
                                    || (ndomain != NULL && ndomain > last))
                                  last = ndomain;

                                size_t bufused
                                  = (last == NULL
                                     ? buffilled
                                     : last + strlen (last) + 1 - buffer);

                                /* We have to make temporary copies.  */
                                size_t needed = hostlen + userlen + domainlen;

                                if (buflen - req->key_len - bufused < needed)
                                  {
                                    buflen += MAX (buflen, 2 * needed);
                                    /* Save offset in the old buffer.  We don't
                                       bother with the NULL check here since
                                       we'll do that later anyway.  */
                                    size_t nhostdiff = nhost - buffer;
                                    size_t nuserdiff = nuser - buffer;
                                    size_t ndomaindiff = ndomain - buffer;

                                    char *newbuf = xrealloc (buffer, buflen);
                                    /* Fix up the triplet pointers into the new
                                       buffer.  */
                                    nhost = (nhost ? newbuf + nhostdiff
                                             : NULL);
                                    nuser = (nuser ? newbuf + nuserdiff
                                             : NULL);
                                    ndomain = (ndomain ? newbuf + ndomaindiff
                                               : NULL);
                                    buffer = newbuf;
                                  }

                                nhost = memcpy (buffer + bufused,
                                                nhost ?: "", hostlen);
                                nuser = memcpy ((char *) nhost + hostlen,
                                                nuser ?: "", userlen);
                                ndomain = memcpy ((char *) nuser + userlen,
                                                  ndomain ?: "", domainlen);
                              }

                            char *wp = buffer + buffilled;
                            wp = memmove (wp, nhost ?: "", hostlen);
                            wp += hostlen;
                            wp = memmove (wp, nuser ?: "", userlen);
                            wp += userlen;
                            wp = memmove (wp, ndomain ?: "", domainlen);
                            wp += domainlen;
                            buffilled = wp - buffer;
                            ++nentries;
                          }
                        else
                          {
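                            /* The entry names another netgroup (data.type is
                               group_val).  Queue it for expansion unless it
                               is already pending or has been expanded
                               before, which also keeps recursive netgroup
                               definitions from looping forever.  */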
                            /* Check that the group has not been
                               requested before.  */
                            struct name_list *runp = data.needed_groups;
                            if (runp != NULL)
                              while (1)
                                {
                                  if (strcmp (runp->name, data.val.group) == 0)
                                    break;

                                  runp = runp->next;
                                  if (runp == data.needed_groups)
                                    {
                                      runp = NULL;
                                      break;
                                    }
                                }

                            if (runp == NULL)
                              {
                                runp = data.known_groups;
                                while (runp != NULL)
                                  if (strcmp (runp->name, data.val.group) == 0)
                                    break;
                                  else
                                    runp = runp->next;
                              }

                            if (runp == NULL)
                              {
                                /* A new group is requested.  */
                                size_t namelen = strlen (data.val.group) + 1;
                                struct name_list *newg = alloca (sizeof (*newg)
                                                                 + namelen);
                                memcpy (newg->name, data.val.group, namelen);
                                if (data.needed_groups == NULL)
                                  data.needed_groups = newg->next = newg;
                                else
                                  {
                                    newg->next = data.needed_groups->next;
                                    data.needed_groups->next = newg;
                                    data.needed_groups = newg;
                                  }
                              }
                          }
                      }
                    else if (status == NSS_STATUS_TRYAGAIN && e == ERANGE)
                      {
                        buflen *= 2;
                        buffer = xrealloc (buffer, buflen);
                      }
                    else if (status == NSS_STATUS_RETURN
                             || status == NSS_STATUS_NOTFOUND
                             || status == NSS_STATUS_UNAVAIL)
                      /* This was either the last one for this group or the
                         group was empty or the NSS module had an internal
                         failure.  Look at next group if available.  */
                      break;
                  }

              enum nss_status (*endfct) (struct __netgrent *);
              endfct = __nss_lookup_function (nip, "endnetgrent");
              if (endfct != NULL)
                (void) DL_CALL_FCT (*endfct, (&data));

              break;
            }

          no_more = __nss_next2 (&nip, "setnetgrent", NULL, &setfct.ptr,
                                 status, 0);
        }
    }

  /* No results.  Return a failure and write out a notfound record in the
     cache.  */
  if (!found)
    {
      cacheable = do_notfound (db, fd, req, key, &dataset, &total, &timeout,
                               &key_copy);
      goto writeout;
    }
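
  /* BUFFER now holds the (still uninitialized) struct dataset header followed
     by the collected triplet strings; BUFFILLED bytes of it are in use.  */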
  total = buffilled;

  /* Fill in the dataset.  */
  dataset = (struct dataset *) buffer;
  timeout = datahead_init_pos (&dataset->head, total + req->key_len,
                               total - offsetof (struct dataset, resp),
                               he == NULL ? 0 : dh->nreloads + 1,
                               db->postimeout);

  dataset->resp.version = NSCD_VERSION;
  dataset->resp.found = 1;
  dataset->resp.nresults = nentries;
  dataset->resp.result_len = buffilled - sizeof (*dataset);
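
  /* Append a copy of the key after the response data; it has to live in the
     same block as the dataset so that cache_add below can refer to it.  */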
  assert (buflen - buffilled >= req->key_len);
  key_copy = memcpy (buffer + buffilled, key, req->key_len);
  buffilled += req->key_len;

  /* Now we can determine whether on refill we have to create a new
     record or not.  */
  if (he != NULL)
    {
      assert (fd == -1);

      if (dataset->head.allocsize == dh->allocsize
          && dataset->head.recsize == dh->recsize
          && memcmp (&dataset->resp, dh->data,
                     dh->allocsize - offsetof (struct dataset, resp)) == 0)
        {
          /* The data has not changed.  We will just bump the timeout
             value.  Note that the new record has been built in the
             temporary heap buffer, which is freed below, so nothing else
             has to be done with it.  */
          dh->timeout = dataset->head.timeout;
          dh->ttl = dataset->head.ttl;
          ++dh->nreloads;
          dataset = (struct dataset *) dh;

          goto out;
        }
    }

  {
    struct dataset *newp
      = (struct dataset *) mempool_alloc (db, total + req->key_len, 1);
    if (__glibc_likely (newp != NULL))
      {
        /* Adjust pointer into the memory block.  */
        key_copy = (char *) newp + (key_copy - buffer);

        dataset = memcpy (newp, dataset, total + req->key_len);
        cacheable = true;

        if (he != NULL)
          /* Mark the old record as obsolete.  */
          dh->usable = false;
      }
  }

  if (he == NULL && fd != -1)
    {
      /* We write the dataset before inserting it into the database, since
         this thread might block while inserting and would then make the
         receiver wait unnecessarily.  */
    writeout:
#ifdef HAVE_SENDFILE
      if (__builtin_expect (db->mmap_used, 1) && cacheable)
        {
          assert (db->wr_fd != -1);
          assert ((char *) &dataset->resp > (char *) db->data);
          assert ((char *) dataset - (char *) db->head + total
                  <= (sizeof (struct database_pers_head)
                      + db->head->module * sizeof (ref_t)
                      + db->head->data_size));
# ifndef __ASSUME_SENDFILE
          ssize_t written =
# endif
            sendfileall (fd, db->wr_fd, (char *) &dataset->resp
                         - (char *) db->head, dataset->head.recsize);
# ifndef __ASSUME_SENDFILE
          if (written == -1 && errno == ENOSYS)
            goto use_write;
# endif
        }
      else
#endif
        {
#if defined HAVE_SENDFILE && !defined __ASSUME_SENDFILE
        use_write:
#endif
          writeall (fd, &dataset->resp, dataset->head.recsize);
        }
    }

  if (cacheable)
    {
      /* If necessary, we also propagate the data to disk.  */
      if (db->persistent)
        {
          // XXX async OK?
          uintptr_t pval = (uintptr_t) dataset & ~pagesize_m1;
          msync ((void *) pval,
                 ((uintptr_t) dataset & pagesize_m1) + total + req->key_len,
                 MS_ASYNC);
        }

      (void) cache_add (req->type, key_copy, req->key_len, &dataset->head,
                        true, db, uid, he == NULL);

      pthread_rwlock_unlock (&db->lock);

      /* Mark the old entry as obsolete.  */
      if (dh != NULL)
        dh->usable = false;
    }

 out:
  free (buffer);

  *resultp = dataset;

  return timeout;
}


static time_t
addinnetgrX (struct database_dyn *db, int fd, request_header *req,
             char *key, uid_t uid, struct hashentry *he,
             struct datahead *dh)
{
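  /* The key consists of the group name, NUL-terminated, followed for each of
     host, user and domain by a flag byte: if it is non-zero the
     NUL-terminated string follows, otherwise the field was not specified in
     the request.  */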
  const char *group = key;
  key = (char *) rawmemchr (key, '\0') + 1;
  size_t group_len = key - group - 1;
  const char *host = *key++ ? key : NULL;
  if (host != NULL)
    key = (char *) rawmemchr (key, '\0') + 1;
  const char *user = *key++ ? key : NULL;
  if (user != NULL)
    key = (char *) rawmemchr (key, '\0') + 1;
  const char *domain = *key++ ? key : NULL;

  if (__glibc_unlikely (debug_level > 0))
    {
      if (he == NULL)
        dbg_log (_("Haven't found \"%s (%s,%s,%s)\" in netgroup cache!"),
                 group, host ?: "", user ?: "", domain ?: "");
      else
        dbg_log (_("Reloading \"%s (%s,%s,%s)\" in netgroup cache!"),
                 group, host ?: "", user ?: "", domain ?: "");
    }

  struct dataset *result = (struct dataset *) cache_search (GETNETGRENT,
                                                            group, group_len,
                                                            db, uid);
  time_t timeout;
  if (result != NULL)
    timeout = result->head.timeout;
  else
    {
      request_header req_get =
        {
          .type = GETNETGRENT,
          .key_len = group_len
        };
      timeout = addgetnetgrentX (db, -1, &req_get, group, uid, NULL, NULL,
                                 &result);
    }
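
  /* RESULT now refers to the GETNETGRENT dataset for GROUP, either found in
     the cache or freshly built above; its triplets are scanned below to
     answer the membership question.  */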

  struct indataset
  {
    struct datahead head;
    innetgroup_response_header resp;
  } *dataset
      = (struct indataset *) mempool_alloc (db,
                                            sizeof (*dataset) + req->key_len,
                                            1);
  struct indataset dataset_mem;
  bool cacheable = true;
  if (__glibc_unlikely (dataset == NULL))
    {
      cacheable = false;
      dataset = &dataset_mem;
    }

  datahead_init_pos (&dataset->head, sizeof (*dataset) + req->key_len,
                     sizeof (innetgroup_response_header),
                     he == NULL ? 0 : dh->nreloads + 1, result->head.ttl);
  /* Set the notfound status and timeout based on the result from
     getnetgrent.  */
  dataset->head.notfound = result->head.notfound;
  dataset->head.timeout = timeout;

  dataset->resp.version = NSCD_VERSION;
  dataset->resp.found = result->resp.found;
  /* Until we find a matching entry the result is 0.  */
  dataset->resp.result = 0;

  char *key_copy = memcpy ((char *) (dataset + 1), group, req->key_len);

  if (dataset->resp.found)
    {
      const char *triplets = (const char *) (&result->resp + 1);

      for (nscd_ssize_t i = result->resp.nresults; i > 0; --i)
        {
          bool success = true;

          /* For the host, user and domain in each triplet, we assume success
             if the value is blank because that is how the wildcard entry to
             match anything is stored in the netgroup cache.  */
          if (host != NULL && *triplets != '\0')
            success = strcmp (host, triplets) == 0;
          triplets = (const char *) rawmemchr (triplets, '\0') + 1;

          if (success && user != NULL && *triplets != '\0')
            success = strcmp (user, triplets) == 0;
          triplets = (const char *) rawmemchr (triplets, '\0') + 1;

          if (success && (domain == NULL || *triplets == '\0'
                          || strcmp (domain, triplets) == 0))
            {
              dataset->resp.result = 1;
              break;
            }
          triplets = (const char *) rawmemchr (triplets, '\0') + 1;
        }
    }

  if (he != NULL && dh->data[0].innetgroupdata.result == dataset->resp.result)
    {
      /* The data has not changed.  We will just bump the timeout value.
         The new record is simply discarded; it does not have to be freed.  */
      dh->timeout = timeout;
      dh->ttl = dataset->head.ttl;
      ++dh->nreloads;
      if (cacheable)
        pthread_rwlock_unlock (&db->lock);
      return timeout;
    }

  if (he == NULL)
    {
      /* We write the dataset before inserting it into the database, since
         this thread might block while inserting and would then make the
         receiver wait unnecessarily.  */
      assert (fd != -1);

#ifdef HAVE_SENDFILE
      if (__builtin_expect (db->mmap_used, 1) && cacheable)
        {
          assert (db->wr_fd != -1);
          assert ((char *) &dataset->resp > (char *) db->data);
          assert ((char *) dataset - (char *) db->head + sizeof (*dataset)
                  <= (sizeof (struct database_pers_head)
                      + db->head->module * sizeof (ref_t)
                      + db->head->data_size));
# ifndef __ASSUME_SENDFILE
          ssize_t written =
# endif
            sendfileall (fd, db->wr_fd,
                         (char *) &dataset->resp - (char *) db->head,
                         sizeof (innetgroup_response_header));
# ifndef __ASSUME_SENDFILE
          if (written == -1 && errno == ENOSYS)
            goto use_write;
# endif
        }
      else
#endif
        {
#if defined HAVE_SENDFILE && !defined __ASSUME_SENDFILE
        use_write:
#endif
          writeall (fd, &dataset->resp, sizeof (innetgroup_response_header));
        }
    }

  if (cacheable)
    {
      /* If necessary, we also propagate the data to disk.  */
      if (db->persistent)
        {
          // XXX async OK?
          uintptr_t pval = (uintptr_t) dataset & ~pagesize_m1;
          msync ((void *) pval,
                 ((uintptr_t) dataset & pagesize_m1) + sizeof (*dataset)
                 + req->key_len,
                 MS_ASYNC);
        }

      (void) cache_add (req->type, key_copy, req->key_len, &dataset->head,
                        true, db, uid, he == NULL);

      pthread_rwlock_unlock (&db->lock);

      /* Mark the old entry as obsolete.  */
      if (dh != NULL)
        dh->usable = false;
    }

  return timeout;
}


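/* Handle a GETNETGRENT request: send the netgroup named by KEY to the client
   on FD and cache the result.  */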
void
addgetnetgrent (struct database_dyn *db, int fd, request_header *req,
                void *key, uid_t uid)
{
  struct dataset *ignore;

  addgetnetgrentX (db, fd, req, key, uid, NULL, NULL, &ignore);
}


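/* Refresh an existing GETNETGRENT cache entry (described by HE and DH) in
   place; no client is waiting, so nothing is written out.  Returns the new
   timeout.  */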
time_t
readdgetnetgrent (struct database_dyn *db, struct hashentry *he,
                  struct datahead *dh)
{
  request_header req =
    {
      .type = GETNETGRENT,
      .key_len = he->len
    };
  struct dataset *ignore;

  return addgetnetgrentX (db, -1, &req, db->data + he->key, he->owner, he, dh,
                          &ignore);
}


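/* Handle an INNETGR request: KEY encodes the netgroup name and the
   (host, user, domain) triple to test; the reply is sent on FD.  */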
void
addinnetgr (struct database_dyn *db, int fd, request_header *req,
            void *key, uid_t uid)
{
  addinnetgrX (db, fd, req, key, uid, NULL, NULL);
}


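/* Refresh an existing INNETGR cache entry (described by HE and DH) in place;
   returns the new timeout.  */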
time_t
readdinnetgr (struct database_dyn *db, struct hashentry *he,
              struct datahead *dh)
{
  request_header req =
    {
      .type = INNETGR,
      .key_len = he->len
    };

  return addinnetgrX (db, -1, &req, db->data + he->key, he->owner, he, dh);
}