/* Enqueue a list of read or write requests.
   Copyright (C) 1997-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef lio_listio
#include <aio.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

#include <aio_misc.h>

#define LIO_OPCODE_BASE 0
#endif
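
/* The #ifndef block above lets this file be reused for the 64-bit
   interface: lio_listio64.c is expected to predefine `lio_listio' and
   LIO_OPCODE_BASE and then include this file.  */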

#include <shlib-compat.h>


/* We need this special structure to handle asynchronous I/O.  */
struct async_waitlist
  {
    /* Number of requests in the list that have not yet finished.  */
    unsigned int counter;
    /* Notification to deliver once COUNTER drops to zero.  */
    struct sigevent sigev;
    /* One entry per enqueued request, allocated together with this
       structure.  */
    struct waitlist list[0];
  };


/* The code in glibc 2.1 to glibc 2.4 issued only one event when all
   requests submitted with lio_listio finished.  The existing practice
   is to issue events for the individual requests as well.  This is
   what the new code does.  */
#if SHLIB_COMPAT (librt, GLIBC_2_1, GLIBC_2_4)
# define LIO_MODE(mode) ((mode) & 127)
# define NO_INDIVIDUAL_EVENT_P(mode) ((mode) & 128)
#else
# define LIO_MODE(mode) mode
# define NO_INDIVIDUAL_EVENT_P(mode) 0
#endif
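
/* Illustrative sketch (not part of the build): with the current behavior
   a caller may combine the list-wide notification passed to lio_listio
   with a per-request notification in each aiocb, e.g.

     struct aiocb cb = { .aio_fildes = fd, .aio_buf = buf,
                         .aio_nbytes = sizeof buf,
                         .aio_lio_opcode = LIO_READ };
     cb.aio_sigevent.sigev_notify = SIGEV_THREAD;
     cb.aio_sigevent.sigev_notify_function = request_done;
     struct aiocb *reqs[] = { &cb };
     lio_listio (LIO_NOWAIT, reqs, 1, NULL);

   where `fd', `buf' and `request_done' are assumed to be supplied by the
   caller.  The GLIBC_2.1 compatibility entry point below suppresses such
   per-request events via NO_INDIVIDUAL_EVENT_P.  */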


static int
lio_listio_internal (int mode, struct aiocb *const list[], int nent,
                     struct sigevent *sig)
{
  struct sigevent defsigev;
  struct requestlist *requests[nent];
  int cnt;
  volatile unsigned int total = 0;
  int result = 0;

  if (sig == NULL)
    {
      defsigev.sigev_notify = SIGEV_NONE;
      sig = &defsigev;
    }
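
  /* From here on SIG always points to a valid sigevent; if the caller did
     not ask for a notification it requests SIGEV_NONE.  */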

  /* Request the mutex.  */
  pthread_mutex_lock (&__aio_requests_mutex);

  /* Now we can enqueue all requests.  Since we have already acquired the
     mutex, the enqueue function need not do this itself.  */
  for (cnt = 0; cnt < nent; ++cnt)
    if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP)
      {
        if (NO_INDIVIDUAL_EVENT_P (mode))
          list[cnt]->aio_sigevent.sigev_notify = SIGEV_NONE;

        requests[cnt] = __aio_enqueue_request ((aiocb_union *) list[cnt],
                                               (list[cnt]->aio_lio_opcode
                                                | LIO_OPCODE_BASE));

        if (requests[cnt] != NULL)
          /* Successfully enqueued.  */
          ++total;
        else
          /* Signal that we've seen an error.  `errno' and the error code
             of the aiocb will tell more.  */
          result = -1;
      }
    else
      requests[cnt] = NULL;

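  /* At this point TOTAL is the number of successfully enqueued requests
     and RESULT is -1 if any enqueue attempt failed.  What happens next
     depends on whether anything was enqueued at all and on the requested
     mode (LIO_WAIT vs. LIO_NOWAIT).  */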
  if (total == 0)
    {
      /* There is nothing left to do except sending the notification if
         we work asynchronously.  */

      /* Release the mutex.  We do this before raising a signal since the
         signal handler might do a `siglongjmp' and the mutex would then
         be locked forever.  */
      pthread_mutex_unlock (&__aio_requests_mutex);

      if (LIO_MODE (mode) == LIO_NOWAIT)
        __aio_notify_only (sig);

      return result;
    }
  else if (LIO_MODE (mode) == LIO_WAIT)
    {
#ifndef DONT_NEED_AIO_MISC_COND
      pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
      int oldstate;
#endif
      struct waitlist waitlist[nent];

      total = 0;
      for (cnt = 0; cnt < nent; ++cnt)
        {
          assert (requests[cnt] == NULL || list[cnt] != NULL);

          if (requests[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP)
            {
#ifndef DONT_NEED_AIO_MISC_COND
              waitlist[cnt].cond = &cond;
#endif
              waitlist[cnt].result = &result;
              waitlist[cnt].next = requests[cnt]->waiting;
              waitlist[cnt].counterp = &total;
              waitlist[cnt].sigevp = NULL;
              requests[cnt]->waiting = &waitlist[cnt];
              ++total;
            }
        }
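
      /* Each enqueued request now points back at its on-stack waitlist
         entry.  As a request finishes, the notification code decrements
         TOTAL through COUNTERP and wakes this thread, so all that is left
         to do is wait until TOTAL drops to zero.  */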

#ifdef DONT_NEED_AIO_MISC_COND
      AIO_MISC_WAIT (result, total, NULL, 0);
#else
      /* Since `pthread_cond_wait'/`pthread_cond_timedwait' are cancellation
         points we must be careful.  We added entries to the waiting lists
         which we must remove.  So defer cancellation for now.  */
      pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);

      while (total > 0)
        pthread_cond_wait (&cond, &__aio_requests_mutex);

      /* Now it's time to restore the cancellation state.  */
      pthread_setcancelstate (oldstate, NULL);

      /* Destroy the condition variable.  */
      if (pthread_cond_destroy (&cond) != 0)
        /* This must never happen.  */
        abort ();
#endif

      /* If any of the I/O requests failed, return -1 and set errno.  */
      if (result != 0)
        {
          __set_errno (result == EINTR ? EINTR : EIO);
          result = -1;
        }
    }
  else
    {
      struct async_waitlist *waitlist;

      waitlist = (struct async_waitlist *)
        malloc (sizeof (struct async_waitlist)
                + (nent * sizeof (struct waitlist)));
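
      /* The waitlist must outlive this call: the requests finish
         asynchronously after lio_listio has returned.  It is therefore
         heap-allocated (unlike the on-stack array used for LIO_WAIT) and
         released by the notification code (see aio_notify.c) once the
         last request has completed.  */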

      if (waitlist == NULL)
        {
          __set_errno (EAGAIN);
          result = -1;
        }
      else
        {
          total = 0;

          for (cnt = 0; cnt < nent; ++cnt)
            {
              assert (requests[cnt] == NULL || list[cnt] != NULL);

              if (requests[cnt] != NULL
                  && list[cnt]->aio_lio_opcode != LIO_NOP)
                {
#ifndef DONT_NEED_AIO_MISC_COND
                  waitlist->list[cnt].cond = NULL;
#endif
                  waitlist->list[cnt].result = NULL;
                  waitlist->list[cnt].next = requests[cnt]->waiting;
                  waitlist->list[cnt].counterp = &waitlist->counter;
                  waitlist->list[cnt].sigevp = &waitlist->sigev;
                  requests[cnt]->waiting = &waitlist->list[cnt];
                  ++total;
                }
            }

          waitlist->counter = total;
          waitlist->sigev = *sig;
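
          /* From now on the enqueued requests reference WAITLIST.  Each
             finishing request decrements COUNTER; when it reaches zero
             the stored sigevent is delivered for the list as a whole.  */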
        }
    }

  /* Release the mutex.  */
  pthread_mutex_unlock (&__aio_requests_mutex);

  return result;
}


#if SHLIB_COMPAT (librt, GLIBC_2_1, GLIBC_2_4)
int
attribute_compat_text_section
__lio_listio_21 (int mode, struct aiocb *const list[], int nent,
                 struct sigevent *sig)
{
  /* Check arguments.  */
  if (mode != LIO_WAIT && mode != LIO_NOWAIT)
    {
      __set_errno (EINVAL);
      return -1;
    }

  return lio_listio_internal (mode | LIO_NO_INDIVIDUAL_EVENT, list, nent, sig);
}
compat_symbol (librt, __lio_listio_21, lio_listio, GLIBC_2_1);
#endif


int
__lio_listio_item_notify (int mode, struct aiocb *const list[], int nent,
                          struct sigevent *sig)
{
  /* Check arguments.  */
  if (mode != LIO_WAIT && mode != LIO_NOWAIT)
    {
      __set_errno (EINVAL);
      return -1;
    }

  return lio_listio_internal (mode, list, nent, sig);
}
versioned_symbol (librt, __lio_listio_item_notify, lio_listio, GLIBC_2_4);