1 | /* |
2 | * Copyright (c) 2007-2015 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | /************* |
30 | * These functions implement RPCSEC_GSS security for the NFS client and server. |
31 | * The code is specific to the use of Kerberos v5 and the use of DES MAC MD5 |
32 | * protection as described in Internet RFC 2203 and 2623. |
33 | * |
34 | * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful. |
 * It requires that the client and server negotiate a security context before
 * data can be exchanged. The context state is maintained in client and server structures.
37 | * On the client side, each user of an NFS mount is assigned their own context, |
38 | * identified by UID, on their first use of the mount, and it persists until the |
39 | * unmount or until the context is renewed. Each user context has a corresponding |
40 | * server context which the server maintains until the client destroys it, or |
41 | * until the context expires. |
42 | * |
43 | * The client and server contexts are set up dynamically. When a user attempts |
44 | * to send an NFS request, if there is no context for the user, then one is |
45 | * set up via an exchange of NFS null procedure calls as described in RFC 2203. |
46 | * During this exchange, the client and server pass a security token that is |
47 | * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate |
48 | * the user to the server (and vice-versa). The client and server also receive |
49 | * a unique session key that can be used to digitally sign the credentials and |
50 | * verifier or optionally to provide data integrity and/or privacy. |
51 | * |
52 | * Once the context is complete, the client and server enter a normal data |
53 | * exchange phase - beginning with the NFS request that prompted the context |
54 | * creation. During this phase, the client's RPC header contains an RPCSEC_GSS |
55 | * credential and verifier, and the server returns a verifier as well. |
56 | * For simple authentication, the verifier contains a signed checksum of the |
57 | * RPC header, including the credential. The server's verifier has a signed |
58 | * checksum of the current sequence number. |
59 | * |
60 | * Each client call contains a sequence number that nominally increases by one |
61 | * on each request. The sequence number is intended to prevent replay attacks. |
62 | * Since the protocol can be used over UDP, there is some allowance for |
63 | * out-of-sequence requests, so the server checks whether the sequence numbers |
64 | * are within a sequence "window". If a sequence number is outside the lower |
65 | * bound of the window, the server silently drops the request. This has some |
66 | * implications for retransmission. If a request needs to be retransmitted, the |
67 | * client must bump the sequence number even if the request XID is unchanged. |
68 | * |
69 | * When the NFS mount is unmounted, the client sends a "destroy" credential |
70 | * to delete the server's context for each user of the mount. Since it's |
71 | * possible for the client to crash or disconnect without sending the destroy |
72 | * message, the server has a thread that reaps contexts that have been idle |
73 | * too long. |
74 | */ |
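/*
 * For reference, RFC 2203 defines the control procedures and the version 1
 * credential body used throughout this file roughly as follows (XDR):
 *
 *	enum rpc_gss_proc_t {
 *		RPCSEC_GSS_DATA          = 0,
 *		RPCSEC_GSS_INIT          = 1,
 *		RPCSEC_GSS_CONTINUE_INIT = 2,
 *		RPCSEC_GSS_DESTROY       = 3
 *	};
 *
 *	struct rpc_gss_cred_vers_1_t {
 *		rpc_gss_proc_t    gss_proc;	(control procedure)
 *		unsigned int      seq_num;	(sequence number)
 *		rpc_gss_service_t service;	(none, integrity, or privacy)
 *		opaque            handle<>;	(server-assigned context handle)
 *	};
 */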
75 | |
76 | #include <stdint.h> |
77 | #include <sys/param.h> |
78 | #include <sys/systm.h> |
79 | #include <sys/proc.h> |
80 | #include <sys/kauth.h> |
81 | #include <sys/kernel.h> |
82 | #include <sys/mount_internal.h> |
83 | #include <sys/vnode.h> |
84 | #include <sys/ubc.h> |
85 | #include <sys/malloc.h> |
86 | #include <sys/kpi_mbuf.h> |
87 | #include <sys/ucred.h> |
88 | |
89 | #include <kern/host.h> |
90 | #include <kern/task.h> |
91 | #include <libkern/libkern.h> |
92 | |
93 | #include <mach/task.h> |
94 | #include <mach/host_special_ports.h> |
95 | #include <mach/host_priv.h> |
96 | #include <mach/thread_act.h> |
97 | #include <mach/mig_errors.h> |
98 | #include <mach/vm_map.h> |
99 | #include <vm/vm_map.h> |
100 | #include <vm/vm_kern.h> |
101 | #include <gssd/gssd_mach.h> |
102 | |
103 | #include <nfs/rpcv2.h> |
104 | #include <nfs/nfsproto.h> |
105 | #include <nfs/nfs.h> |
106 | #include <nfs/nfsnode.h> |
107 | #include <nfs/nfs_gss.h> |
108 | #include <nfs/nfsmount.h> |
109 | #include <nfs/xdr_subs.h> |
110 | #include <nfs/nfsm_subs.h> |
111 | #include <nfs/nfs_gss.h> |
112 | #include <mach_assert.h> |
113 | #include <kern/assert.h> |
114 | |
115 | #define ASSERT(EX) assert(EX) |
116 | |
117 | #define NFS_GSS_MACH_MAX_RETRIES 3 |
118 | |
119 | #define NFS_GSS_DBG(...) NFS_DBG(NFS_FAC_GSS, 7, ## __VA_ARGS__) |
120 | #define NFS_GSS_ISDBG (NFS_DEBUG_FACILITY & NFS_FAC_GSS) |
121 | |
122 | |
123 | #if NFSSERVER |
124 | u_long nfs_gss_svc_ctx_hash; |
125 | struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl; |
126 | lck_mtx_t *nfs_gss_svc_ctx_mutex; |
127 | lck_grp_t *nfs_gss_svc_grp; |
128 | uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE; |
129 | #define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC) |
130 | #endif /* NFSSERVER */ |
131 | |
132 | #if NFSCLIENT |
133 | lck_grp_t *nfs_gss_clnt_grp; |
134 | #endif /* NFSCLIENT */ |
135 | |
136 | #define KRB5_MAX_MIC_SIZE 128 |
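/* DER encoding of the Kerberos 5 GSS-API mechanism OID 1.2.840.113554.1.2.2 */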
137 | uint8_t krb5_mech_oid[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 }; |
138 | static uint8_t xdrpad[] = { 0x00, 0x00, 0x00, 0x00}; |
139 | |
140 | #if NFSCLIENT |
141 | static int nfs_gss_clnt_ctx_find(struct nfsreq *); |
142 | static int nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *); |
143 | static int nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *); |
144 | static int nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *); |
145 | static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, uint32_t *); |
146 | static int nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t); |
147 | void nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *); |
148 | static void nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *); |
149 | static int nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *, struct nfs_gss_clnt_ctx **); |
150 | static void nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *); |
151 | static void nfs_gss_clnt_log_error(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t, uint32_t); |
152 | #endif /* NFSCLIENT */ |
153 | |
154 | #if NFSSERVER |
155 | static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t); |
156 | static void nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *); |
157 | static void nfs_gss_svc_ctx_timer(void *, void *); |
158 | static int nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *); |
159 | static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t); |
160 | #endif /* NFSSERVER */ |
161 | |
162 | static void host_release_special_port(mach_port_t); |
163 | static mach_port_t host_copy_special_port(mach_port_t); |
164 | static void nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *); |
165 | static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *); |
166 | |
167 | static int nfs_gss_mchain_length(mbuf_t); |
168 | static int nfs_gss_append_chain(struct nfsm_chain *, mbuf_t); |
169 | static void nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t); |
170 | |
171 | #if NFSSERVER |
172 | thread_call_t nfs_gss_svc_ctx_timer_call; |
173 | int nfs_gss_timer_on = 0; |
174 | uint32_t nfs_gss_ctx_count = 0; |
175 | const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS; |
176 | #endif /* NFSSERVER */ |
177 | |
178 | /* |
179 | * Initialization when NFS starts |
180 | */ |
181 | void |
182 | nfs_gss_init(void) |
183 | { |
184 | #if NFSCLIENT |
	nfs_gss_clnt_grp = lck_grp_alloc_init("rpcsec_gss_clnt", LCK_GRP_ATTR_NULL);
#endif /* NFSCLIENT */

#if NFSSERVER
	nfs_gss_svc_grp = lck_grp_alloc_init("rpcsec_gss_svc", LCK_GRP_ATTR_NULL);
190 | |
191 | nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash); |
192 | nfs_gss_svc_ctx_mutex = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL); |
193 | |
194 | nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL); |
195 | #endif /* NFSSERVER */ |
196 | } |
197 | |
198 | /* |
199 | * Common RPCSEC_GSS support routines |
200 | */ |
201 | |
202 | static errno_t |
203 | rpc_gss_prepend_32(mbuf_t *mb, uint32_t value) |
204 | { |
205 | int error; |
206 | uint32_t *data; |
207 | |
208 | #if 0 |
209 | data = mbuf_data(*mb); |
210 | /* |
	 * If a wrap token comes back and is not aligned,
212 | * get a new buffer (which should be aligned) to put the |
213 | * length in. |
214 | */ |
215 | if ((uintptr_t)data & 0x3) { |
216 | mbuf_t nmb; |
217 | |
218 | error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &nmb); |
219 | if (error) |
220 | return (error); |
221 | mbuf_setnext(nmb, *mb); |
222 | *mb = nmb; |
223 | } |
224 | #endif |
225 | error = mbuf_prepend(mb, sizeof(uint32_t), MBUF_WAITOK); |
226 | if (error) |
227 | return (error); |
228 | |
229 | data = mbuf_data(*mb); |
230 | *data = txdr_unsigned(value); |
231 | |
232 | return (0); |
233 | } |
234 | |
235 | /* |
 * Prepend the sequence number to the XDR encoded argument or result.
 * The sequence number is prepended in its own mbuf.
238 | * |
239 | * On successful return mbp_head will point to the old mbuf chain |
240 | * prepended with a new mbuf that has the sequence number. |
241 | */ |
242 | |
243 | static errno_t |
244 | rpc_gss_data_create(mbuf_t *mbp_head, uint32_t seqnum) |
245 | { |
246 | int error; |
247 | mbuf_t mb; |
248 | struct nfsm_chain nmc; |
249 | struct nfsm_chain *nmcp = &nmc; |
250 | uint8_t *data; |
251 | |
252 | error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &mb); |
253 | if (error) |
254 | return (error); |
255 | data = mbuf_data(mb); |
256 | #if 0 |
257 | /* Reserve space for prepending */ |
258 | len = mbuf_maxlen(mb); |
259 | len = (len & ~0x3) - NFSX_UNSIGNED; |
260 | printf("%s: data = %p, len = %d\n" , __func__, data, (int)len); |
261 | error = mbuf_setdata(mb, data + len, 0); |
262 | if (error || mbuf_trailingspace(mb)) |
263 | printf("%s: data = %p trailingspace = %d error = %d\n" , __func__, mbuf_data(mb), (int)mbuf_trailingspace(mb), error); |
264 | #endif |
265 | /* Reserve 16 words for prepending */ |
266 | error = mbuf_setdata(mb, data + 16*sizeof(uint32_t), 0); |
267 | nfsm_chain_init(nmcp, mb); |
268 | nfsm_chain_add_32(error, nmcp, seqnum); |
269 | nfsm_chain_build_done(error, nmcp); |
270 | if (error) |
271 | return (EINVAL); |
272 | mbuf_setnext(nmcp->nmc_mcur, *mbp_head); |
273 | *mbp_head = nmcp->nmc_mhead; |
274 | |
275 | return (0); |
276 | } |
277 | |
278 | /* |
279 | * Create an rpc_gss_integ_data_t given an argument or result in mb_head. |
 * On successful return mb_head will point to the rpc_gss_integ_data_t, which begins
 * with the databody length followed by the 4 byte sequence number. len is set to the
 * length of the original argument or result and does not include any extra xdr padding.
283 | * Returns 0 on success, else an errno_t |
284 | */ |
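/*
 * For illustration, the mbuf chain built here is laid out roughly as
 *
 *	[ databody length ][ seqnum | args ... ][ mic length | mic ]
 *
 * i.e. the XDR opaque databody_integ (sequence number followed by the original
 * argument or result) followed by the opaque checksum token, which together
 * form the rpc_gss_integ_data_t of RFC 2203.
 */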
285 | |
286 | static errno_t |
287 | rpc_gss_integ_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len) |
288 | { |
289 | uint32_t error; |
290 | uint32_t major; |
291 | uint32_t length; |
292 | gss_buffer_desc mic; |
293 | struct nfsm_chain nmc; |
294 | |
295 | /* Length of the argument or result */ |
296 | length = nfs_gss_mchain_length(*mb_head); |
297 | if (len) |
298 | *len = length; |
299 | error = rpc_gss_data_create(mb_head, seqnum); |
300 | if (error) |
301 | return (error); |
302 | |
303 | /* |
304 | * length is the length of the rpc_gss_data |
305 | */ |
306 | length += NFSX_UNSIGNED; /* Add the sequence number to the length */ |
307 | major = gss_krb5_get_mic_mbuf(&error, ctx, 0, *mb_head, 0, length, &mic); |
308 | if (major != GSS_S_COMPLETE) { |
309 | printf("gss_krb5_get_mic_mbuf failed %d\n" , error); |
310 | return (error); |
311 | } |
312 | |
313 | error = rpc_gss_prepend_32(mb_head, length); |
314 | if (error) |
315 | return (error); |
316 | |
317 | nfsm_chain_dissect_init(error, &nmc, *mb_head); |
	/* Append the GSS mic token: advance past the rpc_gss_data_t, i.e. its length plus NFSX_UNSIGNED for the length field itself */
319 | nfsm_chain_adv(error, &nmc, length + NFSX_UNSIGNED); |
320 | nfsm_chain_finish_mbuf(error, &nmc); // Force the mic into its own sub chain. |
321 | nfsm_chain_add_32(error, &nmc, mic.length); |
322 | nfsm_chain_add_opaque(error, &nmc, mic.value, mic.length); |
323 | nfsm_chain_build_done(error, &nmc); |
324 | gss_release_buffer(NULL, &mic); |
325 | |
326 | // printmbuf("rpc_gss_integ_data_create done", *mb_head, 0, 0); |
327 | assert(nmc.nmc_mhead == *mb_head); |
328 | |
329 | return (error); |
330 | } |
331 | |
332 | /* |
333 | * Create an rpc_gss_priv_data_t out of the supplied raw arguments or results in mb_head. |
 * On successful return mb_head will point to a wrap token of length len.
 * Note that len does not include any xdr padding.
336 | * Returns 0 on success, else an errno_t |
337 | */ |
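/*
 * For illustration, the mbuf chain built here is laid out roughly as
 *
 *	[ opaque length ][ wrap token covering (seqnum | args ...) ][ xdr pad ]
 *
 * i.e. the XDR opaque databody_priv of RFC 2203's rpc_gss_priv_data: the
 * sequence number and the original argument or result are wrapped in place by
 * gss_krb5_wrap_mbuf(), the opaque length is prepended, and any needed XDR
 * padding is appended in its own mbuf.
 */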
338 | static errno_t |
339 | rpc_gss_priv_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len) |
340 | { |
341 | uint32_t error; |
342 | uint32_t major; |
343 | struct nfsm_chain nmc; |
344 | uint32_t pad; |
345 | uint32_t length; |
346 | |
347 | error = rpc_gss_data_create(mb_head, seqnum); |
348 | if (error) |
349 | return (error); |
350 | |
351 | length = nfs_gss_mchain_length(*mb_head); |
352 | major = gss_krb5_wrap_mbuf(&error, ctx, 1, 0, mb_head, 0, length, NULL); |
353 | if (major != GSS_S_COMPLETE) |
354 | return (error); |
355 | |
356 | length = nfs_gss_mchain_length(*mb_head); |
357 | if (len) |
358 | *len = length; |
359 | pad = nfsm_pad(length); |
360 | |
	/* Prepend the opaque length of the rpc_gss_priv_data */
362 | error = rpc_gss_prepend_32(mb_head, length); |
363 | |
364 | if (error) |
365 | return (error); |
366 | if (pad) { |
367 | nfsm_chain_dissect_init(error, &nmc, *mb_head); |
		/* Advance past the opaque length field and the length bytes of data */
369 | nfsm_chain_adv(error, &nmc, NFSX_UNSIGNED + length); |
370 | nfsm_chain_finish_mbuf(error, &nmc); |
371 | nfsm_chain_add_opaque_nopad(error, &nmc, xdrpad, pad); |
372 | nfsm_chain_build_done(error, &nmc); |
373 | } |
374 | |
375 | return (error); |
376 | } |
377 | |
378 | #if NFSCLIENT |
379 | |
380 | /* |
381 | * Restore the argument or result from an rpc_gss_integ_data mbuf chain |
 * We have a four byte sequence number, len bytes of arguments, and an opaque
383 | * encoded mic, possibly followed by some pad bytes. The mic and possible |
384 | * pad bytes are on their own sub mbuf chains. |
385 | * |
 * On successful return mb_head is the chain of the xdr args or results without
 * the sequence number and mic, and 0 is returned. Otherwise return an errno.
388 | * |
389 | */ |
390 | static errno_t |
391 | rpc_gss_integ_data_restore(gss_ctx_id_t ctx __unused, mbuf_t *mb_head, size_t len) |
392 | { |
393 | mbuf_t mb = *mb_head; |
394 | mbuf_t tail = NULL, next; |
395 | |
	/* Chop off the opaque length and seq number */
397 | mbuf_adj(mb, 2 * NFSX_UNSIGNED); |
398 | |
	/* there should only be one empty mbuf here, but free any others just in case */
400 | for (; mb; mb = next) { |
401 | next = mbuf_next(mb); |
402 | if (mbuf_len(mb) == 0) |
403 | mbuf_free(mb); |
404 | else |
405 | break; |
406 | } |
407 | *mb_head = mb; |
408 | |
409 | for (; mb && len; mb = mbuf_next(mb)) { |
410 | tail = mb; |
411 | if (mbuf_len(mb) <= len) |
412 | len -= mbuf_len(mb); |
413 | else |
414 | return (EBADRPC); |
415 | } |
416 | /* drop the mic */ |
417 | if (tail) { |
418 | mbuf_setnext(tail, NULL); |
419 | mbuf_freem(mb); |
420 | } |
421 | |
422 | return (0); |
423 | } |
424 | |
425 | /* |
 * Restore the argument or result from an rpc_gss_priv_data mbuf chain
427 | * mb_head points to the wrap token of length len. |
428 | * |
 * On successful return mb_head is our original xdr arg or result and
 * the return value is 0. Otherwise return an errno.
431 | */ |
432 | static errno_t |
433 | rpc_gss_priv_data_restore(gss_ctx_id_t ctx, mbuf_t *mb_head, size_t len) |
434 | { |
435 | uint32_t major, error; |
436 | mbuf_t mb = *mb_head, next; |
437 | uint32_t plen; |
438 | size_t length; |
439 | gss_qop_t qop = GSS_C_QOP_REVERSE; |
440 | |
	/* Chop off the opaque length */
442 | mbuf_adj(mb, NFSX_UNSIGNED); |
443 | /* If we have padding, drop it */ |
444 | plen = nfsm_pad(len); |
445 | if (plen) { |
446 | mbuf_t tail = NULL; |
447 | |
448 | for(length = 0; length < len && mb; mb = mbuf_next(mb)) { |
449 | tail = mb; |
450 | length += mbuf_len(mb); |
451 | } |
452 | if ((length != len) || (mb == NULL) || (tail == NULL)) |
453 | return (EBADRPC); |
454 | |
455 | mbuf_freem(mb); |
456 | mbuf_setnext(tail, NULL); |
457 | } |
458 | |
459 | major = gss_krb5_unwrap_mbuf(&error, ctx, mb_head, 0, len, NULL, &qop); |
460 | if (major != GSS_S_COMPLETE) { |
461 | printf("gss_krb5_unwrap_mbuf failed. major = %d minor = %d\n" , (int)major, error); |
462 | return (error); |
463 | } |
464 | mb = *mb_head; |
465 | |
	/* Drop the sequence number */
467 | mbuf_adj(mb, NFSX_UNSIGNED); |
468 | assert(mbuf_len(mb) == 0); |
469 | |
	/* Chop off any empty mbufs */
471 | for (mb = *mb_head; mb; mb = next) { |
472 | next = mbuf_next(mb); |
473 | if (mbuf_len(mb) == 0) |
474 | mbuf_free(mb); |
475 | else |
476 | break; |
477 | } |
478 | *mb_head = mb; |
479 | |
480 | return (0); |
481 | } |
482 | |
483 | /* |
484 | * Find the context for a particular user. |
485 | * |
486 | * If the context doesn't already exist |
487 | * then create a new context for this user. |
488 | * |
489 | * Note that the code allows superuser (uid == 0) |
490 | * to adopt the context of another user. |
491 | * |
492 | * We'll match on the audit session ids, since those |
 * processes will have access to the same credential cache.
494 | */ |
495 | |
496 | #define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid) |
497 | #define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid) |
498 | |
499 | #define SAFE_CAST_INTTYPE( type, intval ) \ |
500 | ( (type)(intval)/(sizeof(type) < sizeof(intval) ? 0 : 1) ) |
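/*
 * SAFE_CAST_INTTYPE evaluates to a plain (type) cast, but its divisor
 * collapses to zero whenever the destination type is narrower than the
 * source, so a potentially truncating cast gets flagged rather than
 * silently accepted.
 */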
501 | |
502 | uid_t |
503 | nfs_cred_getasid2uid(kauth_cred_t cred) |
504 | { |
505 | uid_t result = SAFE_CAST_INTTYPE(uid_t, kauth_cred_getasid(cred)); |
506 | return (result); |
507 | } |
508 | |
509 | /* |
510 | * Debugging |
511 | */ |
512 | static void |
513 | nfs_gss_clnt_ctx_dump(struct nfsmount *nmp) |
514 | { |
515 | struct nfs_gss_clnt_ctx *cp; |
516 | |
517 | lck_mtx_lock(&nmp->nm_lock); |
518 | NFS_GSS_DBG("Enter\n" ); |
519 | TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { |
520 | lck_mtx_lock(cp->gss_clnt_mtx); |
521 | printf("context %d/%d: refcnt = %d, flags = %x\n" , |
522 | kauth_cred_getasid(cp->gss_clnt_cred), |
523 | kauth_cred_getauid(cp->gss_clnt_cred), |
524 | cp->gss_clnt_refcnt, cp->gss_clnt_flags); |
525 | lck_mtx_unlock(cp->gss_clnt_mtx); |
526 | } |
527 | NFS_GSS_DBG("Exit\n" ); |
528 | lck_mtx_unlock(&nmp->nm_lock); |
529 | } |
530 | |
531 | static char * |
532 | nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *buf, int len) |
533 | { |
534 | char *np; |
535 | int nlen; |
	const char *server = "";
537 | |
538 | if (nmp && nmp->nm_mountp) |
539 | server = vfs_statfs(nmp->nm_mountp)->f_mntfromname; |
540 | |
541 | if (cp == NULL) { |
542 | snprintf(buf, len, "[%s] NULL context" , server); |
543 | return (buf); |
544 | } |
545 | |
546 | if (cp->gss_clnt_principal && !cp->gss_clnt_display) { |
547 | np = (char *)cp->gss_clnt_principal; |
548 | nlen = cp->gss_clnt_prinlen; |
549 | } else { |
550 | np = cp->gss_clnt_display; |
551 | nlen = np ? strlen(cp->gss_clnt_display) : 0; |
552 | } |
553 | if (nlen) |
554 | snprintf(buf, len, "[%s] %.*s %d/%d %s" , server, nlen, np, |
555 | kauth_cred_getasid(cp->gss_clnt_cred), |
556 | kauth_cred_getuid(cp->gss_clnt_cred), |
557 | cp->gss_clnt_principal ? "" : "[from default cred] " ); |
558 | else |
559 | snprintf(buf, len, "[%s] using default %d/%d " , server, |
560 | kauth_cred_getasid(cp->gss_clnt_cred), |
561 | kauth_cred_getuid(cp->gss_clnt_cred)); |
562 | return (buf); |
563 | } |
564 | |
565 | #define NFS_CTXBUFSZ 80 |
566 | #define NFS_GSS_CTX(req, cp) nfs_gss_clnt_ctx_name((req)->r_nmp, cp ? cp : (req)->r_gss_ctx, CTXBUF, sizeof(CTXBUF)) |
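/* NFS_GSS_CTX expects a local char CTXBUF[NFS_CTXBUFSZ] buffer to be declared at the call site. */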
567 | |
568 | #define NFS_GSS_CLNT_CTX_DUMP(nmp) \ |
569 | do { \ |
570 | if (NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x2)) \ |
571 | nfs_gss_clnt_ctx_dump((nmp)); \ |
572 | } while (0) |
573 | |
574 | static int |
575 | nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1, kauth_cred_t cred2) |
576 | { |
577 | if (kauth_cred_getasid(cred1) == kauth_cred_getasid(cred2)) |
578 | return (1); |
579 | return (0); |
580 | } |
581 | |
582 | /* |
583 | * Busy the mount for each principal set on the mount |
584 | * so that the automounter will not unmount the file |
 * system underneath us. Without this, if an unmount
586 | * occurs the principal that is set for an audit session |
587 | * will be lost and we may end up with a different identity. |
588 | * |
589 | * Note setting principals on the mount is a bad idea. This |
 * really should be handled by KIM (Kerberos Identity Management)
591 | * so that defaults can be set by service identities. |
592 | */ |
593 | |
594 | static void |
595 | nfs_gss_clnt_mnt_ref(struct nfsmount *nmp) |
596 | { |
597 | int error; |
598 | vnode_t rvp; |
599 | |
600 | if (nmp == NULL || |
601 | !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) |
602 | return; |
603 | |
604 | error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL); |
605 | if (!error) { |
606 | vnode_ref(rvp); |
607 | vnode_put(rvp); |
608 | } |
609 | } |
610 | |
611 | /* |
 * Unbusy the mount. See the comment above.
613 | */ |
614 | |
615 | static void |
616 | nfs_gss_clnt_mnt_rele(struct nfsmount *nmp) |
617 | { |
618 | int error; |
619 | vnode_t rvp; |
620 | |
621 | if (nmp == NULL || |
622 | !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) |
623 | return; |
624 | |
625 | error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL); |
626 | if (!error) { |
627 | vnode_rele(rvp); |
628 | vnode_put(rvp); |
629 | } |
630 | } |
631 | |
632 | int nfs_root_steals_ctx = 0; |
633 | |
634 | static int |
635 | nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t plen, uint32_t nt) |
636 | { |
637 | struct nfsmount *nmp = req->r_nmp; |
638 | struct nfs_gss_clnt_ctx *cp; |
639 | struct nfsreq treq; |
640 | int error = 0; |
641 | struct timeval now; |
642 | char CTXBUF[NFS_CTXBUFSZ]; |
643 | |
644 | bzero(&treq, sizeof (struct nfsreq)); |
645 | treq.r_nmp = nmp; |
646 | |
647 | microuptime(&now); |
648 | lck_mtx_lock(&nmp->nm_lock); |
649 | TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { |
650 | lck_mtx_lock(cp->gss_clnt_mtx); |
651 | if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { |
652 | NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n" , |
653 | NFS_GSS_CTX(req, cp), |
654 | cp->gss_clnt_refcnt); |
655 | lck_mtx_unlock(cp->gss_clnt_mtx); |
656 | continue; |
657 | } |
658 | if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, req->r_cred)) { |
659 | if (nmp->nm_gsscl.tqh_first != cp) { |
660 | TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries); |
661 | TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries); |
662 | } |
663 | if (principal) { |
664 | /* |
				 * If we have a principal, but it does not match the principal
				 * of the current context, then mark the context for removal.
667 | */ |
668 | if (cp->gss_clnt_prinlen != plen || cp->gss_clnt_prinnt != nt || |
669 | bcmp(cp->gss_clnt_principal, principal, plen) != 0) { |
670 | cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY); |
671 | cp->gss_clnt_refcnt++; |
672 | lck_mtx_unlock(cp->gss_clnt_mtx); |
673 | NFS_GSS_DBG("Marking %s for deletion because %s does not match\n" , |
674 | NFS_GSS_CTX(req, cp), principal); |
675 | NFS_GSS_DBG("len = (%d,%d), nt = (%d,%d)\n" , cp->gss_clnt_prinlen, plen, |
676 | cp->gss_clnt_prinnt, nt); |
677 | treq.r_gss_ctx = cp; |
678 | cp = NULL; |
679 | break; |
680 | } |
681 | } |
682 | if (cp->gss_clnt_flags & GSS_CTX_INVAL) { |
683 | /* |
684 | * If we're still being used and we're not expired |
685 | * just return and don't bother gssd again. Note if |
686 | * gss_clnt_nctime is zero it is about to be set to now. |
687 | */ |
688 | if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec || cp->gss_clnt_nctime == 0) { |
689 | NFS_GSS_DBG("Context %s (refcnt = %d) not expired returning EAUTH nctime = %ld now = %ld\n" , |
690 | NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec); |
691 | lck_mtx_unlock(cp->gss_clnt_mtx); |
692 | lck_mtx_unlock(&nmp->nm_lock); |
693 | return (NFSERR_EAUTH); |
694 | } |
695 | if (cp->gss_clnt_refcnt) { |
696 | struct nfs_gss_clnt_ctx *ncp; |
697 | /* |
					 * If this context has references, we can't use it so we mark it for
699 | * destruction and create a new context based on this one in the |
700 | * same manner as renewing one. |
701 | */ |
702 | cp->gss_clnt_flags |= GSS_CTX_DESTROY; |
703 | NFS_GSS_DBG("Context %s has expired but we still have %d references\n" , |
704 | NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt); |
705 | error = nfs_gss_clnt_ctx_copy(cp, &ncp); |
706 | lck_mtx_unlock(cp->gss_clnt_mtx); |
707 | if (error) { |
708 | lck_mtx_unlock(&nmp->nm_lock); |
709 | return (error); |
710 | } |
711 | cp = ncp; |
712 | break; |
713 | } else { |
714 | if (cp->gss_clnt_nctime) |
715 | nmp->nm_ncentries--; |
716 | lck_mtx_unlock(cp->gss_clnt_mtx); |
717 | TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries); |
718 | break; |
719 | } |
720 | } |
721 | /* Found a valid context to return */ |
722 | cp->gss_clnt_refcnt++; |
723 | req->r_gss_ctx = cp; |
724 | lck_mtx_unlock(cp->gss_clnt_mtx); |
725 | lck_mtx_unlock(&nmp->nm_lock); |
726 | return (0); |
727 | } |
728 | lck_mtx_unlock(cp->gss_clnt_mtx); |
729 | } |
730 | |
731 | if (!cp && nfs_root_steals_ctx && principal == NULL && kauth_cred_getuid(req->r_cred) == 0) { |
732 | /* |
733 | * If superuser is trying to get access, then co-opt |
734 | * the first valid context in the list. |
735 | * XXX Ultimately, we need to allow superuser to |
736 | * go ahead and attempt to set up its own context |
737 | * in case one is set up for it. |
738 | */ |
739 | TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { |
740 | if (!(cp->gss_clnt_flags & (GSS_CTX_INVAL|GSS_CTX_DESTROY))) { |
741 | nfs_gss_clnt_ctx_ref(req, cp); |
742 | lck_mtx_unlock(&nmp->nm_lock); |
743 | NFS_GSS_DBG("Root stole context %s\n" , NFS_GSS_CTX(req, NULL)); |
744 | return (0); |
745 | } |
746 | } |
747 | } |
748 | |
749 | NFS_GSS_DBG("Context %s%sfound in Neg Cache @ %ld\n" , |
750 | NFS_GSS_CTX(req, cp), |
751 | cp == NULL ? " not " : "" , |
752 | cp == NULL ? 0L : cp->gss_clnt_nctime); |
753 | |
754 | /* |
755 | * Not found - create a new context |
756 | */ |
757 | |
758 | if (cp == NULL) { |
759 | MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO); |
760 | if (cp == NULL) { |
761 | lck_mtx_unlock(&nmp->nm_lock); |
762 | return (ENOMEM); |
763 | } |
764 | cp->gss_clnt_cred = req->r_cred; |
765 | kauth_cred_ref(cp->gss_clnt_cred); |
766 | cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL); |
767 | cp->gss_clnt_ptime = now.tv_sec - GSS_PRINT_DELAY; |
768 | if (principal) { |
769 | MALLOC(cp->gss_clnt_principal, uint8_t *, plen+1, M_TEMP, M_WAITOK|M_ZERO); |
770 | memcpy(cp->gss_clnt_principal, principal, plen); |
771 | cp->gss_clnt_prinlen = plen; |
772 | cp->gss_clnt_prinnt = nt; |
773 | cp->gss_clnt_flags |= GSS_CTX_STICKY; |
774 | nfs_gss_clnt_mnt_ref(nmp); |
775 | } |
776 | } else { |
777 | nfs_gss_clnt_ctx_clean(cp); |
778 | if (principal) { |
779 | /* |
			 * If we have a principal and we found a matching audit
			 * session, then to get here the principal had to match.
			 * In walking the context list, if a context has a principal
			 * that does not match, or has no principal set, then we mark
			 * the context for destruction, set cp to NULL, and fall into
			 * the if clause above. If the context still has references
			 * we copy the context, which preserves the principal, and we
			 * end up here with the correct principal set.
			 * If we don't have references, then the principal must have
			 * matched and we will fall through to here.
790 | */ |
791 | cp->gss_clnt_flags |= GSS_CTX_STICKY; |
792 | } |
793 | } |
794 | |
795 | cp->gss_clnt_thread = current_thread(); |
796 | nfs_gss_clnt_ctx_ref(req, cp); |
797 | TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries); |
798 | lck_mtx_unlock(&nmp->nm_lock); |
799 | |
800 | error = nfs_gss_clnt_ctx_init_retry(req, cp); // Initialize new context |
801 | if (error) { |
802 | NFS_GSS_DBG("nfs_gss_clnt_ctx_init_retry returned %d for %s\n" , error, NFS_GSS_CTX(req, cp)); |
803 | nfs_gss_clnt_ctx_unref(req); |
804 | } |
805 | |
	/* Remove any old matching context that had a different principal */
807 | nfs_gss_clnt_ctx_unref(&treq); |
808 | |
809 | return (error); |
810 | } |
811 | |
812 | static int |
813 | nfs_gss_clnt_ctx_find(struct nfsreq *req) |
814 | { |
815 | return (nfs_gss_clnt_ctx_find_principal(req, NULL, 0, 0)); |
816 | } |
817 | |
818 | /* |
819 | * Inserts an RPCSEC_GSS credential into an RPC header. |
820 | * After the credential is inserted, the code continues |
821 | * to build the verifier which contains a signed checksum |
822 | * of the RPC header. |
823 | */ |
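/*
 * For RPCSEC_GSS_DATA calls the verifier added below is flavor RPCSEC_GSS,
 * whose body is a GSS MIC computed by gss_krb5_get_mic_mbuf() over the RPC
 * header built so far, starting just past any TCP record mark and including
 * the credential added above. During context setup (RPCSEC_GSS_INIT and
 * RPCSEC_GSS_CONTINUE_INIT) a null verifier is used instead, since there is
 * no session key yet.
 */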
824 | |
825 | int |
826 | nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args) |
827 | { |
828 | struct nfs_gss_clnt_ctx *cp; |
829 | uint32_t seqnum = 0; |
830 | uint32_t major; |
831 | uint32_t error = 0; |
832 | int slpflag, recordmark = 0, offset; |
833 | struct gss_seq *gsp; |
834 | gss_buffer_desc mic; |
835 | |
836 | slpflag = (PZERO-1); |
837 | if (req->r_nmp) { |
838 | slpflag |= (NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0; |
839 | recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM); |
840 | } |
841 | |
842 | retry: |
843 | if (req->r_gss_ctx == NULL) { |
844 | /* |
845 | * Find the context for this user. |
846 | * If no context is found, one will |
847 | * be created. |
848 | */ |
849 | error = nfs_gss_clnt_ctx_find(req); |
850 | if (error) |
851 | return (error); |
852 | } |
853 | cp = req->r_gss_ctx; |
854 | |
855 | /* |
856 | * If the context thread isn't null, then the context isn't |
857 | * yet complete and is for the exclusive use of the thread |
858 | * doing the context setup. Wait until the context thread |
859 | * is null. |
860 | */ |
861 | lck_mtx_lock(cp->gss_clnt_mtx); |
862 | if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) { |
863 | cp->gss_clnt_flags |= GSS_NEEDCTX; |
864 | msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait" , NULL); |
865 | slpflag &= ~PCATCH; |
866 | if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) |
867 | return (error); |
868 | nfs_gss_clnt_ctx_unref(req); |
869 | goto retry; |
870 | } |
871 | lck_mtx_unlock(cp->gss_clnt_mtx); |
872 | |
873 | if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) { |
874 | /* |
875 | * Get a sequence number for this request. |
876 | * Check whether the oldest request in the window is complete. |
877 | * If it's still pending, then wait until it's done before |
878 | * we allocate a new sequence number and allow this request |
879 | * to proceed. |
880 | */ |
881 | lck_mtx_lock(cp->gss_clnt_mtx); |
882 | while (win_getbit(cp->gss_clnt_seqbits, |
883 | ((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) { |
884 | cp->gss_clnt_flags |= GSS_NEEDSEQ; |
885 | msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "seqwin" , NULL); |
886 | slpflag &= ~PCATCH; |
887 | if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) { |
888 | return (error); |
889 | } |
890 | lck_mtx_lock(cp->gss_clnt_mtx); |
891 | if (cp->gss_clnt_flags & GSS_CTX_INVAL) { |
				/* Renewed while we were waiting */
893 | lck_mtx_unlock(cp->gss_clnt_mtx); |
894 | nfs_gss_clnt_ctx_unref(req); |
895 | goto retry; |
896 | } |
897 | } |
898 | seqnum = ++cp->gss_clnt_seqnum; |
899 | win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin); |
900 | lck_mtx_unlock(cp->gss_clnt_mtx); |
901 | |
902 | MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK|M_ZERO); |
903 | if (gsp == NULL) |
904 | return (ENOMEM); |
905 | gsp->gss_seqnum = seqnum; |
906 | SLIST_INSERT_HEAD(&req->r_gss_seqlist, gsp, gss_seqnext); |
907 | } |
908 | |
909 | /* Insert the credential */ |
910 | nfsm_chain_add_32(error, nmc, RPCSEC_GSS); |
911 | nfsm_chain_add_32(error, nmc, 5 * NFSX_UNSIGNED + cp->gss_clnt_handle_len); |
912 | nfsm_chain_add_32(error, nmc, RPCSEC_GSS_VERS_1); |
913 | nfsm_chain_add_32(error, nmc, cp->gss_clnt_proc); |
914 | nfsm_chain_add_32(error, nmc, seqnum); |
915 | nfsm_chain_add_32(error, nmc, cp->gss_clnt_service); |
916 | nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len); |
917 | if (cp->gss_clnt_handle_len > 0) { |
918 | if (cp->gss_clnt_handle == NULL) |
919 | return (EBADRPC); |
920 | nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len); |
921 | } |
922 | if (error) |
923 | return(error); |
924 | /* |
925 | * Now add the verifier |
926 | */ |
927 | if (cp->gss_clnt_proc == RPCSEC_GSS_INIT || |
928 | cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) { |
929 | /* |
930 | * If the context is still being created |
931 | * then use a null verifier. |
932 | */ |
933 | nfsm_chain_add_32(error, nmc, RPCAUTH_NULL); // flavor |
934 | nfsm_chain_add_32(error, nmc, 0); // length |
935 | nfsm_chain_build_done(error, nmc); |
936 | if (!error) |
937 | nfs_gss_append_chain(nmc, args); |
938 | return (error); |
939 | } |
940 | |
941 | offset = recordmark ? NFSX_UNSIGNED : 0; // record mark |
942 | nfsm_chain_build_done(error, nmc); |
943 | |
944 | major = gss_krb5_get_mic_mbuf((uint32_t *)&error, cp->gss_clnt_ctx_id, 0, nmc->nmc_mhead, offset, 0, &mic); |
945 | if (major != GSS_S_COMPLETE) { |
946 | printf ("gss_krb5_get_mic_buf failed %d\n" , error); |
947 | return (error); |
948 | } |
949 | |
950 | nfsm_chain_add_32(error, nmc, RPCSEC_GSS); // flavor |
951 | nfsm_chain_add_32(error, nmc, mic.length); // length |
952 | nfsm_chain_add_opaque(error, nmc, mic.value, mic.length); |
953 | (void)gss_release_buffer(NULL, &mic); |
954 | nfsm_chain_build_done(error, nmc); |
955 | if (error) |
956 | return (error); |
957 | |
958 | /* |
959 | * Now we may have to compute integrity or encrypt the call args |
960 | * per RFC 2203 Section 5.3.2 |
961 | */ |
962 | switch (cp->gss_clnt_service) { |
963 | case RPCSEC_GSS_SVC_NONE: |
964 | if (args) |
965 | nfs_gss_append_chain(nmc, args); |
966 | break; |
967 | case RPCSEC_GSS_SVC_INTEGRITY: |
968 | /* |
		 * r_gss_arglen is the length of the args mbuf going into the routine.
		 * It's used to find the mic if we need to restore the args.
971 | */ |
972 | /* Note the mbufs that were used in r_mrest are being encapsulated in the rpc_gss_integ_data_t */ |
973 | assert(req->r_mrest == args); |
974 | nfsm_chain_finish_mbuf(error, nmc); |
975 | if (error) |
976 | return (error); |
977 | error = rpc_gss_integ_data_create(cp->gss_clnt_ctx_id, &args, seqnum, &req->r_gss_arglen); |
978 | if (error) |
979 | break; |
980 | req->r_mrest = args; |
981 | req->r_gss_argoff = nfsm_chain_offset(nmc); |
982 | nfs_gss_append_chain(nmc, args); |
983 | break; |
984 | case RPCSEC_GSS_SVC_PRIVACY: |
985 | /* |
		 * r_gss_arglen is the length of the wrap token, not including any padding.
		 * It's used to find any XDR padding of the wrap token.
988 | */ |
989 | /* Note the mbufs that were used in r_mrest are being encapsulated in the rpc_gss_priv_data_t */ |
990 | assert(req->r_mrest == args); |
991 | nfsm_chain_finish_mbuf(error, nmc); |
992 | if (error) |
993 | return (error); |
994 | error = rpc_gss_priv_data_create(cp->gss_clnt_ctx_id, &args, seqnum, &req->r_gss_arglen); |
995 | if (error) |
996 | break; |
997 | req->r_mrest = args; |
998 | req->r_gss_argoff = nfsm_chain_offset(nmc); |
999 | nfs_gss_append_chain(nmc, args); |
1000 | break; |
1001 | default: |
1002 | return (EINVAL); |
1003 | } |
1004 | |
1005 | return (error); |
1006 | } |
1007 | |
1008 | /* |
1009 | * When receiving a reply, the client checks the verifier |
1010 | * returned by the server. Check that the verifier is the |
1011 | * correct type, then extract the sequence number checksum |
1012 | * from the token in the credential and compare it with a |
1013 | * computed checksum of the sequence number in the request |
1014 | * that was sent. |
1015 | */ |
1016 | int |
1017 | nfs_gss_clnt_verf_get( |
1018 | struct nfsreq *req, |
1019 | struct nfsm_chain *nmc, |
1020 | uint32_t verftype, |
1021 | uint32_t verflen, |
1022 | uint32_t *accepted_statusp) |
1023 | { |
1024 | gss_buffer_desc cksum; |
1025 | uint32_t seqnum = 0; |
1026 | uint32_t major; |
1027 | struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx; |
1028 | struct nfsm_chain nmc_tmp; |
1029 | struct gss_seq *gsp; |
1030 | uint32_t reslen, offset; |
1031 | int error = 0; |
1032 | mbuf_t results_mbuf, prev_mbuf, pad_mbuf; |
1033 | size_t ressize; |
1034 | |
1035 | reslen = 0; |
1036 | *accepted_statusp = 0; |
1037 | |
1038 | if (cp == NULL) |
1039 | return (NFSERR_EAUTH); |
1040 | /* |
1041 | * If it's not an RPCSEC_GSS verifier, then it has to |
1042 | * be a null verifier that resulted from either |
1043 | * a CONTINUE_NEEDED reply during context setup or |
1044 | * from the reply to an AUTH_UNIX call from a dummy |
1045 | * context that resulted from a fallback to sec=sys. |
1046 | */ |
1047 | if (verftype != RPCSEC_GSS) { |
1048 | if (verftype != RPCAUTH_NULL) |
1049 | return (NFSERR_EAUTH); |
1050 | if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) |
1051 | return (NFSERR_EAUTH); |
1052 | if (verflen > 0) |
1053 | nfsm_chain_adv(error, nmc, nfsm_rndup(verflen)); |
1054 | nfsm_chain_get_32(error, nmc, *accepted_statusp); |
1055 | return (error); |
1056 | } |
1057 | |
1058 | /* |
1059 | * If we received an RPCSEC_GSS verifier but the |
1060 | * context isn't yet complete, then it must be |
1061 | * the context complete message from the server. |
1062 | * The verifier will contain an encrypted checksum |
1063 | * of the window but we don't have the session key |
1064 | * yet so we can't decrypt it. Stash the verifier |
1065 | * and check it later in nfs_gss_clnt_ctx_init() when |
1066 | * the context is complete. |
1067 | */ |
1068 | if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) { |
1069 | if (verflen > KRB5_MAX_MIC_SIZE) |
1070 | return (EBADRPC); |
1071 | MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK|M_ZERO); |
1072 | if (cp->gss_clnt_verf == NULL) |
1073 | return (ENOMEM); |
1074 | cp->gss_clnt_verflen = verflen; |
1075 | nfsm_chain_get_opaque(error, nmc, verflen, cp->gss_clnt_verf); |
1076 | nfsm_chain_get_32(error, nmc, *accepted_statusp); |
1077 | return (error); |
1078 | } |
1079 | |
1080 | if (verflen > KRB5_MAX_MIC_SIZE) |
1081 | return (EBADRPC); |
1082 | cksum.length = verflen; |
1083 | MALLOC(cksum.value, void *, verflen, M_TEMP, M_WAITOK); |
1084 | |
1085 | /* |
1086 | * Get the gss mic |
1087 | */ |
1088 | nfsm_chain_get_opaque(error, nmc, verflen, cksum.value); |
1089 | if (error) { |
1090 | FREE(cksum.value, M_TEMP); |
1091 | goto nfsmout; |
1092 | } |
1093 | |
1094 | /* |
1095 | * Search the request sequence numbers for this reply, starting |
1096 | * with the most recent, looking for a checksum that matches |
1097 | * the one in the verifier returned by the server. |
1098 | */ |
1099 | SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) { |
1100 | gss_buffer_desc seqnum_buf; |
1101 | uint32_t network_seqnum = htonl(gsp->gss_seqnum); |
1102 | |
1103 | seqnum_buf.length = sizeof(network_seqnum); |
1104 | seqnum_buf.value = &network_seqnum; |
1105 | major = gss_krb5_verify_mic(NULL, cp->gss_clnt_ctx_id, &seqnum_buf, &cksum, NULL); |
1106 | if (major == GSS_S_COMPLETE) |
1107 | break; |
1108 | } |
1109 | FREE(cksum.value, M_TEMP); |
1110 | if (gsp == NULL) |
1111 | return (NFSERR_EAUTH); |
1112 | |
1113 | /* |
1114 | * Get the RPC accepted status |
1115 | */ |
1116 | nfsm_chain_get_32(error, nmc, *accepted_statusp); |
1117 | if (*accepted_statusp != RPC_SUCCESS) |
1118 | return (0); |
1119 | |
1120 | /* |
1121 | * Now we may have to check integrity or decrypt the results |
1122 | * per RFC 2203 Section 5.3.2 |
1123 | */ |
1124 | switch (cp->gss_clnt_service) { |
1125 | case RPCSEC_GSS_SVC_NONE: |
1126 | /* nothing to do */ |
1127 | break; |
1128 | case RPCSEC_GSS_SVC_INTEGRITY: |
1129 | /* |
1130 | * Here's what we expect in the integrity results from RFC 2203: |
1131 | * |
1132 | * - length of seq num + results (4 bytes) |
1133 | * - sequence number (4 bytes) |
1134 | * - results (variable bytes) |
1135 | * - length of checksum token |
1136 | * - checksum of seqnum + results |
1137 | */ |
1138 | |
1139 | nfsm_chain_get_32(error, nmc, reslen); // length of results |
1140 | if (reslen > NFS_MAXPACKET) { |
1141 | error = EBADRPC; |
1142 | goto nfsmout; |
1143 | } |
1144 | |
1145 | /* Advance and fetch the mic */ |
1146 | nmc_tmp = *nmc; |
1147 | nfsm_chain_adv(error, &nmc_tmp, reslen); // skip over the results |
1148 | nfsm_chain_get_32(error, &nmc_tmp, cksum.length); |
1149 | if (cksum.length > KRB5_MAX_MIC_SIZE) { |
1150 | error = EBADRPC; |
1151 | goto nfsmout; |
1152 | } |
1153 | MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK); |
1154 | nfsm_chain_get_opaque(error, &nmc_tmp, cksum.length, cksum.value); |
		// XXX chop off the cksum?
1156 | |
1157 | /* Call verify mic */ |
1158 | offset = nfsm_chain_offset(nmc); |
1159 | major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_clnt_ctx_id, nmc->nmc_mhead, offset, reslen, &cksum, NULL); |
1160 | FREE(cksum.value, M_TEMP); |
1161 | if (major != GSS_S_COMPLETE) { |
1162 | printf("client results: gss_krb5_verify_mic_mbuf failed %d\n" , error); |
1163 | error = EBADRPC; |
1164 | goto nfsmout; |
1165 | } |
1166 | |
1167 | /* |
1168 | * Get the sequence number prepended to the results |
1169 | * and compare it against the header. |
1170 | */ |
1171 | nfsm_chain_get_32(error, nmc, seqnum); |
1172 | if (gsp->gss_seqnum != seqnum) { |
1173 | error = EBADRPC; |
1174 | goto nfsmout; |
1175 | } |
1176 | #if 0 |
1177 | SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) { |
1178 | if (seqnum == gsp->gss_seqnum) |
1179 | break; |
1180 | } |
1181 | if (gsp == NULL) { |
1182 | error = EBADRPC; |
1183 | goto nfsmout; |
1184 | } |
1185 | #endif |
1186 | break; |
1187 | case RPCSEC_GSS_SVC_PRIVACY: |
1188 | /* |
1189 | * Here's what we expect in the privacy results: |
1190 | * |
		 * opaque encoding of the wrap token
1192 | * - length of wrap token |
1193 | * - wrap token |
1194 | */ |
1195 | prev_mbuf = nmc->nmc_mcur; |
1196 | nfsm_chain_get_32(error, nmc, reslen); // length of results |
1197 | if (reslen == 0 || reslen > NFS_MAXPACKET) { |
1198 | error = EBADRPC; |
1199 | goto nfsmout; |
1200 | } |
1201 | |
1202 | /* Get the wrap token (current mbuf in the chain starting at the current offset) */ |
1203 | offset = nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur); |
1204 | |
1205 | /* split out the wrap token */ |
1206 | ressize = reslen; |
1207 | error = gss_normalize_mbuf(nmc->nmc_mcur, offset, &ressize, &results_mbuf, &pad_mbuf, 0); |
1208 | if (error) |
1209 | goto nfsmout; |
1210 | |
1211 | if (pad_mbuf) { |
1212 | assert(nfsm_pad(reslen) == mbuf_len(pad_mbuf)); |
1213 | mbuf_free(pad_mbuf); |
1214 | } |
1215 | |
1216 | major = gss_krb5_unwrap_mbuf((uint32_t *)&error, cp->gss_clnt_ctx_id, &results_mbuf, 0, ressize, NULL, NULL); |
1217 | if (major) { |
1218 | printf("%s unwraped failed %d\n" , __func__, error); |
1219 | goto nfsmout; |
1220 | } |
1221 | |
1222 | /* Now replace the wrapped arguments with the unwrapped ones */ |
1223 | mbuf_setnext(prev_mbuf, results_mbuf); |
1224 | nmc->nmc_mcur = results_mbuf; |
1225 | nmc->nmc_ptr = mbuf_data(results_mbuf); |
1226 | nmc->nmc_left = mbuf_len(results_mbuf); |
1227 | |
1228 | /* |
1229 | * Get the sequence number prepended to the results |
1230 | * and compare it against the header |
1231 | */ |
1232 | nfsm_chain_get_32(error, nmc, seqnum); |
1233 | if (gsp->gss_seqnum != seqnum) { |
1234 | printf("%s bad seqnum\n" , __func__); |
1235 | error = EBADRPC; |
1236 | goto nfsmout; |
1237 | } |
1238 | #if 0 |
1239 | SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) { |
1240 | if (seqnum == gsp->gss_seqnum) |
1241 | break; |
1242 | } |
1243 | if (gsp == NULL) { |
1244 | error = EBADRPC; |
1245 | goto nfsmout; |
1246 | } |
1247 | #endif |
1248 | break; |
1249 | } |
1250 | nfsmout: |
1251 | return (error); |
1252 | } |
1253 | |
1254 | /* |
1255 | * An RPCSEC_GSS request with no integrity or privacy consists |
1256 | * of just the header mbufs followed by the arg mbufs. |
1257 | * |
 * However, with integrity or privacy the original mbufs have other mbufs
 * prepended and appended to them, which means we have to do some work to
 * restore the arg mbuf chain to its previous state in case we need to
 * retransmit.
1262 | * |
1263 | * The location and length of the args is marked by two fields |
1264 | * in the request structure: r_gss_argoff and r_gss_arglen, |
1265 | * which are stashed when the NFS request is built. |
1266 | */ |
1267 | int |
1268 | nfs_gss_clnt_args_restore(struct nfsreq *req) |
1269 | { |
1270 | struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx; |
1271 | struct nfsm_chain mchain, *nmc = &mchain; |
1272 | int error = 0, merr; |
1273 | |
1274 | if (cp == NULL) |
1275 | return (NFSERR_EAUTH); |
1276 | |
1277 | if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0) |
1278 | return (ENEEDAUTH); |
1279 | |
1280 | /* Nothing to restore for SVC_NONE */ |
1281 | if (cp->gss_clnt_service == RPCSEC_GSS_SVC_NONE) |
1282 | return (0); |
1283 | |
1284 | nfsm_chain_dissect_init(error, nmc, req->r_mhead); // start at RPC header |
1285 | nfsm_chain_adv(error, nmc, req->r_gss_argoff); // advance to args |
1286 | if (error) |
1287 | return (error); |
1288 | |
1289 | if (cp->gss_clnt_service == RPCSEC_GSS_SVC_INTEGRITY) |
1290 | error = rpc_gss_integ_data_restore(cp->gss_clnt_ctx_id, &req->r_mrest, req->r_gss_arglen); |
1291 | else |
1292 | error = rpc_gss_priv_data_restore(cp->gss_clnt_ctx_id, &req->r_mrest, req->r_gss_arglen); |
1293 | |
1294 | merr = mbuf_setnext(nmc->nmc_mcur, req->r_mrest); /* Should always succeed */ |
1295 | assert (merr == 0); |
1296 | |
1297 | return (error ? error : merr); |
1298 | } |
1299 | |
1300 | /* |
1301 | * This function sets up a new context on the client. |
1302 | * Context setup alternates upcalls to the gssd with NFS nullproc calls |
1303 | * to the server. Each of these calls exchanges an opaque token, obtained |
1304 | * via the gssd's calls into the GSS-API on either the client or the server. |
1305 | * This cycle of calls ends when the client's upcall to the gssd and the |
1306 | * server's response both return GSS_S_COMPLETE. At this point, the client |
1307 | * should have its session key and a handle that it can use to refer to its |
1308 | * new context on the server. |
1309 | */ |
1310 | static int |
1311 | nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) |
1312 | { |
1313 | struct nfsmount *nmp = req->r_nmp; |
1314 | gss_buffer_desc cksum, window; |
1315 | uint32_t network_seqnum; |
1316 | int client_complete = 0; |
1317 | int server_complete = 0; |
1318 | int error = 0; |
1319 | int retrycnt = 0; |
1320 | uint32_t major; |
1321 | |
1322 | /* Initialize a new client context */ |
1323 | |
1324 | if (cp->gss_clnt_svcname == NULL) { |
1325 | cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp, &cp->gss_clnt_svcnt, &cp->gss_clnt_svcnamlen); |
1326 | if (cp->gss_clnt_svcname == NULL) { |
1327 | error = NFSERR_EAUTH; |
1328 | goto nfsmout; |
1329 | } |
1330 | } |
1331 | |
1332 | cp->gss_clnt_proc = RPCSEC_GSS_INIT; |
1333 | |
1334 | cp->gss_clnt_service = |
1335 | req->r_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE : |
1336 | req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY : |
1337 | req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0; |
1338 | |
1339 | /* |
1340 | * Now loop around alternating gss_init_sec_context and |
1341 | * gss_accept_sec_context upcalls to the gssd on the client |
1342 | * and server side until the context is complete - or fails. |
1343 | */ |
1344 | for (;;) { |
1345 | retry: |
1346 | /* Upcall to the gss_init_sec_context in the gssd */ |
1347 | error = nfs_gss_clnt_gssd_upcall(req, cp, retrycnt); |
1348 | if (error) |
1349 | goto nfsmout; |
1350 | |
1351 | if (cp->gss_clnt_major == GSS_S_COMPLETE) { |
1352 | client_complete = 1; |
1353 | NFS_GSS_DBG("Client complete\n" ); |
1354 | if (server_complete) |
1355 | break; |
1356 | } else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) { |
1357 | /* |
1358 | * We may have gotten here because the accept sec context |
1359 | * from the server failed and sent back a GSS token that |
1360 | * encapsulates a kerberos error token per RFC 1964/4121 |
			 * with a status of GSS_S_CONTINUE_NEEDED. That caused us to
			 * loop back to the upcall above, which has now decoded those
			 * errors for us.
1364 | */ |
1365 | retrycnt++; |
1366 | cp->gss_clnt_gssd_flags |= GSSD_RESTART; |
1367 | NFS_GSS_DBG("Retrying major = %x minor = %d\n" , cp->gss_clnt_major, (int)cp->gss_clnt_minor); |
1368 | goto retry; |
1369 | } |
1370 | |
1371 | /* |
1372 | * Pass the token to the server. |
1373 | */ |
1374 | error = nfs_gss_clnt_ctx_callserver(req, cp); |
1375 | if (error) { |
1376 | if (error == ENEEDAUTH && |
1377 | (cp->gss_clnt_proc == RPCSEC_GSS_INIT || |
1378 | cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT)) { |
1379 | /* |
				 * We got here because the server had a problem
				 * trying to establish a context and reported a
				 * context problem at the RPCSEC_GSS layer. Perhaps
1383 | * gss_accept_sec_context succeeded in user space, |
1384 | * but the kernel could not handle the etype |
1385 | * to generate the mic for the verifier of the rpc_sec |
1386 | * window size. |
1387 | */ |
1388 | retrycnt++; |
1389 | cp->gss_clnt_gssd_flags |= GSSD_RESTART; |
1390 | NFS_GSS_DBG("Retrying major = %x minor = %d\n" , cp->gss_clnt_major, (int)cp->gss_clnt_minor); |
1391 | goto retry; |
1392 | } |
1393 | goto nfsmout; |
1394 | } |
1395 | if (cp->gss_clnt_major == GSS_S_COMPLETE) { |
1396 | NFS_GSS_DBG("Server complete\n" ); |
1397 | server_complete = 1; |
1398 | if (client_complete) |
1399 | break; |
1400 | } else if (cp->gss_clnt_major == GSS_S_CONTINUE_NEEDED) { |
1401 | cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT; |
1402 | } else { |
1403 | /* Server didn't like us. Try something else */ |
1404 | retrycnt++; |
1405 | cp->gss_clnt_gssd_flags |= GSSD_RESTART; |
1406 | NFS_GSS_DBG("Retrying major = %x minor = %d\n" , cp->gss_clnt_major, (int)cp->gss_clnt_minor); |
1407 | } |
1408 | } |
1409 | |
1410 | /* |
1411 | * The context is apparently established successfully |
1412 | */ |
1413 | lck_mtx_lock(cp->gss_clnt_mtx); |
1414 | cp->gss_clnt_flags |= GSS_CTX_COMPLETE; |
1415 | lck_mtx_unlock(cp->gss_clnt_mtx); |
1416 | cp->gss_clnt_proc = RPCSEC_GSS_DATA; |
1417 | |
1418 | network_seqnum = htonl(cp->gss_clnt_seqwin); |
1419 | window.length = sizeof (cp->gss_clnt_seqwin); |
1420 | window.value = &network_seqnum; |
1421 | cksum.value = cp->gss_clnt_verf; |
1422 | cksum.length = cp->gss_clnt_verflen; |
1423 | major = gss_krb5_verify_mic((uint32_t *)&error, cp->gss_clnt_ctx_id, &window, &cksum, NULL); |
1424 | cp->gss_clnt_verflen = 0; |
1425 | FREE(cp->gss_clnt_verf, M_TEMP); |
1426 | cp->gss_clnt_verf = NULL; |
1427 | if (major != GSS_S_COMPLETE) { |
1428 | printf("%s: could not verify window\n" , __func__); |
1429 | error = NFSERR_EAUTH; |
1430 | goto nfsmout; |
1431 | } |
1432 | |
1433 | /* |
 * Set a somewhat randomized initial sequence number.
1435 | * Start small so we don't overflow GSS_MAXSEQ too quickly. |
1436 | * Add the size of the sequence window so seqbits arithmetic |
1437 | * doesn't go negative. |
1438 | */ |
1439 | cp->gss_clnt_seqnum = (random() & 0xffff) + cp->gss_clnt_seqwin; |
1440 | |
1441 | /* |
1442 | * Allocate a bitmap to keep track of which requests |
1443 | * are pending within the sequence number window. |
1444 | */ |
1445 | MALLOC(cp->gss_clnt_seqbits, uint32_t *, |
1446 | nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO); |
1447 | if (cp->gss_clnt_seqbits == NULL) |
1448 | error = NFSERR_EAUTH; |
1449 | |
1450 | nfsmout: |
1451 | /* |
1452 | * If the error is ENEEDAUTH we're not done, so no need |
1453 | * to wake up other threads again. This thread will retry in |
1454 | * the find or renew routines. |
1455 | */ |
1456 | if (error == ENEEDAUTH) { |
1457 | NFS_GSS_DBG("Returning ENEEDAUTH\n" ); |
1458 | return (error); |
1459 | } |
1460 | |
1461 | /* |
1462 | * If there's an error, just mark it as invalid. |
1463 | * It will be removed when the reference count |
1464 | * drops to zero. |
1465 | */ |
1466 | lck_mtx_lock(cp->gss_clnt_mtx); |
1467 | if (error) |
1468 | cp->gss_clnt_flags |= GSS_CTX_INVAL; |
1469 | |
1470 | /* |
1471 | * Wake any threads waiting to use the context |
1472 | */ |
1473 | cp->gss_clnt_thread = NULL; |
1474 | if (cp->gss_clnt_flags & GSS_NEEDCTX) { |
1475 | cp->gss_clnt_flags &= ~GSS_NEEDCTX; |
1476 | wakeup(cp); |
1477 | } |
1478 | lck_mtx_unlock(cp->gss_clnt_mtx); |
1479 | |
1480 | NFS_GSS_DBG("Returning error = %d\n" , error); |
1481 | return (error); |
1482 | } |
1483 | |
1484 | /* |
1485 | * This function calls nfs_gss_clnt_ctx_init() to set up a new context. |
1486 | * But if there's a failure in trying to establish the context it keeps |
1487 | * retrying at progressively longer intervals in case the failure is |
1488 | * due to some transient condition. For instance, the server might be |
1489 | * failing the context setup because directory services is not coming |
1490 | * up in a timely fashion. |
1491 | */ |
1492 | static int |
1493 | nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) |
1494 | { |
1495 | struct nfsmount *nmp = req->r_nmp; |
1496 | struct timeval now; |
1497 | time_t waituntil; |
1498 | int error, slpflag; |
1499 | int retries = 0; |
1500 | int timeo = NFS_TRYLATERDEL; |
1501 | |
1502 | if (nfs_mount_gone(nmp)) { |
1503 | error = ENXIO; |
1504 | goto bad; |
1505 | } |
1506 | |
1507 | /* For an "intr" mount allow a signal to interrupt the retries */ |
1508 | slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0; |
1509 | |
1510 | while ((error = nfs_gss_clnt_ctx_init(req, cp)) == ENEEDAUTH) { |
1511 | microuptime(&now); |
1512 | waituntil = now.tv_sec + timeo; |
1513 | while (now.tv_sec < waituntil) { |
			tsleep(NULL, PSOCK | slpflag, "nfs_gss_clnt_ctx_init_retry", hz);
1515 | slpflag = 0; |
1516 | error = nfs_sigintr(req->r_nmp, req, current_thread(), 0); |
1517 | if (error) |
1518 | goto bad; |
1519 | microuptime(&now); |
1520 | } |
1521 | |
1522 | retries++; |
1523 | /* If it's a soft mount just give up after a while */ |
1524 | if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (retries > nmp->nm_retry)) { |
1525 | error = ETIMEDOUT; |
1526 | goto bad; |
1527 | } |
1528 | timeo *= 2; |
1529 | if (timeo > 60) |
1530 | timeo = 60; |
1531 | } |
1532 | |
1533 | if (error == 0) |
1534 | return 0; // success |
1535 | bad: |
1536 | /* |
1537 | * Give up on this context |
1538 | */ |
1539 | lck_mtx_lock(cp->gss_clnt_mtx); |
1540 | cp->gss_clnt_flags |= GSS_CTX_INVAL; |
1541 | |
1542 | /* |
1543 | * Wake any threads waiting to use the context |
1544 | */ |
1545 | cp->gss_clnt_thread = NULL; |
1546 | if (cp->gss_clnt_flags & GSS_NEEDCTX) { |
1547 | cp->gss_clnt_flags &= ~GSS_NEEDCTX; |
1548 | wakeup(cp); |
1549 | } |
1550 | lck_mtx_unlock(cp->gss_clnt_mtx); |
1551 | |
1552 | return error; |
1553 | } |
1554 | |
1555 | /* |
1556 | * Call the NFS server using a null procedure for context setup. |
1557 | * Even though it's a null procedure and nominally has no arguments |
1558 | * RFC 2203 requires that the GSS-API token be passed as an argument |
1559 | * and received as a reply. |
1560 | */ |
1561 | static int |
1562 | nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) |
1563 | { |
1564 | struct nfsm_chain nmreq, nmrep; |
1565 | int error = 0, status; |
1566 | uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor; |
1567 | int sz; |
1568 | |
1569 | if (nfs_mount_gone(req->r_nmp)) |
1570 | return (ENXIO); |
1571 | nfsm_chain_null(&nmreq); |
1572 | nfsm_chain_null(&nmrep); |
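/*
 * The null procedure argument is just the XDR opaque GSS token:
 * a 4-byte length followed by the token bytes, padded out to a
 * 4-byte boundary.
 */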
1573 | sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen); |
1574 | nfsm_chain_build_alloc_init(error, &nmreq, sz); |
1575 | nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen); |
1576 | if (cp->gss_clnt_tokenlen > 0) |
1577 | nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen); |
1578 | nfsm_chain_build_done(error, &nmreq); |
1579 | if (error) |
1580 | goto nfsmout; |
1581 | |
1582 | /* Call the server */ |
1583 | error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred, |
1584 | (req->r_flags & R_OPTMASK), cp, &nmrep, &status); |
1585 | if (cp->gss_clnt_token != NULL) { |
1586 | FREE(cp->gss_clnt_token, M_TEMP); |
1587 | cp->gss_clnt_token = NULL; |
1588 | } |
1589 | if (!error) |
1590 | error = status; |
1591 | if (error) |
1592 | goto nfsmout; |
1593 | |
1594 | /* Get the server's reply */ |
1595 | |
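/*
 * Per RFC 2203, the reply body carries the context handle (an opaque),
 * the GSS major and minor status, the sequence window size, and the
 * reply GSS token (an opaque).
 */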
1596 | nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_handle_len); |
1597 | if (cp->gss_clnt_handle != NULL) { |
1598 | FREE(cp->gss_clnt_handle, M_TEMP); |
1599 | cp->gss_clnt_handle = NULL; |
1600 | } |
1601 | if (cp->gss_clnt_handle_len > 0 && cp->gss_clnt_handle_len < GSS_MAX_CTX_HANDLE_LEN) { |
1602 | MALLOC(cp->gss_clnt_handle, u_char *, cp->gss_clnt_handle_len, M_TEMP, M_WAITOK); |
1603 | if (cp->gss_clnt_handle == NULL) { |
1604 | error = ENOMEM; |
1605 | goto nfsmout; |
1606 | } |
1607 | nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_handle_len, cp->gss_clnt_handle); |
1608 | } else { |
1609 | error = EBADRPC; |
1610 | } |
1611 | nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_major); |
1612 | nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_minor); |
1613 | nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_seqwin); |
1614 | nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_tokenlen); |
1615 | if (error) |
1616 | goto nfsmout; |
1617 | if (cp->gss_clnt_tokenlen > 0 && cp->gss_clnt_tokenlen < GSS_MAX_TOKEN_LEN) { |
1618 | MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK); |
1619 | if (cp->gss_clnt_token == NULL) { |
1620 | error = ENOMEM; |
1621 | goto nfsmout; |
1622 | } |
1623 | nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_tokenlen, cp->gss_clnt_token); |
1624 | } else { |
1625 | error = EBADRPC; |
1626 | } |
1627 | |
1628 | /* |
1629 | * Make sure any unusual errors are expanded and logged by gssd |
1630 | */ |
1631 | if (cp->gss_clnt_major != GSS_S_COMPLETE && |
1632 | cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) { |
1633 | |
1634 | printf("nfs_gss_clnt_ctx_callserver: gss_clnt_major = %d\n" , cp->gss_clnt_major); |
1635 | nfs_gss_clnt_log_error(req, cp, major, minor); |
1636 | |
1637 | } |
1638 | |
1639 | nfsmout: |
1640 | nfsm_chain_cleanup(&nmreq); |
1641 | nfsm_chain_cleanup(&nmrep); |
1642 | |
1643 | return (error); |
1644 | } |
1645 | |
/*
 * We construct the service principal as a GSS hostbased service principal of
 * the form nfs@<server>, unless the server's principal was passed down in the
 * mount arguments. If the arguments don't specify the service principal, the
 * server name is extracted from the location passed in the mount arguments if
 * available. Otherwise we assume a format of <server>:<path> in the
 * mntfromname. We don't currently support URLs or other unusual formats like
 * path@server. mount_url will convert the NFS URL into <server>:<path> when
 * calling mount, so this works out well in practice.
1656 | */ |
1657 | |
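/*
 * For example (hypothetical mount, not from any particular system): a
 * mntfromname of "server.example.com:/export/home" yields the hostbased
 * service name "nfs@server.example.com".
 */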
1658 | static uint8_t * |
1659 | nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len) |
1660 | { |
1661 | char *svcname, *d, *server; |
1662 | int lindx, sindx; |
1663 | |
1664 | if (nfs_mount_gone(nmp)) |
1665 | return (NULL); |
1666 | |
1667 | if (nmp->nm_sprinc) { |
1668 | *len = strlen(nmp->nm_sprinc) + 1; |
1669 | MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK); |
1670 | *nt = GSSD_HOSTBASED; |
1671 | if (svcname == NULL) |
1672 | return (NULL); |
1673 | strlcpy(svcname, nmp->nm_sprinc, *len); |
1674 | |
1675 | return ((uint8_t *)svcname); |
1676 | } |
1677 | |
1678 | *nt = GSSD_HOSTBASED; |
1679 | if (nmp->nm_locations.nl_numlocs && !(NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x1))) { |
1680 | lindx = nmp->nm_locations.nl_current.nli_loc; |
1681 | sindx = nmp->nm_locations.nl_current.nli_serv; |
1682 | server = nmp->nm_locations.nl_locations[lindx]->nl_servers[sindx]->ns_name; |
1683 | *len = (uint32_t)strlen(server); |
1684 | } else { |
1685 | /* Older binaries using older mount args end up here */ |
1686 | server = vfs_statfs(nmp->nm_mountp)->f_mntfromname; |
1687 | NFS_GSS_DBG("nfs getting gss svcname from %s\n" , server); |
1688 | d = strchr(server, ':'); |
1689 | *len = (uint32_t)(d ? (d - server) : strlen(server)); |
1690 | } |
1691 | |
1692 | *len += 5; /* "nfs@" plus null */ |
1693 | MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK); |
strlcpy(svcname, "nfs", *len);
strlcat(svcname, "@", *len);
strlcat(svcname, server, *len);
NFS_GSS_DBG("nfs svcname = %s\n", svcname);
1698 | |
1699 | return ((uint8_t *)svcname); |
1700 | } |
1701 | |
1702 | /* |
1703 | * Get a mach port to talk to gssd. |
1704 | * gssd lives in the root bootstrap, so we call gssd's lookup routine |
1705 | * to get a send right to talk to a new gssd instance that launchd has launched |
1706 | * based on the cred's uid and audit session id. |
1707 | */ |
1708 | |
1709 | static mach_port_t |
1710 | nfs_gss_clnt_get_upcall_port(kauth_cred_t credp) |
1711 | { |
1712 | mach_port_t gssd_host_port, uc_port = IPC_PORT_NULL; |
1713 | kern_return_t kr; |
1714 | au_asid_t asid; |
1715 | uid_t uid; |
1716 | |
1717 | kr = host_get_gssd_port(host_priv_self(), &gssd_host_port); |
1718 | if (kr != KERN_SUCCESS) { |
1719 | printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n" , kr, kr); |
1720 | return (IPC_PORT_NULL); |
1721 | } |
1722 | if (!IPC_PORT_VALID(gssd_host_port)) { |
1723 | printf("nfs_gss_get_upcall_port: gssd port not valid\n" ); |
1724 | return (IPC_PORT_NULL); |
1725 | } |
1726 | |
1727 | asid = kauth_cred_getasid(credp); |
1728 | uid = kauth_cred_getauid(credp); |
1729 | if (uid == AU_DEFAUDITID) |
1730 | uid = kauth_cred_getuid(credp); |
1731 | kr = mach_gss_lookup(gssd_host_port, uid, asid, &uc_port); |
1732 | if (kr != KERN_SUCCESS) |
1733 | printf("nfs_gss_clnt_get_upcall_port: mach_gssd_lookup failed: status %x (%d)\n" , kr, kr); |
1734 | host_release_special_port(gssd_host_port); |
1735 | |
1736 | return (uc_port); |
1737 | } |
1738 | |
1739 | |
1740 | static void |
1741 | nfs_gss_clnt_log_error(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t major, uint32_t minor) |
1742 | { |
1743 | #define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK) |
1744 | struct nfsmount *nmp = req->r_nmp; |
char who[] = "client";
uint32_t gss_error = GETMAJERROR(cp->gss_clnt_major);
const char *procn = "unknown";
1748 | proc_t proc; |
1749 | pid_t pid = -1; |
1750 | struct timeval now; |
1751 | |
1752 | if (req->r_thread) { |
1753 | proc = (proc_t)get_bsdthreadtask_info(req->r_thread); |
1754 | if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) |
1755 | proc = NULL; |
1756 | if (proc) { |
1757 | if (*proc->p_comm) |
1758 | procn = proc->p_comm; |
1759 | pid = proc->p_pid; |
1760 | } |
1761 | } else { |
1762 | procn = "kernproc" ; |
1763 | pid = 0; |
1764 | } |
1765 | |
1766 | microuptime(&now); |
1767 | if ((cp->gss_clnt_major != major || cp->gss_clnt_minor != minor || |
1768 | cp->gss_clnt_ptime + GSS_PRINT_DELAY < now.tv_sec) && |
1769 | (nmp->nm_state & NFSSTA_MOUNTED)) { |
1770 | /* |
1771 | * Will let gssd do some logging in hopes that it can translate |
1772 | * the minor code. |
1773 | */ |
1774 | if (cp->gss_clnt_minor && cp->gss_clnt_minor != minor) { |
1775 | (void) mach_gss_log_error( |
1776 | cp->gss_clnt_mport, |
1777 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, |
1778 | kauth_cred_getuid(cp->gss_clnt_cred), |
1779 | who, |
1780 | cp->gss_clnt_major, |
1781 | cp->gss_clnt_minor); |
1782 | } |
1783 | gss_error = gss_error ? gss_error : cp->gss_clnt_major; |
1784 | |
1785 | /* |
1786 | *%%% It would be really nice to get the terminal from the proc or auditinfo_addr struct and print that here. |
1787 | */ |
1788 | printf("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n" , |
1789 | cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred), |
1790 | procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor); |
1791 | cp->gss_clnt_ptime = now.tv_sec; |
1792 | switch (gss_error) { |
case 7: printf("NFS: gssd does not have credentials for session %d/%d, (kinit)?\n",
kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
break;
case 11: printf("NFS: gssd has expired credentials for session %d/%d, (kinit)?\n",
kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
break;
1799 | } |
1800 | } else { |
1801 | NFS_GSS_DBG("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n" , |
1802 | cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred), |
1803 | procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor); |
1804 | } |
1805 | } |
1806 | |
1807 | /* |
1808 | * Make an upcall to the gssd using Mach RPC |
1809 | * The upcall is made using a host special port. |
1810 | * This allows launchd to fire up the gssd in the |
1811 | * user's session. This is important, since gssd |
1812 | * must have access to the user's credential cache. |
1813 | */ |
1814 | static int |
1815 | nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t retrycnt) |
1816 | { |
1817 | kern_return_t kr; |
1818 | gssd_byte_buffer octx = NULL; |
1819 | uint32_t lucidlen = 0; |
1820 | void *lucid_ctx_buffer; |
1821 | int retry_cnt = 0; |
1822 | vm_map_copy_t itoken = NULL; |
1823 | gssd_byte_buffer otoken = NULL; |
1824 | mach_msg_type_number_t otokenlen; |
1825 | int error = 0; |
1826 | uint8_t *principal = NULL; |
1827 | uint32_t plen = 0; |
1828 | int32_t nt = GSSD_STRING_NAME; |
1829 | vm_map_copy_t pname = NULL; |
1830 | vm_map_copy_t svcname = NULL; |
1831 | char display_name[MAX_DISPLAY_STR] = "" ; |
1832 | uint32_t ret_flags; |
1833 | struct nfsmount *nmp = req->r_nmp; |
1834 | uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor; |
1835 | uint32_t selected = (uint32_t)-1; |
1836 | struct nfs_etype etype; |
1837 | |
1838 | if (nmp == NULL || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) |
1839 | return (ENXIO); |
1840 | |
1841 | if (cp->gss_clnt_gssd_flags & GSSD_RESTART) { |
1842 | if (cp->gss_clnt_token) |
1843 | FREE(cp->gss_clnt_token, M_TEMP); |
1844 | cp->gss_clnt_token = NULL; |
1845 | cp->gss_clnt_tokenlen = 0; |
1846 | cp->gss_clnt_proc = RPCSEC_GSS_INIT; |
1847 | /* Server's handle isn't valid. Don't reuse */ |
1848 | cp->gss_clnt_handle_len = 0; |
1849 | if (cp->gss_clnt_handle != NULL) { |
1850 | FREE(cp->gss_clnt_handle, M_TEMP); |
1851 | cp->gss_clnt_handle = NULL; |
1852 | } |
1853 | } |
1854 | |
1855 | NFS_GSS_DBG("Retrycnt = %d nm_etype.count = %d\n" , retrycnt, nmp->nm_etype.count); |
1856 | if (retrycnt >= nmp->nm_etype.count) |
1857 | return (EACCES); |
1858 | |
/* Copy the mount etypes to an ordered set of etypes to try */
1860 | etype = nmp->nm_etype; |
1861 | |
1862 | /* |
1863 | * If we've already selected an etype, lets put that first in our |
1864 | * array of etypes to try, since overwhelmingly, that is likely |
1865 | * to be the etype we want. |
1866 | */ |
1867 | if (etype.selected < etype.count) { |
1868 | etype.etypes[0] = nmp->nm_etype.etypes[etype.selected]; |
1869 | for (uint32_t i = 0; i < etype.selected; i++) |
1870 | etype.etypes[i+1] = nmp->nm_etype.etypes[i]; |
1871 | for (uint32_t i = etype.selected + 1; i < etype.count; i++) |
1872 | etype.etypes[i] = nmp->nm_etype.etypes[i]; |
1873 | } |
1874 | |
/* Remove the ones we've already tried */
1876 | for (uint32_t i = retrycnt; i < etype.count; i++) |
1877 | etype.etypes[i - retrycnt] = etype.etypes[i]; |
1878 | etype.count = etype.count - retrycnt; |
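/*
 * Illustrative example (hypothetical values): if the mount's etypes are
 * {17, 18, 23} and etype 23 (index 2) was previously selected, the
 * reordered list is {23, 17, 18}; with retrycnt == 1 the first entry is
 * dropped, so this attempt offers {17, 18}.
 */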
1879 | |
1880 | NFS_GSS_DBG("etype count = %d preferred etype = %d\n" , etype.count, etype.etypes[0]); |
1881 | |
/*
 * NFS currently only supports default principals or
 * principals based on the uid of the caller, unless
 * the principal to use for the mounting cred was specified
 * in the mount arguments. If the realm to use was specified
 * then we will send that up as the principal; since the realm is
 * preceded by an "@", gssd will try to select the default
 * principal for that realm.
 */
1891 | |
1892 | if (cp->gss_clnt_principal && cp->gss_clnt_prinlen) { |
1893 | principal = cp->gss_clnt_principal; |
1894 | plen = cp->gss_clnt_prinlen; |
1895 | nt = cp->gss_clnt_prinnt; |
1896 | } else if (nmp->nm_principal && IS_VALID_CRED(nmp->nm_mcred) && req->r_cred == nmp->nm_mcred) { |
1897 | plen = (uint32_t)strlen(nmp->nm_principal); |
1898 | principal = (uint8_t *)nmp->nm_principal; |
1899 | cp->gss_clnt_prinnt = nt = GSSD_USER; |
1900 | } |
1901 | else if (nmp->nm_realm) { |
1902 | plen = (uint32_t)strlen(nmp->nm_realm); |
1903 | principal = (uint8_t *)nmp->nm_realm; |
1904 | nt = GSSD_USER; |
1905 | } |
1906 | |
1907 | if (!IPC_PORT_VALID(cp->gss_clnt_mport)) { |
1908 | cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred); |
1909 | if (cp->gss_clnt_mport == IPC_PORT_NULL) |
1910 | goto out; |
1911 | } |
1912 | |
1913 | if (plen) |
1914 | nfs_gss_mach_alloc_buffer(principal, plen, &pname); |
1915 | if (cp->gss_clnt_svcnamlen) |
1916 | nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname); |
1917 | if (cp->gss_clnt_tokenlen) |
1918 | nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken); |
1919 | |
1920 | /* Always want to export the lucid context */ |
1921 | cp->gss_clnt_gssd_flags |= GSSD_LUCID_CONTEXT; |
1922 | |
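/*
 * If the upcall fails because gssd went away (MIG_SERVER_DIED), the
 * out-of-line Mach buffers are re-created below before jumping back
 * here, up to NFS_GSS_MACH_MAX_RETRIES times.
 */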
1923 | retry: |
1924 | kr = mach_gss_init_sec_context_v3( |
1925 | cp->gss_clnt_mport, |
1926 | GSSD_KRB5_MECH, |
1927 | (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen, |
1928 | kauth_cred_getuid(cp->gss_clnt_cred), |
1929 | nt, |
1930 | (gssd_byte_buffer)pname, (mach_msg_type_number_t) plen, |
1931 | cp->gss_clnt_svcnt, |
1932 | (gssd_byte_buffer)svcname, (mach_msg_type_number_t) cp->gss_clnt_svcnamlen, |
1933 | GSSD_MUTUAL_FLAG, |
1934 | (gssd_etype_list)etype.etypes, (mach_msg_type_number_t)etype.count, |
1935 | &cp->gss_clnt_gssd_flags, |
1936 | &cp->gss_clnt_context, |
1937 | &cp->gss_clnt_cred_handle, |
1938 | &ret_flags, |
1939 | &octx, (mach_msg_type_number_t *) &lucidlen, |
1940 | &otoken, &otokenlen, |
1941 | cp->gss_clnt_display ? NULL : display_name, |
1942 | &cp->gss_clnt_major, |
1943 | &cp->gss_clnt_minor); |
1944 | |
1945 | /* Clear the RESTART flag */ |
1946 | cp->gss_clnt_gssd_flags &= ~GSSD_RESTART; |
1947 | if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) { |
1948 | /* We're done with the gssd handles */ |
1949 | cp->gss_clnt_context = 0; |
1950 | cp->gss_clnt_cred_handle = 0; |
1951 | } |
1952 | |
1953 | if (kr != KERN_SUCCESS) { |
1954 | printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n" , kr, kr); |
1955 | if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 && |
1956 | retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES && |
1957 | !vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) { |
1958 | if (plen) |
1959 | nfs_gss_mach_alloc_buffer(principal, plen, &pname); |
1960 | if (cp->gss_clnt_svcnamlen) |
1961 | nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname); |
1962 | if (cp->gss_clnt_tokenlen > 0) |
1963 | nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken); |
1964 | goto retry; |
1965 | } |
1966 | |
1967 | host_release_special_port(cp->gss_clnt_mport); |
1968 | cp->gss_clnt_mport = IPC_PORT_NULL; |
1969 | goto out; |
1970 | } |
1971 | |
1972 | if (cp->gss_clnt_display == NULL && *display_name != '\0') { |
1973 | int dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1; /* Add extra byte to include '\0' */ |
1974 | |
1975 | if (dlen < MAX_DISPLAY_STR) { |
1976 | MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK); |
1977 | if (cp->gss_clnt_display == NULL) |
1978 | goto skip; |
1979 | bcopy(display_name, cp->gss_clnt_display, dlen); |
1980 | } else { |
1981 | goto skip; |
1982 | } |
1983 | } |
1984 | skip: |
1985 | /* |
1986 | * Make sure any unusual errors are expanded and logged by gssd |
1987 | * |
1988 | * XXXX, we need to rethink this and just have gssd return a string for the major and minor codes. |
1989 | */ |
1990 | if (cp->gss_clnt_major != GSS_S_COMPLETE && |
1991 | cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) { |
1992 | NFS_GSS_DBG("Up call returned error\n" ); |
1993 | nfs_gss_clnt_log_error(req, cp, major, minor); |
1994 | /* Server's handle isn't valid. Don't reuse */ |
1995 | cp->gss_clnt_handle_len = 0; |
1996 | if (cp->gss_clnt_handle != NULL) { |
1997 | FREE(cp->gss_clnt_handle, M_TEMP); |
1998 | cp->gss_clnt_handle = NULL; |
1999 | } |
2000 | } |
2001 | |
2002 | if (lucidlen > 0) { |
2003 | if (lucidlen > MAX_LUCIDLEN) { |
2004 | printf("nfs_gss_clnt_gssd_upcall: bad context length (%d)\n" , lucidlen); |
2005 | vm_map_copy_discard((vm_map_copy_t) octx); |
2006 | vm_map_copy_discard((vm_map_copy_t) otoken); |
2007 | goto out; |
2008 | } |
2009 | MALLOC(lucid_ctx_buffer, void *, lucidlen, M_TEMP, M_WAITOK | M_ZERO); |
2010 | error = nfs_gss_mach_vmcopyout((vm_map_copy_t) octx, lucidlen, lucid_ctx_buffer); |
2011 | if (error) { |
2012 | vm_map_copy_discard((vm_map_copy_t) otoken); |
2013 | goto out; |
2014 | } |
2015 | |
2016 | if (cp->gss_clnt_ctx_id) |
2017 | gss_krb5_destroy_context(cp->gss_clnt_ctx_id); |
2018 | cp->gss_clnt_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen); |
2019 | if (cp->gss_clnt_ctx_id == NULL) { |
2020 | printf("Failed to make context from lucid_ctx_buffer\n" ); |
2021 | goto out; |
2022 | } |
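/*
 * Record which of the mount's etypes the new context actually uses,
 * so that it can be preferred on subsequent context setups.
 */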
2023 | for (uint32_t i = 0; i < nmp->nm_etype.count; i++) { |
2024 | if (nmp->nm_etype.etypes[i] == cp->gss_clnt_ctx_id->gss_cryptor.etype) { |
2025 | selected = i; |
2026 | break; |
2027 | } |
2028 | } |
2029 | } |
2030 | |
2031 | /* Free context token used as input */ |
2032 | if (cp->gss_clnt_token) |
2033 | FREE(cp->gss_clnt_token, M_TEMP); |
2034 | cp->gss_clnt_token = NULL; |
2035 | cp->gss_clnt_tokenlen = 0; |
2036 | |
2037 | if (otokenlen > 0) { |
2038 | /* Set context token to gss output token */ |
2039 | MALLOC(cp->gss_clnt_token, u_char *, otokenlen, M_TEMP, M_WAITOK); |
2040 | if (cp->gss_clnt_token == NULL) { |
2041 | printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n" , otokenlen); |
2042 | vm_map_copy_discard((vm_map_copy_t) otoken); |
2043 | return (ENOMEM); |
2044 | } |
2045 | error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token); |
2046 | if (error) { |
2047 | printf("Could not copyout gss token\n" ); |
2048 | FREE(cp->gss_clnt_token, M_TEMP); |
2049 | cp->gss_clnt_token = NULL; |
2050 | return (NFSERR_EAUTH); |
2051 | } |
2052 | cp->gss_clnt_tokenlen = otokenlen; |
2053 | } |
2054 | |
2055 | if (selected != (uint32_t)-1) { |
2056 | nmp->nm_etype.selected = selected; |
2057 | NFS_GSS_DBG("etype selected = %d\n" , nmp->nm_etype.etypes[selected]); |
2058 | } |
2059 | NFS_GSS_DBG("Up call succeeded major = %d\n" , cp->gss_clnt_major); |
2060 | return (0); |
2061 | |
2062 | out: |
2063 | if (cp->gss_clnt_token) |
2064 | FREE(cp->gss_clnt_token, M_TEMP); |
2065 | cp->gss_clnt_token = NULL; |
2066 | cp->gss_clnt_tokenlen = 0; |
2067 | /* Server's handle isn't valid. Don't reuse */ |
2068 | cp->gss_clnt_handle_len = 0; |
2069 | if (cp->gss_clnt_handle != NULL) { |
2070 | FREE(cp->gss_clnt_handle, M_TEMP); |
2071 | cp->gss_clnt_handle = NULL; |
2072 | } |
2073 | |
2074 | NFS_GSS_DBG("Up call returned NFSERR_EAUTH" ); |
2075 | return (NFSERR_EAUTH); |
2076 | } |
2077 | |
2078 | /* |
2079 | * Invoked at the completion of an RPC call that uses an RPCSEC_GSS |
2080 | * credential. The sequence number window that the server returns |
2081 | * at context setup indicates the maximum number of client calls that |
2082 | * can be outstanding on a context. The client maintains a bitmap that |
2083 | * represents the server's window. Each pending request has a bit set |
2084 | * in the window bitmap. When a reply comes in or times out, we reset |
2085 | * the bit in the bitmap and if there are any other threads waiting for |
2086 | * a context slot we notify the waiting thread(s). |
2087 | * |
2088 | * Note that if a request is retransmitted, it will have a single XID |
2089 | * but it may be associated with multiple sequence numbers. So we |
2090 | * may have to reset multiple sequence number bits in the window bitmap. |
2091 | */ |
2092 | void |
2093 | nfs_gss_clnt_rpcdone(struct nfsreq *req) |
2094 | { |
2095 | struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx; |
2096 | struct gss_seq *gsp, *ngsp; |
2097 | int i = 0; |
2098 | |
2099 | if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) |
2100 | return; // no context - don't bother |
2101 | /* |
2102 | * Reset the bit for this request in the |
2103 | * sequence number window to indicate it's done. |
2104 | * We do this even if the request timed out. |
2105 | */ |
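/*
 * For example (illustrative numbers only): with a sequence window of
 * 128 and a completed request whose sequence number is 300, the bit
 * cleared is 300 % 128 = 44.
 */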
2106 | lck_mtx_lock(cp->gss_clnt_mtx); |
2107 | gsp = SLIST_FIRST(&req->r_gss_seqlist); |
2108 | if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin)) |
2109 | win_resetbit(cp->gss_clnt_seqbits, |
2110 | gsp->gss_seqnum % cp->gss_clnt_seqwin); |
2111 | |
2112 | /* |
2113 | * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries |
2114 | */ |
2115 | SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) { |
2116 | if (++i > GSS_CLNT_SEQLISTMAX) { |
2117 | SLIST_REMOVE(&req->r_gss_seqlist, gsp, gss_seq, gss_seqnext); |
2118 | FREE(gsp, M_TEMP); |
2119 | } |
2120 | } |
2121 | |
2122 | /* |
2123 | * If there's a thread waiting for |
2124 | * the window to advance, wake it up. |
2125 | */ |
2126 | if (cp->gss_clnt_flags & GSS_NEEDSEQ) { |
2127 | cp->gss_clnt_flags &= ~GSS_NEEDSEQ; |
2128 | wakeup(cp); |
2129 | } |
2130 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2131 | } |
2132 | |
2133 | /* |
2134 | * Create a reference to a context from a request |
2135 | * and bump the reference count |
2136 | */ |
2137 | void |
2138 | nfs_gss_clnt_ctx_ref(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) |
2139 | { |
2140 | req->r_gss_ctx = cp; |
2141 | |
2142 | lck_mtx_lock(cp->gss_clnt_mtx); |
2143 | cp->gss_clnt_refcnt++; |
2144 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2145 | } |
2146 | |
2147 | /* |
2148 | * Remove a context reference from a request |
2149 | * If the reference count drops to zero, and the |
2150 | * context is invalid, destroy the context |
2151 | */ |
2152 | void |
2153 | nfs_gss_clnt_ctx_unref(struct nfsreq *req) |
2154 | { |
2155 | struct nfsmount *nmp = req->r_nmp; |
2156 | struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx; |
2157 | int on_neg_cache = 0; |
2158 | int neg_cache = 0; |
2159 | int destroy = 0; |
2160 | struct timeval now; |
2161 | char CTXBUF[NFS_CTXBUFSZ]; |
2162 | |
2163 | if (cp == NULL) |
2164 | return; |
2165 | |
2166 | req->r_gss_ctx = NULL; |
2167 | |
2168 | lck_mtx_lock(cp->gss_clnt_mtx); |
2169 | if (--cp->gss_clnt_refcnt < 0) |
2170 | panic("Over release of gss context!\n" ); |
2171 | |
2172 | if (cp->gss_clnt_refcnt == 0) { |
2173 | if ((cp->gss_clnt_flags & GSS_CTX_INVAL) && |
2174 | cp->gss_clnt_ctx_id) { |
2175 | gss_krb5_destroy_context(cp->gss_clnt_ctx_id); |
2176 | cp->gss_clnt_ctx_id = NULL; |
2177 | } |
2178 | if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { |
2179 | destroy = 1; |
2180 | if (cp->gss_clnt_flags & GSS_CTX_STICKY) |
2181 | nfs_gss_clnt_mnt_rele(nmp); |
2182 | if (cp->gss_clnt_nctime) |
2183 | on_neg_cache = 1; |
2184 | } |
2185 | } |
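/*
 * An invalid context that isn't being destroyed is timestamped and
 * parked in the negative cache, so repeated requests from a user whose
 * credentials keep failing don't each trigger a new context setup.
 */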
2186 | if (!destroy && cp->gss_clnt_nctime == 0 && |
2187 | (cp->gss_clnt_flags & GSS_CTX_INVAL)) { |
2188 | microuptime(&now); |
2189 | cp->gss_clnt_nctime = now.tv_sec; |
2190 | neg_cache = 1; |
2191 | } |
2192 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2193 | if (destroy) { |
2194 | NFS_GSS_DBG("Destroying context %s\n" , NFS_GSS_CTX(req, cp)); |
2195 | if (nmp) { |
2196 | lck_mtx_lock(&nmp->nm_lock); |
2197 | if (cp->gss_clnt_entries.tqe_next != NFSNOLIST) { |
2198 | TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries); |
2199 | } |
2200 | if (on_neg_cache) { |
2201 | nmp->nm_ncentries--; |
2202 | } |
2203 | lck_mtx_unlock(&nmp->nm_lock); |
2204 | } |
2205 | nfs_gss_clnt_ctx_destroy(cp); |
2206 | } else if (neg_cache) { |
2207 | NFS_GSS_DBG("Entering context %s into negative cache\n" , NFS_GSS_CTX(req, cp)); |
2208 | if (nmp) { |
2209 | lck_mtx_lock(&nmp->nm_lock); |
2210 | nmp->nm_ncentries++; |
2211 | nfs_gss_clnt_ctx_neg_cache_reap(nmp); |
2212 | lck_mtx_unlock(&nmp->nm_lock); |
2213 | } |
2214 | } |
2215 | NFS_GSS_CLNT_CTX_DUMP(nmp); |
2216 | } |
2217 | |
/*
 * Try to reap any old, unreferenced entries from the
 * negative cache queue.
 */
2222 | void |
2223 | nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *nmp) |
2224 | { |
2225 | struct nfs_gss_clnt_ctx *cp, *tcp; |
2226 | struct timeval now; |
2227 | int reaped = 0; |
2228 | |
2229 | /* Try and reap old, unreferenced, expired contexts */ |
2230 | microuptime(&now); |
2231 | |
2232 | NFS_GSS_DBG("Reaping contexts ncentries = %d\n" , nmp->nm_ncentries); |
2233 | |
2234 | TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) { |
2235 | int destroy = 0; |
2236 | |
2237 | /* Don't reap STICKY contexts */ |
2238 | if ((cp->gss_clnt_flags & GSS_CTX_STICKY) || |
2239 | !(cp->gss_clnt_flags & GSS_CTX_INVAL)) |
2240 | continue; |
2241 | /* Keep up to GSS_MAX_NEG_CACHE_ENTRIES */ |
2242 | if (nmp->nm_ncentries <= GSS_MAX_NEG_CACHE_ENTRIES) |
2243 | break; |
2244 | /* Contexts too young */ |
2245 | if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec) |
2246 | continue; |
2247 | /* Not referenced, remove it. */ |
2248 | lck_mtx_lock(cp->gss_clnt_mtx); |
2249 | if (cp->gss_clnt_refcnt == 0) { |
2250 | cp->gss_clnt_flags |= GSS_CTX_DESTROY; |
2251 | destroy = 1; |
2252 | } |
2253 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2254 | if (destroy) { |
2255 | TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries); |
nmp->nm_ncentries--;
2257 | reaped++; |
2258 | nfs_gss_clnt_ctx_destroy(cp); |
2259 | } |
2260 | } |
2261 | NFS_GSS_DBG("Reaped %d contexts ncentries = %d\n" , reaped, nmp->nm_ncentries); |
2262 | } |
2263 | |
2264 | /* |
2265 | * Clean a context to be cached |
2266 | */ |
2267 | static void |
2268 | nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *cp) |
2269 | { |
2270 | /* Preserve gss_clnt_mtx */ |
2271 | assert(cp->gss_clnt_thread == NULL); /* Will be set to this thread */ |
2272 | /* gss_clnt_entries we should not be on any list at this point */ |
2273 | cp->gss_clnt_flags = 0; |
2274 | /* gss_clnt_refcnt should be zero */ |
2275 | assert(cp->gss_clnt_refcnt == 0); |
/*
 * We are who we are, so preserve the identity fields:
 * gss_clnt_cred
 * gss_clnt_principal
 * gss_clnt_prinlen
 * gss_clnt_prinnt
 * gss_clnt_display
 */
2284 | /* gss_clnt_proc will be set in nfs_gss_clnt_ctx_init */ |
2285 | cp->gss_clnt_seqnum = 0; |
2286 | /* Preserve gss_clnt_service, we're not changing flavors */ |
2287 | if (cp->gss_clnt_handle) { |
2288 | FREE(cp->gss_clnt_handle, M_TEMP); |
2289 | cp->gss_clnt_handle = NULL; |
2290 | } |
2291 | cp->gss_clnt_handle_len = 0; |
2292 | cp->gss_clnt_nctime = 0; |
2293 | cp->gss_clnt_seqwin = 0; |
2294 | if (cp->gss_clnt_seqbits) { |
2295 | FREE(cp->gss_clnt_seqbits, M_TEMP); |
2296 | cp->gss_clnt_seqbits = NULL; |
2297 | } |
2298 | /* Preserve gss_clnt_mport. Still talking to the same gssd */ |
2299 | if (cp->gss_clnt_verf) { |
2300 | FREE(cp->gss_clnt_verf, M_TEMP); |
2301 | cp->gss_clnt_verf = NULL; |
2302 | } |
2303 | /* Service name might change on failover, so reset it */ |
2304 | if (cp->gss_clnt_svcname) { |
2305 | FREE(cp->gss_clnt_svcname, M_TEMP); |
2306 | cp->gss_clnt_svcname = NULL; |
2307 | cp->gss_clnt_svcnt = 0; |
2308 | } |
2309 | cp->gss_clnt_svcnamlen = 0; |
2310 | cp->gss_clnt_cred_handle = 0; |
2311 | cp->gss_clnt_context = 0; |
2312 | if (cp->gss_clnt_token) { |
2313 | FREE(cp->gss_clnt_token, M_TEMP); |
2314 | cp->gss_clnt_token = NULL; |
2315 | } |
2316 | cp->gss_clnt_tokenlen = 0; |
2317 | /* XXX gss_clnt_ctx_id ??? */ |
2318 | /* |
2319 | * Preserve: |
2320 | * gss_clnt_gssd_flags |
2321 | * gss_clnt_major |
2322 | * gss_clnt_minor |
2323 | * gss_clnt_ptime |
2324 | */ |
2325 | } |
2326 | |
2327 | /* |
2328 | * Copy a source context to a new context. This is used to create a new context |
2329 | * with the identity of the old context for renewal. The old context is invalid |
 * at this point but may still have references to it, so it is not safe to use that
2331 | * context. |
2332 | */ |
2333 | static int |
2334 | nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *scp, struct nfs_gss_clnt_ctx **dcpp) |
2335 | { |
2336 | struct nfs_gss_clnt_ctx *dcp; |
2337 | |
2338 | *dcpp = (struct nfs_gss_clnt_ctx *)NULL; |
2339 | MALLOC(dcp, struct nfs_gss_clnt_ctx *, sizeof (struct nfs_gss_clnt_ctx), M_TEMP, M_WAITOK); |
2340 | if (dcp == NULL) |
2341 | return (ENOMEM); |
2342 | bzero(dcp, sizeof (struct nfs_gss_clnt_ctx)); |
2343 | dcp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL); |
2344 | dcp->gss_clnt_cred = scp->gss_clnt_cred; |
2345 | kauth_cred_ref(dcp->gss_clnt_cred); |
2346 | dcp->gss_clnt_prinlen = scp->gss_clnt_prinlen; |
2347 | dcp->gss_clnt_prinnt = scp->gss_clnt_prinnt; |
2348 | if (scp->gss_clnt_principal) { |
2349 | MALLOC(dcp->gss_clnt_principal, uint8_t *, dcp->gss_clnt_prinlen, M_TEMP, M_WAITOK | M_ZERO); |
2350 | if (dcp->gss_clnt_principal == NULL) { |
2351 | FREE(dcp, M_TEMP); |
2352 | return (ENOMEM); |
2353 | } |
2354 | bcopy(scp->gss_clnt_principal, dcp->gss_clnt_principal, dcp->gss_clnt_prinlen); |
2355 | } |
2356 | /* Note we don't preserve the display name, that will be set by a successful up call */ |
2357 | dcp->gss_clnt_service = scp->gss_clnt_service; |
2358 | dcp->gss_clnt_mport = host_copy_special_port(scp->gss_clnt_mport); |
2359 | dcp->gss_clnt_ctx_id = NULL; /* Will be set from successful upcall */ |
2360 | dcp->gss_clnt_gssd_flags = scp->gss_clnt_gssd_flags; |
2361 | dcp->gss_clnt_major = scp->gss_clnt_major; |
2362 | dcp->gss_clnt_minor = scp->gss_clnt_minor; |
2363 | dcp->gss_clnt_ptime = scp->gss_clnt_ptime; |
2364 | |
2365 | *dcpp = dcp; |
2366 | |
2367 | return (0); |
2368 | } |
2369 | |
2370 | /* |
2371 | * Remove a context |
2372 | */ |
2373 | static void |
2374 | nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *cp) |
2375 | { |
2376 | NFS_GSS_DBG("Destroying context %d/%d\n" , |
2377 | kauth_cred_getasid(cp->gss_clnt_cred), |
2378 | kauth_cred_getauid(cp->gss_clnt_cred)); |
2379 | |
2380 | host_release_special_port(cp->gss_clnt_mport); |
2381 | cp->gss_clnt_mport = IPC_PORT_NULL; |
2382 | |
2383 | if (cp->gss_clnt_mtx) { |
2384 | lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp); |
2385 | cp->gss_clnt_mtx = (lck_mtx_t *)NULL; |
2386 | } |
2387 | if (IS_VALID_CRED(cp->gss_clnt_cred)) |
2388 | kauth_cred_unref(&cp->gss_clnt_cred); |
2389 | cp->gss_clnt_entries.tqe_next = NFSNOLIST; |
2390 | cp->gss_clnt_entries.tqe_prev = NFSNOLIST; |
2391 | if (cp->gss_clnt_principal) { |
2392 | FREE(cp->gss_clnt_principal, M_TEMP); |
2393 | cp->gss_clnt_principal = NULL; |
2394 | } |
2395 | if (cp->gss_clnt_display) { |
2396 | FREE(cp->gss_clnt_display, M_TEMP); |
2397 | cp->gss_clnt_display = NULL; |
2398 | } |
2399 | if (cp->gss_clnt_ctx_id) { |
2400 | gss_krb5_destroy_context(cp->gss_clnt_ctx_id); |
2401 | cp->gss_clnt_ctx_id = NULL; |
2402 | } |
2403 | |
2404 | nfs_gss_clnt_ctx_clean(cp); |
2405 | |
2406 | FREE(cp, M_TEMP); |
2407 | } |
2408 | |
2409 | /* |
2410 | * The context for a user is invalid. |
2411 | * Mark the context as invalid, then |
2412 | * create a new context. |
2413 | */ |
2414 | int |
2415 | nfs_gss_clnt_ctx_renew(struct nfsreq *req) |
2416 | { |
2417 | struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx; |
2418 | struct nfs_gss_clnt_ctx *ncp; |
2419 | struct nfsmount *nmp; |
2420 | int error = 0; |
2421 | char CTXBUF[NFS_CTXBUFSZ]; |
2422 | |
2423 | if (cp == NULL) |
2424 | return (0); |
2425 | |
2426 | if (req->r_nmp == NULL) |
2427 | return (ENXIO); |
2428 | nmp = req->r_nmp; |
2429 | |
2430 | lck_mtx_lock(cp->gss_clnt_mtx); |
2431 | if (cp->gss_clnt_flags & GSS_CTX_INVAL) { |
2432 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2433 | nfs_gss_clnt_ctx_unref(req); |
2434 | return (0); // already being renewed |
2435 | } |
2436 | |
2437 | cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY); |
2438 | |
2439 | if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) { |
2440 | cp->gss_clnt_flags &= ~GSS_NEEDSEQ; |
2441 | wakeup(cp); |
2442 | } |
2443 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2444 | |
2445 | if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) |
2446 | return (EACCES); /* Destroying a context is best effort. Don't renew. */ |
2447 | /* |
2448 | * If we're setting up a context let nfs_gss_clnt_ctx_init know this is not working |
2449 | * and to try some other etype. |
2450 | */ |
2451 | if (cp->gss_clnt_proc != RPCSEC_GSS_DATA) |
2452 | return (ENEEDAUTH); |
2453 | error = nfs_gss_clnt_ctx_copy(cp, &ncp); |
2454 | NFS_GSS_DBG("Renewing context %s\n" , NFS_GSS_CTX(req, ncp)); |
2455 | nfs_gss_clnt_ctx_unref(req); |
2456 | if (error) |
2457 | return (error); |
2458 | |
2459 | lck_mtx_lock(&nmp->nm_lock); |
2460 | /* |
2461 | * Note we don't bother taking the new context mutex as we're |
2462 | * not findable at the moment. |
2463 | */ |
2464 | ncp->gss_clnt_thread = current_thread(); |
2465 | nfs_gss_clnt_ctx_ref(req, ncp); |
2466 | TAILQ_INSERT_HEAD(&nmp->nm_gsscl, ncp, gss_clnt_entries); |
2467 | lck_mtx_unlock(&nmp->nm_lock); |
2468 | |
2469 | error = nfs_gss_clnt_ctx_init_retry(req, ncp); // Initialize new context |
2470 | if (error) |
2471 | nfs_gss_clnt_ctx_unref(req); |
2472 | |
2473 | return (error); |
2474 | } |
2475 | |
2476 | |
2477 | /* |
2478 | * Destroy all the contexts associated with a mount. |
2479 | * The contexts are also destroyed by the server. |
2480 | */ |
2481 | void |
2482 | nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp) |
2483 | { |
2484 | struct nfs_gss_clnt_ctx *cp; |
2485 | struct nfsm_chain nmreq, nmrep; |
2486 | int error, status; |
2487 | struct nfsreq req; |
2488 | req.r_nmp = nmp; |
2489 | |
2490 | if (!nmp) |
2491 | return; |
2492 | |
2493 | |
2494 | lck_mtx_lock(&nmp->nm_lock); |
2495 | while((cp = TAILQ_FIRST(&nmp->nm_gsscl))) { |
2496 | TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries); |
2497 | cp->gss_clnt_entries.tqe_next = NFSNOLIST; |
2498 | lck_mtx_lock(cp->gss_clnt_mtx); |
2499 | if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { |
2500 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2501 | continue; |
2502 | } |
2503 | cp->gss_clnt_refcnt++; |
2504 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2505 | req.r_gss_ctx = cp; |
2506 | |
2507 | lck_mtx_unlock(&nmp->nm_lock); |
2508 | /* |
2509 | * Tell the server to destroy its context. |
2510 | * But don't bother if it's a forced unmount. |
2511 | */ |
2512 | if (!nfs_mount_gone(nmp) && |
2513 | (cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_DESTROY | GSS_CTX_COMPLETE)) == GSS_CTX_COMPLETE) { |
2514 | cp->gss_clnt_proc = RPCSEC_GSS_DESTROY; |
2515 | |
2516 | error = 0; |
2517 | nfsm_chain_null(&nmreq); |
2518 | nfsm_chain_null(&nmrep); |
2519 | nfsm_chain_build_alloc_init(error, &nmreq, 0); |
2520 | nfsm_chain_build_done(error, &nmreq); |
2521 | if (!error) |
2522 | nfs_request_gss(nmp->nm_mountp, &nmreq, |
2523 | current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status); |
2524 | nfsm_chain_cleanup(&nmreq); |
2525 | nfsm_chain_cleanup(&nmrep); |
2526 | } |
2527 | |
2528 | /* |
2529 | * Mark the context invalid then drop |
2530 | * the reference to remove it if its |
2531 | * refcount is zero. |
2532 | */ |
2533 | lck_mtx_lock(cp->gss_clnt_mtx); |
2534 | cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY); |
2535 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2536 | nfs_gss_clnt_ctx_unref(&req); |
2537 | lck_mtx_lock(&nmp->nm_lock); |
2538 | } |
2539 | lck_mtx_unlock(&nmp->nm_lock); |
2540 | assert(TAILQ_EMPTY(&nmp->nm_gsscl)); |
2541 | } |
2542 | |
2543 | |
2544 | /* |
 * Removes a mount's context for a credential
2546 | */ |
2547 | int |
2548 | nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred) |
2549 | { |
2550 | struct nfs_gss_clnt_ctx *cp; |
2551 | struct nfsreq req; |
2552 | |
2553 | req.r_nmp = nmp; |
2554 | |
2555 | NFS_GSS_DBG("Enter\n" ); |
2556 | NFS_GSS_CLNT_CTX_DUMP(nmp); |
2557 | lck_mtx_lock(&nmp->nm_lock); |
2558 | TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { |
2559 | lck_mtx_lock(cp->gss_clnt_mtx); |
2560 | if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) { |
2561 | if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { |
2562 | NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n" , |
2563 | kauth_cred_getasid(cp->gss_clnt_cred), |
2564 | kauth_cred_getauid(cp->gss_clnt_cred), |
2565 | cp->gss_clnt_refcnt); |
2566 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2567 | continue; |
2568 | } |
2569 | cp->gss_clnt_refcnt++; |
2570 | cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY); |
2571 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2572 | req.r_gss_ctx = cp; |
2573 | lck_mtx_unlock(&nmp->nm_lock); |
2574 | /* |
2575 | * Drop the reference to remove it if its |
2576 | * refcount is zero. |
2577 | */ |
2578 | NFS_GSS_DBG("Removed context %d/%d refcnt = %d\n" , |
2579 | kauth_cred_getasid(cp->gss_clnt_cred), |
2580 | kauth_cred_getuid(cp->gss_clnt_cred), |
2581 | cp->gss_clnt_refcnt); |
2582 | nfs_gss_clnt_ctx_unref(&req); |
2583 | return (0); |
2584 | } |
2585 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2586 | } |
2587 | |
2588 | lck_mtx_unlock(&nmp->nm_lock); |
2589 | |
2590 | NFS_GSS_DBG("Returning ENOENT\n" ); |
2591 | return (ENOENT); |
2592 | } |
2593 | |
2594 | /* |
 * Sets a mount's principal for a session associated with cred.
2596 | */ |
2597 | int |
2598 | nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx, |
2599 | uint8_t *principal, uint32_t princlen, uint32_t nametype) |
2600 | |
2601 | { |
2602 | struct nfsreq req; |
2603 | int error; |
2604 | |
2605 | NFS_GSS_DBG("Enter:\n" ); |
2606 | |
2607 | bzero(&req, sizeof(struct nfsreq)); |
2608 | req.r_nmp = nmp; |
2609 | req.r_gss_ctx = NULL; |
2610 | req.r_auth = nmp->nm_auth; |
2611 | req.r_thread = vfs_context_thread(ctx); |
2612 | req.r_cred = vfs_context_ucred(ctx); |
2613 | |
2614 | error = nfs_gss_clnt_ctx_find_principal(&req, principal, princlen, nametype); |
2615 | NFS_GSS_DBG("nfs_gss_clnt_ctx_find_principal returned %d\n" , error); |
/*
 * We don't care about auth errors. Those would indicate that the context is in the
 * negative cache, and if and when the user obtains credentials for the principal
 * we should be good to go, since we will then select those credentials for this principal.
 */
2621 | if (error == EACCES || error == EAUTH || error == ENEEDAUTH) |
2622 | error = 0; |
2623 | |
2624 | /* We're done with this request */ |
2625 | nfs_gss_clnt_ctx_unref(&req); |
2626 | |
2627 | return (error); |
2628 | } |
2629 | |
2630 | /* |
 * Gets a mount's principal from a session associated with cred
2632 | */ |
2633 | int |
2634 | nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx, |
2635 | struct user_nfs_gss_principal *p) |
2636 | { |
2637 | struct nfsreq req; |
2638 | int error = 0; |
2639 | struct nfs_gss_clnt_ctx *cp; |
2640 | kauth_cred_t cred = vfs_context_ucred(ctx); |
2641 | const char *princ = NULL; |
2642 | char CTXBUF[NFS_CTXBUFSZ]; |
2643 | |
/* Make sure the members of the struct user_nfs_gss_principal are initialized */
2645 | p->nametype = GSSD_STRING_NAME; |
2646 | p->principal = USER_ADDR_NULL; |
2647 | p->princlen = 0; |
2648 | p->flags = 0; |
2649 | |
2650 | req.r_nmp = nmp; |
2651 | lck_mtx_lock(&nmp->nm_lock); |
2652 | TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { |
2653 | lck_mtx_lock(cp->gss_clnt_mtx); |
2654 | if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { |
2655 | NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n" , |
2656 | NFS_GSS_CTX(&req, cp), |
2657 | cp->gss_clnt_refcnt); |
2658 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2659 | continue; |
2660 | } |
2661 | if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) { |
2662 | cp->gss_clnt_refcnt++; |
2663 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2664 | goto out; |
2665 | } |
2666 | lck_mtx_unlock(cp->gss_clnt_mtx); |
2667 | } |
2668 | |
2669 | out: |
2670 | if (cp == NULL) { |
2671 | lck_mtx_unlock(&nmp->nm_lock); |
2672 | p->flags |= NFS_IOC_NO_CRED_FLAG; /* No credentials, valid or invalid on this mount */ |
2673 | NFS_GSS_DBG("No context found for session %d by uid %d\n" , |
2674 | kauth_cred_getasid(cred), kauth_cred_getuid(cred)); |
2675 | return (0); |
2676 | } |
2677 | |
2678 | /* Indicate if the cred is INVALID */ |
2679 | if (cp->gss_clnt_flags & GSS_CTX_INVAL) |
2680 | p->flags |= NFS_IOC_INVALID_CRED_FLAG; |
2681 | |
2682 | /* We have set a principal on the mount */ |
2683 | if (cp->gss_clnt_principal) { |
2684 | princ = (char *)cp->gss_clnt_principal; |
2685 | p->princlen = cp->gss_clnt_prinlen; |
2686 | p->nametype = cp->gss_clnt_prinnt; |
2687 | } else if (cp->gss_clnt_display) { |
/* We have had a successful use of the default credential */
2689 | princ = cp->gss_clnt_display; |
2690 | p->princlen = strlen(cp->gss_clnt_display); |
2691 | } |
2692 | |
/*
 * If neither of the above is true we have an invalid default credential,
 * so p->principal remains USER_ADDR_NULL and princ is NULL.
 */
2697 | |
2698 | if (princ) { |
2699 | char *pp; |
2700 | |
2701 | MALLOC(pp, char *, p->princlen, M_TEMP, M_WAITOK); |
2702 | bcopy(princ, pp, p->princlen); |
2703 | p->principal = CAST_USER_ADDR_T(pp); |
2704 | } |
2705 | |
2706 | lck_mtx_unlock(&nmp->nm_lock); |
2707 | |
2708 | req.r_gss_ctx = cp; |
2709 | NFS_GSS_DBG("Found context %s\n" , NFS_GSS_CTX(&req, NULL)); |
2710 | nfs_gss_clnt_ctx_unref(&req); |
2711 | return (error); |
2712 | } |
2713 | #endif /* NFSCLIENT */ |
2714 | |
2715 | /************* |
2716 | * |
2717 | * Server functions |
2718 | */ |
2719 | |
2720 | #if NFSSERVER |
2721 | |
2722 | /* |
2723 | * Find a server context based on a handle value received |
2724 | * in an RPCSEC_GSS credential. |
2725 | */ |
2726 | static struct nfs_gss_svc_ctx * |
2727 | nfs_gss_svc_ctx_find(uint32_t handle) |
2728 | { |
2729 | struct nfs_gss_svc_ctx_hashhead *head; |
2730 | struct nfs_gss_svc_ctx *cp; |
2731 | uint64_t timenow; |
2732 | |
2733 | if (handle == 0) |
2734 | return (NULL); |
2735 | |
2736 | head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)]; |
2737 | /* |
2738 | * Don't return a context that is going to expire in GSS_CTX_PEND seconds |
2739 | */ |
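/*
 * Note that timenow is "now" plus GSS_CTX_PEND, so the comparison
 * below also treats contexts expiring within GSS_CTX_PEND seconds
 * as already expired.
 */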
2740 | clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow); |
2741 | |
2742 | lck_mtx_lock(nfs_gss_svc_ctx_mutex); |
2743 | |
2744 | LIST_FOREACH(cp, head, gss_svc_entries) { |
2745 | if (cp->gss_svc_handle == handle) { |
2746 | if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) { |
2747 | /* |
2748 | * Context has or is about to expire. Don't use. |
2749 | * We'll return null and the client will have to create |
2750 | * a new context. |
2751 | */ |
2752 | cp->gss_svc_handle = 0; |
2753 | /* |
2754 | * Make sure though that we stay around for GSS_CTX_PEND seconds |
2755 | * for other threads that might be using the context. |
2756 | */ |
2757 | cp->gss_svc_incarnation = timenow; |
2758 | |
2759 | cp = NULL; |
2760 | break; |
2761 | } |
2762 | lck_mtx_lock(cp->gss_svc_mtx); |
2763 | cp->gss_svc_refcnt++; |
2764 | lck_mtx_unlock(cp->gss_svc_mtx); |
2765 | break; |
2766 | } |
2767 | } |
2768 | |
2769 | lck_mtx_unlock(nfs_gss_svc_ctx_mutex); |
2770 | |
2771 | return (cp); |
2772 | } |
2773 | |
2774 | /* |
2775 | * Insert a new server context into the hash table |
2776 | * and start the context reap thread if necessary. |
2777 | */ |
2778 | static void |
2779 | nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp) |
2780 | { |
2781 | struct nfs_gss_svc_ctx_hashhead *head; |
2782 | struct nfs_gss_svc_ctx *p; |
2783 | |
2784 | lck_mtx_lock(nfs_gss_svc_ctx_mutex); |
2785 | |
2786 | /* |
2787 | * Give the client a random handle so that if we reboot |
2788 | * it's unlikely the client will get a bad context match. |
2789 | * Make sure it's not zero or already assigned. |
2790 | */ |
2791 | retry: |
2792 | cp->gss_svc_handle = random(); |
2793 | if (cp->gss_svc_handle == 0) |
2794 | goto retry; |
2795 | head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)]; |
2796 | LIST_FOREACH(p, head, gss_svc_entries) |
2797 | if (p->gss_svc_handle == cp->gss_svc_handle) |
2798 | goto retry; |
2799 | |
2800 | clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, |
2801 | &cp->gss_svc_incarnation); |
2802 | LIST_INSERT_HEAD(head, cp, gss_svc_entries); |
2803 | nfs_gss_ctx_count++; |
2804 | |
2805 | if (!nfs_gss_timer_on) { |
2806 | nfs_gss_timer_on = 1; |
2807 | |
2808 | nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call, |
2809 | min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC); |
2810 | } |
2811 | |
2812 | lck_mtx_unlock(nfs_gss_svc_ctx_mutex); |
2813 | } |
2814 | |
2815 | /* |
2816 | * This function is called via the kernel's callout |
2817 | * mechanism. It runs only when there are |
2818 | * cached RPCSEC_GSS contexts. |
2819 | */ |
2820 | void |
2821 | nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2) |
2822 | { |
2823 | struct nfs_gss_svc_ctx *cp, *next; |
2824 | uint64_t timenow; |
2825 | int contexts = 0; |
2826 | int i; |
2827 | |
2828 | lck_mtx_lock(nfs_gss_svc_ctx_mutex); |
2829 | clock_get_uptime(&timenow); |
2830 | |
2831 | NFS_GSS_DBG("is running\n" ); |
2832 | |
2833 | /* |
2834 | * Scan all the hash chains |
2835 | */ |
2836 | for (i = 0; i < SVC_CTX_HASHSZ; i++) { |
2837 | /* |
2838 | * For each hash chain, look for entries |
2839 | * that haven't been used in a while. |
2840 | */ |
2841 | LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) { |
2842 | contexts++; |
2843 | if (timenow > cp->gss_svc_incarnation + |
2844 | (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0) |
2845 | && cp->gss_svc_refcnt == 0) { |
2846 | /* |
2847 | * A stale context - remove it |
2848 | */ |
2849 | LIST_REMOVE(cp, gss_svc_entries); |
NFS_GSS_DBG("Removing context for %d\n", cp->gss_svc_uid);
2851 | if (cp->gss_svc_seqbits) |
2852 | FREE(cp->gss_svc_seqbits, M_TEMP); |
2853 | lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp); |
2854 | FREE(cp, M_TEMP); |
2855 | contexts--; |
2856 | } |
2857 | } |
2858 | } |
2859 | |
2860 | nfs_gss_ctx_count = contexts; |
2861 | |
2862 | /* |
2863 | * If there are still some cached contexts left, |
2864 | * set up another callout to check on them later. |
2865 | */ |
2866 | nfs_gss_timer_on = nfs_gss_ctx_count > 0; |
2867 | if (nfs_gss_timer_on) |
2868 | nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call, |
2869 | min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC); |
2870 | |
2871 | lck_mtx_unlock(nfs_gss_svc_ctx_mutex); |
2872 | } |
2873 | |
2874 | /* |
2875 | * Here the server receives an RPCSEC_GSS credential in an |
2876 | * RPC call header. First there's some checking to make sure |
2877 | * the credential is appropriate - whether the context is still |
2878 | * being set up, or is complete. Then we use the handle to find |
2879 | * the server's context and validate the verifier, which contains |
2880 | * a signed checksum of the RPC header. If the verifier checks |
2881 | * out, we extract the user's UID and groups from the context |
2882 | * and use it to set up a UNIX credential for the user's request. |
2883 | */ |
2884 | int |
2885 | nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) |
2886 | { |
2887 | uint32_t vers, proc, seqnum, service; |
2888 | uint32_t handle, handle_len; |
2889 | uint32_t major; |
2890 | struct nfs_gss_svc_ctx *cp = NULL; |
uint32_t flavor = 0, header_len = 0;
2892 | int error = 0; |
2893 | uint32_t arglen, start; |
2894 | size_t argsize; |
2895 | gss_buffer_desc cksum; |
2896 | struct nfsm_chain nmc_tmp; |
2897 | mbuf_t reply_mbuf, prev_mbuf, pad_mbuf; |
2898 | |
2899 | vers = proc = seqnum = service = handle_len = 0; |
2900 | arglen = 0; |
2901 | |
2902 | nfsm_chain_get_32(error, nmc, vers); |
2903 | if (vers != RPCSEC_GSS_VERS_1) { |
2904 | error = NFSERR_AUTHERR | AUTH_REJECTCRED; |
2905 | goto nfsmout; |
2906 | } |
2907 | |
2908 | nfsm_chain_get_32(error, nmc, proc); |
2909 | nfsm_chain_get_32(error, nmc, seqnum); |
2910 | nfsm_chain_get_32(error, nmc, service); |
2911 | nfsm_chain_get_32(error, nmc, handle_len); |
2912 | if (error) |
2913 | goto nfsmout; |
2914 | |
2915 | /* |
2916 | * Make sure context setup/destroy is being done with a nullproc |
2917 | */ |
2918 | if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) { |
2919 | error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM; |
2920 | goto nfsmout; |
2921 | } |
2922 | |
2923 | /* |
2924 | * If the sequence number is greater than the max |
2925 | * allowable, reject and have the client init a |
2926 | * new context. |
2927 | */ |
2928 | if (seqnum > GSS_MAXSEQ) { |
2929 | error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; |
2930 | goto nfsmout; |
2931 | } |
2932 | |
2933 | nd->nd_sec = |
2934 | service == RPCSEC_GSS_SVC_NONE ? RPCAUTH_KRB5 : |
2935 | service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I : |
2936 | service == RPCSEC_GSS_SVC_PRIVACY ? RPCAUTH_KRB5P : 0; |
2937 | |
2938 | if (proc == RPCSEC_GSS_INIT) { |
2939 | /* |
2940 | * Limit the total number of contexts |
2941 | */ |
2942 | if (nfs_gss_ctx_count > nfs_gss_ctx_max) { |
2943 | error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; |
2944 | goto nfsmout; |
2945 | } |
2946 | |
2947 | /* |
2948 | * Set up a new context |
2949 | */ |
2950 | MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO); |
2951 | if (cp == NULL) { |
2952 | error = ENOMEM; |
2953 | goto nfsmout; |
2954 | } |
2955 | cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL); |
2956 | cp->gss_svc_refcnt = 1; |
2957 | } else { |
2958 | /* |
2959 | * Use the handle to find the context |
2960 | */ |
2961 | if (handle_len != sizeof(handle)) { |
2962 | error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM; |
2963 | goto nfsmout; |
2964 | } |
2965 | nfsm_chain_get_32(error, nmc, handle); |
2966 | if (error) |
2967 | goto nfsmout; |
2968 | cp = nfs_gss_svc_ctx_find(handle); |
2969 | if (cp == NULL) { |
2970 | error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; |
2971 | goto nfsmout; |
2972 | } |
2973 | } |
2974 | |
2975 | cp->gss_svc_proc = proc; |
2976 | |
2977 | if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) { |
2978 | struct posix_cred temp_pcred; |
2979 | |
2980 | if (cp->gss_svc_seqwin == 0) { |
2981 | /* |
2982 | * Context isn't complete |
2983 | */ |
2984 | error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; |
2985 | goto nfsmout; |
2986 | } |
2987 | |
2988 | if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) { |
2989 | /* |
2990 | * Sequence number is bad |
2991 | */ |
2992 | error = EINVAL; // drop the request |
2993 | goto nfsmout; |
2994 | } |
2995 | |
2996 | /* |
2997 | * Validate the verifier. |
2998 | * The verifier contains an encrypted checksum |
2999 | * of the call header from the XID up to and |
3000 | * including the credential. We compute the |
3001 | * checksum and compare it with what came in |
3002 | * the verifier. |
3003 | */ |
3004 | header_len = nfsm_chain_offset(nmc); |
3005 | nfsm_chain_get_32(error, nmc, flavor); |
3006 | nfsm_chain_get_32(error, nmc, cksum.length); |
3007 | if (error) |
3008 | goto nfsmout; |
3009 | if (flavor != RPCSEC_GSS || cksum.length > KRB5_MAX_MIC_SIZE) |
3010 | error = NFSERR_AUTHERR | AUTH_BADVERF; |
3011 | else { |
3012 | MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK); |
3013 | nfsm_chain_get_opaque(error, nmc, cksum.length, cksum.value); |
3014 | } |
3015 | if (error) |
3016 | goto nfsmout; |
3017 | |
3018 | /* Now verify the client's call header checksum */ |
3019 | major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, nmc->nmc_mhead, 0, header_len, &cksum, NULL); |
3020 | (void)gss_release_buffer(NULL, &cksum); |
3021 | if (major != GSS_S_COMPLETE) { |
3022 | printf("Server header: gss_krb5_verify_mic_mbuf failed %d\n" , error); |
3023 | error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; |
3024 | goto nfsmout; |
3025 | } |
3026 | |
3027 | nd->nd_gss_seqnum = seqnum; |
3028 | |
3029 | /* |
3030 | * Set up the user's cred |
3031 | */ |
3032 | bzero(&temp_pcred, sizeof(temp_pcred)); |
3033 | temp_pcred.cr_uid = cp->gss_svc_uid; |
3034 | bcopy(cp->gss_svc_gids, temp_pcred.cr_groups, |
3035 | sizeof(gid_t) * cp->gss_svc_ngroups); |
3036 | temp_pcred.cr_ngroups = cp->gss_svc_ngroups; |
3037 | |
3038 | nd->nd_cr = posix_cred_create(&temp_pcred); |
3039 | if (nd->nd_cr == NULL) { |
3040 | error = ENOMEM; |
3041 | goto nfsmout; |
3042 | } |
3043 | clock_get_uptime(&cp->gss_svc_incarnation); |
3044 | |
3045 | /* |
3046 | * If the call arguments are integrity or privacy protected |
3047 | * then we need to check them here. |
3048 | */ |
3049 | switch (service) { |
3050 | case RPCSEC_GSS_SVC_NONE: |
3051 | /* nothing to do */ |
3052 | break; |
3053 | case RPCSEC_GSS_SVC_INTEGRITY: |
3054 | /* |
3055 | * Here's what we expect in the integrity call args: |
3056 | * |
3057 | * - length of seq num + call args (4 bytes) |
3058 | * - sequence number (4 bytes) |
3059 | * - call args (variable bytes) |
3060 | * - length of checksum token |
3061 | * - checksum of seqnum + call args |
3062 | */ |
3063 | nfsm_chain_get_32(error, nmc, arglen); // length of args |
3064 | if (arglen > NFS_MAXPACKET) { |
3065 | error = EBADRPC; |
3066 | goto nfsmout; |
3067 | } |
3068 | |
3069 | nmc_tmp = *nmc; |
3070 | nfsm_chain_adv(error, &nmc_tmp, arglen); |
3071 | nfsm_chain_get_32(error, &nmc_tmp, cksum.length); |
3072 | cksum.value = NULL; |
3073 | if (cksum.length > 0 && cksum.length < GSS_MAX_MIC_LEN) |
3074 | MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK); |
3075 | |
3076 | if (cksum.value == NULL) { |
3077 | error = EBADRPC; |
3078 | goto nfsmout; |
3079 | } |
3080 | nfsm_chain_get_opaque(error, &nmc_tmp, cksum.length, cksum.value); |
3081 | |
3082 | /* Verify the checksum over the call args */ |
3083 | start = nfsm_chain_offset(nmc); |
3084 | |
3085 | major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, |
3086 | nmc->nmc_mhead, start, arglen, &cksum, NULL); |
3087 | FREE(cksum.value, M_TEMP); |
3088 | if (major != GSS_S_COMPLETE) { |
3089 | printf("Server args: gss_krb5_verify_mic_mbuf failed %d\n" , error); |
3090 | error = EBADRPC; |
3091 | goto nfsmout; |
3092 | } |
3093 | |
3094 | /* |
3095 | * Get the sequence number prepended to the args |
3096 | * and compare it against the one sent in the |
3097 | * call credential. |
3098 | */ |
3099 | nfsm_chain_get_32(error, nmc, seqnum); |
3100 | if (seqnum != nd->nd_gss_seqnum) { |
3101 | error = EBADRPC; // returns as GARBAGEARGS |
3102 | goto nfsmout; |
3103 | } |
3104 | break; |
3105 | case RPCSEC_GSS_SVC_PRIVACY: |
3106 | /* |
3107 | * Here's what we expect in the privacy call args: |
3108 | * |
3109 | * - length of wrap token |
3110 | * - wrap token (37-40 bytes) |
3111 | */ |
3112 | prev_mbuf = nmc->nmc_mcur; |
3113 | nfsm_chain_get_32(error, nmc, arglen); // length of args |
3114 | if (arglen > NFS_MAXPACKET) { |
3115 | error = EBADRPC; |
3116 | goto nfsmout; |
3117 | } |
3118 | |
3119 | /* Get the wrap token (current mbuf in the chain starting at the current offset) */ |
3120 | start = nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur); |
3121 | |
3122 | /* split out the wrap token */ |
3123 | argsize = arglen; |
3124 | error = gss_normalize_mbuf(nmc->nmc_mcur, start, &argsize, &reply_mbuf, &pad_mbuf, 0); |
3125 | if (error) |
3126 | goto nfsmout; |
3127 | |
3128 | assert(argsize == arglen); |
3129 | if (pad_mbuf) { |
3130 | assert(nfsm_pad(arglen) == mbuf_len(pad_mbuf)); |
3131 | mbuf_free(pad_mbuf); |
3132 | } else { |
3133 | assert(nfsm_pad(arglen) == 0); |
3134 | } |
3135 | |
3136 | major = gss_krb5_unwrap_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, &reply_mbuf, 0, arglen, NULL, NULL); |
3137 | if (major != GSS_S_COMPLETE) { |
printf("%s: gss_krb5_unwrap_mbuf failed %d\n", __func__, error);
3139 | goto nfsmout; |
3140 | } |
3141 | |
3142 | /* Now replace the wrapped arguments with the unwrapped ones */ |
3143 | mbuf_setnext(prev_mbuf, reply_mbuf); |
3144 | nmc->nmc_mcur = reply_mbuf; |
3145 | nmc->nmc_ptr = mbuf_data(reply_mbuf); |
3146 | nmc->nmc_left = mbuf_len(reply_mbuf); |
3147 | |
3148 | /* |
3149 | * - sequence number (4 bytes) |
3150 | * - call args |
3151 | */ |
3152 | |
3153 | // nfsm_chain_reverse(nmc, nfsm_pad(toklen)); |
3154 | |
3155 | /* |
3156 | * Get the sequence number prepended to the args |
3157 | * and compare it against the one sent in the |
3158 | * call credential. |
3159 | */ |
3160 | nfsm_chain_get_32(error, nmc, seqnum); |
3161 | if (seqnum != nd->nd_gss_seqnum) { |
printf("%s: Sequence number mismatch seqnum = %d nd->nd_gss_seqnum = %d\n",
	__func__, seqnum, nd->nd_gss_seqnum);
printmbuf("reply_mbuf", nmc->nmc_mhead, 0, 0);
printf("reply_mbuf %p nmc_head %p\n", reply_mbuf, nmc->nmc_mhead);
3166 | error = EBADRPC; // returns as GARBAGEARGS |
3167 | goto nfsmout; |
3168 | } |
3169 | break; |
3170 | } |
3171 | } else { |
3172 | uint32_t verflen; |
3173 | /* |
3174 | * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT |
3175 | * then we expect a null verifier. |
3176 | */ |
3177 | nfsm_chain_get_32(error, nmc, flavor); |
3178 | nfsm_chain_get_32(error, nmc, verflen); |
3179 | if (error || flavor != RPCAUTH_NULL || verflen > 0) |
3180 | error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM; |
3181 | if (error) { |
3182 | if (proc == RPCSEC_GSS_INIT) { |
3183 | lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp); |
3184 | FREE(cp, M_TEMP); |
3185 | cp = NULL; |
3186 | } |
3187 | goto nfsmout; |
3188 | } |
3189 | } |
3190 | |
3191 | nd->nd_gss_context = cp; |
3192 | return 0; |
3193 | nfsmout: |
3194 | if (cp) |
3195 | nfs_gss_svc_ctx_deref(cp); |
3196 | return (error); |
3197 | } |
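
/*
 * Illustrative only (not compiled): a minimal sketch of the XDR layouts
 * that the RPCSEC_GSS_SVC_INTEGRITY and RPCSEC_GSS_SVC_PRIVACY cases
 * above expect in the call body, per RFC 2203.  The struct and field
 * names are hypothetical; the real code walks the mbuf chain with the
 * nfsm_chain macros rather than overlaying a structure on the data.
 */
#if 0
struct rpc_gss_integ_args_sketch {
	uint32_t databody_len;		/* length of seqnum + call args */
	uint32_t seqnum;		/* must equal the credential's sequence number */
	/* call args: databody_len - 4 bytes, XDR padded */
	uint32_t checksum_len;		/* length of the MIC token */
	/* MIC token: checksum computed over seqnum + call args */
};

struct rpc_gss_priv_args_sketch {
	uint32_t wrap_token_len;	/* length of the wrap token */
	/* wrap token: sealed seqnum + call args, unwrapped in place above */
};
#endif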
3198 | |
3199 | /* |
3200 | * Insert the server's verifier into the RPC reply header. |
3201 | * It contains a signed checksum of the sequence number that |
3202 | * was received in the RPC call. |
3203 | * Then go on to add integrity or privacy if necessary. |
3204 | */ |
3205 | int |
3206 | nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc) |
3207 | { |
struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
int error = 0;
gss_buffer_desc cksum, seqbuf;
uint32_t network_seqnum;
uint32_t major;
3214 | |
3215 | if (cp->gss_svc_major != GSS_S_COMPLETE) { |
3216 | /* |
3217 | * If the context isn't yet complete |
3218 | * then return a null verifier. |
3219 | */ |
3220 | nfsm_chain_add_32(error, nmc, RPCAUTH_NULL); |
3221 | nfsm_chain_add_32(error, nmc, 0); |
3222 | return (error); |
3223 | } |
3224 | |
3225 | /* |
3226 | * Compute checksum of the request seq number |
3227 | * If it's the final reply of context setup |
3228 | * then return the checksum of the context |
3229 | * window size. |
3230 | */ |
3231 | seqbuf.length = NFSX_UNSIGNED; |
3232 | if (cp->gss_svc_proc == RPCSEC_GSS_INIT || |
3233 | cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) |
3234 | network_seqnum = htonl(cp->gss_svc_seqwin); |
3235 | else |
3236 | network_seqnum = htonl(nd->nd_gss_seqnum); |
3237 | seqbuf.value = &network_seqnum; |
3238 | |
3239 | major = gss_krb5_get_mic((uint32_t *)&error, cp->gss_svc_ctx_id, 0, &seqbuf, &cksum); |
3240 | if (major != GSS_S_COMPLETE) |
3241 | return (error); |
3242 | |
3243 | /* |
3244 | * Now wrap it in a token and add |
3245 | * the verifier to the reply. |
3246 | */ |
3247 | nfsm_chain_add_32(error, nmc, RPCSEC_GSS); |
3248 | nfsm_chain_add_32(error, nmc, cksum.length); |
3249 | nfsm_chain_add_opaque(error, nmc, cksum.value, cksum.length); |
3250 | gss_release_buffer(NULL, &cksum); |
3251 | |
3252 | return (error); |
3253 | } |
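
/*
 * Illustrative only (not compiled): a minimal sketch of how the peer
 * would check the verifier built above, by verifying the MIC over the
 * big-endian sequence number (or over the sequence window during
 * context setup).  The function is hypothetical, and the
 * gss_krb5_verify_mic() signature is assumed to mirror the
 * gss_krb5_verify_mic_mbuf() call used earlier in this file.
 */
#if 0
static int
example_check_verf(gss_ctx_id_t ctx, uint32_t seqnum, gss_buffer_t mic)
{
	uint32_t minor;
	uint32_t network_seqnum = htonl(seqnum);
	gss_buffer_desc seqbuf;

	seqbuf.length = NFSX_UNSIGNED;
	seqbuf.value = &network_seqnum;

	/* GSS_S_COMPLETE means the checksum covers this sequence number */
	if (gss_krb5_verify_mic(&minor, ctx, &seqbuf, mic, NULL) != GSS_S_COMPLETE)
		return (EBADRPC);
	return (0);
}
#endif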
3254 | |
3255 | /* |
3256 | * The results aren't available yet, but if they need to be |
3257 | * checksummed for integrity protection or encrypted, then |
3258 | * we can record the start offset here, insert a place-holder |
3259 | * for the results length, as well as the sequence number. |
3260 | * The rest of the work is done later by nfs_gss_svc_protect_reply() |
3261 | * when the results are available. |
3262 | */ |
3263 | int |
3264 | nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc) |
3265 | { |
3266 | struct nfs_gss_svc_ctx *cp = nd->nd_gss_context; |
3267 | int error = 0; |
3268 | |
3269 | if (cp->gss_svc_proc == RPCSEC_GSS_INIT || |
3270 | cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) |
3271 | return (0); |
3272 | |
3273 | switch (nd->nd_sec) { |
3274 | case RPCAUTH_KRB5: |
3275 | /* Nothing to do */ |
3276 | break; |
3277 | case RPCAUTH_KRB5I: |
3278 | case RPCAUTH_KRB5P: |
3279 | nd->nd_gss_mb = nmc->nmc_mcur; // record current mbuf |
3280 | nfsm_chain_finish_mbuf(error, nmc); // split the chain here |
3281 | break; |
3282 | } |
3283 | |
3284 | return (error); |
3285 | } |
3286 | |
3287 | /* |
3288 | * The results are checksummed or encrypted for return to the client |
3289 | */ |
3290 | int |
3291 | nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep __unused) |
3292 | { |
3293 | struct nfs_gss_svc_ctx *cp = nd->nd_gss_context; |
3294 | struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res; |
3295 | mbuf_t mb, results; |
3296 | uint32_t reslen; |
3297 | int error = 0; |
3298 | |
/* XXX
 * Using a reference to the mbuf where we previously split the reply,
 * we split the mbuf chain into two chains: one that lets us prepend a
 * length field or token (nmc_res), and a second holding just the
 * results that we're going to checksum and/or encrypt. When we're
 * done, we join the chains back together.
 */
3307 | |
3308 | mb = nd->nd_gss_mb; // the mbuf where we split |
3309 | results = mbuf_next(mb); // first mbuf in the results |
3310 | error = mbuf_setnext(mb, NULL); // disconnect the chains |
3311 | if (error) |
3312 | return (error); |
3313 | nfs_gss_nfsm_chain(nmc_res, mb); // set up the prepend chain |
3314 | nfsm_chain_build_done(error, nmc_res); |
3315 | if (error) |
3316 | return (error); |
3317 | |
3318 | if (nd->nd_sec == RPCAUTH_KRB5I) { |
3319 | error = rpc_gss_integ_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen); |
3320 | } else { |
3321 | /* RPCAUTH_KRB5P */ |
3322 | error = rpc_gss_priv_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen); |
3323 | } |
3324 | nfs_gss_append_chain(nmc_res, results); // Append the results mbufs |
3325 | nfsm_chain_build_done(error, nmc_res); |
3326 | |
3327 | return (error); |
3328 | } |
3329 | |
3330 | /* |
3331 | * This function handles the context setup calls from the client. |
3332 | * Essentially, it implements the NFS null procedure calls when |
3333 | * an RPCSEC_GSS credential is used. |
3334 | * This is the context maintenance function. It creates and |
3335 | * destroys server contexts at the whim of the client. |
3336 | * During context creation, it receives GSS-API tokens from the |
3337 | * client, passes them up to gssd, and returns a received token |
3338 | * back to the client in the null procedure reply. |
3339 | */ |
3340 | int |
3341 | nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp) |
3342 | { |
3343 | struct nfs_gss_svc_ctx *cp = NULL; |
3344 | int error = 0; |
3345 | int autherr = 0; |
3346 | struct nfsm_chain *nmreq, nmrep; |
3347 | int sz; |
3348 | |
3349 | nmreq = &nd->nd_nmreq; |
3350 | nfsm_chain_null(&nmrep); |
3351 | *mrepp = NULL; |
3352 | cp = nd->nd_gss_context; |
3353 | nd->nd_repstat = 0; |
3354 | |
3355 | switch (cp->gss_svc_proc) { |
3356 | case RPCSEC_GSS_INIT: |
3357 | nfs_gss_svc_ctx_insert(cp); |
3358 | /* FALLTHRU */ |
3359 | |
3360 | case RPCSEC_GSS_CONTINUE_INIT: |
3361 | /* Get the token from the request */ |
3362 | nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen); |
3363 | cp->gss_svc_token = NULL; |
3364 | if (cp->gss_svc_tokenlen > 0 && cp->gss_svc_tokenlen < GSS_MAX_TOKEN_LEN) |
3365 | MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK); |
3366 | if (cp->gss_svc_token == NULL) { |
3367 | autherr = RPCSEC_GSS_CREDPROBLEM; |
3368 | break; |
3369 | } |
3370 | nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token); |
3371 | |
3372 | /* Use the token in a gss_accept_sec_context upcall */ |
3373 | error = nfs_gss_svc_gssd_upcall(cp); |
3374 | if (error) { |
3375 | autherr = RPCSEC_GSS_CREDPROBLEM; |
3376 | if (error == NFSERR_EAUTH) |
3377 | error = 0; |
3378 | break; |
3379 | } |
3380 | |
3381 | /* |
3382 | * If the context isn't complete, pass the new token |
3383 | * back to the client for another round. |
3384 | */ |
3385 | if (cp->gss_svc_major != GSS_S_COMPLETE) |
3386 | break; |
3387 | |
3388 | /* |
3389 | * Now the server context is complete. |
3390 | * Finish setup. |
3391 | */ |
3392 | clock_get_uptime(&cp->gss_svc_incarnation); |
3393 | |
3394 | cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW; |
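/*
 * The replay window is tracked as a bitmap, one bit per sequence
 * slot: (seqwin + 7) / 8 bytes, rounded up to a 4-byte multiple
 * by nfsm_rndup().
 */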
3395 | MALLOC(cp->gss_svc_seqbits, uint32_t *, |
3396 | nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO); |
3397 | if (cp->gss_svc_seqbits == NULL) { |
3398 | autherr = RPCSEC_GSS_CREDPROBLEM; |
3399 | break; |
3400 | } |
3401 | break; |
3402 | |
3403 | case RPCSEC_GSS_DATA: |
3404 | /* Just a nullproc ping - do nothing */ |
3405 | break; |
3406 | |
3407 | case RPCSEC_GSS_DESTROY: |
3408 | /* |
3409 | * Don't destroy the context immediately because |
3410 | * other active requests might still be using it. |
3411 | * Instead, schedule it for destruction after |
3412 | * GSS_CTX_PEND time has elapsed. |
3413 | */ |
3414 | cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle); |
3415 | if (cp != NULL) { |
3416 | cp->gss_svc_handle = 0; // so it can't be found |
3417 | lck_mtx_lock(cp->gss_svc_mtx); |
3418 | clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, |
3419 | &cp->gss_svc_incarnation); |
3420 | lck_mtx_unlock(cp->gss_svc_mtx); |
3421 | } |
3422 | break; |
3423 | default: |
3424 | autherr = RPCSEC_GSS_CREDPROBLEM; |
3425 | break; |
3426 | } |
3427 | |
3428 | /* Now build the reply */ |
3429 | |
3430 | if (nd->nd_repstat == 0) |
3431 | nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID; |
3432 | sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results |
3433 | error = nfsrv_rephead(nd, slp, &nmrep, sz); |
3434 | *mrepp = nmrep.nmc_mhead; |
3435 | if (error || autherr) |
3436 | goto nfsmout; |
3437 | |
3438 | if (cp->gss_svc_proc == RPCSEC_GSS_INIT || |
3439 | cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) { |
3440 | nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle)); |
3441 | nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle); |
3442 | |
3443 | nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major); |
3444 | nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor); |
3445 | nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin); |
3446 | |
3447 | nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen); |
3448 | if (cp->gss_svc_token != NULL) { |
3449 | nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen); |
3450 | FREE(cp->gss_svc_token, M_TEMP); |
3451 | cp->gss_svc_token = NULL; |
3452 | } |
3453 | } |
3454 | |
3455 | nfsmout: |
3456 | if (autherr != 0) { |
3457 | nd->nd_gss_context = NULL; |
3458 | LIST_REMOVE(cp, gss_svc_entries); |
3459 | if (cp->gss_svc_seqbits != NULL) |
3460 | FREE(cp->gss_svc_seqbits, M_TEMP); |
3461 | if (cp->gss_svc_token != NULL) |
3462 | FREE(cp->gss_svc_token, M_TEMP); |
3463 | lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp); |
3464 | FREE(cp, M_TEMP); |
3465 | } |
3466 | |
3467 | nfsm_chain_build_done(error, &nmrep); |
3468 | if (error) { |
3469 | nfsm_chain_cleanup(&nmrep); |
3470 | *mrepp = NULL; |
3471 | } |
3472 | return (error); |
3473 | } |
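
/*
 * Illustrative only (not compiled): a minimal sketch of the reply body
 * built above for RPCSEC_GSS_INIT / RPCSEC_GSS_CONTINUE_INIT, which
 * corresponds to the rpc_gss_init_res structure of RFC 2203.  The
 * struct and field names are hypothetical.
 */
#if 0
struct rpc_gss_init_res_sketch {
	uint32_t handle_len;		/* sizeof(gss_svc_handle), i.e. 4 */
	uint32_t handle;		/* the server's context handle */
	uint32_t gss_major;		/* GSS-API major status */
	uint32_t gss_minor;		/* GSS-API minor status */
	uint32_t seq_window;		/* server's sequence window size */
	uint32_t token_len;		/* length of the returned GSS token */
	/* GSS output token, XDR padded */
};
#endif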
3474 | |
3475 | /* |
3476 | * This is almost a mirror-image of the client side upcall. |
3477 | * It passes and receives a token, but invokes gss_accept_sec_context. |
3478 | * If it's the final call of the context setup, then gssd also returns |
3479 | * the session key and the user's UID. |
3480 | */ |
3481 | static int |
3482 | nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp) |
3483 | { |
3484 | kern_return_t kr; |
3485 | mach_port_t mp; |
3486 | int retry_cnt = 0; |
3487 | gssd_byte_buffer octx = NULL; |
3488 | uint32_t lucidlen = 0; |
3489 | void *lucid_ctx_buffer; |
3490 | uint32_t ret_flags; |
3491 | vm_map_copy_t itoken = NULL; |
3492 | gssd_byte_buffer otoken = NULL; |
3493 | mach_msg_type_number_t otokenlen; |
3494 | int error = 0; |
char svcname[] = "nfs";
3496 | |
3497 | kr = host_get_gssd_port(host_priv_self(), &mp); |
3498 | if (kr != KERN_SUCCESS) { |
printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
3500 | goto out; |
3501 | } |
3502 | if (!IPC_PORT_VALID(mp)) { |
printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
3504 | goto out; |
3505 | } |
3506 | |
3507 | if (cp->gss_svc_tokenlen > 0) |
3508 | nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken); |
3509 | |
3510 | retry: |
printf("Calling mach_gss_accept_sec_context\n");
3512 | kr = mach_gss_accept_sec_context( |
3513 | mp, |
3514 | (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen, |
3515 | svcname, |
3516 | 0, |
3517 | &cp->gss_svc_context, |
3518 | &cp->gss_svc_cred_handle, |
3519 | &ret_flags, |
3520 | &cp->gss_svc_uid, |
3521 | cp->gss_svc_gids, |
3522 | &cp->gss_svc_ngroups, |
3523 | &octx, (mach_msg_type_number_t *) &lucidlen, |
3524 | &otoken, &otokenlen, |
3525 | &cp->gss_svc_major, |
3526 | &cp->gss_svc_minor); |
3527 | |
printf("mach_gss_accept_sec_context returned %d\n", kr);
3529 | if (kr != KERN_SUCCESS) { |
printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
3531 | if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 && |
3532 | retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) { |
3533 | if (cp->gss_svc_tokenlen > 0) |
3534 | nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken); |
3535 | goto retry; |
3536 | } |
3537 | host_release_special_port(mp); |
3538 | goto out; |
3539 | } |
3540 | |
3541 | host_release_special_port(mp); |
3542 | |
3543 | if (lucidlen > 0) { |
3544 | if (lucidlen > MAX_LUCIDLEN) { |
printf("nfs_gss_svc_gssd_upcall: bad context length (%d)\n", lucidlen);
3546 | vm_map_copy_discard((vm_map_copy_t) octx); |
3547 | vm_map_copy_discard((vm_map_copy_t) otoken); |
3548 | goto out; |
3549 | } |
3550 | MALLOC(lucid_ctx_buffer, void *, lucidlen, M_TEMP, M_WAITOK | M_ZERO); |
3551 | error = nfs_gss_mach_vmcopyout((vm_map_copy_t) octx, lucidlen, lucid_ctx_buffer); |
3552 | if (error) { |
3553 | vm_map_copy_discard((vm_map_copy_t) otoken); |
3554 | FREE(lucid_ctx_buffer, M_TEMP); |
3555 | goto out; |
3556 | } |
3557 | if (cp->gss_svc_ctx_id) |
3558 | gss_krb5_destroy_context(cp->gss_svc_ctx_id); |
3559 | cp->gss_svc_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen); |
3560 | if (cp->gss_svc_ctx_id == NULL) { |
printf("Failed to make context from lucid_ctx_buffer\n");
3562 | goto out; |
3563 | } |
3564 | } |
3565 | |
3566 | /* Free context token used as input */ |
3567 | if (cp->gss_svc_token) |
3568 | FREE(cp->gss_svc_token, M_TEMP); |
3569 | cp->gss_svc_token = NULL; |
3570 | cp->gss_svc_tokenlen = 0; |
3571 | |
3572 | if (otokenlen > 0) { |
3573 | /* Set context token to gss output token */ |
3574 | MALLOC(cp->gss_svc_token, u_char *, otokenlen, M_TEMP, M_WAITOK); |
3575 | if (cp->gss_svc_token == NULL) { |
printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
3577 | vm_map_copy_discard((vm_map_copy_t) otoken); |
3578 | return (ENOMEM); |
3579 | } |
3580 | error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token); |
3581 | if (error) { |
3582 | FREE(cp->gss_svc_token, M_TEMP); |
3583 | cp->gss_svc_token = NULL; |
3584 | return (NFSERR_EAUTH); |
3585 | } |
3586 | cp->gss_svc_tokenlen = otokenlen; |
3587 | } |
3588 | |
3589 | return (0); |
3590 | |
3591 | out: |
3592 | FREE(cp->gss_svc_token, M_TEMP); |
3593 | cp->gss_svc_tokenlen = 0; |
3594 | cp->gss_svc_token = NULL; |
3595 | |
3596 | return (NFSERR_EAUTH); |
3597 | } |
3598 | |
3599 | /* |
3600 | * Validate the sequence number in the credential as described |
3601 | * in RFC 2203 Section 5.3.3.1 |
3602 | * |
3603 | * Here the window of valid sequence numbers is represented by |
3604 | * a bitmap. As each sequence number is received, its bit is |
3605 | * set in the bitmap. An invalid sequence number lies below |
3606 | * the lower bound of the window, or is within the window but |
3607 | * has its bit already set. |
3608 | */ |
3609 | static int |
3610 | nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq) |
3611 | { |
3612 | uint32_t *bits = cp->gss_svc_seqbits; |
3613 | uint32_t win = cp->gss_svc_seqwin; |
3614 | uint32_t i; |
3615 | |
3616 | lck_mtx_lock(cp->gss_svc_mtx); |
3617 | |
3618 | /* |
3619 | * If greater than the window upper bound, |
3620 | * move the window up, and set the bit. |
3621 | */ |
3622 | if (seq > cp->gss_svc_seqmax) { |
3623 | if (seq - cp->gss_svc_seqmax > win) |
3624 | bzero(bits, nfsm_rndup((win + 7) / 8)); |
3625 | else |
3626 | for (i = cp->gss_svc_seqmax + 1; i < seq; i++) |
3627 | win_resetbit(bits, i % win); |
3628 | win_setbit(bits, seq % win); |
3629 | cp->gss_svc_seqmax = seq; |
3630 | lck_mtx_unlock(cp->gss_svc_mtx); |
3631 | return (1); |
3632 | } |
3633 | |
3634 | /* |
3635 | * Invalid if below the lower bound of the window |
3636 | */ |
3637 | if (seq <= cp->gss_svc_seqmax - win) { |
3638 | lck_mtx_unlock(cp->gss_svc_mtx); |
3639 | return (0); |
3640 | } |
3641 | |
3642 | /* |
3643 | * In the window, invalid if the bit is already set |
3644 | */ |
3645 | if (win_getbit(bits, seq % win)) { |
3646 | lck_mtx_unlock(cp->gss_svc_mtx); |
3647 | return (0); |
3648 | } |
3649 | win_setbit(bits, seq % win); |
3650 | lck_mtx_unlock(cp->gss_svc_mtx); |
3651 | return (1); |
3652 | } |
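
/*
 * Worked example of the window check above: with a window of 128 slots
 * and gss_svc_seqmax currently 200, seq 60 is rejected (60 <= 200 - 128),
 * seq 150 is accepted once and its bit is set (a second arrival of 150
 * is a replay and is rejected), and seq 205 slides the window forward,
 * clearing the bits for 201..204 and setting the bit for 205.
 */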
3653 | |
3654 | /* |
3655 | * Drop a reference to a context |
3656 | * |
3657 | * Note that it's OK for the context to exist |
3658 | * with a refcount of zero. The refcount isn't |
3659 | * checked until we're about to reap an expired one. |
3660 | */ |
3661 | void |
3662 | nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp) |
3663 | { |
3664 | lck_mtx_lock(cp->gss_svc_mtx); |
3665 | if (cp->gss_svc_refcnt > 0) |
3666 | cp->gss_svc_refcnt--; |
3667 | else |
printf("nfs_gss_ctx_deref: zero refcount\n");
3669 | lck_mtx_unlock(cp->gss_svc_mtx); |
3670 | } |
3671 | |
3672 | /* |
3673 | * Called at NFS server shutdown - destroy all contexts |
3674 | */ |
3675 | void |
3676 | nfs_gss_svc_cleanup(void) |
3677 | { |
3678 | struct nfs_gss_svc_ctx_hashhead *head; |
3679 | struct nfs_gss_svc_ctx *cp, *ncp; |
3680 | int i; |
3681 | |
3682 | lck_mtx_lock(nfs_gss_svc_ctx_mutex); |
3683 | |
3684 | /* |
3685 | * Run through all the buckets |
3686 | */ |
3687 | for (i = 0; i < SVC_CTX_HASHSZ; i++) { |
3688 | /* |
3689 | * Remove and free all entries in the bucket |
3690 | */ |
3691 | head = &nfs_gss_svc_ctx_hashtbl[i]; |
3692 | LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) { |
3693 | LIST_REMOVE(cp, gss_svc_entries); |
3694 | if (cp->gss_svc_seqbits) |
3695 | FREE(cp->gss_svc_seqbits, M_TEMP); |
3696 | lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp); |
3697 | FREE(cp, M_TEMP); |
3698 | } |
3699 | } |
3700 | |
3701 | lck_mtx_unlock(nfs_gss_svc_ctx_mutex); |
3702 | } |
3703 | |
3704 | #endif /* NFSSERVER */ |
3705 | |
3706 | |
3707 | /************* |
3708 | * The following functions are used by both client and server. |
3709 | */ |
3710 | |
3711 | /* |
3712 | * Release a host special port that was obtained by host_get_special_port |
3713 | * or one of its macros (host_get_gssd_port in this case). |
3714 | * This really should be in a public kpi. |
3715 | */ |
3716 | |
3717 | /* This should be in a public header if this routine is not */ |
3718 | extern void ipc_port_release_send(ipc_port_t); |
3719 | extern ipc_port_t ipc_port_copy_send(ipc_port_t); |
3720 | |
3721 | static void |
3722 | host_release_special_port(mach_port_t mp) |
3723 | { |
3724 | if (IPC_PORT_VALID(mp)) |
3725 | ipc_port_release_send(mp); |
3726 | } |
3727 | |
3728 | static mach_port_t |
3729 | host_copy_special_port(mach_port_t mp) |
3730 | { |
3731 | return (ipc_port_copy_send(mp)); |
3732 | } |
3733 | |
3734 | /* |
3735 | * The token that is sent and received in the gssd upcall |
3736 | * has unbounded variable length. Mach RPC does not pass |
3737 | * the token in-line. Instead it uses page mapping to handle |
3738 | * these parameters. This function allocates a VM buffer |
3739 | * to hold the token for an upcall and copies the token |
3740 | * (received from the client) into it. The VM buffer is |
3741 | * marked with a src_destroy flag so that the upcall will |
3742 | * automatically de-allocate the buffer when the upcall is |
3743 | * complete. |
3744 | */ |
3745 | static void |
3746 | nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr) |
3747 | { |
3748 | kern_return_t kr; |
3749 | vm_offset_t kmem_buf; |
3750 | vm_size_t tbuflen; |
3751 | |
3752 | *addr = NULL; |
3753 | if (buf == NULL || buflen == 0) |
3754 | return; |
3755 | |
3756 | tbuflen = vm_map_round_page(buflen, |
3757 | vm_map_page_mask(ipc_kernel_map)); |
3758 | kr = vm_allocate_kernel(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_FILE); |
3759 | if (kr != 0) { |
printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
3761 | return; |
3762 | } |
3763 | |
3764 | kr = vm_map_wire_kernel(ipc_kernel_map, |
3765 | vm_map_trunc_page(kmem_buf, |
3766 | vm_map_page_mask(ipc_kernel_map)), |
3767 | vm_map_round_page(kmem_buf + tbuflen, |
3768 | vm_map_page_mask(ipc_kernel_map)), |
3769 | VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_FILE, FALSE); |
3770 | if (kr != 0) { |
printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");
3772 | return; |
3773 | } |
3774 | |
3775 | bcopy(buf, (void *) kmem_buf, buflen); |
3776 | // Shouldn't need to bzero below since vm_allocate returns zeroed pages |
3777 | // bzero(kmem_buf + buflen, tbuflen - buflen); |
3778 | |
3779 | kr = vm_map_unwire(ipc_kernel_map, |
3780 | vm_map_trunc_page(kmem_buf, |
3781 | vm_map_page_mask(ipc_kernel_map)), |
3782 | vm_map_round_page(kmem_buf + tbuflen, |
3783 | vm_map_page_mask(ipc_kernel_map)), |
3784 | FALSE); |
3785 | if (kr != 0) { |
printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
3787 | return; |
3788 | } |
3789 | |
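/*
 * Create the out-of-line descriptor handed to the upcall; TRUE here is
 * the src_destroy flag, so the kernel buffer just filled in is
 * reclaimed once the copy has been made.
 */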
3790 | kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf, |
3791 | (vm_map_size_t) buflen, TRUE, addr); |
3792 | if (kr != 0) { |
printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
3794 | return; |
3795 | } |
3796 | } |
3797 | |
3798 | /* |
3799 | * Here we handle a token received from the gssd via an upcall. |
 * The received token resides in an allocated VM buffer.
3801 | * We copy the token out of this buffer to a chunk of malloc'ed |
3802 | * memory of the right size, then de-allocate the VM buffer. |
3803 | */ |
3804 | static int |
3805 | nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out) |
3806 | { |
3807 | vm_map_offset_t map_data; |
3808 | vm_offset_t data; |
3809 | int error; |
3810 | |
3811 | error = vm_map_copyout(ipc_kernel_map, &map_data, in); |
3812 | if (error) |
3813 | return (error); |
3814 | |
3815 | data = CAST_DOWN(vm_offset_t, map_data); |
3816 | bcopy((void *) data, out, len); |
3817 | vm_deallocate(ipc_kernel_map, data, len); |
3818 | |
3819 | return (0); |
3820 | } |
3821 | |
3822 | /* |
3823 | * Return the number of bytes in an mbuf chain. |
3824 | */ |
3825 | static int |
3826 | nfs_gss_mchain_length(mbuf_t mhead) |
3827 | { |
3828 | mbuf_t mb; |
3829 | int len = 0; |
3830 | |
3831 | for (mb = mhead; mb; mb = mbuf_next(mb)) |
3832 | len += mbuf_len(mb); |
3833 | |
3834 | return (len); |
3835 | } |
3836 | |
3837 | /* |
3838 | * Append an args or results mbuf chain to the header chain |
3839 | */ |
3840 | static int |
3841 | nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc) |
3842 | { |
3843 | int error = 0; |
3844 | mbuf_t mb, tail; |
3845 | |
3846 | /* Connect the mbuf chains */ |
3847 | error = mbuf_setnext(nmc->nmc_mcur, mc); |
3848 | if (error) |
3849 | return (error); |
3850 | |
3851 | /* Find the last mbuf in the chain */ |
3852 | tail = NULL; |
3853 | for (mb = mc; mb; mb = mbuf_next(mb)) |
3854 | tail = mb; |
3855 | |
3856 | nmc->nmc_mcur = tail; |
3857 | nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail); |
3858 | nmc->nmc_left = mbuf_trailingspace(tail); |
3859 | |
3860 | return (0); |
3861 | } |
3862 | |
3863 | /* |
3864 | * Convert an mbuf chain to an NFS mbuf chain |
3865 | */ |
3866 | static void |
3867 | nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc) |
3868 | { |
3869 | mbuf_t mb, tail; |
3870 | |
3871 | /* Find the last mbuf in the chain */ |
3872 | tail = NULL; |
3873 | for (mb = mc; mb; mb = mbuf_next(mb)) |
3874 | tail = mb; |
3875 | |
3876 | nmc->nmc_mhead = mc; |
3877 | nmc->nmc_mcur = tail; |
3878 | nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail); |
3879 | nmc->nmc_left = mbuf_trailingspace(tail); |
3880 | nmc->nmc_flags = 0; |
3881 | } |
3882 | |
3883 | |
3884 | |
3885 | #if 0 |
3886 | #define DISPLAYLEN 16 |
3887 | #define MAXDISPLAYLEN 256 |
3888 | |
3889 | static void |
3890 | hexdump(const char *msg, void *data, size_t len) |
3891 | { |
3892 | size_t i, j; |
3893 | u_char *d = data; |
3894 | char *p, disbuf[3*DISPLAYLEN+1]; |
3895 | |
printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len);
3897 | if (len > MAXDISPLAYLEN) |
3898 | len = MAXDISPLAYLEN; |
3899 | |
3900 | for (i = 0; i < len; i += DISPLAYLEN) { |
3901 | for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) |
snprintf(p, 4, "%02x ", d[i + j]);
printf("\t%s\n", disbuf);
3904 | } |
3905 | } |
3906 | #endif |
3907 | |