/*-
 * Copyright (c) 2008-2010 Apple Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>

#include <libkern/OSAtomic.h>

#include <bsm/audit.h>
#include <bsm/audit_internal.h>

#include <security/audit/audit_bsd.h>
#include <security/audit/audit.h>
#include <security/audit/audit_private.h>

#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/audit_triggers_server.h>

#include <os/overflow.h>

extern void ipc_port_release_send(ipc_port_t port);

#if CONFIG_AUDIT
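/*
 * Header prepended to every allocation made by _audit_malloc().  Callers are
 * handed mh_data; _audit_free() steps back over this header to recover the
 * original allocation size and to validate mh_magic.
 */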
struct mhdr {
        size_t mh_size;
        au_malloc_type_t *mh_type;
        u_long mh_magic;
        char mh_data[0];
};

/*
 * The lock group for the audit subsystem.
 */
static lck_grp_t *audit_lck_grp = NULL;

#define AUDIT_MHMAGIC 0x4D656C53

#if AUDIT_MALLOC_DEBUG
#define AU_MAX_SHORTDESC 20
#define AU_MAX_LASTCALLER 20
struct au_malloc_debug_info {
        SInt64 md_size;
        SInt64 md_maxsize;
        SInt32 md_inuse;
        SInt32 md_maxused;
        unsigned md_type;
        unsigned md_magic;
        char md_shortdesc[AU_MAX_SHORTDESC];
        char md_lastcaller[AU_MAX_LASTCALLER];
};
typedef struct au_malloc_debug_info au_malloc_debug_info_t;

au_malloc_type_t *audit_malloc_types[NUM_MALLOC_TYPES];

static int audit_sysctl_malloc_debug(struct sysctl_oid *oidp, void *arg1,
    int arg2, struct sysctl_req *req);

SYSCTL_PROC(_kern, OID_AUTO, audit_malloc_debug, CTLFLAG_RD, NULL, 0,
    audit_sysctl_malloc_debug, "S,audit_malloc_debug",
    "Current malloc debug info for auditing.");

#define AU_MALLOC_DBINFO_SZ \
    (NUM_MALLOC_TYPES * sizeof(au_malloc_debug_info_t))
/*
 * Copy out the malloc debug info via the sysctl interface.  The userland code
 * is something like the following:
 *
 *   error = sysctlbyname("kern.audit_malloc_debug", buffer_ptr, &buffer_len,
 *       NULL, 0);
 */
static int
audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
        int i;
        size_t sz;
        au_malloc_debug_info_t *amdi_ptr, *nxt_ptr;
        int err;

        /*
         * This provides a read-only node.
         */
        if (req->newptr != USER_ADDR_NULL)
                return (EPERM);

        /*
         * If just querying then return the space required.
         */
        if (req->oldptr == USER_ADDR_NULL) {
                req->oldidx = AU_MALLOC_DBINFO_SZ;
                return (0);
        }

        /*
         * Alloc a temporary buffer.
         */
        if (req->oldlen < AU_MALLOC_DBINFO_SZ)
                return (ENOMEM);
        amdi_ptr = (au_malloc_debug_info_t *)kalloc(AU_MALLOC_DBINFO_SZ);
        if (amdi_ptr == NULL)
                return (ENOMEM);
        bzero(amdi_ptr, AU_MALLOC_DBINFO_SZ);

        /*
         * Build the record array.
         */
        sz = 0;
        nxt_ptr = amdi_ptr;
        for (i = 0; i < NUM_MALLOC_TYPES; i++) {
                if (audit_malloc_types[i] == NULL)
                        continue;
                if (audit_malloc_types[i]->mt_magic != M_MAGIC) {
                        nxt_ptr->md_magic = audit_malloc_types[i]->mt_magic;
                        continue;
                }
                nxt_ptr->md_magic = audit_malloc_types[i]->mt_magic;
                nxt_ptr->md_size = audit_malloc_types[i]->mt_size;
                nxt_ptr->md_maxsize = audit_malloc_types[i]->mt_maxsize;
                nxt_ptr->md_inuse = (int)audit_malloc_types[i]->mt_inuse;
                nxt_ptr->md_maxused = (int)audit_malloc_types[i]->mt_maxused;
                strlcpy(nxt_ptr->md_shortdesc,
                    audit_malloc_types[i]->mt_shortdesc, AU_MAX_SHORTDESC - 1);
                strlcpy(nxt_ptr->md_lastcaller,
                    audit_malloc_types[i]->mt_lastcaller, AU_MAX_LASTCALLER - 1);
                sz += sizeof(au_malloc_debug_info_t);
                nxt_ptr++;
        }

        req->oldlen = sz;
        err = SYSCTL_OUT(req, amdi_ptr, sz);
        kfree(amdi_ptr, AU_MALLOC_DBINFO_SZ);

        return (err);
}
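
/*
 * Illustrative userland sketch (not part of this file) of the two-step
 * sysctl protocol the handler above implements: a query with a NULL oldptr
 * returns the required size, and a second call copies out the records.
 * All names below are hypothetical.
 *
 *      size_t len = 0;
 *
 *      if (sysctlbyname("kern.audit_malloc_debug", NULL, &len, NULL, 0) == 0) {
 *              void *buf = malloc(len);
 *              if (buf != NULL &&
 *                  sysctlbyname("kern.audit_malloc_debug", buf, &len,
 *                  NULL, 0) == 0) {
 *                      parse len / sizeof(au_malloc_debug_info_t) records
 *              }
 *              free(buf);
 *      }
 */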
#endif /* AUDIT_MALLOC_DEBUG */

/*
 * BSD malloc()
 *
 * If the M_NOWAIT flag is set, the allocation will not block and may return
 * NULL.  If the M_ZERO flag is set, the buffer is zeroed before it is
 * returned.
 */
void *
#if AUDIT_MALLOC_DEBUG
_audit_malloc(size_t size, au_malloc_type_t *type, int flags, const char *fn)
#else
_audit_malloc(size_t size, au_malloc_type_t *type, int flags)
#endif
{
        struct mhdr *hdr;
        size_t memsize;

        if (os_add_overflow(sizeof(*hdr), size, &memsize)) {
                return (NULL);
        }

        if (size == 0)
                return (NULL);
        if (flags & M_NOWAIT) {
                hdr = (void *)kalloc_noblock(memsize);
        } else {
                hdr = (void *)kalloc(memsize);
                if (hdr == NULL)
                        panic("_audit_malloc: kernel memory exhausted");
        }
        if (hdr == NULL)
                return (NULL);
        hdr->mh_size = memsize;
        hdr->mh_type = type;
        hdr->mh_magic = AUDIT_MHMAGIC;
        if (flags & M_ZERO)
                memset(hdr->mh_data, 0, size);
#if AUDIT_MALLOC_DEBUG
        if (type != NULL && type->mt_type < NUM_MALLOC_TYPES) {
                OSAddAtomic64(memsize, &type->mt_size);
                type->mt_maxsize = max(type->mt_size, type->mt_maxsize);
                OSAddAtomic(1, &type->mt_inuse);
                type->mt_maxused = max(type->mt_inuse, type->mt_maxused);
                type->mt_lastcaller = fn;
                audit_malloc_types[type->mt_type] = type;
        }
#endif /* AUDIT_MALLOC_DEBUG */
        return (hdr->mh_data);
}

/*
 * BSD free()
 */
void
#if AUDIT_MALLOC_DEBUG
_audit_free(void *addr, au_malloc_type_t *type)
#else
_audit_free(void *addr, __unused au_malloc_type_t *type)
#endif
{
        struct mhdr *hdr;

        if (addr == NULL)
                return;
        hdr = addr;
        hdr--;

        if (hdr->mh_magic != AUDIT_MHMAGIC) {
                panic("_audit_free(): hdr->mh_magic (%lx) != AUDIT_MHMAGIC",
                    hdr->mh_magic);
        }

#if AUDIT_MALLOC_DEBUG
        if (type != NULL) {
                OSAddAtomic64(-hdr->mh_size, &type->mt_size);
                OSAddAtomic(-1, &type->mt_inuse);
        }
#endif /* AUDIT_MALLOC_DEBUG */
        kfree(hdr, hdr->mh_size);
}
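
/*
 * Illustrative sketch of the allocator pair above, assuming the
 * non-AUDIT_MALLOC_DEBUG signatures.  The record type and the malloc type
 * name here are hypothetical; the real malloc type constants are declared
 * elsewhere in the audit subsystem.
 *
 *      struct example_rec *rec;
 *
 *      rec = _audit_malloc(sizeof(*rec), M_AUDITBSM, M_WAITOK | M_ZERO);
 *      if (rec != NULL) {
 *              use the zero-filled record
 *              _audit_free(rec, M_AUDITBSM);
 *      }
 */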

/*
 * Initialize a condition variable.  Must be called before use.
 */
void
_audit_cv_init(struct cv *cvp, const char *desc)
{

        if (desc == NULL)
                cvp->cv_description = "UNKNOWN";
        else
                cvp->cv_description = desc;
        cvp->cv_waiters = 0;
}

/*
 * Destroy a condition variable.
 */
void
_audit_cv_destroy(struct cv *cvp)
{

        cvp->cv_description = NULL;
        cvp->cv_waiters = 0;
}

/*
 * Signal a condition variable, waking up one waiting thread.
 */
void
_audit_cv_signal(struct cv *cvp)
{

        if (cvp->cv_waiters > 0) {
                wakeup_one((caddr_t)cvp);
                cvp->cv_waiters--;
        }
}

/*
 * Broadcast a signal to a condition variable.
 */
void
_audit_cv_broadcast(struct cv *cvp)
{

        if (cvp->cv_waiters > 0) {
                wakeup((caddr_t)cvp);
                cvp->cv_waiters = 0;
        }
}

/*
 * Wait on a condition variable.  A cv_signal or cv_broadcast on the same
 * condition variable will resume the thread.  It is recommended that the
 * mutex be held when cv_signal or cv_broadcast is called.
 */
void
_audit_cv_wait(struct cv *cvp, lck_mtx_t *mp, const char *desc)
{

        cvp->cv_waiters++;
        (void) msleep(cvp, mp, PZERO, desc, 0);
}

/*
 * Wait on a condition variable, allowing interruption by signals.  Returns 0
 * if the thread was resumed by cv_signal or cv_broadcast, or EINTR or
 * ERESTART if a signal was caught.  If ERESTART is returned the system call
 * should be restarted if possible.
 */
int
_audit_cv_wait_sig(struct cv *cvp, lck_mtx_t *mp, const char *desc)
{

        cvp->cv_waiters++;
        return (msleep(cvp, mp, PSOCK | PCATCH, desc, 0));
}
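
/*
 * Illustrative sketch (hypothetical names) of the condition variable
 * protocol above, using the struct mtx wrapper from this file.  The waiter
 * must hold the mutex across the wait; msleep() drops and retakes it
 * internally, so the predicate is rechecked in a loop.
 *
 *      static struct mtx m;
 *      static struct cv cv;
 *      static int ready;
 *
 *      Waiter:
 *      lck_mtx_lock(m.mtx_lock);
 *      while (!ready)
 *              _audit_cv_wait(&cv, m.mtx_lock, "example");
 *      lck_mtx_unlock(m.mtx_lock);
 *
 *      Waker:
 *      lck_mtx_lock(m.mtx_lock);
 *      ready = 1;
 *      _audit_cv_signal(&cv);
 *      lck_mtx_unlock(m.mtx_lock);
 */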

/*
 * BSD Mutexes.
 */
void
#if DIAGNOSTIC
_audit_mtx_init(struct mtx *mp, const char *lckname)
#else
_audit_mtx_init(struct mtx *mp, __unused const char *lckname)
#endif
{
        mp->mtx_lock = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(mp->mtx_lock != NULL,
            ("_audit_mtx_init: Could not allocate a mutex."));
#if DIAGNOSTIC
        strlcpy(mp->mtx_name, lckname, AU_MAX_LCK_NAME);
#endif
}

void
_audit_mtx_destroy(struct mtx *mp)
{

        if (mp->mtx_lock) {
                lck_mtx_free(mp->mtx_lock, audit_lck_grp);
                mp->mtx_lock = NULL;
        }
}

/*
 * BSD rw locks.
 */
void
#if DIAGNOSTIC
_audit_rw_init(struct rwlock *lp, const char *lckname)
#else
_audit_rw_init(struct rwlock *lp, __unused const char *lckname)
#endif
{
        lp->rw_lock = lck_rw_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(lp->rw_lock != NULL,
            ("_audit_rw_init: Could not allocate a rw lock."));
#if DIAGNOSTIC
        strlcpy(lp->rw_name, lckname, AU_MAX_LCK_NAME);
#endif
}

void
_audit_rw_destroy(struct rwlock *lp)
{

        if (lp->rw_lock) {
                lck_rw_free(lp->rw_lock, audit_lck_grp);
                lp->rw_lock = NULL;
        }
}
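
/*
 * Illustrative sketch of the rwlock wrapper above, calling the underlying
 * lck_rw_* primitives on the embedded lock directly (any convenience macros
 * would live in audit_bsd.h).  lck_rw_done() releases either hold type.
 *
 *      static struct rwlock rw;
 *
 *      _audit_rw_init(&rw, "example");
 *
 *      Reader:
 *      lck_rw_lock_shared(rw.rw_lock);
 *      read shared state
 *      lck_rw_done(rw.rw_lock);
 *
 *      Writer:
 *      lck_rw_lock_exclusive(rw.rw_lock);
 *      mutate shared state
 *      lck_rw_done(rw.rw_lock);
 */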

/*
 * Wait on a condition variable in a continuation (i.e. yield kernel stack).
 * A cv_signal or cv_broadcast on the same condition variable will cause
 * the thread to be scheduled.
 */
int
_audit_cv_wait_continuation(struct cv *cvp, lck_mtx_t *mp,
    thread_continue_t function)
{
        int status = KERN_SUCCESS;

        cvp->cv_waiters++;
        assert_wait(cvp, THREAD_UNINT);
        lck_mtx_unlock(mp);

        status = thread_block(function);

        /* Should not be reached, but just in case, re-lock. */
        lck_mtx_lock(mp);

        return (status);
}

/*
 * Simple recursive lock.
 */
void
#if DIAGNOSTIC
_audit_rlck_init(struct rlck *lp, const char *lckname)
#else
_audit_rlck_init(struct rlck *lp, __unused const char *lckname)
#endif
{

        lp->rl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(lp->rl_mtx != NULL,
            ("_audit_rlck_init: Could not allocate a recursive lock."));
#if DIAGNOSTIC
        strlcpy(lp->rl_name, lckname, AU_MAX_LCK_NAME);
#endif
        lp->rl_thread = 0;
        lp->rl_recurse = 0;
}

/*
 * Recursive lock.  Allows the same thread to lock the same lock repeatedly.
 */
void
_audit_rlck_lock(struct rlck *lp)
{

        if (lp->rl_thread == current_thread()) {
                OSAddAtomic(1, &lp->rl_recurse);
                KASSERT(lp->rl_recurse < 10000,
                    ("_audit_rlck_lock: lock nested too deep."));
        } else {
                lck_mtx_lock(lp->rl_mtx);
                lp->rl_thread = current_thread();
                lp->rl_recurse = 1;
        }
}

/*
 * Recursive unlock.  Must be called by the thread that holds the lock.
 */
void
_audit_rlck_unlock(struct rlck *lp)
{
        KASSERT(lp->rl_thread == current_thread(),
            ("_audit_rlck_unlock(): Don't own lock."));

        /* Note: OSAddAtomic returns the old value. */
        if (OSAddAtomic(-1, &lp->rl_recurse) == 1) {
                lp->rl_thread = 0;
                lck_mtx_unlock(lp->rl_mtx);
        }
}
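
/*
 * Illustrative sketch of balanced recursive locking with the rlck above:
 * every _audit_rlck_lock() must be matched by an _audit_rlck_unlock(), and
 * the underlying mutex is released only when the recursion count returns
 * to zero.
 *
 *      static struct rlck rl;
 *
 *      _audit_rlck_init(&rl, "example");
 *
 *      _audit_rlck_lock(&rl);          rl_recurse == 1, mutex taken
 *      _audit_rlck_lock(&rl);          rl_recurse == 2, same thread, no block
 *      _audit_rlck_unlock(&rl);        rl_recurse == 1, still held
 *      _audit_rlck_unlock(&rl);        rl_recurse == 0, mutex released
 */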

void
_audit_rlck_destroy(struct rlck *lp)
{

        if (lp->rl_mtx) {
                lck_mtx_free(lp->rl_mtx, audit_lck_grp);
                lp->rl_mtx = NULL;
        }
}

/*
 * Recursive lock assert.
 */
void
_audit_rlck_assert(struct rlck *lp, u_int assert)
{
        thread_t cthd = current_thread();

        if (assert == LCK_MTX_ASSERT_OWNED && lp->rl_thread != cthd)
                panic("recursive lock (%p) not held by this thread (%p).",
                    lp, cthd);
        if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->rl_thread != 0)
                panic("recursive lock (%p) held by thread (%p).",
                    lp, cthd);
}

/*
 * Simple sleep lock.
 */
void
#if DIAGNOSTIC
_audit_slck_init(struct slck *lp, const char *lckname)
#else
_audit_slck_init(struct slck *lp, __unused const char *lckname)
#endif
{

        lp->sl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(lp->sl_mtx != NULL,
            ("_audit_slck_init: Could not allocate a sleep lock."));
#if DIAGNOSTIC
        strlcpy(lp->sl_name, lckname, AU_MAX_LCK_NAME);
#endif
        lp->sl_locked = 0;
        lp->sl_waiting = 0;
}

/*
 * Acquire a sleep lock.  The 'intr' flag determines whether the wait is
 * interruptible: if 'intr' is non-zero, signals or other events can
 * interrupt the sleep.
 */
wait_result_t
_audit_slck_lock(struct slck *lp, int intr)
{
        wait_result_t res = THREAD_AWAKENED;

        lck_mtx_lock(lp->sl_mtx);
        while (lp->sl_locked && res == THREAD_AWAKENED) {
                lp->sl_waiting = 1;
                res = lck_mtx_sleep(lp->sl_mtx, LCK_SLEEP_DEFAULT,
                    (event_t) lp, (intr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT);
        }
        if (res == THREAD_AWAKENED)
                lp->sl_locked = 1;
        lck_mtx_unlock(lp->sl_mtx);

        return (res);
}
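
/*
 * Illustrative sketch of an interruptible acquire with the sleep lock
 * above: a caller that passes intr != 0 must check the wait result, since
 * the lock was not taken if the sleep was interrupted.
 *
 *      static struct slck sl;
 *
 *      if (_audit_slck_lock(&sl, 1) != THREAD_AWAKENED)
 *              return (EINTR);         interrupted; lock not held
 *      critical section
 *      _audit_slck_unlock(&sl);
 */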

/*
 * Sleep lock unlock.  Wake up all the threads waiting for this lock.
 */
void
_audit_slck_unlock(struct slck *lp)
{

        lck_mtx_lock(lp->sl_mtx);
        lp->sl_locked = 0;
        if (lp->sl_waiting) {
                lp->sl_waiting = 0;

                /* Wake up *all* sleeping threads. */
                wakeup((event_t) lp);
        }
        lck_mtx_unlock(lp->sl_mtx);
}

/*
 * Try to acquire the sleep lock.  Does not sleep; returns non-zero if the
 * lock was acquired.
 */
int
_audit_slck_trylock(struct slck *lp)
{
        int result;

        lck_mtx_lock(lp->sl_mtx);
        result = !lp->sl_locked;
        if (result)
                lp->sl_locked = 1;
        lck_mtx_unlock(lp->sl_mtx);

        return (result);
}
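
/*
 * Illustrative sketch of the trylock pattern above, useful on paths that
 * must not sleep (the fallback here is hypothetical):
 *
 *      if (_audit_slck_trylock(&sl)) {
 *              fast path, lock held
 *              _audit_slck_unlock(&sl);
 *      } else {
 *              defer the work rather than sleeping
 *      }
 */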

/*
 * Sleep lock assert.
 */
void
_audit_slck_assert(struct slck *lp, u_int assert)
{

        if (assert == LCK_MTX_ASSERT_OWNED && lp->sl_locked == 0)
                panic("sleep lock (%p) not held.", lp);
        if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->sl_locked == 1)
                panic("sleep lock (%p) held.", lp);
}

void
_audit_slck_destroy(struct slck *lp)
{

        if (lp->sl_mtx) {
                lck_mtx_free(lp->sl_mtx, audit_lck_grp);
                lp->sl_mtx = NULL;
        }
}

/*
 * XXXss - This code was taken from bsd/netinet6/icmp6.c.  Maybe
 * ppsratecheck() should be made global in icmp6.c.
 */
#ifndef timersub
#define timersub(tvp, uvp, vvp)                                         \
        do {                                                            \
                (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;          \
                (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;       \
                if ((vvp)->tv_usec < 0) {                               \
                        (vvp)->tv_sec--;                                \
                        (vvp)->tv_usec += 1000000;                      \
                }                                                       \
        } while (0)
#endif
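
/*
 * Worked example of the microsecond borrow in timersub(): subtracting
 * {1, 900000} from {2, 100} first yields {1, -899900}; the negative
 * tv_usec triggers the borrow, giving the correct delta {0, 100100},
 * i.e. 0.100100 seconds.
 */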

/*
 * Packets (or events) per second limitation.
 */
int
_audit_ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
        struct timeval tv, delta;
        int rv;

        microtime(&tv);

        timersub(&tv, lasttime, &delta);

        /*
         * Check for 0,0 so that the message will be seen at least once.
         * If more than one second has passed since the last update of
         * lasttime, reset the counter.
         *
         * We increment *curpps even when *curpps < maxpps, since some
         * callers may use *curpps for statistics as well.
         */
        if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
            delta.tv_sec >= 1) {
                *lasttime = tv;
                *curpps = 0;
                rv = 1;
        } else if (maxpps < 0)
                rv = 1;
        else if (*curpps < maxpps)
                rv = 1;
        else
                rv = 0;
        if (*curpps + 1 > 0)
                *curpps = *curpps + 1;

        return (rv);
}
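
/*
 * Illustrative sketch of rate-limiting a diagnostic with the helper above
 * (the statics and message are hypothetical).  The counter state persists
 * across calls; at most five messages per second are printed here.
 *
 *      static struct timeval last;
 *      static int curpps;
 *
 *      if (_audit_ppsratecheck(&last, &curpps, 5))
 *              printf("audit: example rate-limited message\n");
 */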

/*
 * Initialize the lock group for audit-related locks and mutexes.
 */
void
_audit_lck_grp_init(void)
{
        audit_lck_grp = lck_grp_alloc_init("Audit", LCK_GRP_ATTR_NULL);

        KASSERT(audit_lck_grp != NULL,
            ("_audit_lck_grp_init: Could not allocate the audit lock group."));
}

int
audit_send_trigger(unsigned int trigger)
{
        mach_port_t audit_port;
        int error;

        error = host_get_audit_control_port(host_priv_self(), &audit_port);
        if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) {
                (void)audit_triggers(audit_port, trigger);
                ipc_port_release_send(audit_port);
                return (0);
        } else {
                printf("Cannot get audit control port\n");
                return (error);
        }
}
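
/*
 * Illustrative sketch: kernel callers signal the userland audit daemon
 * through this hook with one of the AUDIT_TRIGGER_* codes from bsm/audit.h,
 * for example when the audit partition runs low on space:
 *
 *      (void)audit_send_trigger(AUDIT_TRIGGER_LOW_SPACE);
 */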
#endif /* CONFIG_AUDIT */