1/*
2 * IDENTIFICATION:
3 * stub generated Tue Apr 9 11:33:41 2019
4 * with a MiG generated by bootstrap_cmds-96.20.2.200.4
5 * OPTIONS:
6 * KernelServer
7 */
8
9/* Module map */
10
11#define __MIG_check__Request__map_subsystem__ 1
12
13#include "vm32_map_server.h"
14
15#ifndef mig_internal
16#define mig_internal static __inline__
17#endif /* mig_internal */
18
19#ifndef mig_external
20#define mig_external
21#endif /* mig_external */
22
23#if !defined(__MigTypeCheck) && defined(TypeCheck)
24#define __MigTypeCheck TypeCheck /* Legacy setting */
25#endif /* !defined(__MigTypeCheck) */
26
27#if !defined(__MigKernelSpecificCode) && defined(_MIG_KERNEL_SPECIFIC_CODE_)
28#define __MigKernelSpecificCode _MIG_KERNEL_SPECIFIC_CODE_ /* Legacy setting */
29#endif /* !defined(__MigKernelSpecificCode) */
30
31#ifndef LimitCheck
32#define LimitCheck 0
33#endif /* LimitCheck */
34
35#ifndef min
36#define min(a,b) ( ((a) < (b))? (a): (b) )
37#endif /* min */
38
39#if !defined(_WALIGN_)
40#define _WALIGN_(x) (((x) + 3) & ~3)
41#endif /* !defined(_WALIGN_) */
42
43#if !defined(_WALIGNSZ_)
44#define _WALIGNSZ_(x) _WALIGN_(sizeof(x))
45#endif /* !defined(_WALIGNSZ_) */
46
47#ifndef UseStaticTemplates
48#define UseStaticTemplates 0
49#endif /* UseStaticTemplates */
50
51#ifndef __DeclareRcvRpc
52#define __DeclareRcvRpc(_NUM_, _NAME_)
53#endif /* __DeclareRcvRpc */
54
55#ifndef __BeforeRcvRpc
56#define __BeforeRcvRpc(_NUM_, _NAME_)
57#endif /* __BeforeRcvRpc */
58
59#ifndef __AfterRcvRpc
60#define __AfterRcvRpc(_NUM_, _NAME_)
61#endif /* __AfterRcvRpc */
62
63#ifndef __DeclareRcvSimple
64#define __DeclareRcvSimple(_NUM_, _NAME_)
65#endif /* __DeclareRcvSimple */
66
67#ifndef __BeforeRcvSimple
68#define __BeforeRcvSimple(_NUM_, _NAME_)
69#endif /* __BeforeRcvSimple */
70
71#ifndef __AfterRcvSimple
72#define __AfterRcvSimple(_NUM_, _NAME_)
73#endif /* __AfterRcvSimple */
74
75#define novalue void
76
77#if __MigKernelSpecificCode
78#define msgh_request_port msgh_remote_port
79#define MACH_MSGH_BITS_REQUEST(bits) MACH_MSGH_BITS_REMOTE(bits)
80#define msgh_reply_port msgh_local_port
81#define MACH_MSGH_BITS_REPLY(bits) MACH_MSGH_BITS_LOCAL(bits)
82#else
83#define msgh_request_port msgh_local_port
84#define MACH_MSGH_BITS_REQUEST(bits) MACH_MSGH_BITS_LOCAL(bits)
85#define msgh_reply_port msgh_remote_port
86#define MACH_MSGH_BITS_REPLY(bits) MACH_MSGH_BITS_REMOTE(bits)
87#endif /* __MigKernelSpecificCode */
88
89#define MIG_RETURN_ERROR(X, code) {\
90 ((mig_reply_error_t *)X)->RetCode = code;\
91 ((mig_reply_error_t *)X)->NDR = NDR_record;\
92 return;\
93 }
94
95/* Forward Declarations */
96
97
98mig_internal novalue _Xregion
99 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
100
101mig_internal novalue _Xallocate
102 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
103
104mig_internal novalue _Xdeallocate
105 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
106
107mig_internal novalue _Xprotect
108 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
109
110mig_internal novalue _Xinherit
111 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
112
113mig_internal novalue _Xread
114 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
115
116mig_internal novalue _Xread_list
117 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
118
119mig_internal novalue _Xwrite
120 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
121
122mig_internal novalue _Xcopy
123 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
124
125mig_internal novalue _Xread_overwrite
126 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
127
128mig_internal novalue _Xmsync
129 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
130
131mig_internal novalue _Xbehavior_set
132 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
133
134mig_internal novalue _Xmap
135 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
136
137mig_internal novalue _Xmachine_attribute
138 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
139
140mig_internal novalue _Xremap
141 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
142
143mig_internal novalue _X_task_wire
144 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
145
146mig_internal novalue _Xmake_memory_entry
147 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
148
149mig_internal novalue _Xmap_page_query
150 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
151
152mig_internal novalue _Xregion_info
153 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
154
155mig_internal novalue _Xmapped_pages_info
156 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
157
158mig_internal novalue _Xregion_recurse
159 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
160
161mig_internal novalue _Xregion_recurse_64
162 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
163
164mig_internal novalue _Xregion_info_64
165 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
166
167mig_internal novalue _Xregion_64
168 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
169
170mig_internal novalue _Xmake_memory_entry_64
171 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
172
173mig_internal novalue _Xmap_64
174 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
175
176mig_internal novalue _Xpurgable_control
177 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
178
179mig_internal novalue _X_map_exec_lockdown
180 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
181
182
183#if ( __MigTypeCheck )
184#if __MIG_check__Request__map_subsystem__
185#if !defined(__MIG_check__Request__region_t__defined)
186#define __MIG_check__Request__region_t__defined
187
188mig_internal kern_return_t __MIG_check__Request__region_t(__attribute__((__unused__)) __Request__region_t *In0P)
189{
190
191 typedef __Request__region_t __Request;
192#if __MigTypeCheck
193 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
194 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
195 return MIG_BAD_ARGUMENTS;
196#endif /* __MigTypeCheck */
197
198 return MACH_MSG_SUCCESS;
199}
200#endif /* !defined(__MIG_check__Request__region_t__defined) */
201#endif /* __MIG_check__Request__map_subsystem__ */
202#endif /* ( __MigTypeCheck ) */
203
204
/* Routine region */
/*
 * Demux handler for msg id 3800 ("region"): validates the request,
 * prepares the reply's port descriptor, calls vm32_region() on the map
 * derived from the request port, and builds a variable-length reply
 * trimmed to the number of info words actually returned.
 * NOTE(review): generated MIG code — In0P/OutP presumably may alias the
 * same buffer, so the In->Out field copy order must not be changed.
 */
mig_internal novalue _Xregion
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm_region_flavor_t flavor;
		mach_msg_type_number_t infoCnt;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__region_t __Request;
	typedef __Reply__region_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * mach_msg_header_t Head;
	 * NDR_record_t NDR;
	 * kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__region_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__region_t__defined */

#if __MigKernelSpecificCode
#if UseStaticTemplates
	const static mach_msg_port_descriptor_t object_nameTemplate = {
		/* name = */ MACH_PORT_NULL,
		/* pad1 = */ 0,
		/* pad2 = */ 0,
		/* disp = */ 17,
		/* type = */ MACH_MSG_PORT_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#else
#if UseStaticTemplates
	const static mach_msg_port_descriptor_t object_nameTemplate = {
		/* name = */ MACH_PORT_NULL,
		/* pad1 = */ 0,
		/* pad2 = */ 0,
		/* disp = */ 17,
		/* type = */ MACH_MSG_PORT_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#endif /* __MigKernelSpecificCode */
	kern_return_t RetCode;
	vm_map_t target_task;

	__DeclareRcvRpc(3800, "region")
	__BeforeRcvRpc(3800, "region")

	/* Reject malformed requests before touching any payload fields. */
#if defined(__MIG_check__Request__region_t__defined)
	check_result = __MIG_check__Request__region_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__region_t__defined) */

	/* Initialize the reply's object_name port descriptor (disposition 17). */
#if UseStaticTemplates
	OutP->object_name = object_nameTemplate;
#else /* UseStaticTemplates */
#if __MigKernelSpecificCode
	OutP->object_name.disposition = 17;
#else
	OutP->object_name.disposition = 17;
#endif /* __MigKernelSpecificCode */
#if !(defined(KERNEL) && defined(__LP64__))
	OutP->object_name.pad1 = 0;
#endif
	OutP->object_name.pad2 = 0;
	OutP->object_name.type = MACH_MSG_PORT_DESCRIPTOR;
#if defined(KERNEL)
	OutP->object_name.pad_end = 0;
#endif
#endif /* UseStaticTemplates */


	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* Clamp the info count to the smaller of the stub's maximum (10
	 * 32-bit words) and the caller-supplied buffer size. */
	OutP->infoCnt = 10;
	if (In0P->infoCnt < OutP->infoCnt)
		OutP->infoCnt = In0P->infoCnt;

	RetCode = vm32_region(target_task, &In0P->address, &OutP->size, In0P->flavor, OutP->info, &OutP->infoCnt, &OutP->object_name.name);
	vm_map_deallocate(target_task);	/* drop ref taken by convert_port_to_map */
	if (RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	OutP->address = In0P->address;
	/* Trim the reply to the info words actually filled in: full Reply
	 * minus the 40-byte maximum info array, plus 4 bytes per word used. */
	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply) - 40) + (((4 * OutP->infoCnt)));

	/* Reply carries one port descriptor, so mark it complex. */
	OutP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
	OutP->msgh_body.msgh_descriptor_count = 1;
	__AfterRcvRpc(3800, "region")
}
319
320#if ( __MigTypeCheck )
321#if __MIG_check__Request__map_subsystem__
322#if !defined(__MIG_check__Request__allocate_t__defined)
323#define __MIG_check__Request__allocate_t__defined
324
325mig_internal kern_return_t __MIG_check__Request__allocate_t(__attribute__((__unused__)) __Request__allocate_t *In0P)
326{
327
328 typedef __Request__allocate_t __Request;
329#if __MigTypeCheck
330 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
331 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
332 return MIG_BAD_ARGUMENTS;
333#endif /* __MigTypeCheck */
334
335 return MACH_MSG_SUCCESS;
336}
337#endif /* !defined(__MIG_check__Request__allocate_t__defined) */
338#endif /* __MIG_check__Request__map_subsystem__ */
339#endif /* ( __MigTypeCheck ) */
340
341
/* Routine allocate */
/*
 * Demux handler for msg id 3801 ("allocate"): validates the request,
 * calls vm32_allocate() on the map derived from the request port, and
 * returns the (possibly kernel-chosen) address in the reply.
 */
mig_internal novalue _Xallocate
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		int flags;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__allocate_t __Request;
	typedef __Reply__allocate_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * mach_msg_header_t Head;
	 * NDR_record_t NDR;
	 * kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__allocate_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__allocate_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3801, "allocate")
	__BeforeRcvRpc(3801, "allocate")

	/* Reject malformed requests before touching any payload fields. */
#if defined(__MIG_check__Request__allocate_t__defined)
	check_result = __MIG_check__Request__allocate_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__allocate_t__defined) */

	target_task = convert_port_entry_to_map(In0P->Head.msgh_request_port);

	/* address is in-out: vm32_allocate may update it with the chosen base. */
	OutP->RetCode = vm32_allocate(target_task, &In0P->address, In0P->size, In0P->flags);
	vm_map_deallocate(target_task);	/* drop ref taken by convert_port_entry_to_map */
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	OutP->address = In0P->address;

	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(3801, "allocate")
}
410
411#if ( __MigTypeCheck )
412#if __MIG_check__Request__map_subsystem__
413#if !defined(__MIG_check__Request__deallocate_t__defined)
414#define __MIG_check__Request__deallocate_t__defined
415
416mig_internal kern_return_t __MIG_check__Request__deallocate_t(__attribute__((__unused__)) __Request__deallocate_t *In0P)
417{
418
419 typedef __Request__deallocate_t __Request;
420#if __MigTypeCheck
421 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
422 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
423 return MIG_BAD_ARGUMENTS;
424#endif /* __MigTypeCheck */
425
426 return MACH_MSG_SUCCESS;
427}
428#endif /* !defined(__MIG_check__Request__deallocate_t__defined) */
429#endif /* __MIG_check__Request__map_subsystem__ */
430#endif /* ( __MigTypeCheck ) */
431
432
/* Routine deallocate */
/*
 * Demux handler for msg id 3802 ("deallocate"): validates the request
 * and calls vm32_deallocate() on the map derived from the request port.
 * The reply carries only RetCode (simple mig_reply_error_t shape).
 */
mig_internal novalue _Xdeallocate
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__deallocate_t __Request;
	typedef __Reply__deallocate_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * mach_msg_header_t Head;
	 * NDR_record_t NDR;
	 * kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__deallocate_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__deallocate_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3802, "deallocate")
	__BeforeRcvRpc(3802, "deallocate")

	/* Reject malformed requests before touching any payload fields. */
#if defined(__MIG_check__Request__deallocate_t__defined)
	check_result = __MIG_check__Request__deallocate_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__deallocate_t__defined) */

	target_task = convert_port_entry_to_map(In0P->Head.msgh_request_port);

	OutP->RetCode = vm32_deallocate(target_task, In0P->address, In0P->size);
	vm_map_deallocate(target_task);	/* drop ref taken by convert_port_entry_to_map */
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	__AfterRcvRpc(3802, "deallocate")
}
494
495#if ( __MigTypeCheck )
496#if __MIG_check__Request__map_subsystem__
497#if !defined(__MIG_check__Request__protect_t__defined)
498#define __MIG_check__Request__protect_t__defined
499
500mig_internal kern_return_t __MIG_check__Request__protect_t(__attribute__((__unused__)) __Request__protect_t *In0P)
501{
502
503 typedef __Request__protect_t __Request;
504#if __MigTypeCheck
505 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
506 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
507 return MIG_BAD_ARGUMENTS;
508#endif /* __MigTypeCheck */
509
510 return MACH_MSG_SUCCESS;
511}
512#endif /* !defined(__MIG_check__Request__protect_t__defined) */
513#endif /* __MIG_check__Request__map_subsystem__ */
514#endif /* ( __MigTypeCheck ) */
515
516
/* Routine protect */
/*
 * Demux handler for msg id 3803 ("protect"): validates the request and
 * calls vm32_protect() on the map derived from the request port.
 * The reply carries only RetCode.
 */
mig_internal novalue _Xprotect
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		boolean_t set_maximum;
		vm_prot_t new_protection;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__protect_t __Request;
	typedef __Reply__protect_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * mach_msg_header_t Head;
	 * NDR_record_t NDR;
	 * kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__protect_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__protect_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3803, "protect")
	__BeforeRcvRpc(3803, "protect")

	/* Reject malformed requests before touching any payload fields. */
#if defined(__MIG_check__Request__protect_t__defined)
	check_result = __MIG_check__Request__protect_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__protect_t__defined) */

	target_task = convert_port_entry_to_map(In0P->Head.msgh_request_port);

	OutP->RetCode = vm32_protect(target_task, In0P->address, In0P->size, In0P->set_maximum, In0P->new_protection);
	vm_map_deallocate(target_task);	/* drop ref taken by convert_port_entry_to_map */
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	__AfterRcvRpc(3803, "protect")
}
580
581#if ( __MigTypeCheck )
582#if __MIG_check__Request__map_subsystem__
583#if !defined(__MIG_check__Request__inherit_t__defined)
584#define __MIG_check__Request__inherit_t__defined
585
586mig_internal kern_return_t __MIG_check__Request__inherit_t(__attribute__((__unused__)) __Request__inherit_t *In0P)
587{
588
589 typedef __Request__inherit_t __Request;
590#if __MigTypeCheck
591 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
592 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
593 return MIG_BAD_ARGUMENTS;
594#endif /* __MigTypeCheck */
595
596 return MACH_MSG_SUCCESS;
597}
598#endif /* !defined(__MIG_check__Request__inherit_t__defined) */
599#endif /* __MIG_check__Request__map_subsystem__ */
600#endif /* ( __MigTypeCheck ) */
601
602
/* Routine inherit */
/*
 * Demux handler for msg id 3804 ("inherit"): validates the request and
 * calls vm32_inherit() on the map derived from the request port.
 * The reply carries only RetCode.
 */
mig_internal novalue _Xinherit
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		vm_inherit_t new_inheritance;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__inherit_t __Request;
	typedef __Reply__inherit_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * mach_msg_header_t Head;
	 * NDR_record_t NDR;
	 * kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__inherit_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__inherit_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3804, "inherit")
	__BeforeRcvRpc(3804, "inherit")

	/* Reject malformed requests before touching any payload fields. */
#if defined(__MIG_check__Request__inherit_t__defined)
	check_result = __MIG_check__Request__inherit_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__inherit_t__defined) */

	target_task = convert_port_entry_to_map(In0P->Head.msgh_request_port);

	OutP->RetCode = vm32_inherit(target_task, In0P->address, In0P->size, In0P->new_inheritance);
	vm_map_deallocate(target_task);	/* drop ref taken by convert_port_entry_to_map */
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	__AfterRcvRpc(3804, "inherit")
}
665
666#if ( __MigTypeCheck )
667#if __MIG_check__Request__map_subsystem__
668#if !defined(__MIG_check__Request__read_t__defined)
669#define __MIG_check__Request__read_t__defined
670
671mig_internal kern_return_t __MIG_check__Request__read_t(__attribute__((__unused__)) __Request__read_t *In0P)
672{
673
674 typedef __Request__read_t __Request;
675#if __MigTypeCheck
676 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
677 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
678 return MIG_BAD_ARGUMENTS;
679#endif /* __MigTypeCheck */
680
681 return MACH_MSG_SUCCESS;
682}
683#endif /* !defined(__MIG_check__Request__read_t__defined) */
684#endif /* __MIG_check__Request__map_subsystem__ */
685#endif /* ( __MigTypeCheck ) */
686
687
/* Routine read */
/*
 * Demux handler for msg id 3805 ("read"): validates the request, sets up
 * an out-of-line (OOL) data descriptor in the reply, calls vm32_read()
 * on the map derived from the request port, and returns the read bytes
 * as OOL data (virtual copy, not deallocated on send).
 */
mig_internal novalue _Xread
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__read_t __Request;
	typedef __Reply__read_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * mach_msg_header_t Head;
	 * NDR_record_t NDR;
	 * kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__read_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__read_t__defined */

#if __MigKernelSpecificCode
#if UseStaticTemplates
	const static mach_msg_ool_descriptor_t dataTemplate = {
		/* addr = */ (void *)0,
		/* size = */ 0,
		/* deal = */ FALSE,
		/* copy = */ MACH_MSG_VIRTUAL_COPY,
		/* pad2 = */ 0,
		/* type = */ MACH_MSG_OOL_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#else
#if UseStaticTemplates
	const static mach_msg_ool_descriptor_t dataTemplate = {
		/* addr = */ (void *)0,
		/* size = */ 0,
		/* deal = */ FALSE,
		/* copy = */ MACH_MSG_VIRTUAL_COPY,
		/* pad2 = */ 0,
		/* type = */ MACH_MSG_OOL_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#endif /* __MigKernelSpecificCode */
	kern_return_t RetCode;
	vm_map_t target_task;

	__DeclareRcvRpc(3805, "read")
	__BeforeRcvRpc(3805, "read")

	/* Reject malformed requests before touching any payload fields. */
#if defined(__MIG_check__Request__read_t__defined)
	check_result = __MIG_check__Request__read_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__read_t__defined) */

	/* Initialize the reply's OOL descriptor; address/size filled in below. */
#if UseStaticTemplates
	OutP->data = dataTemplate;
#else /* UseStaticTemplates */
	OutP->data.deallocate = FALSE;
	OutP->data.copy = MACH_MSG_VIRTUAL_COPY;
	OutP->data.pad1 = 0;
	OutP->data.type = MACH_MSG_OOL_DESCRIPTOR;
#if defined(KERNEL) && !defined(__LP64__)
	OutP->data.pad_end = 0;
#endif
#endif /* UseStaticTemplates */


	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* vm32_read writes the OOL buffer address and byte count directly. */
	RetCode = vm32_read(target_task, In0P->address, In0P->size, (vm_offset_t *)&(OutP->data.address), &OutP->dataCnt);
	vm_map_deallocate(target_task);	/* drop ref taken by convert_port_to_map */
	if (RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */
	OutP->data.size = OutP->dataCnt;


	OutP->NDR = NDR_record;


	/* Reply carries one OOL descriptor, so mark it complex. */
	OutP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	OutP->msgh_body.msgh_descriptor_count = 1;
	__AfterRcvRpc(3805, "read")
}
793
794#if ( __MigTypeCheck )
795#if __MIG_check__Request__map_subsystem__
796#if !defined(__MIG_check__Request__read_list_t__defined)
797#define __MIG_check__Request__read_list_t__defined
798
799mig_internal kern_return_t __MIG_check__Request__read_list_t(__attribute__((__unused__)) __Request__read_list_t *In0P)
800{
801
802 typedef __Request__read_list_t __Request;
803#if __MigTypeCheck
804 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
805 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
806 return MIG_BAD_ARGUMENTS;
807#endif /* __MigTypeCheck */
808
809 return MACH_MSG_SUCCESS;
810}
811#endif /* !defined(__MIG_check__Request__read_list_t__defined) */
812#endif /* __MIG_check__Request__map_subsystem__ */
813#endif /* ( __MigTypeCheck ) */
814
815
/* Routine read_list */
/*
 * Demux handler for msg id 3806 ("read_list"): validates the request,
 * calls vm32_read_list() on the map derived from the request port, and
 * copies the (fixed-size) data_list array back into the reply.
 */
mig_internal novalue _Xread_list
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_read_entry_t data_list;
		natural_t count;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__read_list_t __Request;
	typedef __Reply__read_list_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * mach_msg_header_t Head;
	 * NDR_record_t NDR;
	 * kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__read_list_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__read_list_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3806, "read_list")
	__BeforeRcvRpc(3806, "read_list")

	/* Reject malformed requests before touching any payload fields. */
#if defined(__MIG_check__Request__read_list_t__defined)
	check_result = __MIG_check__Request__read_list_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__read_list_t__defined) */

	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* data_list is in-out: vm32_read_list fills each entry's data in place. */
	OutP->RetCode = vm32_read_list(target_task, In0P->data_list, In0P->count);
	vm_map_deallocate(target_task);	/* drop ref taken by convert_port_to_map */
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	/* Bulk-copy the 2048-byte data_list array into the reply via a
	 * synthetic struct assignment (MIG's idiom for fixed arrays). */
	{ typedef struct { char data[2048]; } *sp;
		* (sp) OutP->data_list = * (sp) In0P->data_list;
	}

	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(3806, "read_list")
}
885
886#if ( __MigTypeCheck )
887#if __MIG_check__Request__map_subsystem__
888#if !defined(__MIG_check__Request__write_t__defined)
889#define __MIG_check__Request__write_t__defined
890
891mig_internal kern_return_t __MIG_check__Request__write_t(__attribute__((__unused__)) __Request__write_t *In0P)
892{
893
894 typedef __Request__write_t __Request;
895#if __MigTypeCheck
896 if (!(In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
897 (In0P->msgh_body.msgh_descriptor_count != 1) ||
898 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
899 return MIG_BAD_ARGUMENTS;
900#endif /* __MigTypeCheck */
901
902#if __MigTypeCheck
903 if (In0P->data.type != MACH_MSG_OOL_DESCRIPTOR)
904 return MIG_TYPE_ERROR;
905#endif /* __MigTypeCheck */
906
907#if __MigTypeCheck
908 if (In0P->data.size != In0P->dataCnt)
909 return MIG_TYPE_ERROR;
910#endif /* __MigTypeCheck */
911
912 return MACH_MSG_SUCCESS;
913}
914#endif /* !defined(__MIG_check__Request__write_t__defined) */
915#endif /* __MIG_check__Request__map_subsystem__ */
916#endif /* ( __MigTypeCheck ) */
917
918
/* Routine write */
/*
 * Demux handler for msg id 3807 ("write"): validates the request
 * (including its OOL data descriptor), then calls vm32_write() with the
 * OOL buffer address and the kernel-verified descriptor size.
 * The reply carries only RetCode.
 */
mig_internal novalue _Xwrite
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	typedef struct {
		mach_msg_header_t Head;
		/* start of the kernel processed data */
		mach_msg_body_t msgh_body;
		mach_msg_ool_descriptor_t data;
		/* end of the kernel processed data */
		NDR_record_t NDR;
		vm32_address_t address;
		mach_msg_type_number_t dataCnt;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__write_t __Request;
	typedef __Reply__write_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * mach_msg_header_t Head;
	 * NDR_record_t NDR;
	 * kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__write_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__write_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3807, "write")
	__BeforeRcvRpc(3807, "write")

	/* Reject malformed requests (and bad descriptors) before use. */
#if defined(__MIG_check__Request__write_t__defined)
	check_result = __MIG_check__Request__write_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__write_t__defined) */

	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* Use data.size (kernel-verified) rather than the caller's dataCnt. */
	OutP->RetCode = vm32_write(target_task, In0P->address, (vm_offset_t)(In0P->data.address), In0P->data.size);
	vm_map_deallocate(target_task);	/* drop ref taken by convert_port_to_map */
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	__AfterRcvRpc(3807, "write")
}
984
985#if ( __MigTypeCheck )
986#if __MIG_check__Request__map_subsystem__
987#if !defined(__MIG_check__Request__copy_t__defined)
988#define __MIG_check__Request__copy_t__defined
989
990mig_internal kern_return_t __MIG_check__Request__copy_t(__attribute__((__unused__)) __Request__copy_t *In0P)
991{
992
993 typedef __Request__copy_t __Request;
994#if __MigTypeCheck
995 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
996 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
997 return MIG_BAD_ARGUMENTS;
998#endif /* __MigTypeCheck */
999
1000 return MACH_MSG_SUCCESS;
1001}
1002#endif /* !defined(__MIG_check__Request__copy_t__defined) */
1003#endif /* __MIG_check__Request__map_subsystem__ */
1004#endif /* ( __MigTypeCheck ) */
1005
1006
/* Routine copy */
/*
 * Demux handler for msg id 3808 ("copy"): validates the request and
 * calls vm32_copy() on the map derived from the request port.
 * The reply carries only RetCode.
 */
mig_internal novalue _Xcopy
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t source_address;
		vm32_size_t size;
		vm32_address_t dest_address;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__copy_t __Request;
	typedef __Reply__copy_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * mach_msg_header_t Head;
	 * NDR_record_t NDR;
	 * kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__copy_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__copy_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3808, "copy")
	__BeforeRcvRpc(3808, "copy")

	/* Reject malformed requests before touching any payload fields. */
#if defined(__MIG_check__Request__copy_t__defined)
	check_result = __MIG_check__Request__copy_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__copy_t__defined) */

	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	OutP->RetCode = vm32_copy(target_task, In0P->source_address, In0P->size, In0P->dest_address);
	vm_map_deallocate(target_task);	/* drop ref taken by convert_port_to_map */
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	__AfterRcvRpc(3808, "copy")
}
1069
1070#if ( __MigTypeCheck )
1071#if __MIG_check__Request__map_subsystem__
1072#if !defined(__MIG_check__Request__read_overwrite_t__defined)
1073#define __MIG_check__Request__read_overwrite_t__defined
1074
1075mig_internal kern_return_t __MIG_check__Request__read_overwrite_t(__attribute__((__unused__)) __Request__read_overwrite_t *In0P)
1076{
1077
1078 typedef __Request__read_overwrite_t __Request;
1079#if __MigTypeCheck
1080 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1081 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1082 return MIG_BAD_ARGUMENTS;
1083#endif /* __MigTypeCheck */
1084
1085 return MACH_MSG_SUCCESS;
1086}
1087#endif /* !defined(__MIG_check__Request__read_overwrite_t__defined) */
1088#endif /* __MIG_check__Request__map_subsystem__ */
1089#endif /* ( __MigTypeCheck ) */
1090
1091
1092/* Routine read_overwrite */
mig_internal novalue _Xread_overwrite
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	/* MIG server-side dispatcher for routine "read_overwrite"
	 * (msg id 3809). Validates the request, calls
	 * vm32_read_overwrite(), and returns outsize in the reply. */

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Expected wire layout of the incoming request message. */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		vm32_address_t data;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__read_overwrite_t __Request;
	typedef __Reply__read_overwrite_t Reply __attribute__((unused));

	/*
	 * Error replies use the generic MIG shape:
	 * typedef struct {
	 *	mach_msg_header_t Head;
	 *	NDR_record_t NDR;
	 *	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__read_overwrite_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__read_overwrite_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3809, "read_overwrite")
	__BeforeRcvRpc(3809, "read_overwrite")

	/* Reject malformed messages before trusting any payload field. */
#if defined(__MIG_check__Request__read_overwrite_t__defined)
	check_result = __MIG_check__Request__read_overwrite_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__read_overwrite_t__defined) */

	/* Resolve the request port to its vm_map; the reference is
	 * dropped immediately after the call. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* outsize is written straight into the reply buffer. */
	OutP->RetCode = vm32_read_overwrite(target_task, In0P->address, In0P->size, In0P->data, &OutP->outsize);
	vm_map_deallocate(target_task);
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(3809, "read_overwrite")
}
1158
1159#if ( __MigTypeCheck )
1160#if __MIG_check__Request__map_subsystem__
1161#if !defined(__MIG_check__Request__msync_t__defined)
1162#define __MIG_check__Request__msync_t__defined
1163
1164mig_internal kern_return_t __MIG_check__Request__msync_t(__attribute__((__unused__)) __Request__msync_t *In0P)
1165{
1166
1167 typedef __Request__msync_t __Request;
1168#if __MigTypeCheck
1169 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1170 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1171 return MIG_BAD_ARGUMENTS;
1172#endif /* __MigTypeCheck */
1173
1174 return MACH_MSG_SUCCESS;
1175}
1176#endif /* !defined(__MIG_check__Request__msync_t__defined) */
1177#endif /* __MIG_check__Request__map_subsystem__ */
1178#endif /* ( __MigTypeCheck ) */
1179
1180
1181/* Routine msync */
mig_internal novalue _Xmsync
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	/* MIG server-side dispatcher for routine "msync" (msg id 3810).
	 * Validates the request, calls vm32_msync(), and writes a
	 * simple (NDR + RetCode) reply in place at OutHeadP. */

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Expected wire layout of the incoming request message. */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		vm_sync_t sync_flags;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__msync_t __Request;
	typedef __Reply__msync_t Reply __attribute__((unused));

	/*
	 * The reply is shaped like the generic MIG error reply:
	 * typedef struct {
	 *	mach_msg_header_t Head;
	 *	NDR_record_t NDR;
	 *	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__msync_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__msync_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3810, "msync")
	__BeforeRcvRpc(3810, "msync")

	/* Reject malformed messages before trusting any payload field. */
#if defined(__MIG_check__Request__msync_t__defined)
	check_result = __MIG_check__Request__msync_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__msync_t__defined) */

	/* Resolve the request port to its vm_map; the reference is
	 * dropped immediately after the call. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	OutP->RetCode = vm32_msync(target_task, In0P->address, In0P->size, In0P->sync_flags);
	vm_map_deallocate(target_task);
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	__AfterRcvRpc(3810, "msync")
}
1243
1244#if ( __MigTypeCheck )
1245#if __MIG_check__Request__map_subsystem__
1246#if !defined(__MIG_check__Request__behavior_set_t__defined)
1247#define __MIG_check__Request__behavior_set_t__defined
1248
1249mig_internal kern_return_t __MIG_check__Request__behavior_set_t(__attribute__((__unused__)) __Request__behavior_set_t *In0P)
1250{
1251
1252 typedef __Request__behavior_set_t __Request;
1253#if __MigTypeCheck
1254 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1255 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1256 return MIG_BAD_ARGUMENTS;
1257#endif /* __MigTypeCheck */
1258
1259 return MACH_MSG_SUCCESS;
1260}
1261#endif /* !defined(__MIG_check__Request__behavior_set_t__defined) */
1262#endif /* __MIG_check__Request__map_subsystem__ */
1263#endif /* ( __MigTypeCheck ) */
1264
1265
1266/* Routine behavior_set */
mig_internal novalue _Xbehavior_set
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	/* MIG server-side dispatcher for routine "behavior_set"
	 * (msg id 3811). Validates the request, calls
	 * vm32_behavior_set(), and writes a simple reply. */

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Expected wire layout of the incoming request message. */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		vm_behavior_t new_behavior;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__behavior_set_t __Request;
	typedef __Reply__behavior_set_t Reply __attribute__((unused));

	/*
	 * The reply is shaped like the generic MIG error reply:
	 * typedef struct {
	 *	mach_msg_header_t Head;
	 *	NDR_record_t NDR;
	 *	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__behavior_set_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__behavior_set_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3811, "behavior_set")
	__BeforeRcvRpc(3811, "behavior_set")

	/* Reject malformed messages before trusting any payload field. */
#if defined(__MIG_check__Request__behavior_set_t__defined)
	check_result = __MIG_check__Request__behavior_set_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__behavior_set_t__defined) */

	/* Resolve the request port to its vm_map; the reference is
	 * dropped immediately after the call. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	OutP->RetCode = vm32_behavior_set(target_task, In0P->address, In0P->size, In0P->new_behavior);
	vm_map_deallocate(target_task);
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	__AfterRcvRpc(3811, "behavior_set")
}
1328
1329#if ( __MigTypeCheck )
1330#if __MIG_check__Request__map_subsystem__
1331#if !defined(__MIG_check__Request__map_t__defined)
1332#define __MIG_check__Request__map_t__defined
1333
1334mig_internal kern_return_t __MIG_check__Request__map_t(__attribute__((__unused__)) __Request__map_t *In0P)
1335{
1336
1337 typedef __Request__map_t __Request;
1338#if __MigTypeCheck
1339 if (!(In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1340 (In0P->msgh_body.msgh_descriptor_count != 1) ||
1341 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1342 return MIG_BAD_ARGUMENTS;
1343#endif /* __MigTypeCheck */
1344
1345#if __MigTypeCheck
1346 if (In0P->object.type != MACH_MSG_PORT_DESCRIPTOR ||
1347 In0P->object.disposition != 17)
1348 return MIG_TYPE_ERROR;
1349#endif /* __MigTypeCheck */
1350
1351 return MACH_MSG_SUCCESS;
1352}
1353#endif /* !defined(__MIG_check__Request__map_t__defined) */
1354#endif /* __MIG_check__Request__map_subsystem__ */
1355#endif /* ( __MigTypeCheck ) */
1356
1357
1358/* Routine map */
mig_internal novalue _Xmap
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	/* MIG server-side dispatcher for routine "map" (msg id 3812).
	 * The request is complex: it carries a port descriptor
	 * ("object") in addition to the inline scalar arguments.
	 * Calls vm32_map() and returns the (possibly updated) address
	 * in the reply. */

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Expected wire layout of the incoming request message. */
	typedef struct {
		mach_msg_header_t Head;
		/* start of the kernel processed data */
		mach_msg_body_t msgh_body;
		mach_msg_port_descriptor_t object;
		/* end of the kernel processed data */
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		vm32_address_t mask;
		int flags;
		vm32_offset_t offset;
		boolean_t copy;
		vm_prot_t cur_protection;
		vm_prot_t max_protection;
		vm_inherit_t inheritance;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__map_t __Request;
	typedef __Reply__map_t Reply __attribute__((unused));

	/*
	 * Error replies use the generic MIG shape:
	 * typedef struct {
	 *	mach_msg_header_t Head;
	 *	NDR_record_t NDR;
	 *	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__map_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__map_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3812, "map")
	__BeforeRcvRpc(3812, "map")

	/* Reject malformed messages before trusting any payload field. */
#if defined(__MIG_check__Request__map_t__defined)
	check_result = __MIG_check__Request__map_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__map_t__defined) */

	/* Note: this routine uses convert_port_entry_to_map(), unlike
	 * the plain convert_port_to_map() of the sibling routines. */
	target_task = convert_port_entry_to_map(In0P->Head.msgh_request_port);

	/* address is in-out: vm32_map may rewrite it in the request
	 * buffer; the result is copied into the reply below. */
	OutP->RetCode = vm32_map(target_task, &In0P->address, In0P->size, In0P->mask, In0P->flags, null_conversion(In0P->object.name), In0P->offset, In0P->copy, In0P->cur_protection, In0P->max_protection, In0P->inheritance);
	vm_map_deallocate(target_task);
	/* NOTE(review): on failure this returns via MIG_RETURN_ERROR
	 * before the send-right release below — confirm the macro or
	 * the caller disposes of the descriptor's port in that case. */
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode

	/* Drop the send right that arrived in the object descriptor. */
	if (IP_VALID((ipc_port_t)In0P->object.name))
		ipc_port_release_send((ipc_port_t)In0P->object.name);
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	OutP->address = In0P->address;

	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(3812, "map")
}
1439
1440#if ( __MigTypeCheck )
1441#if __MIG_check__Request__map_subsystem__
1442#if !defined(__MIG_check__Request__machine_attribute_t__defined)
1443#define __MIG_check__Request__machine_attribute_t__defined
1444
1445mig_internal kern_return_t __MIG_check__Request__machine_attribute_t(__attribute__((__unused__)) __Request__machine_attribute_t *In0P)
1446{
1447
1448 typedef __Request__machine_attribute_t __Request;
1449#if __MigTypeCheck
1450 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1451 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1452 return MIG_BAD_ARGUMENTS;
1453#endif /* __MigTypeCheck */
1454
1455 return MACH_MSG_SUCCESS;
1456}
1457#endif /* !defined(__MIG_check__Request__machine_attribute_t__defined) */
1458#endif /* __MIG_check__Request__map_subsystem__ */
1459#endif /* ( __MigTypeCheck ) */
1460
1461
1462/* Routine machine_attribute */
mig_internal novalue _Xmachine_attribute
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	/* MIG server-side dispatcher for routine "machine_attribute"
	 * (msg id 3813). Calls vm32_machine_attribute() and returns
	 * the (in-out) attribute value in the reply. */

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Expected wire layout of the incoming request message. */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		vm_machine_attribute_t attribute;
		vm_machine_attribute_val_t value;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__machine_attribute_t __Request;
	typedef __Reply__machine_attribute_t Reply __attribute__((unused));

	/*
	 * Error replies use the generic MIG shape:
	 * typedef struct {
	 *	mach_msg_header_t Head;
	 *	NDR_record_t NDR;
	 *	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__machine_attribute_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__machine_attribute_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3813, "machine_attribute")
	__BeforeRcvRpc(3813, "machine_attribute")

	/* Reject malformed messages before trusting any payload field. */
#if defined(__MIG_check__Request__machine_attribute_t__defined)
	check_result = __MIG_check__Request__machine_attribute_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__machine_attribute_t__defined) */

	/* Resolve the request port to its vm_map; the reference is
	 * dropped immediately after the call. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* value is in-out: updated in the request buffer, then copied
	 * into the reply below. */
	OutP->RetCode = vm32_machine_attribute(target_task, In0P->address, In0P->size, In0P->attribute, &In0P->value);
	vm_map_deallocate(target_task);
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	OutP->value = In0P->value;

	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(3813, "machine_attribute")
}
1531
1532#if ( __MigTypeCheck )
1533#if __MIG_check__Request__map_subsystem__
1534#if !defined(__MIG_check__Request__remap_t__defined)
1535#define __MIG_check__Request__remap_t__defined
1536
1537mig_internal kern_return_t __MIG_check__Request__remap_t(__attribute__((__unused__)) __Request__remap_t *In0P)
1538{
1539
1540 typedef __Request__remap_t __Request;
1541#if __MigTypeCheck
1542 if (!(In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1543 (In0P->msgh_body.msgh_descriptor_count != 1) ||
1544 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1545 return MIG_BAD_ARGUMENTS;
1546#endif /* __MigTypeCheck */
1547
1548#if __MigTypeCheck
1549 if (In0P->src_task.type != MACH_MSG_PORT_DESCRIPTOR ||
1550 In0P->src_task.disposition != 17)
1551 return MIG_TYPE_ERROR;
1552#endif /* __MigTypeCheck */
1553
1554 return MACH_MSG_SUCCESS;
1555}
1556#endif /* !defined(__MIG_check__Request__remap_t__defined) */
1557#endif /* __MIG_check__Request__map_subsystem__ */
1558#endif /* ( __MigTypeCheck ) */
1559
1560
1561/* Routine remap */
mig_internal novalue _Xremap
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	/* MIG server-side dispatcher for routine "remap" (msg id 3814).
	 * The request carries a port descriptor naming the source task.
	 * Calls vm32_remap() and returns target_address plus the
	 * resulting cur/max protections in the reply. */

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Expected wire layout of the incoming request message. */
	typedef struct {
		mach_msg_header_t Head;
		/* start of the kernel processed data */
		mach_msg_body_t msgh_body;
		mach_msg_port_descriptor_t src_task;
		/* end of the kernel processed data */
		NDR_record_t NDR;
		vm32_address_t target_address;
		vm32_size_t size;
		vm32_address_t mask;
		boolean_t anywhere;
		vm32_address_t src_address;
		boolean_t copy;
		vm_inherit_t inheritance;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__remap_t __Request;
	typedef __Reply__remap_t Reply __attribute__((unused));

	/*
	 * Error replies use the generic MIG shape:
	 * typedef struct {
	 *	mach_msg_header_t Head;
	 *	NDR_record_t NDR;
	 *	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__remap_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__remap_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;
	vm_map_t src_task;

	__DeclareRcvRpc(3814, "remap")
	__BeforeRcvRpc(3814, "remap")

	/* Reject malformed messages before trusting any payload field. */
#if defined(__MIG_check__Request__remap_t__defined)
	check_result = __MIG_check__Request__remap_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__remap_t__defined) */

	/* Destination map comes from the request port; source map
	 * comes from the port carried in the src_task descriptor. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	src_task = convert_port_to_map(In0P->src_task.name);

	/* target_address is in-out; cur/max protections are pure
	 * outputs written directly into the reply buffer. */
	OutP->RetCode = vm32_remap(target_task, &In0P->target_address, In0P->size, In0P->mask, In0P->anywhere, src_task, In0P->src_address, In0P->copy, &OutP->cur_protection, &OutP->max_protection, In0P->inheritance);
	vm_map_deallocate(src_task);
	vm_map_deallocate(target_task);
	/* NOTE(review): on failure this returns via MIG_RETURN_ERROR
	 * before the send-right release below — confirm the macro or
	 * the caller disposes of the descriptor's port in that case. */
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode

	/* Drop the send right that arrived in the src_task descriptor. */
	if (IP_VALID((ipc_port_t)In0P->src_task.name))
		ipc_port_release_send((ipc_port_t)In0P->src_task.name);
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	OutP->target_address = In0P->target_address;

	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(3814, "remap")
}
1644
1645#if ( __MigTypeCheck )
1646#if __MIG_check__Request__map_subsystem__
1647#if !defined(__MIG_check__Request___task_wire_t__defined)
1648#define __MIG_check__Request___task_wire_t__defined
1649
1650mig_internal kern_return_t __MIG_check__Request___task_wire_t(__attribute__((__unused__)) __Request___task_wire_t *In0P)
1651{
1652
1653 typedef __Request___task_wire_t __Request;
1654#if __MigTypeCheck
1655 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1656 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1657 return MIG_BAD_ARGUMENTS;
1658#endif /* __MigTypeCheck */
1659
1660 return MACH_MSG_SUCCESS;
1661}
1662#endif /* !defined(__MIG_check__Request___task_wire_t__defined) */
1663#endif /* __MIG_check__Request__map_subsystem__ */
1664#endif /* ( __MigTypeCheck ) */
1665
1666
1667/* Routine _task_wire */
mig_internal novalue _X_task_wire
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	/* MIG server-side dispatcher for routine "_task_wire"
	 * (msg id 3815). Validates the request, calls
	 * vm32__task_wire(), and writes a simple reply. */

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Expected wire layout of the incoming request message. */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		boolean_t must_wire;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request___task_wire_t __Request;
	typedef __Reply___task_wire_t Reply __attribute__((unused));

	/*
	 * The reply is shaped like the generic MIG error reply:
	 * typedef struct {
	 *	mach_msg_header_t Head;
	 *	NDR_record_t NDR;
	 *	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request___task_wire_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request___task_wire_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3815, "_task_wire")
	__BeforeRcvRpc(3815, "_task_wire")

	/* Reject malformed messages before trusting any payload field. */
#if defined(__MIG_check__Request___task_wire_t__defined)
	check_result = __MIG_check__Request___task_wire_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request___task_wire_t__defined) */

	/* Resolve the request port to its vm_map; the reference is
	 * dropped immediately after the call. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	OutP->RetCode = vm32__task_wire(target_task, In0P->must_wire);
	vm_map_deallocate(target_task);
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	__AfterRcvRpc(3815, "_task_wire")
}
1727
1728#if ( __MigTypeCheck )
1729#if __MIG_check__Request__map_subsystem__
1730#if !defined(__MIG_check__Request__make_memory_entry_t__defined)
1731#define __MIG_check__Request__make_memory_entry_t__defined
1732
1733mig_internal kern_return_t __MIG_check__Request__make_memory_entry_t(__attribute__((__unused__)) __Request__make_memory_entry_t *In0P)
1734{
1735
1736 typedef __Request__make_memory_entry_t __Request;
1737#if __MigTypeCheck
1738 if (!(In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1739 (In0P->msgh_body.msgh_descriptor_count != 1) ||
1740 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1741 return MIG_BAD_ARGUMENTS;
1742#endif /* __MigTypeCheck */
1743
1744#if __MigTypeCheck
1745 if (In0P->parent_entry.type != MACH_MSG_PORT_DESCRIPTOR ||
1746 In0P->parent_entry.disposition != 17)
1747 return MIG_TYPE_ERROR;
1748#endif /* __MigTypeCheck */
1749
1750 return MACH_MSG_SUCCESS;
1751}
1752#endif /* !defined(__MIG_check__Request__make_memory_entry_t__defined) */
1753#endif /* __MIG_check__Request__map_subsystem__ */
1754#endif /* ( __MigTypeCheck ) */
1755
1756
1757/* Routine make_memory_entry */
mig_internal novalue _Xmake_memory_entry
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	/* MIG server-side dispatcher for routine "make_memory_entry"
	 * (msg id 3816). The request carries a parent_entry port
	 * descriptor; the reply is complex, carrying the new
	 * object_handle port descriptor plus the (in-out) size. */

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Expected wire layout of the incoming request message. */
	typedef struct {
		mach_msg_header_t Head;
		/* start of the kernel processed data */
		mach_msg_body_t msgh_body;
		mach_msg_port_descriptor_t parent_entry;
		/* end of the kernel processed data */
		NDR_record_t NDR;
		vm32_size_t size;
		vm32_offset_t offset;
		vm_prot_t permission;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__make_memory_entry_t __Request;
	typedef __Reply__make_memory_entry_t Reply __attribute__((unused));

	/*
	 * Error replies use the generic MIG shape:
	 * typedef struct {
	 *	mach_msg_header_t Head;
	 *	NDR_record_t NDR;
	 *	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__make_memory_entry_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__make_memory_entry_t__defined */

	/* Optional static template for the reply port descriptor;
	 * both kernel and non-kernel variants are identical here. */
#if __MigKernelSpecificCode
#if UseStaticTemplates
	const static mach_msg_port_descriptor_t object_handleTemplate = {
		/* name = */ MACH_PORT_NULL,
		/* pad1 = */ 0,
		/* pad2 = */ 0,
		/* disp = */ 17,
		/* type = */ MACH_MSG_PORT_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#else
#if UseStaticTemplates
	const static mach_msg_port_descriptor_t object_handleTemplate = {
		/* name = */ MACH_PORT_NULL,
		/* pad1 = */ 0,
		/* pad2 = */ 0,
		/* disp = */ 17,
		/* type = */ MACH_MSG_PORT_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#endif /* __MigKernelSpecificCode */
	kern_return_t RetCode;
	vm_map_t target_task;
	mem_entry_name_port_t object_handle;

	__DeclareRcvRpc(3816, "make_memory_entry")
	__BeforeRcvRpc(3816, "make_memory_entry")

	/* Reject malformed messages before trusting any payload field. */
#if defined(__MIG_check__Request__make_memory_entry_t__defined)
	check_result = __MIG_check__Request__make_memory_entry_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__make_memory_entry_t__defined) */

	/* Initialize the reply's port descriptor fields (name is
	 * filled in after the call succeeds). */
#if UseStaticTemplates
	OutP->object_handle = object_handleTemplate;
#else /* UseStaticTemplates */
#if __MigKernelSpecificCode
	OutP->object_handle.disposition = 17;
#else
	OutP->object_handle.disposition = 17;
#endif /* __MigKernelSpecificCode */
#if !(defined(KERNEL) && defined(__LP64__))
	OutP->object_handle.pad1 = 0;
#endif
	OutP->object_handle.pad2 = 0;
	OutP->object_handle.type = MACH_MSG_PORT_DESCRIPTOR;
#if defined(KERNEL)
	OutP->object_handle.pad_end = 0;
#endif
#endif /* UseStaticTemplates */


	/* Resolve the request port to its vm_map; the reference is
	 * dropped immediately after the call. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* size is in-out: updated in the request buffer, then copied
	 * into the reply below. */
	RetCode = vm32_make_memory_entry(target_task, &In0P->size, In0P->offset, In0P->permission, &object_handle, null_conversion(In0P->parent_entry.name));
	vm_map_deallocate(target_task);
	if (RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, RetCode);
	}
#if __MigKernelSpecificCode

	/* Drop the send right that arrived in the parent_entry
	 * descriptor. */
	if (IP_VALID((ipc_port_t)In0P->parent_entry.name))
		ipc_port_release_send((ipc_port_t)In0P->parent_entry.name);
#endif /* __MigKernelSpecificCode */
	OutP->object_handle.name = (mach_port_t)null_conversion(object_handle);


	OutP->NDR = NDR_record;


	OutP->size = In0P->size;

	/* The reply carries one port descriptor, so mark it complex. */
	OutP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	OutP->msgh_body.msgh_descriptor_count = 1;
	__AfterRcvRpc(3816, "make_memory_entry")
}
1877
1878#if ( __MigTypeCheck )
1879#if __MIG_check__Request__map_subsystem__
1880#if !defined(__MIG_check__Request__map_page_query_t__defined)
1881#define __MIG_check__Request__map_page_query_t__defined
1882
1883mig_internal kern_return_t __MIG_check__Request__map_page_query_t(__attribute__((__unused__)) __Request__map_page_query_t *In0P)
1884{
1885
1886 typedef __Request__map_page_query_t __Request;
1887#if __MigTypeCheck
1888 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1889 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1890 return MIG_BAD_ARGUMENTS;
1891#endif /* __MigTypeCheck */
1892
1893 return MACH_MSG_SUCCESS;
1894}
1895#endif /* !defined(__MIG_check__Request__map_page_query_t__defined) */
1896#endif /* __MIG_check__Request__map_subsystem__ */
1897#endif /* ( __MigTypeCheck ) */
1898
1899
1900/* Routine map_page_query */
mig_internal novalue _Xmap_page_query
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	/* MIG server-side dispatcher for routine "map_page_query"
	 * (msg id 3817). Calls vm32_map_page_query() and returns the
	 * page disposition and ref_count in the reply. */

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Expected wire layout of the incoming request message. */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_offset_t offset;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__map_page_query_t __Request;
	typedef __Reply__map_page_query_t Reply __attribute__((unused));

	/*
	 * Error replies use the generic MIG shape:
	 * typedef struct {
	 *	mach_msg_header_t Head;
	 *	NDR_record_t NDR;
	 *	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__map_page_query_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__map_page_query_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_map;

	__DeclareRcvRpc(3817, "map_page_query")
	__BeforeRcvRpc(3817, "map_page_query")

	/* Reject malformed messages before trusting any payload field. */
#if defined(__MIG_check__Request__map_page_query_t__defined)
	check_result = __MIG_check__Request__map_page_query_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__map_page_query_t__defined) */

	/* Resolve the request port to its vm_map; the reference is
	 * dropped immediately after the call. */
	target_map = convert_port_to_map(In0P->Head.msgh_request_port);

	/* disposition and ref_count are written straight into the
	 * reply buffer. */
	OutP->RetCode = vm32_map_page_query(target_map, In0P->offset, &OutP->disposition, &OutP->ref_count);
	vm_map_deallocate(target_map);
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(3817, "map_page_query")
}
1964
1965#if ( __MigTypeCheck )
1966#if __MIG_check__Request__map_subsystem__
1967#if !defined(__MIG_check__Request__region_info_t__defined)
1968#define __MIG_check__Request__region_info_t__defined
1969
1970mig_internal kern_return_t __MIG_check__Request__region_info_t(__attribute__((__unused__)) __Request__region_info_t *In0P)
1971{
1972
1973 typedef __Request__region_info_t __Request;
1974#if __MigTypeCheck
1975 if ((In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) ||
1976 (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request)))
1977 return MIG_BAD_ARGUMENTS;
1978#endif /* __MigTypeCheck */
1979
1980 return MACH_MSG_SUCCESS;
1981}
1982#endif /* !defined(__MIG_check__Request__region_info_t__defined) */
1983#endif /* __MIG_check__Request__map_subsystem__ */
1984#endif /* ( __MigTypeCheck ) */
1985
1986
1987/* Routine region_info */
mig_internal novalue _Xregion_info
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	/* MIG server-side dispatcher for routine "region_info"
	 * (msg id 3818). The reply is complex: it carries the region
	 * record inline plus an out-of-line (OOL) array of object
	 * info records. */

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Expected wire layout of the incoming request message. */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__region_info_t __Request;
	typedef __Reply__region_info_t Reply __attribute__((unused));

	/*
	 * Error replies use the generic MIG shape:
	 * typedef struct {
	 *	mach_msg_header_t Head;
	 *	NDR_record_t NDR;
	 *	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__region_info_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__region_info_t__defined */

	/* Optional static template for the reply OOL descriptor;
	 * both kernel and non-kernel variants are identical here. */
#if __MigKernelSpecificCode
#if UseStaticTemplates
	const static mach_msg_ool_descriptor_t objectsTemplate = {
		/* addr = */ (void *)0,
		/* size = */ 0,
		/* deal = */ FALSE,
		/* copy = */ MACH_MSG_VIRTUAL_COPY,
		/* pad2 = */ 0,
		/* type = */ MACH_MSG_OOL_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#else
#if UseStaticTemplates
	const static mach_msg_ool_descriptor_t objectsTemplate = {
		/* addr = */ (void *)0,
		/* size = */ 0,
		/* deal = */ FALSE,
		/* copy = */ MACH_MSG_VIRTUAL_COPY,
		/* pad2 = */ 0,
		/* type = */ MACH_MSG_OOL_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#endif /* __MigKernelSpecificCode */
	kern_return_t RetCode;
	vm_map_t task;

	__DeclareRcvRpc(3818, "region_info")
	__BeforeRcvRpc(3818, "region_info")

	/* Reject malformed messages before trusting any payload field. */
#if defined(__MIG_check__Request__region_info_t__defined)
	check_result = __MIG_check__Request__region_info_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__region_info_t__defined) */

	/* Initialize the reply's OOL descriptor (address and size are
	 * filled in after the call succeeds). */
#if UseStaticTemplates
	OutP->objects = objectsTemplate;
#else /* UseStaticTemplates */
	OutP->objects.deallocate = FALSE;
	OutP->objects.copy = MACH_MSG_VIRTUAL_COPY;
	OutP->objects.pad1 = 0;
	OutP->objects.type = MACH_MSG_OOL_DESCRIPTOR;
#if defined(KERNEL) && !defined(__LP64__)
	OutP->objects.pad_end = 0;
#endif
#endif /* UseStaticTemplates */


	/* Resolve the request port to its vm_map; the reference is
	 * dropped immediately after the call. */
	task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* The OOL buffer address and element count are produced by
	 * the callee directly into the reply. */
	RetCode = vm32_region_info(task, In0P->address, &OutP->region, (vm_info_object_array_t *)&(OutP->objects.address), &OutP->objectsCnt);
	vm_map_deallocate(task);
	if (RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */
	/* 84 is presumably the wire size in bytes of one array element
	 * as computed by the stub generator — verify against the
	 * vm_info_object_t definition if this is ever touched. */
	OutP->objects.size = OutP->objectsCnt * 84;


	OutP->NDR = NDR_record;


	/* The reply carries one OOL descriptor, so mark it complex. */
	OutP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	OutP->msgh_body.msgh_descriptor_count = 1;
	__AfterRcvRpc(3818, "region_info")
}
2091
#if ( __MigTypeCheck )
#if __MIG_check__Request__map_subsystem__
#if !defined(__MIG_check__Request__mapped_pages_info_t__defined)
#define __MIG_check__Request__mapped_pages_info_t__defined

/*
 * Validate an inbound mapped_pages_info request: it must be a simple
 * (non-complex) message of exactly the expected wire size.
 */
mig_internal kern_return_t __MIG_check__Request__mapped_pages_info_t(__attribute__((__unused__)) __Request__mapped_pages_info_t *In0P)
{

	typedef __Request__mapped_pages_info_t __Request;
#if __MigTypeCheck
	/* A complex message cannot be this request. */
	if (In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
		return MIG_BAD_ARGUMENTS;
	/* The header size must match the request layout exactly. */
	if (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__mapped_pages_info_t__defined) */
#endif /* __MIG_check__Request__map_subsystem__ */
#endif /* ( __MigTypeCheck ) */
2112
2113
/* Routine mapped_pages_info */
/*
 * MIG server-side demux stub for msg id 3819 (vm32 map subsystem).
 * Resolves the request port to a vm_map, calls vm32_mapped_pages_info(),
 * and returns the page address array out-of-line via an OOL descriptor.
 */
mig_internal novalue _Xmapped_pages_info
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Wire layout of the incoming request: header only, no arguments. */
	typedef struct {
		mach_msg_header_t Head;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__mapped_pages_info_t __Request;
	typedef __Reply__mapped_pages_info_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__mapped_pages_info_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__mapped_pages_info_t__defined */

#if __MigKernelSpecificCode
#if UseStaticTemplates
	/* Prebuilt template for the out-of-line "pages" reply descriptor. */
	const static mach_msg_ool_descriptor_t pagesTemplate = {
		/* addr = */ (void *)0,
		/* size = */ 0,
		/* deal = */ FALSE,
		/* copy = */ MACH_MSG_VIRTUAL_COPY,
		/* pad2 = */ 0,
		/* type = */ MACH_MSG_OOL_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#else
#if UseStaticTemplates
	const static mach_msg_ool_descriptor_t pagesTemplate = {
		/* addr = */ (void *)0,
		/* size = */ 0,
		/* deal = */ FALSE,
		/* copy = */ MACH_MSG_VIRTUAL_COPY,
		/* pad2 = */ 0,
		/* type = */ MACH_MSG_OOL_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#endif /* __MigKernelSpecificCode */
	kern_return_t RetCode;
	vm_map_t task;

	__DeclareRcvRpc(3819, "mapped_pages_info")
	__BeforeRcvRpc(3819, "mapped_pages_info")

	/* Reject malformed requests before touching any payload. */
#if defined(__MIG_check__Request__mapped_pages_info_t__defined)
	check_result = __MIG_check__Request__mapped_pages_info_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__mapped_pages_info_t__defined) */

	/* Initialize the OOL descriptor (address/size filled after the call). */
#if UseStaticTemplates
	OutP->pages = pagesTemplate;
#else /* UseStaticTemplates */
	OutP->pages.deallocate = FALSE;
	OutP->pages.copy = MACH_MSG_VIRTUAL_COPY;
	OutP->pages.pad1 = 0;
	OutP->pages.type = MACH_MSG_OOL_DESCRIPTOR;
#if defined(KERNEL) && !defined(__LP64__)
	OutP->pages.pad_end = 0;
#endif
#endif /* UseStaticTemplates */


	/* Holds a map reference; paired with vm_map_deallocate below. */
	task = convert_port_to_map(In0P->Head.msgh_request_port);

	RetCode = vm32_mapped_pages_info(task, (page_address_array_t *)&(OutP->pages.address), &OutP->pagesCnt);
	vm_map_deallocate(task);
	if (RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */
	/* 4 is the MIG-computed wire size of one page address element. */
	OutP->pages.size = OutP->pagesCnt * 4;


	OutP->NDR = NDR_record;


	/* Reply carries a descriptor, so mark it complex. */
	OutP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	OutP->msgh_body.msgh_descriptor_count = 1;
	__AfterRcvRpc(3819, "mapped_pages_info")
}
2216
#if ( __MigTypeCheck )
#if __MIG_check__Request__map_subsystem__
#if !defined(__MIG_check__Request__region_recurse_t__defined)
#define __MIG_check__Request__region_recurse_t__defined

/*
 * Validate an inbound region_recurse request: it must be a simple
 * (non-complex) message of exactly the expected wire size.
 */
mig_internal kern_return_t __MIG_check__Request__region_recurse_t(__attribute__((__unused__)) __Request__region_recurse_t *In0P)
{

	typedef __Request__region_recurse_t __Request;
#if __MigTypeCheck
	/* A complex message cannot be this request. */
	if (In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
		return MIG_BAD_ARGUMENTS;
	/* The header size must match the request layout exactly. */
	if (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__region_recurse_t__defined) */
#endif /* __MIG_check__Request__map_subsystem__ */
#endif /* ( __MigTypeCheck ) */
2237
2238
/* Routine region_recurse */
/*
 * MIG server-side demux stub for msg id 3821 (vm32 map subsystem).
 * Calls vm32_region_recurse() with address/nesting_depth as in-out
 * parameters and returns the variable-length info array inline,
 * trimming the reply size to the count actually produced.
 */
mig_internal novalue _Xregion_recurse
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Wire layout of the incoming request (4-byte packed). */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		natural_t nesting_depth;
		mach_msg_type_number_t infoCnt;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__region_recurse_t __Request;
	typedef __Reply__region_recurse_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__region_recurse_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__region_recurse_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3821, "region_recurse")
	__BeforeRcvRpc(3821, "region_recurse")

	/* Reject malformed requests before touching any payload. */
#if defined(__MIG_check__Request__region_recurse_t__defined)
	check_result = __MIG_check__Request__region_recurse_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__region_recurse_t__defined) */

	/* Holds a map reference; paired with vm_map_deallocate below. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* Clamp output count to min(19, caller-supplied infoCnt); 19 is the
	 * MIG-computed maximum element count for the info array. */
	OutP->infoCnt = 19;
	if (In0P->infoCnt < OutP->infoCnt)
		OutP->infoCnt = In0P->infoCnt;

	/* address and nesting_depth are updated in place by the call. */
	OutP->RetCode = vm32_region_recurse(target_task, &In0P->address, &OutP->size, &In0P->nesting_depth, OutP->info, &OutP->infoCnt);
	vm_map_deallocate(target_task);
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	/* Copy back the in-out values updated by the call. */
	OutP->address = In0P->address;

	OutP->nesting_depth = In0P->nesting_depth;
	/* 76 = MIG-computed size of the unused tail of the fixed-size info
	 * array; the reply is shrunk to 4 bytes per element actually used. */
	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply) - 76) + (((4 * OutP->infoCnt)));

	__AfterRcvRpc(3821, "region_recurse")
}
2313
#if ( __MigTypeCheck )
#if __MIG_check__Request__map_subsystem__
#if !defined(__MIG_check__Request__region_recurse_64_t__defined)
#define __MIG_check__Request__region_recurse_64_t__defined

/*
 * Validate an inbound region_recurse_64 request: it must be a simple
 * (non-complex) message of exactly the expected wire size.
 */
mig_internal kern_return_t __MIG_check__Request__region_recurse_64_t(__attribute__((__unused__)) __Request__region_recurse_64_t *In0P)
{

	typedef __Request__region_recurse_64_t __Request;
#if __MigTypeCheck
	/* A complex message cannot be this request. */
	if (In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
		return MIG_BAD_ARGUMENTS;
	/* The header size must match the request layout exactly. */
	if (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__region_recurse_64_t__defined) */
#endif /* __MIG_check__Request__map_subsystem__ */
#endif /* ( __MigTypeCheck ) */
2334
2335
/* Routine region_recurse_64 */
/*
 * MIG server-side demux stub for msg id 3822 (vm32 map subsystem).
 * Identical in shape to _Xregion_recurse but dispatches to the 64-bit
 * variant vm32_region_recurse_64(); address/nesting_depth are in-out
 * and the reply is trimmed to the produced info element count.
 */
mig_internal novalue _Xregion_recurse_64
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Wire layout of the incoming request (4-byte packed). */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		natural_t nesting_depth;
		mach_msg_type_number_t infoCnt;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__region_recurse_64_t __Request;
	typedef __Reply__region_recurse_64_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__region_recurse_64_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__region_recurse_64_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3822, "region_recurse_64")
	__BeforeRcvRpc(3822, "region_recurse_64")

	/* Reject malformed requests before touching any payload. */
#if defined(__MIG_check__Request__region_recurse_64_t__defined)
	check_result = __MIG_check__Request__region_recurse_64_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__region_recurse_64_t__defined) */

	/* Holds a map reference; paired with vm_map_deallocate below. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* Clamp output count to min(19, caller-supplied infoCnt); 19 is the
	 * MIG-computed maximum element count for the info array. */
	OutP->infoCnt = 19;
	if (In0P->infoCnt < OutP->infoCnt)
		OutP->infoCnt = In0P->infoCnt;

	/* address and nesting_depth are updated in place by the call. */
	OutP->RetCode = vm32_region_recurse_64(target_task, &In0P->address, &OutP->size, &In0P->nesting_depth, OutP->info, &OutP->infoCnt);
	vm_map_deallocate(target_task);
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	/* Copy back the in-out values updated by the call. */
	OutP->address = In0P->address;

	OutP->nesting_depth = In0P->nesting_depth;
	/* 76 = MIG-computed size of the unused tail of the fixed-size info
	 * array; the reply is shrunk to 4 bytes per element actually used. */
	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply) - 76) + (((4 * OutP->infoCnt)));

	__AfterRcvRpc(3822, "region_recurse_64")
}
2410
#if ( __MigTypeCheck )
#if __MIG_check__Request__map_subsystem__
#if !defined(__MIG_check__Request__region_info_64_t__defined)
#define __MIG_check__Request__region_info_64_t__defined

/*
 * Validate an inbound region_info_64 request: it must be a simple
 * (non-complex) message of exactly the expected wire size.
 */
mig_internal kern_return_t __MIG_check__Request__region_info_64_t(__attribute__((__unused__)) __Request__region_info_64_t *In0P)
{

	typedef __Request__region_info_64_t __Request;
#if __MigTypeCheck
	/* A complex message cannot be this request. */
	if (In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
		return MIG_BAD_ARGUMENTS;
	/* The header size must match the request layout exactly. */
	if (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__region_info_64_t__defined) */
#endif /* __MIG_check__Request__map_subsystem__ */
#endif /* ( __MigTypeCheck ) */
2431
2432
/* Routine region_info_64 */
/*
 * MIG server-side demux stub for msg id 3823 (vm32 map subsystem).
 * Identical in shape to _Xregion_info but dispatches to the 64-bit
 * variant vm32_region_info_64(); the object info array is returned
 * out-of-line through an OOL descriptor.
 */
mig_internal novalue _Xregion_info_64
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Wire layout of the incoming request (4-byte packed). */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__region_info_64_t __Request;
	typedef __Reply__region_info_64_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__region_info_64_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__region_info_64_t__defined */

#if __MigKernelSpecificCode
#if UseStaticTemplates
	/* Prebuilt template for the out-of-line "objects" reply descriptor. */
	const static mach_msg_ool_descriptor_t objectsTemplate = {
		/* addr = */ (void *)0,
		/* size = */ 0,
		/* deal = */ FALSE,
		/* copy = */ MACH_MSG_VIRTUAL_COPY,
		/* pad2 = */ 0,
		/* type = */ MACH_MSG_OOL_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#else
#if UseStaticTemplates
	const static mach_msg_ool_descriptor_t objectsTemplate = {
		/* addr = */ (void *)0,
		/* size = */ 0,
		/* deal = */ FALSE,
		/* copy = */ MACH_MSG_VIRTUAL_COPY,
		/* pad2 = */ 0,
		/* type = */ MACH_MSG_OOL_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#endif /* __MigKernelSpecificCode */
	kern_return_t RetCode;
	vm_map_t task;

	__DeclareRcvRpc(3823, "region_info_64")
	__BeforeRcvRpc(3823, "region_info_64")

	/* Reject malformed requests before touching any payload. */
#if defined(__MIG_check__Request__region_info_64_t__defined)
	check_result = __MIG_check__Request__region_info_64_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__region_info_64_t__defined) */

	/* Initialize the OOL descriptor (address/size filled after the call). */
#if UseStaticTemplates
	OutP->objects = objectsTemplate;
#else /* UseStaticTemplates */
	OutP->objects.deallocate = FALSE;
	OutP->objects.copy = MACH_MSG_VIRTUAL_COPY;
	OutP->objects.pad1 = 0;
	OutP->objects.type = MACH_MSG_OOL_DESCRIPTOR;
#if defined(KERNEL) && !defined(__LP64__)
	OutP->objects.pad_end = 0;
#endif
#endif /* UseStaticTemplates */


	/* Holds a map reference; paired with vm_map_deallocate below. */
	task = convert_port_to_map(In0P->Head.msgh_request_port);

	RetCode = vm32_region_info_64(task, In0P->address, &OutP->region, (vm_info_object_array_t *)&(OutP->objects.address), &OutP->objectsCnt);
	vm_map_deallocate(task);
	if (RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */
	/* 84 is the MIG-computed wire size of one vm_info_object element. */
	OutP->objects.size = OutP->objectsCnt * 84;


	OutP->NDR = NDR_record;


	/* Reply carries a descriptor, so mark it complex. */
	OutP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	OutP->msgh_body.msgh_descriptor_count = 1;
	__AfterRcvRpc(3823, "region_info_64")
}
2537
#if ( __MigTypeCheck )
#if __MIG_check__Request__map_subsystem__
#if !defined(__MIG_check__Request__region_64_t__defined)
#define __MIG_check__Request__region_64_t__defined

/*
 * Validate an inbound region_64 request: it must be a simple
 * (non-complex) message of exactly the expected wire size.
 */
mig_internal kern_return_t __MIG_check__Request__region_64_t(__attribute__((__unused__)) __Request__region_64_t *In0P)
{

	typedef __Request__region_64_t __Request;
#if __MigTypeCheck
	/* A complex message cannot be this request. */
	if (In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
		return MIG_BAD_ARGUMENTS;
	/* The header size must match the request layout exactly. */
	if (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__region_64_t__defined) */
#endif /* __MIG_check__Request__map_subsystem__ */
#endif /* ( __MigTypeCheck ) */
2558
2559
/* Routine region_64 */
/*
 * MIG server-side demux stub for msg id 3824 (vm32 map subsystem).
 * Calls vm32_region_64() and builds a complex reply carrying the
 * variable-length info array inline plus the region's object name as a
 * port descriptor.
 */
mig_internal novalue _Xregion_64
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Wire layout of the incoming request (4-byte packed). */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm_region_flavor_t flavor;
		mach_msg_type_number_t infoCnt;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__region_64_t __Request;
	typedef __Reply__region_64_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__region_64_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__region_64_t__defined */

#if __MigKernelSpecificCode
#if UseStaticTemplates
	/* Template for the "object_name" port descriptor in the reply;
	 * disposition 17 is MACH_MSG_TYPE_MOVE_SEND per the Mach headers —
	 * NOTE(review): confirm against <mach/message.h>. */
	const static mach_msg_port_descriptor_t object_nameTemplate = {
		/* name = */ MACH_PORT_NULL,
		/* pad1 = */ 0,
		/* pad2 = */ 0,
		/* disp = */ 17,
		/* type = */ MACH_MSG_PORT_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#else
#if UseStaticTemplates
	const static mach_msg_port_descriptor_t object_nameTemplate = {
		/* name = */ MACH_PORT_NULL,
		/* pad1 = */ 0,
		/* pad2 = */ 0,
		/* disp = */ 17,
		/* type = */ MACH_MSG_PORT_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#endif /* __MigKernelSpecificCode */
	kern_return_t RetCode;
	vm_map_t target_task;

	__DeclareRcvRpc(3824, "region_64")
	__BeforeRcvRpc(3824, "region_64")

	/* Reject malformed requests before touching any payload. */
#if defined(__MIG_check__Request__region_64_t__defined)
	check_result = __MIG_check__Request__region_64_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__region_64_t__defined) */

	/* Initialize the port descriptor (name filled in by the call below). */
#if UseStaticTemplates
	OutP->object_name = object_nameTemplate;
#else /* UseStaticTemplates */
#if __MigKernelSpecificCode
	OutP->object_name.disposition = 17;
#else
	OutP->object_name.disposition = 17;
#endif /* __MigKernelSpecificCode */
#if !(defined(KERNEL) && defined(__LP64__))
	OutP->object_name.pad1 = 0;
#endif
	OutP->object_name.pad2 = 0;
	OutP->object_name.type = MACH_MSG_PORT_DESCRIPTOR;
#if defined(KERNEL)
	OutP->object_name.pad_end = 0;
#endif
#endif /* UseStaticTemplates */


	/* Holds a map reference; paired with vm_map_deallocate below. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* Clamp output count to min(10, caller-supplied infoCnt); 10 is the
	 * MIG-computed maximum element count for the info array. */
	OutP->infoCnt = 10;
	if (In0P->infoCnt < OutP->infoCnt)
		OutP->infoCnt = In0P->infoCnt;

	/* address is in-out; the object name port lands in the descriptor. */
	RetCode = vm32_region_64(target_task, &In0P->address, &OutP->size, In0P->flavor, OutP->info, &OutP->infoCnt, &OutP->object_name.name);
	vm_map_deallocate(target_task);
	if (RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	/* Copy back the in-out address updated by the call. */
	OutP->address = In0P->address;
	/* 40 = MIG-computed size of the unused tail of the fixed-size info
	 * array; the reply is shrunk to 4 bytes per element actually used. */
	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply) - 40) + (((4 * OutP->infoCnt)));

	/* Reply carries a port descriptor, so mark it complex. */
	OutP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
	OutP->msgh_body.msgh_descriptor_count = 1;
	__AfterRcvRpc(3824, "region_64")
}
2674
#if ( __MigTypeCheck )
#if __MIG_check__Request__map_subsystem__
#if !defined(__MIG_check__Request__make_memory_entry_64_t__defined)
#define __MIG_check__Request__make_memory_entry_64_t__defined

/*
 * Validate an inbound make_memory_entry_64 request: it must be a complex
 * message of exactly the expected size carrying a single port descriptor
 * (the parent entry) with the expected type and disposition.
 */
mig_internal kern_return_t __MIG_check__Request__make_memory_entry_64_t(__attribute__((__unused__)) __Request__make_memory_entry_64_t *In0P)
{

	typedef __Request__make_memory_entry_64_t __Request;
#if __MigTypeCheck
	/* Header checks: complex bit set, one descriptor, exact size. */
	if (!(In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX))
		return MIG_BAD_ARGUMENTS;
	if (In0P->msgh_body.msgh_descriptor_count != 1)
		return MIG_BAD_ARGUMENTS;
	if (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

#if __MigTypeCheck
	/* Descriptor checks: must be a port with disposition 17. */
	if (In0P->parent_entry.type != MACH_MSG_PORT_DESCRIPTOR)
		return MIG_TYPE_ERROR;
	if (In0P->parent_entry.disposition != 17)
		return MIG_TYPE_ERROR;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__make_memory_entry_64_t__defined) */
#endif /* __MIG_check__Request__map_subsystem__ */
#endif /* ( __MigTypeCheck ) */
2702
2703
/* Routine make_memory_entry_64 */
/*
 * MIG server-side demux stub for msg id 3825 (vm32 map subsystem).
 * Calls vm32_make_memory_entry_64() and returns the new entry handle as
 * a port descriptor.  On success the send right carried in the request's
 * parent_entry descriptor is released (it was consumed by the message).
 */
mig_internal novalue _Xmake_memory_entry_64
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Wire layout of the incoming request (4-byte packed). */
	typedef struct {
		mach_msg_header_t Head;
		/* start of the kernel processed data */
		mach_msg_body_t msgh_body;
		mach_msg_port_descriptor_t parent_entry;
		/* end of the kernel processed data */
		NDR_record_t NDR;
		memory_object_size_t size;
		memory_object_offset_t offset;
		vm_prot_t permission;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__make_memory_entry_64_t __Request;
	typedef __Reply__make_memory_entry_64_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__make_memory_entry_64_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__make_memory_entry_64_t__defined */

#if __MigKernelSpecificCode
#if UseStaticTemplates
	/* Template for the "object_handle" port descriptor in the reply. */
	const static mach_msg_port_descriptor_t object_handleTemplate = {
		/* name = */ MACH_PORT_NULL,
		/* pad1 = */ 0,
		/* pad2 = */ 0,
		/* disp = */ 17,
		/* type = */ MACH_MSG_PORT_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#else
#if UseStaticTemplates
	const static mach_msg_port_descriptor_t object_handleTemplate = {
		/* name = */ MACH_PORT_NULL,
		/* pad1 = */ 0,
		/* pad2 = */ 0,
		/* disp = */ 17,
		/* type = */ MACH_MSG_PORT_DESCRIPTOR,
	};
#endif /* UseStaticTemplates */

#endif /* __MigKernelSpecificCode */
	kern_return_t RetCode;
	vm_map_t target_task;

	__DeclareRcvRpc(3825, "make_memory_entry_64")
	__BeforeRcvRpc(3825, "make_memory_entry_64")

	/* Reject malformed requests (size/descriptor checks) up front. */
#if defined(__MIG_check__Request__make_memory_entry_64_t__defined)
	check_result = __MIG_check__Request__make_memory_entry_64_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__make_memory_entry_64_t__defined) */

	/* Initialize the reply port descriptor (name filled by the call). */
#if UseStaticTemplates
	OutP->object_handle = object_handleTemplate;
#else /* UseStaticTemplates */
#if __MigKernelSpecificCode
	OutP->object_handle.disposition = 17;
#else
	OutP->object_handle.disposition = 17;
#endif /* __MigKernelSpecificCode */
#if !(defined(KERNEL) && defined(__LP64__))
	OutP->object_handle.pad1 = 0;
#endif
	OutP->object_handle.pad2 = 0;
	OutP->object_handle.type = MACH_MSG_PORT_DESCRIPTOR;
#if defined(KERNEL)
	OutP->object_handle.pad_end = 0;
#endif
#endif /* UseStaticTemplates */


	/* Holds a map reference; paired with vm_map_deallocate below. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* size is in-out; parent port passes through null_conversion(). */
	RetCode = vm32_make_memory_entry_64(target_task, &In0P->size, In0P->offset, In0P->permission, &OutP->object_handle.name, null_conversion(In0P->parent_entry.name));
	vm_map_deallocate(target_task);
	if (RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, RetCode);
	}
#if __MigKernelSpecificCode

	/* Success: drop the send right the request carried for parent_entry. */
	if (IP_VALID((ipc_port_t)In0P->parent_entry.name))
		ipc_port_release_send((ipc_port_t)In0P->parent_entry.name);
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	/* Copy back the in-out size updated by the call. */
	OutP->size = In0P->size;

	/* Reply carries a port descriptor, so mark it complex. */
	OutP->Head.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	OutP->msgh_body.msgh_descriptor_count = 1;
	__AfterRcvRpc(3825, "make_memory_entry_64")
}
2821
#if ( __MigTypeCheck )
#if __MIG_check__Request__map_subsystem__
#if !defined(__MIG_check__Request__map_64_t__defined)
#define __MIG_check__Request__map_64_t__defined

/*
 * Validate an inbound map_64 request: it must be a complex message of
 * exactly the expected size carrying a single port descriptor (the
 * memory object) with the expected type and disposition.
 */
mig_internal kern_return_t __MIG_check__Request__map_64_t(__attribute__((__unused__)) __Request__map_64_t *In0P)
{

	typedef __Request__map_64_t __Request;
#if __MigTypeCheck
	/* Header checks: complex bit set, one descriptor, exact size. */
	if (!(In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX))
		return MIG_BAD_ARGUMENTS;
	if (In0P->msgh_body.msgh_descriptor_count != 1)
		return MIG_BAD_ARGUMENTS;
	if (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

#if __MigTypeCheck
	/* Descriptor checks: must be a port with disposition 17. */
	if (In0P->object.type != MACH_MSG_PORT_DESCRIPTOR)
		return MIG_TYPE_ERROR;
	if (In0P->object.disposition != 17)
		return MIG_TYPE_ERROR;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__map_64_t__defined) */
#endif /* __MIG_check__Request__map_subsystem__ */
#endif /* ( __MigTypeCheck ) */
2849
2850
/* Routine map_64 */
/*
 * MIG server-side demux stub for msg id 3826 (vm32 map subsystem).
 * Calls vm32_map_64() to map the memory object named by the request's
 * port descriptor into the target map; address is in-out.  On success
 * the send right carried for the object is released.
 */
mig_internal novalue _Xmap_64
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Wire layout of the incoming request (4-byte packed). */
	typedef struct {
		mach_msg_header_t Head;
		/* start of the kernel processed data */
		mach_msg_body_t msgh_body;
		mach_msg_port_descriptor_t object;
		/* end of the kernel processed data */
		NDR_record_t NDR;
		vm32_address_t address;
		vm32_size_t size;
		vm32_address_t mask;
		int flags;
		memory_object_offset_t offset;
		boolean_t copy;
		vm_prot_t cur_protection;
		vm_prot_t max_protection;
		vm_inherit_t inheritance;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__map_64_t __Request;
	typedef __Reply__map_64_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__map_64_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__map_64_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3826, "map_64")
	__BeforeRcvRpc(3826, "map_64")

	/* Reject malformed requests (size/descriptor checks) up front. */
#if defined(__MIG_check__Request__map_64_t__defined)
	check_result = __MIG_check__Request__map_64_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__map_64_t__defined) */

	/* NOTE: uses convert_port_entry_to_map (not convert_port_to_map);
	 * reference released by vm_map_deallocate below. */
	target_task = convert_port_entry_to_map(In0P->Head.msgh_request_port);

	/* address is in-out; the object port passes through null_conversion(). */
	OutP->RetCode = vm32_map_64(target_task, &In0P->address, In0P->size, In0P->mask, In0P->flags, null_conversion(In0P->object.name), In0P->offset, In0P->copy, In0P->cur_protection, In0P->max_protection, In0P->inheritance);
	vm_map_deallocate(target_task);
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode

	/* Success: drop the send right the request carried for the object. */
	if (IP_VALID((ipc_port_t)In0P->object.name))
		ipc_port_release_send((ipc_port_t)In0P->object.name);
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	/* Copy back the in-out address updated by the call. */
	OutP->address = In0P->address;

	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(3826, "map_64")
}
2932
#if ( __MigTypeCheck )
#if __MIG_check__Request__map_subsystem__
#if !defined(__MIG_check__Request__purgable_control_t__defined)
#define __MIG_check__Request__purgable_control_t__defined

/*
 * Validate an inbound purgable_control request: it must be a simple
 * (non-complex) message of exactly the expected wire size.
 */
mig_internal kern_return_t __MIG_check__Request__purgable_control_t(__attribute__((__unused__)) __Request__purgable_control_t *In0P)
{

	typedef __Request__purgable_control_t __Request;
#if __MigTypeCheck
	/* A complex message cannot be this request. */
	if (In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
		return MIG_BAD_ARGUMENTS;
	/* The header size must match the request layout exactly. */
	if (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request__purgable_control_t__defined) */
#endif /* __MIG_check__Request__map_subsystem__ */
#endif /* ( __MigTypeCheck ) */
2953
2954
/* Routine purgable_control */
/*
 * MIG server-side demux stub for msg id 3830 (vm32 map subsystem).
 * Calls vm32_purgable_control() on the map named by the request port;
 * the "state" argument is in-out and is echoed back in the reply.
 */
mig_internal novalue _Xpurgable_control
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{

#ifdef __MigPackStructs
#pragma pack(4)
#endif
	/* Wire layout of the incoming request (4-byte packed). */
	typedef struct {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		vm32_address_t address;
		vm_purgable_t control;
		int state;
		mach_msg_trailer_t trailer;
	} Request __attribute__((unused));
#ifdef __MigPackStructs
#pragma pack()
#endif
	typedef __Request__purgable_control_t __Request;
	typedef __Reply__purgable_control_t Reply __attribute__((unused));

	/*
	 * typedef struct {
	 * 	mach_msg_header_t Head;
	 * 	NDR_record_t NDR;
	 * 	kern_return_t RetCode;
	 * } mig_reply_error_t;
	 */

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
#ifdef __MIG_check__Request__purgable_control_t__defined
	kern_return_t check_result;
#endif /* __MIG_check__Request__purgable_control_t__defined */

#if __MigKernelSpecificCode
#else
#endif /* __MigKernelSpecificCode */
	vm_map_t target_task;

	__DeclareRcvRpc(3830, "purgable_control")
	__BeforeRcvRpc(3830, "purgable_control")

	/* Reject malformed requests before touching any payload. */
#if defined(__MIG_check__Request__purgable_control_t__defined)
	check_result = __MIG_check__Request__purgable_control_t((__Request *)In0P);
	if (check_result != MACH_MSG_SUCCESS)
		{ MIG_RETURN_ERROR(OutP, check_result); }
#endif /* defined(__MIG_check__Request__purgable_control_t__defined) */

	/* Holds a map reference; paired with vm_map_deallocate below. */
	target_task = convert_port_to_map(In0P->Head.msgh_request_port);

	/* state is updated in place by the call. */
	OutP->RetCode = vm32_purgable_control(target_task, In0P->address, In0P->control, &In0P->state);
	vm_map_deallocate(target_task);
	if (OutP->RetCode != KERN_SUCCESS) {
		MIG_RETURN_ERROR(OutP, OutP->RetCode);
	}
#if __MigKernelSpecificCode
#endif /* __MigKernelSpecificCode */

	OutP->NDR = NDR_record;


	/* Copy back the in-out state updated by the call. */
	OutP->state = In0P->state;

	OutP->Head.msgh_size = (mach_msg_size_t)(sizeof(Reply));
	__AfterRcvRpc(3830, "purgable_control")
}
3023
#if ( __MigTypeCheck )
#if __MIG_check__Request__map_subsystem__
#if !defined(__MIG_check__Request___map_exec_lockdown_t__defined)
#define __MIG_check__Request___map_exec_lockdown_t__defined

/*
 * Validate an inbound _map_exec_lockdown request: it must be a simple
 * (non-complex) message of exactly the expected wire size.
 */
mig_internal kern_return_t __MIG_check__Request___map_exec_lockdown_t(__attribute__((__unused__)) __Request___map_exec_lockdown_t *In0P)
{

	typedef __Request___map_exec_lockdown_t __Request;
#if __MigTypeCheck
	/* A complex message cannot be this request. */
	if (In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
		return MIG_BAD_ARGUMENTS;
	/* The header size must match the request layout exactly. */
	if (In0P->Head.msgh_size != (mach_msg_size_t)sizeof(__Request))
		return MIG_BAD_ARGUMENTS;
#endif /* __MigTypeCheck */

	return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Request___map_exec_lockdown_t__defined) */
#endif /* __MIG_check__Request__map_subsystem__ */
#endif /* ( __MigTypeCheck ) */
3044
3045
3046/* Routine _map_exec_lockdown */
3047mig_internal novalue _X_map_exec_lockdown
3048 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
3049{
3050
3051#ifdef __MigPackStructs
3052#pragma pack(4)
3053#endif
3054 typedef struct {
3055 mach_msg_header_t Head;
3056 mach_msg_trailer_t trailer;
3057 } Request __attribute__((unused));
3058#ifdef __MigPackStructs
3059#pragma pack()
3060#endif
3061 typedef __Request___map_exec_lockdown_t __Request;
3062 typedef __Reply___map_exec_lockdown_t Reply __attribute__((unused));
3063
3064 /*
3065 * typedef struct {
3066 * mach_msg_header_t Head;
3067 * NDR_record_t NDR;
3068 * kern_return_t RetCode;
3069 * } mig_reply_error_t;
3070 */
3071
3072 Request *In0P = (Request *) InHeadP;
3073 Reply *OutP = (Reply *) OutHeadP;
3074#ifdef __MIG_check__Request___map_exec_lockdown_t__defined
3075 kern_return_t check_result;
3076#endif /* __MIG_check__Request___map_exec_lockdown_t__defined */
3077
3078#if __MigKernelSpecificCode
3079#else
3080#endif /* __MigKernelSpecificCode */
3081 vm_map_t target_task;
3082
3083 __DeclareRcvRpc(3831, "_map_exec_lockdown")
3084 __BeforeRcvRpc(3831, "_map_exec_lockdown")
3085
3086#if defined(__MIG_check__Request___map_exec_lockdown_t__defined)
3087 check_result = __MIG_check__Request___map_exec_lockdown_t((__Request *)In0P);
3088 if (check_result != MACH_MSG_SUCCESS)
3089 { MIG_RETURN_ERROR(OutP, check_result); }
3090#endif /* defined(__MIG_check__Request___map_exec_lockdown_t__defined) */
3091
3092 target_task = convert_port_to_map(In0P->Head.msgh_request_port);
3093
3094 OutP->RetCode = vm32__map_exec_lockdown(target_task);
3095 vm_map_deallocate(target_task);
3096#if __MigKernelSpecificCode
3097#endif /* __MigKernelSpecificCode */
3098
3099 OutP->NDR = NDR_record;
3100
3101
3102 __AfterRcvRpc(3831, "_map_exec_lockdown")
3103}
3104
3105
3106
/* Description of this subsystem, for use in direct RPC */
/*
 * Dispatch table for the vm32_map subsystem: msgh_ids 3800..3831 map onto
 * routine[msgh_id - 3800].  All-zero slots are unimplemented ids that
 * map_server rejects with MIG_BAD_ID.
 */
const struct vm32_map_subsystem vm32_map_subsystem = {
	map_server_routine,
	3800,
	3832,
	(mach_msg_size_t)sizeof(union __ReplyUnion__vm32_map_subsystem),
	(vm_address_t)0,
	{
          { (mig_impl_routine_t) 0, /* 3800: region */
          (mig_stub_routine_t) _Xregion, 7, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__region_t)},
          { (mig_impl_routine_t) 0, /* 3801: allocate */
          (mig_stub_routine_t) _Xallocate, 4, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__allocate_t)},
          { (mig_impl_routine_t) 0, /* 3802: deallocate */
          (mig_stub_routine_t) _Xdeallocate, 3, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__deallocate_t)},
          { (mig_impl_routine_t) 0, /* 3803: protect */
          (mig_stub_routine_t) _Xprotect, 5, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__protect_t)},
          { (mig_impl_routine_t) 0, /* 3804: inherit */
          (mig_stub_routine_t) _Xinherit, 4, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__inherit_t)},
          { (mig_impl_routine_t) 0, /* 3805: read */
          (mig_stub_routine_t) _Xread, 5, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__read_t)},
          { (mig_impl_routine_t) 0, /* 3806: read_list */
          (mig_stub_routine_t) _Xread_list, 3, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__read_list_t)},
          { (mig_impl_routine_t) 0, /* 3807: write */
          (mig_stub_routine_t) _Xwrite, 4, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__write_t)},
          { (mig_impl_routine_t) 0, /* 3808: copy */
          (mig_stub_routine_t) _Xcopy, 4, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__copy_t)},
          { (mig_impl_routine_t) 0, /* 3809: read_overwrite */
          (mig_stub_routine_t) _Xread_overwrite, 5, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__read_overwrite_t)},
          { (mig_impl_routine_t) 0, /* 3810: msync */
          (mig_stub_routine_t) _Xmsync, 4, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__msync_t)},
          { (mig_impl_routine_t) 0, /* 3811: behavior_set */
          (mig_stub_routine_t) _Xbehavior_set, 4, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__behavior_set_t)},
          { (mig_impl_routine_t) 0, /* 3812: map */
          (mig_stub_routine_t) _Xmap, 11, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__map_t)},
          { (mig_impl_routine_t) 0, /* 3813: machine_attribute */
          (mig_stub_routine_t) _Xmachine_attribute, 5, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__machine_attribute_t)},
          { (mig_impl_routine_t) 0, /* 3814: remap */
          (mig_stub_routine_t) _Xremap, 11, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__remap_t)},
          { (mig_impl_routine_t) 0, /* 3815: _task_wire */
          (mig_stub_routine_t) _X_task_wire, 2, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply___task_wire_t)},
          { (mig_impl_routine_t) 0, /* 3816: make_memory_entry */
          (mig_stub_routine_t) _Xmake_memory_entry, 6, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__make_memory_entry_t)},
          { (mig_impl_routine_t) 0, /* 3817: map_page_query */
          (mig_stub_routine_t) _Xmap_page_query, 4, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__map_page_query_t)},
          { (mig_impl_routine_t) 0, /* 3818: region_info */
          (mig_stub_routine_t) _Xregion_info, 5, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__region_info_t)},
          { (mig_impl_routine_t) 0, /* 3819: mapped_pages_info */
          (mig_stub_routine_t) _Xmapped_pages_info, 3, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__mapped_pages_info_t)},
          {0, 0, 0, 0, 0, 0}, /* 3820: unimplemented */
          { (mig_impl_routine_t) 0, /* 3821: region_recurse */
          (mig_stub_routine_t) _Xregion_recurse, 6, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__region_recurse_t)},
          { (mig_impl_routine_t) 0, /* 3822: region_recurse_64 */
          (mig_stub_routine_t) _Xregion_recurse_64, 6, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__region_recurse_64_t)},
          { (mig_impl_routine_t) 0, /* 3823: region_info_64 */
          (mig_stub_routine_t) _Xregion_info_64, 5, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__region_info_64_t)},
          { (mig_impl_routine_t) 0, /* 3824: region_64 */
          (mig_stub_routine_t) _Xregion_64, 7, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__region_64_t)},
          { (mig_impl_routine_t) 0, /* 3825: make_memory_entry_64 */
          (mig_stub_routine_t) _Xmake_memory_entry_64, 7, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__make_memory_entry_64_t)},
          { (mig_impl_routine_t) 0, /* 3826: map_64 */
          (mig_stub_routine_t) _Xmap_64, 12, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__map_64_t)},
          {0, 0, 0, 0, 0, 0}, /* 3827: unimplemented */
          {0, 0, 0, 0, 0, 0}, /* 3828: unimplemented */
          {0, 0, 0, 0, 0, 0}, /* 3829: unimplemented */
          { (mig_impl_routine_t) 0, /* 3830: purgable_control */
          (mig_stub_routine_t) _Xpurgable_control, 4, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply__purgable_control_t)},
          { (mig_impl_routine_t) 0, /* 3831: _map_exec_lockdown */
          (mig_stub_routine_t) _X_map_exec_lockdown, 1, 0, (routine_arg_descriptor_t)0, (mach_msg_size_t)sizeof(__Reply___map_exec_lockdown_t)},
	}
};
3177
3178mig_external boolean_t map_server
3179 (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
3180{
3181 /*
3182 * typedef struct {
3183 * mach_msg_header_t Head;
3184 * NDR_record_t NDR;
3185 * kern_return_t RetCode;
3186 * } mig_reply_error_t;
3187 */
3188
3189 register mig_routine_t routine;
3190
3191 OutHeadP->msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REPLY(InHeadP->msgh_bits), 0);
3192 OutHeadP->msgh_remote_port = InHeadP->msgh_reply_port;
3193 /* Minimal size: routine() will update it if different */
3194 OutHeadP->msgh_size = (mach_msg_size_t)sizeof(mig_reply_error_t);
3195 OutHeadP->msgh_local_port = MACH_PORT_NULL;
3196 OutHeadP->msgh_id = InHeadP->msgh_id + 100;
3197 OutHeadP->msgh_reserved = 0;
3198
3199 if ((InHeadP->msgh_id > 3831) || (InHeadP->msgh_id < 3800) ||
3200 ((routine = vm32_map_subsystem.routine[InHeadP->msgh_id - 3800].stub_routine) == 0)) {
3201 ((mig_reply_error_t *)OutHeadP)->NDR = NDR_record;
3202 ((mig_reply_error_t *)OutHeadP)->RetCode = MIG_BAD_ID;
3203 return FALSE;
3204 }
3205 (*routine) (InHeadP, OutHeadP);
3206 return TRUE;
3207}
3208
3209mig_external mig_routine_t map_server_routine
3210 (mach_msg_header_t *InHeadP)
3211{
3212 register int msgh_id;
3213
3214 msgh_id = InHeadP->msgh_id - 3800;
3215
3216 if ((msgh_id > 31) || (msgh_id < 0))
3217 return 0;
3218
3219 return vm32_map_subsystem.routine[msgh_id].stub_routine;
3220}
3221