Example #1
File: vm_unix.c Project: Bitesher/xnu
int
vslock(
	user_addr_t	addr,
	user_size_t	len)
{
	kern_return_t	kret;
	vm_map_t	map;

	map = current_map();
	kret = vm_map_wire(map,
			   vm_map_trunc_page(addr,
					     vm_map_page_mask(map)),
			   vm_map_round_page(addr+len,
					     vm_map_page_mask(map)), 
			   VM_PROT_READ | VM_PROT_WRITE,
			   FALSE);

	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
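A minimal, hypothetical sketch of how a caller might pair vslock() with vsunlock() (Example #7) around an operation that needs the user buffer to stay resident; the helper name and the use of copyout() as the stand-in operation are illustrative only:

static int
copy_to_wired_user_buffer(user_addr_t uaddr, user_size_t ulen, const void *kbuf)
{
	int error;

	/* wire the user pages so they cannot be paged out while we work on them */
	error = vslock(uaddr, ulen);
	if (error)
		return error;

	/* touch the now-resident range; copyout() stands in for the real work */
	error = copyout(kbuf, uaddr, ulen);

	/* always unwire; the "dirtied" argument is unused by vsunlock() anyway */
	vsunlock(uaddr, ulen, error == 0);
	return error;
}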
Example #2
int
minherit(__unused proc_t p, struct minherit_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	register vm_inherit_t inherit;
	vm_map_t	user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value, uap->inherit);

	addr = (mach_vm_offset_t)uap->addr;
	size = (mach_vm_size_t)uap->len;
	inherit = uap->inherit;

	user_map = current_map();
	result = mach_vm_inherit(user_map, addr, size,
				inherit);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
Example #3
int
munmap(__unused proc_t p, struct munmap_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;

	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);

	if (user_addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}

	if (user_addr + user_size < user_addr)
		return(EINVAL);

	if (user_size == 0) {
		/* UNIX SPEC: size is 0, return EINVAL */
		return EINVAL;
	}

	result = mach_vm_deallocate(current_map(), user_addr, user_size);
	if (result != KERN_SUCCESS) {
		return(EINVAL);
	}
	return(0);
}
Example #4
int
useracc(
	user_addr_t	addr,
	user_size_t	len,
	int	prot)
{
	return (vm_map_check_protection(
			current_map(),
			vm_map_trunc_page(addr), vm_map_round_page(addr+len),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
Example #5
File: c1_ValueMap.cpp Project: dain/graal
bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
  TRACE_VALUE_NUMBERING(tty->print_cr("** loop header block"));

  _too_complicated_loop = false;
  _loop_blocks.clear();
  _loop_blocks.append(loop_header);

  for (int i = 0; i < _loop_blocks.length(); i++) {
    BlockBegin* block = _loop_blocks.at(i);
    TRACE_VALUE_NUMBERING(tty->print_cr("processing loop block B%d", block->block_id()));

    if (block->is_set(BlockBegin::exception_entry_flag)) {
      // this would be too complicated
      return false;
    }

    // add predecessors to worklist
    for (int j = block->number_of_preds() - 1; j >= 0; j--) {
      BlockBegin* pred = block->pred_at(j);

      if (pred->is_set(BlockBegin::osr_entry_flag)) {
        return false;
      }

      ValueMap* pred_map = value_map_of(pred);
      if (pred_map != NULL) {
        current_map()->kill_map(pred_map);
      } else if (!_loop_blocks.contains(pred)) {
        if (_loop_blocks.length() >= ValueMapMaxLoopSize) {
          return false;
        }
        _loop_blocks.append(pred);
      }
    }

    // use the instruction visitor for killing values
    for (Value instr = block->next(); instr != NULL; instr = instr->next()) {
      instr->visit(this);
      if (_too_complicated_loop) {
        return false;
      }
    }
  }

  bool optimistic = this->_gvn->compilation()->is_optimistic();

  if (UseLoopInvariantCodeMotion && optimistic) {
    LoopInvariantCodeMotion code_motion(this, _gvn, loop_header, &_loop_blocks);
  }

  TRACE_VALUE_NUMBERING(tty->print_cr("** loop successfully optimized"));
  return true;
}
Example #6
File: L17-1.CPP Project: CHMyFork/GPBB
void main()
{
   unsigned int init_length, x, y, seed;
   unsigned long generation = 0;
   char gen_text[80];
   long bios_time, start_bios_time;

   cellmap current_map(cellmap_height, cellmap_width);
   cellmap next_map(cellmap_height, cellmap_width);

   // Get the seed; seed randomly if 0 entered
   cout << "Seed (0 for random seed): ";
   cin >> seed;
   if (seed == 0) seed = (unsigned) time(NULL);

   // Randomly initialize the initial cell map
   cout << "Initializing...";
   srand(seed);
   init_length = (cellmap_height * cellmap_width) / 2;
   do {
      x = random(cellmap_width);
      y = random(cellmap_height);
      next_map.set_cell(x, y);
   } while (--init_length);
   current_map.copy_cells(next_map); // put init map in current_map

   enter_display_mode();

   // Keep recalculating and redisplaying generations until a key
   // is pressed
   show_text(0, MSG_LINE, "Generation: ");
   start_bios_time = _bios_timeofday(_TIME_GETCLOCK, &bios_time);
   do {
      generation++;
      sprintf(gen_text, "%10lu", generation);
      show_text(1, GENERATION_LINE, gen_text);
      // Recalculate and draw the next generation
      current_map.next_generation(next_map);
      // Make current_map current again
      current_map.copy_cells(next_map);
#if LIMIT_18_HZ
      // Limit to a maximum of 18.2 frames per second, for visibility
      do {
         _bios_timeofday(_TIME_GETCLOCK, &bios_time);
      } while (start_bios_time == bios_time);
      start_bios_time = bios_time;
#endif
   } while (!kbhit());
   getch();    // clear keypress
   exit_display_mode();
   cout << "Total generations: " << generation << "\nSeed: " <<
         seed << "\n";
}
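The Life driver above depends on a cellmap class defined elsewhere in the listing's sources. A rough sketch of the interface it assumes is shown below; the constructor and the set_cell(), copy_cells() and next_generation() signatures follow the calls made in main(), while the private representation and the clear_cell()/cell_state() helpers are assumptions:

class cellmap
{
private:
   unsigned char *cells;                            // one byte per cell (assumed)
   unsigned int width, height;
public:
   cellmap(unsigned int h, unsigned int w);         // allocate an h x w map
   ~cellmap();
   void set_cell(unsigned int x, unsigned int y);   // turn a cell on
   void clear_cell(unsigned int x, unsigned int y); // turn a cell off (assumed)
   int  cell_state(int x, int y);                   // 1 if alive (assumed)
   void next_generation(cellmap& dest_map);         // compute the next generation into dest_map
   void copy_cells(cellmap& source_map);            // bulk-copy another map's cells
};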
Example #7
File: vm_unix.c Project: Bitesher/xnu
int
vsunlock(
	user_addr_t addr,
	user_size_t len,
	__unused int dirtied)
{
#if FIXME  /* [ */
	pmap_t		pmap;
	vm_page_t	pg;
	vm_map_offset_t	vaddr;
	ppnum_t		paddr;
#endif  /* FIXME ] */
	kern_return_t	kret;
	vm_map_t	map;

	map = current_map();

#if FIXME  /* [ */
	if (dirtied) {
		pmap = get_task_pmap(current_task());
		for (vaddr = vm_map_trunc_page(addr, PAGE_MASK);
		     vaddr < vm_map_round_page(addr+len, PAGE_MASK);
		     vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}
#endif  /* FIXME ] */
#ifdef	lint
	dirtied++;
#endif	/* lint */
	kret = vm_map_unwire(map,
			     vm_map_trunc_page(addr,
					       vm_map_page_mask(map)),
			     vm_map_round_page(addr+len,
					       vm_map_page_mask(map)),
			     FALSE);
	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
Example #8
int
madvise(__unused proc_t p, struct madvise_args *uap, __unused register_t *retval)
{
	vm_map_t user_map;
	mach_vm_offset_t start;
	mach_vm_size_t size;
	vm_behavior_t new_behavior;
	kern_return_t	result;

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	switch (uap->behav) {
		case MADV_RANDOM:
			new_behavior = VM_BEHAVIOR_RANDOM;
			break;
		case MADV_SEQUENTIAL: 
			new_behavior = VM_BEHAVIOR_SEQUENTIAL;
			break;
		case MADV_NORMAL:
			new_behavior = VM_BEHAVIOR_DEFAULT;
			break;
		case MADV_WILLNEED:
			new_behavior = VM_BEHAVIOR_WILLNEED;
			break;
		case MADV_DONTNEED:
			new_behavior = VM_BEHAVIOR_DONTNEED;
			break;
		default:
			return(EINVAL);
	}

	start = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t) uap->len;
	
	user_map = current_map();

	result = mach_vm_behavior_set(user_map, start, size, new_behavior);
	switch (result) {
		case KERN_SUCCESS:
			return (0);
		case KERN_INVALID_ADDRESS:
			return (ENOMEM);
	}

	return (EINVAL);
}
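For reference, the user-space entry point for this path is the standard POSIX madvise(); a minimal sketch asking for sequential read-ahead on an existing mapping (the helper name is illustrative):

#include <sys/mman.h>
#include <stddef.h>

/* advise the kernel that "buf" will be read sequentially; 0 on success, -1 + errno on failure */
static int
advise_sequential(void *buf, size_t len)
{
	return madvise(buf, len, MADV_SEQUENTIAL);
}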
Example #9
File: vm_unix.c Project: Bitesher/xnu
int
useracc(
	user_addr_t	addr,
	user_size_t	len,
	int	prot)
{
	vm_map_t	map;

	map = current_map();
	return (vm_map_check_protection(
			map,
			vm_map_trunc_page(addr,
					  vm_map_page_mask(map)),
			vm_map_round_page(addr+len,
					  vm_map_page_mask(map)),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
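useracc() returns non-zero only when the whole range is accessible in the current map, so a hypothetical in-kernel caller might use it as a quick pre-check before touching a user buffer (the helper name and the EFAULT choice are illustrative):

static int
read_user_range(user_addr_t uaddr, user_size_t ulen)
{
	if (!useracc(uaddr, ulen, B_READ)) {
		/* the current map does not permit reading the whole range */
		return EFAULT;
	}
	/* ... proceed to copyin() or otherwise access the range ... */
	return 0;
}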
Example #10
int copyoutmap(
	vm_map_t map,
	char 	*fromaddr, 
	char	*toaddr,
	int 	length)
{
	if (vm_map_pmap(map) == kernel_pmap) {
		/* assume a correct copy */
		memcpy(toaddr, fromaddr, length);
		return 0;
	}

	if (current_map() == map)
		return copyout(fromaddr, toaddr, length);

	return 1;
}
Example #11
int
munlock(__unused proc_t p, struct munlock_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	vm_map_t user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(addr, uap->len);

	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;
	user_map = current_map();

	/* JMM - need to remove all wirings by spec - this just removes one */
	result = mach_vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}
Example #12
File: kern_mman.c Project: onlynone/xnu
int
mlock(__unused proc_t p, struct mlock_args *uap, __unused int32_t *retvalval)
{
	vm_map_t user_map;
	vm_map_offset_t addr;
	vm_map_size_t size, pageoff;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (vm_map_offset_t) uap->addr;
	size = (vm_map_size_t)uap->len;

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	user_map = current_map();
	pageoff = (addr & vm_map_page_mask(user_map));
	addr -= pageoff;
	size = vm_map_round_page(size+pageoff, vm_map_page_mask(user_map));

	/* have to call vm_map_wire directly to pass "I don't know" protections */
	result = vm_map_wire(user_map, addr, addr+size, VM_PROT_NONE | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_MLOCK), TRUE);

	if (result == KERN_RESOURCE_SHORTAGE)
		return EAGAIN;
	else if (result == KERN_PROTECTION_FAILURE)
		return EACCES;
	else if (result != KERN_SUCCESS)
		return ENOMEM;

	return 0;	/* KERN_SUCCESS */
}
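The matching user-space calls are POSIX mlock()/munlock(); a minimal sketch that keeps a sensitive buffer resident while it is in use (the helper is illustrative, and the error codes come from the kernel paths above):

#include <sys/mman.h>
#include <string.h>

static int
with_locked_buffer(void *secret, size_t len)
{
	if (mlock(secret, len) != 0)
		return -1;              /* EAGAIN, EACCES or ENOMEM per mlock() above */

	/* ... use the buffer while it is wired ... */

	memset(secret, 0, len);         /* wipe before unwiring */
	return munlock(secret, len);
}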
Example #13
mach_msg_return_t
mach_msg_receive(
	mach_msg_header_t 	*msg,
	mach_msg_option_t 	option,
	mach_msg_size_t 	rcv_size,
	mach_port_t 		rcv_name,
	mach_msg_timeout_t 	time_out,
	mach_port_t 		notify)
{
	ipc_thread_t self = current_thread();
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	ipc_object_t object;
	ipc_mqueue_t mqueue;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;

	mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
	if (mr != MACH_MSG_SUCCESS)
		return mr;
	/* hold ref for object; mqueue is locked */

	/*
	 *	ipc_mqueue_receive may not return, because if we block
	 *	then our kernel stack may be discarded.  So we save
	 *	state here for mach_msg_receive_continue to pick up.
	 */

	self->ith_msg = msg;
	self->ith_option = option;
	self->ith_rcv_size = rcv_size;
	self->ith_timeout = time_out;
	self->ith_notify = notify;
	self->ith_object = object;
	self->ith_mqueue = mqueue;

	if (option & MACH_RCV_LARGE) {
		mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
					rcv_size, time_out,
					FALSE, mach_msg_receive_continue,
					&kmsg, &seqno);
		/* mqueue is unlocked */
		ipc_object_release(object);
		if (mr != MACH_MSG_SUCCESS) {
			if (mr == MACH_RCV_TOO_LARGE) {
				mach_msg_size_t real_size =
					(mach_msg_size_t) (vm_offset_t) kmsg;

				assert(real_size > rcv_size);

				(void) copyout(&real_size,
					       &msg->msgh_size,
					       sizeof(mach_msg_size_t));
			}

			return mr;
		}

		kmsg->ikm_header.msgh_seqno = seqno;
		assert(kmsg->ikm_header.msgh_size <= rcv_size);
	} else {
		mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
					MACH_MSG_SIZE_MAX, time_out,
					FALSE, mach_msg_receive_continue,
					&kmsg, &seqno);
		/* mqueue is unlocked */
		ipc_object_release(object);
		if (mr != MACH_MSG_SUCCESS)
			return mr;

		kmsg->ikm_header.msgh_seqno = seqno;
		if (kmsg->ikm_header.msgh_size > rcv_size) {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			return MACH_RCV_TOO_LARGE;
		}
	}

	if (option & MACH_RCV_NOTIFY) {
		if (notify == MACH_PORT_NULL)
			mr = MACH_RCV_INVALID_NOTIFY;
		else
			mr = ipc_kmsg_copyout(kmsg, space, map, notify);
	} else
		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
	if (mr != MACH_MSG_SUCCESS) {
		if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
			(void) ipc_kmsg_put(msg, kmsg,
					    kmsg->ikm_header.msgh_size);
		} else {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
		}

		return mr;
	}

	return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
}
Example #14
 void          kill_field(ciField* field)       {
     current_map()->kill_field(field);
 }
Example #15
 void          kill_array(ValueType* type)      {
     current_map()->kill_array(type);
 }
Example #16
File: kern_mman.c Project: argp/xnu
	     uap->behav == MADV_FREE_REUSABLE)) {
		printf("** FOURK_COMPAT: %d[%s] "
		       "failing madvise(0x%llx,0x%llx,%s)\n",
		       p->p_pid, p->p_comm, start, size,
		       ((uap->behav == MADV_FREE_REUSABLE)
			? "MADV_FREE_REUSABLE"
			: "MADV_FREE"));
		DTRACE_VM3(fourk_compat_madvise,
			   uint64_t, start,
			   uint64_t, size,
			   int, uap->behav);
		return EINVAL;
	}
#endif /* __arm64__ */

	user_map = current_map();

	result = mach_vm_behavior_set(user_map, start, size, new_behavior);
	switch (result) {
	case KERN_SUCCESS:
		return 0;
	case KERN_INVALID_ADDRESS:
		return EINVAL;
	case KERN_NO_SPACE:	
		return ENOMEM;
	}

	return EINVAL;
}

int
Example #17
 // implementation for abstract methods of ValueNumberingVisitor
 void          kill_memory()                    {
     current_map()->kill_memory();
 }
Example #18
File: c1_ValueMap.cpp Project: dain/graal
 void      kill_field(ciField* field, bool all_offsets)  {
   current_map()->kill_field(field, all_offsets);
   assert(field->type()->basic_type() >= 0 && field->type()->basic_type() <= T_ARRAY, "Invalid type");
   _has_field_store[field->type()->basic_type()] = true;
 }
Example #19
File: kern_mman.c Project: argp/xnu
int
mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval)
{
	vm_prot_t prot;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;
	vm_map_t	user_map;
#if CONFIG_MACF
	int error;
#endif

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value32, uap->prot);

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;
	prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ));

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
		
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3936456
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* 3936456 */

#if defined(__arm64__)
	if (prot & VM_PROT_STRIP_READ)
		prot &= ~(VM_PROT_READ | VM_PROT_STRIP_READ);
#endif

#if CONFIG_MACF
	/*
	 * The MAC check for mprotect is of limited use for 2 reasons:
	 * Without mmap revocation, the caller could have asked for the max
	 * protections initially instead of a reduced set, so a mprotect
	 * check would offer no new security.
	 * It is not possible to extract the vnode from the pager object(s)
	 * of the target memory range.
	 * However, the MAC check may be used to prevent a process from,
	 * e.g., making the stack executable.
	 */
	error = mac_proc_check_mprotect(p, user_addr,
	    		user_size, prot);
	if (error)
		return (error);
#endif

	if(prot & VM_PROT_TRUSTED) {
#if CONFIG_DYNAMIC_CODE_SIGNING
		/* CODE SIGNING ENFORCEMENT - JIT support */
		/* The special protection value VM_PROT_TRUSTED requests that we treat
		 * this page as if it had a valid code signature.
		 * If this is enabled, there MUST be a MAC policy implementing the 
		 * mac_proc_check_mprotect() hook above. Otherwise, Codesigning will be
		 * compromised because the check would always succeed and thusly any
		 * process could sign dynamically. */
		result = vm_map_sign(
			user_map, 
			vm_map_trunc_page(user_addr,
					  vm_map_page_mask(user_map)),
			vm_map_round_page(user_addr+user_size,
					  vm_map_page_mask(user_map)));
		switch (result) {
			case KERN_SUCCESS:
				break;
			case KERN_INVALID_ADDRESS:
				/* UNIX SPEC: for an invalid address range, return ENOMEM */
				return ENOMEM;
			default:
				return EINVAL;
		}
#else
		return ENOTSUP;
#endif
	}
	prot &= ~VM_PROT_TRUSTED;
	
	result = mach_vm_protect(user_map, user_addr, user_size,
				 FALSE, prot);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_INVALID_ADDRESS:
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return ENOMEM;
	}
	return (EINVAL);
}
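On the user side this is reached through POSIX mprotect(); a minimal sketch that drops write access on a page-aligned region once it has been initialized (the helper name is illustrative):

#include <sys/mman.h>
#include <stddef.h>

/* make an already page-aligned region read-only; 0 on success, -1 + errno on failure */
static int
seal_region(void *page_aligned_addr, size_t len)
{
	return mprotect(page_aligned_addr, len, PROT_READ);
}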
Example #20
mach_msg_return_t
mach_msg_overwrite(
	mach_msg_header_t		*msg,
	mach_msg_option_t		option,
	mach_msg_size_t		send_size,
	mach_msg_size_t		rcv_size,
	mach_port_name_t		rcv_name,
	__unused mach_msg_timeout_t	msg_timeout,
	__unused mach_port_name_t	notify,
	__unused mach_msg_header_t	*rcv_msg,
       __unused mach_msg_size_t	rcv_msg_size)
{
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;
	mach_msg_trailer_size_t trailer_size;

	if (option & MACH_SEND_MSG) {
		mach_msg_size_t	msg_and_trailer_size;
		mach_msg_max_trailer_t	*max_trailer;

		if ((send_size < sizeof(mach_msg_header_t)) || (send_size & 3))
			return MACH_SEND_MSG_TOO_SMALL;

		if (send_size > MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE)
			return MACH_SEND_TOO_LARGE;

		msg_and_trailer_size = send_size + MAX_TRAILER_SIZE;
		kmsg = ipc_kmsg_alloc(msg_and_trailer_size);

		if (kmsg == IKM_NULL)
			return MACH_SEND_NO_BUFFER;

		(void) memcpy((void *) kmsg->ikm_header, (const void *) msg, send_size);

		kmsg->ikm_header->msgh_size = send_size;

		/* 
		 * Reserve for the trailer the largest space (MAX_TRAILER_SIZE)
		 * However, the internal size field of the trailer (msgh_trailer_size)
		 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize
		 * the cases where no implicit data is requested.
		 */
		max_trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + send_size);
		max_trailer->msgh_sender = current_thread()->task->sec_token;
		max_trailer->msgh_audit = current_thread()->task->audit_token;
		max_trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
		max_trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

		mr = ipc_kmsg_copyin(kmsg, space, map, &option);

		if (mr != MACH_MSG_SUCCESS) {
			ipc_kmsg_free(kmsg);
			return mr;
		}

		do {
			mr = ipc_kmsg_send(kmsg, MACH_MSG_OPTION_NONE, MACH_MSG_TIMEOUT_NONE);
		 } while (mr == MACH_SEND_INTERRUPTED);

		assert(mr == MACH_MSG_SUCCESS);
	}

	if (option & MACH_RCV_MSG) {
		thread_t self = current_thread();

		do {
			ipc_object_t object;
			ipc_mqueue_t mqueue;

			mr = ipc_mqueue_copyin(space, rcv_name,
					       &mqueue, &object);
			if (mr != MACH_MSG_SUCCESS)
				return mr;
			/* hold ref for object */

			self->ith_continuation = (void (*)(mach_msg_return_t))0;
			ipc_mqueue_receive(mqueue,
					   MACH_MSG_OPTION_NONE,
					   MACH_MSG_SIZE_MAX,
					   MACH_MSG_TIMEOUT_NONE,
					   THREAD_ABORTSAFE);
			mr = self->ith_state;
			kmsg = self->ith_kmsg;
			seqno = self->ith_seqno;

			io_release(object);

		} while (mr == MACH_RCV_INTERRUPTED);
		if (mr != MACH_MSG_SUCCESS)
			return mr;


		trailer_size = ipc_kmsg_add_trailer(kmsg, space, option, current_thread(), seqno, TRUE,
				kmsg->ikm_header->msgh_remote_port->ip_context);

		if (rcv_size < (kmsg->ikm_header->msgh_size + trailer_size)) {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) memcpy((void *) msg, (const void *) kmsg->ikm_header, sizeof *msg);
			ipc_kmsg_free(kmsg);
			return MACH_RCV_TOO_LARGE;
		}

		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_MSG_BODY_NULL, option);
		if (mr != MACH_MSG_SUCCESS) {
			if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
				ipc_kmsg_put_to_kernel(msg, kmsg,
						kmsg->ikm_header->msgh_size + trailer_size);
			} else {
				ipc_kmsg_copyout_dest(kmsg, space);
				(void) memcpy((void *) msg, (const void *) kmsg->ikm_header, sizeof *msg);
				ipc_kmsg_free(kmsg);
			}

			return mr;
		}

		(void) memcpy((void *) msg, (const void *) kmsg->ikm_header,
			      kmsg->ikm_header->msgh_size + trailer_size);
		ipc_kmsg_free(kmsg);
	}

	return MACH_MSG_SUCCESS;
}
Example #21
static vm_map_t
_current_map(void)
{
	return current_map();
}
Example #22
File: c1_ValueMap.cpp Project: dain/graal
 void      kill_array(ValueType* type)                   {
   current_map()->kill_array(type);
   BasicType basic_type = as_BasicType(type); assert(basic_type >= 0 && basic_type <= T_ARRAY, "Invalid type");
   _has_indexed_store[basic_type] = true;
 }
Example #23
int
mincore(__unused proc_t p, struct mincore_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr, first_addr, end;
	vm_map_t map;
	user_addr_t vec;
	int error;
	int vecindex, lastvecindex;
	int mincoreinfo=0;
	int pqueryinfo;
	kern_return_t	ret;
	int numref;

	char c;

	map = current_map();

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = mach_vm_trunc_page(uap->addr);
	end = addr + mach_vm_round_page(uap->len);

	if (end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = current_map();

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for( ; addr < end; addr += PAGE_SIZE ) {
		pqueryinfo = 0;
		ret = mach_vm_page_query(map, addr, &pqueryinfo, &numref);
		if (ret != KERN_SUCCESS) 
			pqueryinfo = 0;
		mincoreinfo = 0;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
			mincoreinfo |= MINCORE_INCORE;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
			mincoreinfo |= MINCORE_REFERENCED;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
			mincoreinfo |= MINCORE_MODIFIED;
		
		
		/*
		 * calculate index into user supplied byte vector
		 */
		vecindex = (addr - first_addr)>> PAGE_SHIFT;

		/*
		 * If we have skipped map entries, we need to make sure that
		 * the byte vector is zeroed for those skipped entries.
		 */
		while((lastvecindex + 1) < vecindex) {
			c = 0;
			error = copyout(&c, vec + lastvecindex, 1);
			if (error) {
				return (EFAULT);
			}
			++lastvecindex;
		}

		/*
		 * Pass the page information to the user
		 */
		c = (char)mincoreinfo;
		error = copyout(&c, vec + vecindex, 1);
		if (error) {
			return (EFAULT);
		}
		lastvecindex = vecindex;
	}


	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = (end - first_addr) >> PAGE_SHIFT;
	while((lastvecindex + 1) < vecindex) {
		c = 0;
		error = copyout(&c, vec + lastvecindex, 1);
		if (error) {
			return (EFAULT);
		}
		++lastvecindex;
	}
	
	return (0);
}
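The user-space mincore() fills one status byte per page; a minimal sketch counting how many pages of a range are resident (the helper is illustrative; on Darwin the vector is a char * and MINCORE_INCORE is defined in <sys/mman.h>):

#include <sys/mman.h>
#include <stdlib.h>
#include <unistd.h>

/* number of resident pages in [addr, addr+len), or -1 on error */
static long
resident_pages(void *addr, size_t len)
{
	size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
	size_t npages = (len + pagesize - 1) / pagesize;
	char *vec = malloc(npages);
	long count = 0;

	if (vec == NULL)
		return -1;
	if (mincore(addr, len, vec) != 0) {
		free(vec);
		return -1;
	}
	for (size_t i = 0; i < npages; i++) {
		if (vec[i] & MINCORE_INCORE)
			count++;
	}
	free(vec);
	return count;
}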
Example #24
File: ipc_mig.c Project: aglab2/darwin-xnu
 */

mach_msg_return_t
mach_msg_overwrite(
	mach_msg_header_t		*msg,
	mach_msg_option_t		option,
	mach_msg_size_t		send_size,
	mach_msg_size_t		rcv_size,
	mach_port_name_t		rcv_name,
	__unused mach_msg_timeout_t	msg_timeout,
	mach_msg_priority_t	override,
	__unused mach_msg_header_t	*rcv_msg,
       __unused mach_msg_size_t	rcv_msg_size)
{
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;
	mach_msg_trailer_size_t trailer_size;

	if (option & MACH_SEND_MSG) {
		mach_msg_size_t	msg_and_trailer_size;
		mach_msg_max_trailer_t	*max_trailer;

		if ((send_size & 3) ||
		    send_size < sizeof(mach_msg_header_t) ||
		    (send_size < sizeof(mach_msg_body_t) && (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX)))
			return MACH_SEND_MSG_TOO_SMALL;

		if (send_size > MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE)
Example #25
kern_return_t
map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t	result;
	struct fileproc	*fp;
	struct vnode	*vp;
	void *	pager;
	vm_offset_t	map_addr=0;
	vm_size_t	map_size;
	int		err=0;
	vm_map_t	my_map;
	proc_t		p = current_proc();
	struct vnode_attr vattr;

	/*
	 *	Find the inode; verify that it's a regular file.
	 */

	err = fp_lookup(p, fd, &fp, 0);
	if (err)
		return(err);
	
	if (fp->f_fglob->fg_type != DTYPE_VNODE){
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	if (!(fp->f_fglob->fg_flag & FREAD)) {
		err = KERN_PROTECTION_FAILURE;
		goto bad;
	}

	vp = (struct vnode *)fp->f_fglob->fg_data;
	err = vnode_getwithref(vp);
	if(err != 0) 
		goto bad;

	if (vp->v_type != VREG) {
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	AUDIT_ARG(vnpath, vp, ARG_VNODE1);

	/*
	 * POSIX: mmap needs to update access time for mapped files
	 */
	if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
		VATTR_INIT(&vattr);
		nanotime(&vattr.va_access_time);
		VATTR_SET_ACTIVE(&vattr, va_access_time);
		vnode_setattr(vp, &vattr, vfs_context_current());
	}
	
	if (offset & PAGE_MASK_64) {
		printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	map_size = round_page(size);

	/*
	 * Allow user to map in a zero length file.
	 */
	if (size == 0) {
		(void)vnode_put(vp);
		err = KERN_SUCCESS;
		goto bad;
	}
	/*
	 *	Map in the file.
	 */
	pager = (void *)ubc_getpager(vp);
	if (pager == NULL) {
		(void)vnode_put(vp);
		err = KERN_FAILURE;
		goto bad;
	}


	my_map = current_map();

	result = vm_map_64(
			my_map,
			&map_addr, map_size, (vm_offset_t)0, 
			VM_FLAGS_ANYWHERE, pager, offset, TRUE,
			VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS) {
		(void)vnode_put(vp);
		err = result;
		goto bad;
	}


	if (!findspace) {
		vm_offset_t	dst_addr;
		vm_map_copy_t	tmp;

		if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr))	||
					trunc_page_32(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}

		result = vm_map_copyin(my_map, (vm_map_address_t)map_addr,
				       (vm_map_size_t)map_size, TRUE, &tmp);
		if (result != KERN_SUCCESS) {
			
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}

		result = vm_map_copy_overwrite(my_map,
					(vm_map_address_t)dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}
	} else {
		if (copyout(&map_addr, CAST_USER_ADDR_T(va), sizeof (map_addr))) {
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}
	}

	ubc_setthreadcred(vp, current_proc(), current_thread());
	(void)ubc_map(vp, (PROT_READ | PROT_EXEC));
	(void)vnode_put(vp);
	err = 0;
bad:
	fp_drop(p, fd, fp, 0);
	return (err);
}
Example #26
void
mach_msg_receive_continue(void)
{
	ipc_thread_t self = current_thread();
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	mach_msg_header_t *msg = self->ith_msg;
	mach_msg_option_t option = self->ith_option;
	mach_msg_size_t rcv_size = self->ith_rcv_size;
	mach_msg_timeout_t time_out = self->ith_timeout;
	mach_port_t notify = self->ith_notify;
	ipc_object_t object = self->ith_object;
	ipc_mqueue_t mqueue = self->ith_mqueue;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;

	if (option & MACH_RCV_LARGE) {
		mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
					rcv_size, time_out,
					TRUE, mach_msg_receive_continue,
					&kmsg, &seqno);
		/* mqueue is unlocked */
		ipc_object_release(object);
		if (mr != MACH_MSG_SUCCESS) {
			if (mr == MACH_RCV_TOO_LARGE) {
				mach_msg_size_t real_size =
					(mach_msg_size_t) (vm_offset_t) kmsg;

				assert(real_size > rcv_size);

				(void) copyout(&real_size,
					       &msg->msgh_size,
					       sizeof(mach_msg_size_t));
			}

			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		kmsg->ikm_header.msgh_seqno = seqno;
		assert(kmsg->ikm_header.msgh_size <= rcv_size);
	} else {
		mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
					MACH_MSG_SIZE_MAX, time_out,
					TRUE, mach_msg_receive_continue,
					&kmsg, &seqno);
		/* mqueue is unlocked */
		ipc_object_release(object);
		if (mr != MACH_MSG_SUCCESS) {
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		kmsg->ikm_header.msgh_seqno = seqno;
		if (kmsg->ikm_header.msgh_size > rcv_size) {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			thread_syscall_return(MACH_RCV_TOO_LARGE);
			/*NOTREACHED*/
		}
	}

	if (option & MACH_RCV_NOTIFY) {
		if (notify == MACH_PORT_NULL)
			mr = MACH_RCV_INVALID_NOTIFY;
		else
			mr = ipc_kmsg_copyout(kmsg, space, map, notify);
	} else
		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
	if (mr != MACH_MSG_SUCCESS) {
		if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
			(void) ipc_kmsg_put(msg, kmsg,
					    kmsg->ikm_header.msgh_size);
		} else {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
		}

		thread_syscall_return(mr);
		/*NOTREACHED*/
	}

	mr = ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
	thread_syscall_return(mr);
	/*NOTREACHED*/
}
Example #27
int
msync_nocancel(__unused proc_t p, struct msync_nocancel_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	int flags;
	vm_map_t user_map;
	int rv;
	vm_sync_t sync_flags=0;

	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0);

	if (addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
	if (size == 0) {
		/*
		 * We cannot support this properly without maintaining a
		 * list of all mmaps done.  We cannot use vm_map_entry, as
		 * entries could be split or coalesced by independent
		 * actions.  So instead of returning inaccurate results,
		 * let's just return an error for the invalid size specified.
		 */
		return (EINVAL); /* XXX breaks posix apps */
	}

	flags = uap->flags;
	/* disallow contradictory flags */
	if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC))
		return (EINVAL);

	if (flags & MS_KILLPAGES)
	        sync_flags |= VM_SYNC_KILLPAGES;
	if (flags & MS_DEACTIVATE)
	        sync_flags |= VM_SYNC_DEACTIVATE;
	if (flags & MS_INVALIDATE)
	        sync_flags |= VM_SYNC_INVALIDATE;

	if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
	        if (flags & MS_ASYNC) 
		        sync_flags |= VM_SYNC_ASYNCHRONOUS;
		else 
		        sync_flags |= VM_SYNC_SYNCHRONOUS;
	}

	sync_flags |= VM_SYNC_CONTIGUOUS;	/* complain if holes */

	user_map = current_map();
	rv = mach_vm_msync(user_map, addr, size, sync_flags);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:	/* hole in region being sync'ed */
		return (ENOMEM);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
	return (0);
}
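From user space this path is reached through POSIX msync(); a minimal sketch that synchronously flushes a file-backed mapping (the helper is illustrative; as the kernel code above shows, a zero length is rejected with EINVAL):

#include <sys/mman.h>
#include <stddef.h>

/* write any dirty pages of the mapping back to the file before returning */
static int
flush_mapping(void *addr, size_t len)
{
	return msync(addr, len, MS_SYNC);
}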
Example #28
/*
 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
 * XXX usage is PROT_* from an interface perspective.  Thus the values of
 * XXX VM_PROT_* and PROT_* need to correspond.
 */
int
mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	struct fileproc *fp;
	register struct		vnode *vp;
	int			flags;
	int			prot, file_prot;
	int			err=0;
	vm_map_t		user_map;
	kern_return_t		result;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	vm_object_offset_t	pageoff;
	vm_object_offset_t	file_pos;
	int			alloc_flags=0;
	boolean_t		docow;
	vm_prot_t		maxprot;
	void 			*handle;
	vm_pager_t		pager;
	int 			mapanon=0;
	int 			fpref=0;
	int error =0;
	int fd = uap->fd;

	user_addr = (mach_vm_offset_t)uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);

	prot = (uap->prot & VM_PROT_ALL);
#if 3777787
	/*
	 * Since the hardware currently does not support writing without
	 * read-before-write, or execution-without-read, if the request is
	 * for write or execute access, we must imply read access as well;
	 * otherwise programs expecting this to work will fail to operate.
	 */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */

	flags = uap->flags;
	vp = NULLVP;

	/*
	 * The vm code does not have prototypes & the compiler doesn't do
	 * the right thing when you cast a 64-bit value and pass it in a
	 * function call.  So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;


	/* make sure mapping fits into numeric range etc */
	if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (file_pos & PAGE_MASK);
	file_pos -= (vm_object_offset_t)pageoff;


	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;			/* low end... */
	user_size = mach_vm_round_page(user_size);	/* hi end */


	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & PAGE_MASK)
		return (EINVAL);
	}
#ifdef notyet
	/* DO not have apis to get this info, need to wait till then*/
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ);

#endif

	alloc_flags = 0;

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.  Use positive fds as the alias
		 * value for memory tracking. 
		 */
		if (fd != -1) {
			/*
			 * Use "fd" to pass (some) Mach VM allocation flags,
			 * (see the VM_FLAGS_* definitions).
			 */
			alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
					    VM_FLAGS_PURGABLE);
			if (alloc_flags != fd) {
				/* reject if there are any extra flags */
				return EINVAL;
			}
		}
			
		handle = NULL;
		maxprot = VM_PROT_ALL;
		file_pos = 0;
		mapanon = 1;
	} else {
		struct vnode_attr va;
		vfs_context_t ctx = vfs_context_current();

		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (err)
			return(err);
		fpref = 1;
		if(fp->f_fglob->fg_type == DTYPE_PSXSHM) {
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			uap->prot = prot;
			uap->flags = flags;
			uap->pos = file_pos;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		}

		if (fp->f_fglob->fg_type != DTYPE_VNODE) {
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if(error != 0)
			goto bad;

		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);
		
		/*
		 * POSIX: mmap needs to update access time for mapped files
		 */
		if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
			VATTR_INIT(&va);
			nanotime(&va.va_access_time);
			VATTR_SET_ACTIVE(&va, va_access_time);
			vnode_setattr(vp, &va, ctx);
		}
		
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out. 
			 */

			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0) {
 					/*
 					 * check for write access
 					 *
 					 * Note that we already made this check when granting FWRITE
 					 * against the file, so it seems redundant here.
 					 */
 					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);
 
 					/* if not granted for any reason, but we wanted it, bad */
 					if ((prot & PROT_WRITE) && (error != 0)) {
 						vnode_put(vp);
  						goto bad;
  					}
 
 					/* if writable, remember */
 					if (error == 0)
  						maxprot |= VM_PROT_WRITE;

				} else if ((prot & PROT_WRITE) != 0) {
					(void)vnode_put(vp);
					error = EACCES;
					goto bad;
				}
			} else
				maxprot |= VM_PROT_WRITE;

			handle = (void *)vp;
#if CONFIG_MACF
			error = mac_file_check_mmap(vfs_context_ucred(ctx),
			    fp->f_fglob, prot, flags, &maxprot);
			if (error) {
				(void)vnode_put(vp);
				goto bad;
			}
#endif /* MAC */
		}
	}

	if (user_size == 0)  {
		if (!mapanon)
			(void)vnode_put(vp);
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = mach_vm_round_page(user_size);

	if (file_pos & PAGE_MASK_64) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = EINVAL;
		goto bad;
	}

	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags |= VM_FLAGS_ANYWHERE;
		user_addr = mach_vm_round_page(user_addr);
	} else {
		if (user_addr != mach_vm_trunc_page(user_addr)) {
		        if (!mapanon)
			        (void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}

	if (flags & MAP_NOCACHE)
		alloc_flags |= VM_FLAGS_NO_CACHE;

	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
		pager = NULL;
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 IPC_PORT_NULL, 0, FALSE,
						 prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);
		if (result != KERN_SUCCESS) 
				goto out;
	} else {
		pager = (vm_pager_t)ubc_getpager(vp);
		
		if (pager == NULL) {
			(void)vnode_put(vp);
			error = ENOMEM;
			goto bad;
		}

		/*
		 *  Set credentials:
		 *	FIXME: if we're writing the file we need a way to
		 *      ensure that someone doesn't replace our R/W creds
		 * 	with ones that only work for read.
		 */

		ubc_setthreadcred(vp, p, current_thread());
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 (ipc_port_t)pager, file_pos,
						 docow, prot, maxprot, 
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);

		if (result != KERN_SUCCESS)  {
				(void)vnode_put(vp);
				goto out;
		}

		file_prot = prot & (PROT_READ | PROT_WRITE | PROT_EXEC);
		if (docow) {
			/* private mapping: won't write to the file */
			file_prot &= ~PROT_WRITE;
		}
		(void) ubc_map(vp, file_prot);
	}

	if (!mapanon)
		(void)vnode_put(vp);

out:
	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		error =  ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error =  EACCES;
		break;
	default:
		error =  EINVAL;
		break;
	}
bad:
	if (fpref)
		fp_drop(p, fd, fp, 0);

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
			      (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);

	return(error);
}
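A minimal user-space sketch of the two cases handled above, an anonymous mapping and a file-backed mapping, paired with munmap() (Example #3); the helper names, lengths and descriptors are placeholders:

#include <sys/mman.h>
#include <sys/types.h>
#include <stddef.h>

/* anonymous, private, read-write mapping of len bytes */
static void *
map_anon(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_ANON | MAP_PRIVATE, -1, 0);
	return (p == MAP_FAILED) ? NULL : p;
}

/* read-only, shared mapping of an already open file descriptor */
static void *
map_file(int fd, size_t len, off_t offset)
{
	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, offset);
	return (p == MAP_FAILED) ? NULL : p;
}

/* both kinds of mapping are released the same way */
static int
unmap(void *p, size_t len)
{
	return munmap(p, len);
}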
Example #29
mach_msg_return_t
mach_msg_trap(
	mach_msg_header_t 	*msg,
	mach_msg_option_t 	option,
	mach_msg_size_t 	send_size,
	mach_msg_size_t 	rcv_size,
	mach_port_t 		rcv_name,
	mach_msg_timeout_t 	time_out,
	mach_port_t 		notify)
{
	mach_msg_return_t mr;

	/* first check for common cases */

	if (option == (MACH_SEND_MSG|MACH_RCV_MSG)) {
		ipc_thread_t self = current_thread();
		ipc_space_t space = self->task->itk_space;
		ipc_kmsg_t kmsg;
		ipc_port_t dest_port;
		ipc_object_t rcv_object;
		ipc_mqueue_t rcv_mqueue;
		mach_msg_size_t reply_size;

		/*
		 *	This case is divided into ten sections, each
		 *	with a label.  There are five optimized
		 *	sections and six unoptimized sections, which
		 *	do the same thing but handle all possible
		 *	cases and are slower.
		 *
		 *	The five sections for an RPC are
		 *	    1) Get request message into a buffer.
		 *		(fast_get or slow_get)
		 *	    2) Copyin request message and rcv_name.
		 *		(fast_copyin or slow_copyin)
		 *	    3) Enqueue request and dequeue reply.
		 *		(fast_send_receive or
		 *		 slow_send and slow_receive)
		 *	    4) Copyout reply message.
		 *		(fast_copyout or slow_copyout)
		 *	    5) Put reply message to user's buffer.
		 *		(fast_put or slow_put)
		 *
		 *	Keep the locking hierarchy firmly in mind.
		 *	(First spaces, then ports, then port sets,
		 *	then message queues.)  Only a non-blocking
		 *	attempt can be made to acquire locks out of
		 *	order, or acquire two locks on the same level.
		 *	Acquiring two locks on the same level will
		 *	fail if the objects are really the same,
		 *	unless simple locking is disabled.  This is OK,
		 *	because then the extra unlock does nothing.
		 *
		 *	There are two major reasons these RPCs can't use
		 *	ipc_thread_switch, and use slow_send/slow_receive:
		 *		1) Kernel RPCs.
		 *		2) Servers fall behind clients, so
		 *		client doesn't find a blocked server thread and
		 *		server finds waiting messages and can't block.
		 */

	/*
	    fast_get:
	*/
		/*
		 *	optimized ipc_kmsg_get
		 *
		 *	No locks, references, or messages held.
		 *	We must clear ikm_cache before copyinmsg.
		 */

		if ((send_size > IKM_SAVED_MSG_SIZE) ||
		    (send_size < sizeof(mach_msg_header_t)) ||
		    (send_size & 3) ||
		    ((kmsg = ikm_cache()) == IKM_NULL))
			goto slow_get;

		ikm_cache() = IKM_NULL;
		ikm_check_initialized(kmsg, IKM_SAVED_KMSG_SIZE);

		if (copyinmsg(msg, &kmsg->ikm_header,
			      send_size)) {
			ikm_free(kmsg);
			goto slow_get;
		}

		kmsg->ikm_header.msgh_size = send_size;

	    fast_copyin:
		/*
		 *	optimized ipc_kmsg_copyin/ipc_mqueue_copyin
		 *
		 *	We have the request message data in kmsg.
		 *	Must still do copyin, send, receive, etc.
		 *
		 *	If the message isn't simple, we can't combine
		 *	ipc_kmsg_copyin_header and ipc_mqueue_copyin,
		 *	because copyin of the message body might
		 *	affect rcv_name.
		 */

		switch (kmsg->ikm_header.msgh_bits) {
		    case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
					MACH_MSG_TYPE_MAKE_SEND_ONCE): {
			ipc_entry_t table;
			ipc_entry_num_t size;
			ipc_port_t reply_port;

			/* sending a request message */

		    {
			mach_port_index_t index;
			mach_port_gen_t gen;

		    {
			mach_port_t reply_name =
				kmsg->ikm_header.msgh_local_port;

			if (reply_name != rcv_name)
				goto slow_copyin;

			/* optimized ipc_entry_lookup of reply_name */

			index = MACH_PORT_INDEX(reply_name);
			gen = MACH_PORT_GEN(reply_name);
		    }

			is_read_lock(space);
			assert(space->is_active);

			size = space->is_table_size;
			table = space->is_table;

			if (index >= size)
				goto abort_request_copyin;

		    {
			ipc_entry_t entry;
			ipc_entry_bits_t bits;

			entry = &table[index];
			bits = entry->ie_bits;

			/* check generation number and type bit */

			if ((bits & (IE_BITS_GEN_MASK|
				     MACH_PORT_TYPE_RECEIVE)) !=
			    (gen | MACH_PORT_TYPE_RECEIVE))
				goto abort_request_copyin;

			reply_port = (ipc_port_t) entry->ie_object;
			assert(reply_port != IP_NULL);
		    }
		    }

			/* optimized ipc_entry_lookup of dest_name */

		    {
			mach_port_index_t index;
			mach_port_gen_t gen;

		    {
			mach_port_t dest_name =
				kmsg->ikm_header.msgh_remote_port;

			index = MACH_PORT_INDEX(dest_name);
			gen = MACH_PORT_GEN(dest_name);
		    }

			if (index >= size)
				goto abort_request_copyin;

		    {
			ipc_entry_t entry;
			ipc_entry_bits_t bits;

			entry = &table[index];
			bits = entry->ie_bits;

			/* check generation number and type bit */

			if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
			    (gen | MACH_PORT_TYPE_SEND))
				goto abort_request_copyin;

			assert(IE_BITS_UREFS(bits) > 0);

			dest_port = (ipc_port_t) entry->ie_object;
			assert(dest_port != IP_NULL);
		    }
		    }

			/*
			 *	To do an atomic copyin, need simultaneous
			 *	locks on both ports and the space.  If
			 *	dest_port == reply_port, and simple locking is
			 *	enabled, then we will abort.  Otherwise it's
			 *	OK to unlock twice.
			 */

			ip_lock(dest_port);
			if (!ip_active(dest_port) ||
			    !ip_lock_try(reply_port)) {
				ip_unlock(dest_port);
				goto abort_request_copyin;
			}
			is_read_unlock(space);

			assert(dest_port->ip_srights > 0);
			dest_port->ip_srights++;
			ip_reference(dest_port);

			assert(ip_active(reply_port));
			assert(reply_port->ip_receiver_name ==
			       kmsg->ikm_header.msgh_local_port);
			assert(reply_port->ip_receiver == space);

			reply_port->ip_sorights++;
			ip_reference(reply_port);

			kmsg->ikm_header.msgh_bits =
				MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
					       MACH_MSG_TYPE_PORT_SEND_ONCE);
			kmsg->ikm_header.msgh_remote_port =
					(mach_port_t) dest_port;
			kmsg->ikm_header.msgh_local_port =
					(mach_port_t) reply_port;

			/* make sure we can queue to the destination */

			if (dest_port->ip_receiver == ipc_space_kernel) {
				/*
				 * The kernel server has a reference to
				 * the reply port, which it hands back
				 * to us in the reply message.  We do
				 * not need to keep another reference to
				 * it.
				 */
				ip_unlock(reply_port);

				assert(ip_active(dest_port));
				ip_unlock(dest_port);
				goto kernel_send;
			}

			if (dest_port->ip_msgcount >= dest_port->ip_qlimit)
				goto abort_request_send_receive;

			/* optimized ipc_mqueue_copyin */

			if (reply_port->ip_pset != IPS_NULL)
				goto abort_request_send_receive;

			rcv_object = (ipc_object_t) reply_port;
			io_reference(rcv_object);
			rcv_mqueue = &reply_port->ip_messages;
			imq_lock(rcv_mqueue);
			io_unlock(rcv_object);
			goto fast_send_receive;

		    abort_request_copyin:
			is_read_unlock(space);
			goto slow_copyin;

		    abort_request_send_receive:
			ip_unlock(dest_port);
			ip_unlock(reply_port);
			goto slow_send;
		    }

		    case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
			ipc_entry_num_t size;
			ipc_entry_t table;

			/* sending a reply message */

		    {
			mach_port_t reply_name =
				kmsg->ikm_header.msgh_local_port;

			if (reply_name != MACH_PORT_NULL)
				goto slow_copyin;
		    }

			is_write_lock(space);
			assert(space->is_active);

			/* optimized ipc_entry_lookup */

			size = space->is_table_size;
			table = space->is_table;

		    {
			ipc_entry_t entry;
			mach_port_gen_t gen;
			mach_port_index_t index;

		    {
			mach_port_t dest_name =
				kmsg->ikm_header.msgh_remote_port;

			index = MACH_PORT_INDEX(dest_name);
			gen = MACH_PORT_GEN(dest_name);
		    }

			if (index >= size)
				goto abort_reply_dest_copyin;

			entry = &table[index];

			/* check generation, collision bit, and type bit */

			if ((entry->ie_bits & (IE_BITS_GEN_MASK|
					       IE_BITS_COLLISION|
					       MACH_PORT_TYPE_SEND_ONCE)) !=
			    (gen | MACH_PORT_TYPE_SEND_ONCE))
				goto abort_reply_dest_copyin;

			/* optimized ipc_right_copyin */

			assert(IE_BITS_TYPE(entry->ie_bits) ==
						MACH_PORT_TYPE_SEND_ONCE);
			assert(IE_BITS_UREFS(entry->ie_bits) == 1);
			assert((entry->ie_bits & IE_BITS_MAREQUEST) == 0);

			if (entry->ie_request != 0)
				goto abort_reply_dest_copyin;

			dest_port = (ipc_port_t) entry->ie_object;
			assert(dest_port != IP_NULL);

			ip_lock(dest_port);
			if (!ip_active(dest_port)) {
				ip_unlock(dest_port);
				goto abort_reply_dest_copyin;
			}

			assert(dest_port->ip_sorights > 0);

			/* optimized ipc_entry_dealloc */

			entry->ie_next = table->ie_next;
			table->ie_next = index;
			entry->ie_bits = gen;
			entry->ie_object = IO_NULL;
		    }

			kmsg->ikm_header.msgh_bits =
				MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
					       0);
			kmsg->ikm_header.msgh_remote_port =
					(mach_port_t) dest_port;

			/* make sure we can queue to the destination */

			assert(dest_port->ip_receiver != ipc_space_kernel);

			/* optimized ipc_entry_lookup/ipc_mqueue_copyin */

		    {
			ipc_entry_t entry;
			ipc_entry_bits_t bits;

		    {
			mach_port_index_t index;
			mach_port_gen_t gen;

			index = MACH_PORT_INDEX(rcv_name);
			gen = MACH_PORT_GEN(rcv_name);

			if (index >= size)
				goto abort_reply_rcv_copyin;

			entry = &table[index];
			bits = entry->ie_bits;

			/* check generation number */

			if ((bits & IE_BITS_GEN_MASK) != gen)
				goto abort_reply_rcv_copyin;
		    }

			/* check type bits; looking for receive or set */

			if (bits & MACH_PORT_TYPE_PORT_SET) {
				ipc_pset_t rcv_pset;

				rcv_pset = (ipc_pset_t) entry->ie_object;
				assert(rcv_pset != IPS_NULL);

				ips_lock(rcv_pset);
				assert(ips_active(rcv_pset));

				rcv_object = (ipc_object_t) rcv_pset;
				rcv_mqueue = &rcv_pset->ips_messages;
			} else if (bits & MACH_PORT_TYPE_RECEIVE) {
				ipc_port_t rcv_port;

				rcv_port = (ipc_port_t) entry->ie_object;
				assert(rcv_port != IP_NULL);

				if (!ip_lock_try(rcv_port))
					goto abort_reply_rcv_copyin;
				assert(ip_active(rcv_port));

				if (rcv_port->ip_pset != IPS_NULL) {
					ip_unlock(rcv_port);
					goto abort_reply_rcv_copyin;
				}

				rcv_object = (ipc_object_t) rcv_port;
				rcv_mqueue = &rcv_port->ip_messages;
			} else
				goto abort_reply_rcv_copyin;
		    }

			is_write_unlock(space);
			io_reference(rcv_object);
			imq_lock(rcv_mqueue);
			io_unlock(rcv_object);
			goto fast_send_receive;

		    abort_reply_dest_copyin:
			is_write_unlock(space);
			goto slow_copyin;

		    abort_reply_rcv_copyin:
			ip_unlock(dest_port);
			is_write_unlock(space);
			goto slow_send;
		    }

		    default:
			goto slow_copyin;
		}
		/*NOTREACHED*/

	    fast_send_receive:
		/*
		 *	optimized ipc_mqueue_send/ipc_mqueue_receive
		 *
		 *	Finished get/copyin of kmsg and copyin of rcv_name.
		 *	space is unlocked, dest_port is locked,
		 *	we can queue kmsg to dest_port,
		 *	rcv_mqueue is locked, rcv_object holds a ref,
		 *	if rcv_object is a port it isn't in a port set
		 *
		 *	Note that if simple locking is turned off,
		 *	then we could have dest_mqueue == rcv_mqueue
		 *	and not abort when we try to lock dest_mqueue.
		 */

		assert(ip_active(dest_port));
		assert(dest_port->ip_receiver != ipc_space_kernel);
		assert((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
		       (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
						MACH_MSG_TYPE_PORT_SEND_ONCE));
		assert((kmsg->ikm_header.msgh_bits &
						MACH_MSGH_BITS_CIRCULAR) == 0);

	    {
		ipc_mqueue_t dest_mqueue;
		ipc_thread_t receiver;

	    {
		ipc_pset_t dest_pset;

		dest_pset = dest_port->ip_pset;
		if (dest_pset == IPS_NULL)
			dest_mqueue = &dest_port->ip_messages;
		else
			dest_mqueue = &dest_pset->ips_messages;
	    }

		if (!imq_lock_try(dest_mqueue)) {
		    abort_send_receive:
			ip_unlock(dest_port);
			imq_unlock(rcv_mqueue);
			ipc_object_release(rcv_object);
			goto slow_send;
		}

		receiver = ipc_thread_queue_first(&dest_mqueue->imq_threads);
		if ((receiver == ITH_NULL) ||
		    (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
								!= IKM_NULL)) {
			imq_unlock(dest_mqueue);
			goto abort_send_receive;
		}

		/*
		 *	There is a receiver thread waiting, and
		 *	there is no reply message for us to pick up.
		 *	We have hope of hand-off, so save state.
		 */

		self->ith_msg = msg;
		self->ith_rcv_size = rcv_size;
		self->ith_object = rcv_object;
		self->ith_mqueue = rcv_mqueue;

		if ((receiver->swap_func == (void (*)()) mach_msg_continue) &&
		    thread_handoff(self, mach_msg_continue, receiver)) {
			assert(current_thread() == receiver);

			/*
			 *	We can use the optimized receive code,
			 *	because the receiver is using no options.
			 */
		} else if ((receiver->swap_func ==
				(void (*)()) exception_raise_continue) &&
			   thread_handoff(self, mach_msg_continue, receiver)) {
			counter(c_mach_msg_trap_block_exc++);
			assert(current_thread() == receiver);

			/*
			 *	We are a reply message coming back through
			 *	the optimized exception-handling path.
			 *	Finish with rcv_mqueue and dest_mqueue,
			 *	and then jump to exception code with
			 *	dest_port still locked.  We don't bother
			 *	with a sequence number in this case.
			 */

			ipc_thread_enqueue_macro(
				&rcv_mqueue->imq_threads, self);
			self->ith_state = MACH_RCV_IN_PROGRESS;
			self->ith_msize = MACH_MSG_SIZE_MAX;
			imq_unlock(rcv_mqueue);

			ipc_thread_rmqueue_first_macro(
				&dest_mqueue->imq_threads, receiver);
			imq_unlock(dest_mqueue);

			exception_raise_continue_fast(dest_port, kmsg);
			/*NOTREACHED*/
			return MACH_MSG_SUCCESS;
		} else if ((send_size <= receiver->ith_msize) &&
			   thread_handoff(self, mach_msg_continue, receiver)) {
			assert(current_thread() == receiver);

			if ((receiver->swap_func ==
				(void (*)()) mach_msg_receive_continue) &&
			    ((receiver->ith_option & MACH_RCV_NOTIFY) == 0)) {
				/*
				 *	We can still use the optimized code.
				 */
			} else {
				counter(c_mach_msg_trap_block_slow++);
				/*
				 *	We are running as the receiver,
				 *	but we can't use the optimized code.
				 *	Finish send/receive processing.
				 */

				dest_port->ip_msgcount++;
				ip_unlock(dest_port);

				ipc_thread_enqueue_macro(
					&rcv_mqueue->imq_threads, self);
				self->ith_state = MACH_RCV_IN_PROGRESS;
				self->ith_msize = MACH_MSG_SIZE_MAX;
				imq_unlock(rcv_mqueue);

				ipc_thread_rmqueue_first_macro(
					&dest_mqueue->imq_threads, receiver);
				receiver->ith_state = MACH_MSG_SUCCESS;
				receiver->ith_kmsg = kmsg;
				receiver->ith_seqno = dest_port->ip_seqno++;
				imq_unlock(dest_mqueue);

				/*
				 *	Call the receiver's continuation.
				 */

				receiver->wait_result = THREAD_AWAKENED;
				(*receiver->swap_func)();
				/*NOTREACHED*/
				return MACH_MSG_SUCCESS;
			}
		} else {
			/*
			 *	The receiver can't accept the message,
			 *	or we can't switch to the receiver.
			 */

			imq_unlock(dest_mqueue);
			goto abort_send_receive;
		}
		counter(c_mach_msg_trap_block_fast++);

		/*
		 *	Safe to unlock dest_port now that we are
		 *	committed to this path, because we hold
		 *	dest_mqueue locked.  We never bother changing
		 *	dest_port->ip_msgcount.
		 */

		ip_unlock(dest_port);

		/*
		 *	We need to finish preparing self for its
		 *	time asleep in rcv_mqueue.
		 */

		ipc_thread_enqueue_macro(&rcv_mqueue->imq_threads, self);
		self->ith_state = MACH_RCV_IN_PROGRESS;
		self->ith_msize = MACH_MSG_SIZE_MAX;
		imq_unlock(rcv_mqueue);

		/*
		 *	Finish extracting receiver from dest_mqueue.
		 */

		ipc_thread_rmqueue_first_macro(
			&dest_mqueue->imq_threads, receiver);
		kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
		imq_unlock(dest_mqueue);

		/*
		 *	We don't have to do any post-dequeue processing of
		 *	the message.  We never incremented ip_msgcount, we
		 *	know it has no msg-accepted request, and blocked
		 *	senders aren't a worry because we found the port
		 *	with a receiver waiting.
		 */
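
		/*
		 *	thread_handoff switched execution onto the receiver
		 *	thread above, so from here on "self" is the receiver;
		 *	pick up the receive parameters it saved before blocking.
		 */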

		self = receiver;
		space = self->task->itk_space;

		msg = self->ith_msg;
		rcv_size = self->ith_rcv_size;
		rcv_object = self->ith_object;

		/* inline ipc_object_release */
		io_lock(rcv_object);
		io_release(rcv_object);
		io_check_unlock(rcv_object);
	    }

	    fast_copyout:
		/*
		 *	Nothing locked and no references held, except
		 *	we have kmsg with msgh_seqno filled in.  Must
		 *	still check against rcv_size and do
		 *	ipc_kmsg_copyout/ipc_kmsg_put.
		 */

		assert((ipc_port_t) kmsg->ikm_header.msgh_remote_port
						== dest_port);

		reply_size = kmsg->ikm_header.msgh_size;
		if (rcv_size < reply_size)
			goto slow_copyout;

		/* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */

		switch (kmsg->ikm_header.msgh_bits) {
		    case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
					MACH_MSG_TYPE_PORT_SEND_ONCE): {
			ipc_port_t reply_port =
				(ipc_port_t) kmsg->ikm_header.msgh_local_port;
			mach_port_t dest_name, reply_name;

			/* receiving a request message */

			if (!IP_VALID(reply_port))
				goto slow_copyout;

			is_write_lock(space);
			assert(space->is_active);

			/*
			 *	To do an atomic copyout, need simultaneous
			 *	locks on both ports and the space.  If
			 *	dest_port == reply_port, and simple locking is
			 *	enabled, then we will abort.  Otherwise it's
			 *	OK to unlock twice.
			 */

			ip_lock(dest_port);
			if (!ip_active(dest_port) ||
			    !ip_lock_try(reply_port))
				goto abort_request_copyout;

			if (!ip_active(reply_port)) {
				ip_unlock(reply_port);
				goto abort_request_copyout;
			}

			assert(reply_port->ip_sorights > 0);
			ip_unlock(reply_port);

		    {
			ipc_entry_t table;
			ipc_entry_t entry;
			mach_port_index_t index;

			/* optimized ipc_entry_get */
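			/*
			 *	The table header's ie_next heads the free-entry
			 *	list (index 0 means the list is empty); take the
			 *	first free entry and unlink it.
			 */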

			table = space->is_table;
			index = table->ie_next;

			if (index == 0)
				goto abort_request_copyout;

			entry = &table[index];
			table->ie_next = entry->ie_next;
			entry->ie_request = 0;

		    {
			mach_port_gen_t gen;
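
			/*
			 *	A free entry carries only its generation bits;
			 *	bump the generation so the new name differs from
			 *	earlier names that used this table slot.
			 */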

			assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
			gen = entry->ie_bits + IE_BITS_GEN_ONE;

			reply_name = MACH_PORT_MAKE(index, gen);

			/* optimized ipc_right_copyout */

			entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
		    }

			assert(MACH_PORT_VALID(reply_name));
			entry->ie_object = (ipc_object_t) reply_port;
			is_write_unlock(space);
		    }

			/* optimized ipc_object_copyout_dest */

			assert(dest_port->ip_srights > 0);
			ip_release(dest_port);

			if (dest_port->ip_receiver == space)
				dest_name = dest_port->ip_receiver_name;
			else
				dest_name = MACH_PORT_NULL;

			if ((--dest_port->ip_srights == 0) &&
			    (dest_port->ip_nsrequest != IP_NULL)) {
				ipc_port_t nsrequest;
				mach_port_mscount_t mscount;

				/* a rather rare case */

				nsrequest = dest_port->ip_nsrequest;
				mscount = dest_port->ip_mscount;
				dest_port->ip_nsrequest = IP_NULL;
				ip_unlock(dest_port);

				ipc_notify_no_senders(nsrequest, mscount);
			} else
				ip_unlock(dest_port);

			if (! ipc_port_flag_protected_payload(dest_port)) {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					MACH_MSG_TYPE_PORT_SEND_ONCE,
					MACH_MSG_TYPE_PORT_SEND);
				kmsg->ikm_header.msgh_local_port = dest_name;
			} else {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					MACH_MSG_TYPE_PORT_SEND_ONCE,
					MACH_MSG_TYPE_PROTECTED_PAYLOAD);
				kmsg->ikm_header.msgh_protected_payload =
					dest_port->ip_protected_payload;
			}
			kmsg->ikm_header.msgh_remote_port = reply_name;
			goto fast_put;

		    abort_request_copyout:
			ip_unlock(dest_port);
			is_write_unlock(space);
			goto slow_copyout;
		    }

		    case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
			mach_port_t dest_name;

			/* receiving a reply message */

			ip_lock(dest_port);
			if (!ip_active(dest_port))
				goto slow_copyout;

			/* optimized ipc_object_copyout_dest */

			assert(dest_port->ip_sorights > 0);

			if (dest_port->ip_receiver == space) {
				ip_release(dest_port);
				dest_port->ip_sorights--;
				dest_name = dest_port->ip_receiver_name;
				ip_unlock(dest_port);
			} else {
				ip_unlock(dest_port);

				ipc_notify_send_once(dest_port);
				dest_name = MACH_PORT_NULL;
			}

			if (! ipc_port_flag_protected_payload(dest_port)) {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					0,
					MACH_MSG_TYPE_PORT_SEND_ONCE);
				kmsg->ikm_header.msgh_local_port = dest_name;
			} else {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					0,
					MACH_MSG_TYPE_PROTECTED_PAYLOAD);
				kmsg->ikm_header.msgh_protected_payload =
					dest_port->ip_protected_payload;
			}
			kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
			goto fast_put;
		    }

		    case MACH_MSGH_BITS_COMPLEX|
			 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
			mach_port_t dest_name;

			/* receiving a complex reply message */

			ip_lock(dest_port);
			if (!ip_active(dest_port))
				goto slow_copyout;

			/* optimized ipc_object_copyout_dest */

			assert(dest_port->ip_sorights > 0);

			if (dest_port->ip_receiver == space) {
				ip_release(dest_port);
				dest_port->ip_sorights--;
				dest_name = dest_port->ip_receiver_name;
				ip_unlock(dest_port);
			} else {
				ip_unlock(dest_port);

				ipc_notify_send_once(dest_port);
				dest_name = MACH_PORT_NULL;
			}

			if (! ipc_port_flag_protected_payload(dest_port)) {
				kmsg->ikm_header.msgh_bits =
					MACH_MSGH_BITS_COMPLEX
					| MACH_MSGH_BITS(
						0,
						MACH_MSG_TYPE_PORT_SEND_ONCE);
				kmsg->ikm_header.msgh_local_port = dest_name;
			} else {
				kmsg->ikm_header.msgh_bits =
					MACH_MSGH_BITS_COMPLEX
					| MACH_MSGH_BITS(
					    0,
					    MACH_MSG_TYPE_PROTECTED_PAYLOAD);
				kmsg->ikm_header.msgh_protected_payload =
					dest_port->ip_protected_payload;
			}
			kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;

			mr = ipc_kmsg_copyout_body(
				(vm_offset_t) (&kmsg->ikm_header + 1),
				(vm_offset_t) &kmsg->ikm_header
					+ kmsg->ikm_header.msgh_size,
				space,
				current_map());

			if (mr != MACH_MSG_SUCCESS) {
				(void) ipc_kmsg_put(msg, kmsg,
					kmsg->ikm_header.msgh_size);
				return mr | MACH_RCV_BODY_ERROR;
			}
			goto fast_put;
		    }

		    default:
			goto slow_copyout;
		}
		/*NOTREACHED*/

	    fast_put:
		/*
		 *	We have the reply message data in kmsg,
		 *	and the reply message size in reply_size.
		 *	Just need to copy it out to the user and free kmsg.
		 *	We must check ikm_cache after copyoutmsg.
		 */

		ikm_check_initialized(kmsg, kmsg->ikm_size);

		if ((kmsg->ikm_size != IKM_SAVED_KMSG_SIZE) ||
		    copyoutmsg(&kmsg->ikm_header, msg,
			       reply_size) ||
		    (ikm_cache() != IKM_NULL))
			goto slow_put;

		ikm_cache() = kmsg;
		thread_syscall_return(MACH_MSG_SUCCESS);
		/*NOTREACHED*/
		return MACH_MSG_SUCCESS; /* help for the compiler */

		/*
		 *	The slow path has a few non-register temporary
		 *	variables used only for call-by-reference.
		 */

	    {
		ipc_kmsg_t temp_kmsg;
		mach_port_seqno_t temp_seqno;
		ipc_object_t temp_rcv_object;
		ipc_mqueue_t temp_rcv_mqueue;

	    slow_get:
		/*
		 *	No locks, references, or messages held.
		 *	Still have to get the request, send it,
		 *	receive reply, etc.
		 */

		mr = ipc_kmsg_get(msg, send_size, &temp_kmsg);
		if (mr != MACH_MSG_SUCCESS) {
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}
		kmsg = temp_kmsg;

		/* try to get back on optimized path */
		goto fast_copyin;

	    slow_copyin:
		/*
		 *	We have the message data in kmsg, but
		 *	we still need to copyin, send it,
		 *	receive a reply, and do copyout.
		 */

		mr = ipc_kmsg_copyin(kmsg, space, current_map(),
				     MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			ikm_free(kmsg);
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		/* try to get back on optimized path */
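
		/*
		 *	Messages flagged during copyin for the port
		 *	circularity check must take the full
		 *	ipc_mqueue_send path.
		 */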

		if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR)
			goto slow_send;

		dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
		assert(IP_VALID(dest_port));

		ip_lock(dest_port);
		if (dest_port->ip_receiver == ipc_space_kernel) {
			assert(ip_active(dest_port));
			ip_unlock(dest_port);
			goto kernel_send;
		}

		if (ip_active(dest_port) &&
		    ((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
		     (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
					MACH_MSG_TYPE_PORT_SEND_ONCE)))
		{
		    /*
		     *	Try an optimized ipc_mqueue_copyin.
		     *	It will work if this is a request message.
		     */

		    ipc_port_t reply_port;

		    reply_port = (ipc_port_t)
					kmsg->ikm_header.msgh_local_port;
		    if (IP_VALID(reply_port)) {
			if (ip_lock_try(reply_port)) {
			    if (ip_active(reply_port) &&
				reply_port->ip_receiver == space &&
				reply_port->ip_receiver_name == rcv_name &&
				reply_port->ip_pset == IPS_NULL)
			    {
				/* Grab a reference to the reply port. */
				rcv_object = (ipc_object_t) reply_port;
				io_reference(rcv_object);
				rcv_mqueue = &reply_port->ip_messages;
				imq_lock(rcv_mqueue);
				io_unlock(rcv_object);
				goto fast_send_receive;
			    }
			    ip_unlock(reply_port);
			}
		    }
		}

		ip_unlock(dest_port);
		goto slow_send;

	    kernel_send:
		/*
		 *	Special case: send message to kernel services.
		 *	The request message has been copied into the
		 *	kmsg.  Nothing is locked.
		 */

	    {
		ipc_port_t	reply_port;

		/*
		 * Perform the kernel function.
		 */

		kmsg = ipc_kobject_server(kmsg);
		if (kmsg == IKM_NULL) {
			/*
			 * No reply.  Take the
			 * slow receive path.
			 */
			goto slow_get_rcv_port;
		}

		/*
		 * Check that:
		 *	the reply port is alive
		 *	we hold the receive right
		 *	the name has not changed.
		 *	the port is not in a set
		 * If any of these are not true,
		 * we cannot directly receive the reply
		 * message.
		 */
		reply_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
		ip_lock(reply_port);

		if ((!ip_active(reply_port)) ||
		    (reply_port->ip_receiver != space) ||
		    (reply_port->ip_receiver_name != rcv_name) ||
		    (reply_port->ip_pset != IPS_NULL))
		{
			ip_unlock(reply_port);
			ipc_mqueue_send_always(kmsg);
			goto slow_get_rcv_port;
		}

		rcv_mqueue = &reply_port->ip_messages;
		imq_lock(rcv_mqueue);
		/* keep port locked, and don't change ref count yet */

		/*
		 * If there are messages on the port
		 * or other threads waiting for a message,
		 * we cannot directly receive the reply.
		 */
		if ((ipc_thread_queue_first(&rcv_mqueue->imq_threads)
			!= ITH_NULL) ||
		    (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
			!= IKM_NULL))
		{
			imq_unlock(rcv_mqueue);
			ip_unlock(reply_port);
			ipc_mqueue_send_always(kmsg);
			goto slow_get_rcv_port;
		}

		/*
		 * We can directly receive this reply.
		 * Since the kernel reply never blocks,
		 * it holds no message_accepted request.
		 * Since there were no messages queued
		 * on the reply port, there should be
		 * no threads blocked waiting to send.
		 */

		assert(kmsg->ikm_marequest == IMAR_NULL);
		assert(ipc_thread_queue_first(&reply_port->ip_blocked)
				== ITH_NULL);

		dest_port = reply_port;
		kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
		imq_unlock(rcv_mqueue);

		/*
		 * inline ipc_object_release.
		 * Port is still locked.
		 * Reference count was not incremented.
		 */
		ip_check_unlock(reply_port);

		/* copy out the kernel reply */
		goto fast_copyout;
	    }

	    slow_send:
		/*
		 *	Nothing is locked.  We have acquired kmsg, but
		 *	we still need to send it and receive a reply.
		 */

		mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
				     MACH_MSG_TIMEOUT_NONE);
		if (mr != MACH_MSG_SUCCESS) {
			mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
						      current_map());

			assert(kmsg->ikm_marequest == IMAR_NULL);
			(void) ipc_kmsg_put(msg, kmsg,
					    kmsg->ikm_header.msgh_size);
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

	    slow_get_rcv_port:
		/*
		 * We have sent the message.  Copy in the receive port.
		 */
		mr = ipc_mqueue_copyin(space, rcv_name,
				       &temp_rcv_mqueue, &temp_rcv_object);
		if (mr != MACH_MSG_SUCCESS) {
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}
		rcv_mqueue = temp_rcv_mqueue;
		rcv_object = temp_rcv_object;
		/* hold ref for rcv_object; rcv_mqueue is locked */

	/*
	    slow_receive:
	*/
		/*
		 *	Now we have sent the request and copied in rcv_name,
		 *	so rcv_mqueue is locked and we hold a ref for rcv_object.
		 *	Just receive a reply and try to get back to fast path.
		 *
		 *	ipc_mqueue_receive may not return, because if we block
		 *	then our kernel stack may be discarded.  So we save
		 *	state here for mach_msg_continue to pick up.
		 */

		self->ith_msg = msg;
		self->ith_rcv_size = rcv_size;
		self->ith_object = rcv_object;
		self->ith_mqueue = rcv_mqueue;

		mr = ipc_mqueue_receive(rcv_mqueue,
					MACH_MSG_OPTION_NONE,
					MACH_MSG_SIZE_MAX,
					MACH_MSG_TIMEOUT_NONE,
					FALSE, mach_msg_continue,
		       			&temp_kmsg, &temp_seqno);
		/* rcv_mqueue is unlocked */
		ipc_object_release(rcv_object);
		if (mr != MACH_MSG_SUCCESS) {
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		(kmsg = temp_kmsg)->ikm_header.msgh_seqno = temp_seqno;
		dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
		goto fast_copyout;

	    slow_copyout:
		/*
		 *	Nothing locked and no references held, except
		 *	we have kmsg with msgh_seqno filled in.  Must
		 *	still check against rcv_size and do
		 *	ipc_kmsg_copyout/ipc_kmsg_put.
		 */

		reply_size = kmsg->ikm_header.msgh_size;
		if (rcv_size < reply_size) {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			thread_syscall_return(MACH_RCV_TOO_LARGE);
			/*NOTREACHED*/
		}

		mr = ipc_kmsg_copyout(kmsg, space, current_map(),
				      MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
				(void) ipc_kmsg_put(msg, kmsg,
						kmsg->ikm_header.msgh_size);
			} else {
				ipc_kmsg_copyout_dest(kmsg, space);
				(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			}

			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		/* try to get back on optimized path */

		goto fast_put;

	    slow_put:
		mr = ipc_kmsg_put(msg, kmsg, reply_size);
		thread_syscall_return(mr);
		/*NOTREACHED*/
	    }
	} else if (option == MACH_SEND_MSG) {
		ipc_space_t space = current_space();
		vm_map_t map = current_map();
		ipc_kmsg_t kmsg;
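
		/*
		 *	Send-only case: get the message from user space,
		 *	copy in its port rights, and queue it.  If the send
		 *	fails, the rights are pseudo-copied-out and the message
		 *	is handed back to the caller along with the error.
		 */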

		mr = ipc_kmsg_get(msg, send_size, &kmsg);
		if (mr != MACH_MSG_SUCCESS)
			return mr;

		mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			ikm_free(kmsg);
			return mr;
		}

		mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
				     MACH_MSG_TIMEOUT_NONE);
		if (mr != MACH_MSG_SUCCESS) {
			mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);

			assert(kmsg->ikm_marequest == IMAR_NULL);
			(void) ipc_kmsg_put(msg, kmsg,
					    kmsg->ikm_header.msgh_size);
		}

		return mr;
	} else if (option == MACH_RCV_MSG) {
		ipc_thread_t self = current_thread();
		ipc_space_t space = current_space();
		vm_map_t map = current_map();
		ipc_object_t object;
		ipc_mqueue_t mqueue;
		ipc_kmsg_t kmsg;
		mach_port_seqno_t seqno;
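
		/*
		 *	Receive-only case: copy in the receive queue named by
		 *	rcv_name, wait for a message, and copy it out, checking
		 *	the caller's buffer size first.
		 */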

		mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
		if (mr != MACH_MSG_SUCCESS)
			return mr;
		/* hold ref for object; mqueue is locked */

		/*
		 *	ipc_mqueue_receive may not return, because if we block
		 *	then our kernel stack may be discarded.  So we save
		 *	state here for mach_msg_continue to pick up.
		 */

		self->ith_msg = msg;
		self->ith_rcv_size = rcv_size;
		self->ith_object = object;
		self->ith_mqueue = mqueue;

		mr = ipc_mqueue_receive(mqueue,
					MACH_MSG_OPTION_NONE,
					MACH_MSG_SIZE_MAX,
					MACH_MSG_TIMEOUT_NONE,
					FALSE, mach_msg_continue,
					&kmsg, &seqno);
		/* mqueue is unlocked */
		ipc_object_release(object);
		if (mr != MACH_MSG_SUCCESS)
			return mr;

		kmsg->ikm_header.msgh_seqno = seqno;
		if (rcv_size < kmsg->ikm_header.msgh_size) {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			return MACH_RCV_TOO_LARGE;
		}

		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
				(void) ipc_kmsg_put(msg, kmsg,
						kmsg->ikm_header.msgh_size);
			} else {
				ipc_kmsg_copyout_dest(kmsg, space);
				(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			}

			return mr;
		}

		return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
	} else if (option == MACH_MSG_OPTION_NONE) {
		/*
		 *	We can measure the "null mach_msg_trap"
		 *	(syscall entry and thread_syscall_return exit)
		 *	with this path.
		 */

		thread_syscall_return(MACH_MSG_SUCCESS);
		/*NOTREACHED*/
	}
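
	/*
	 *	All other option combinations are handled by the general
	 *	mach_msg_send and mach_msg_receive paths below.
	 */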

	if (option & MACH_SEND_MSG) {
		mr = mach_msg_send(msg, option, send_size,
				   time_out, notify);
		if (mr != MACH_MSG_SUCCESS)
			return mr;
	}

	if (option & MACH_RCV_MSG) {
		mr = mach_msg_receive(msg, option, rcv_size, rcv_name,
				      time_out, notify);
		if (mr != MACH_MSG_SUCCESS)
			return mr;
	}

	return MACH_MSG_SUCCESS;
}
예제 #30
0
int
mprotect(__unused proc_t p, struct mprotect_args *uap, __unused register_t *retval)
{
	register vm_prot_t prot;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;
	vm_map_t	user_map;
#if CONFIG_MACF
	int error;
#endif

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value, uap->prot);

	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;
	prot = (vm_prot_t)(uap->prot & VM_PROT_ALL);

	if (user_addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
		
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */
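
	/*
	 * Write or execute access generally cannot be granted by the hardware
	 * without read access, so read is implied here (bug 3936456).
	 */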

#if 3936456
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* 3936456 */

	user_map = current_map();

#if CONFIG_MACF
	/*
	 * The MAC check for mprotect is of limited use for 2 reasons:
	 * Without mmap revocation, the caller could have asked for the max
	 * protections initially instead of a reduced set, so a mprotect
	 * check would offer no new security.
	 * It is not possible to extract the vnode from the pager object(s)
	 * of the target memory range.
	 * However, the MAC check may be used to prevent a process from,
	 * e.g., making the stack executable.
	 */
	error = mac_proc_check_mprotect(p, user_addr,
	    		user_size, prot);
	if (error)
		return (error);
#endif
	result = mach_vm_protect(user_map, user_addr, user_size,
				 FALSE, prot);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_INVALID_ADDRESS:
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return ENOMEM;
	}
	return (EINVAL);
}
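
The mprotect example above ends with the same kern_return_t-to-errno translation used by the other current_map() call sites. A minimal sketch of that translation factored into a helper is shown below; the name mach_to_bsd_errno is hypothetical and does not appear in the XNU sources.

/*
 * Hypothetical helper (illustration only): maps the kern_return_t values
 * handled by the mprotect example onto the errno values it returns.
 */
static int
mach_to_bsd_errno(kern_return_t result)
{
	switch (result) {
	case KERN_SUCCESS:
		return 0;
	case KERN_PROTECTION_FAILURE:
		return EACCES;
	case KERN_INVALID_ADDRESS:
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return ENOMEM;
	default:
		return EINVAL;
	}
}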