Example #1
struct peropen *
netfs_make_peropen (struct node *np, int flags, struct peropen *context)
{
  struct peropen *po = malloc (sizeof (struct peropen));

  if (! po)
    /* Added guard: the original excerpt never checks the allocation.  */
    return NULL;

  po->filepointer = 0;
  po->lock_status = LOCK_UN;
  po->refcnt = 0;
  po->openstat = flags;
  po->np = np;

  if (context)
    {
      po->root_parent = context->root_parent;
      if (po->root_parent != MACH_PORT_NULL)
	mach_port_mod_refs (mach_task_self (), po->root_parent,
			    MACH_PORT_RIGHT_SEND, 1);

      po->shadow_root = context->shadow_root;
      if (po->shadow_root)
	netfs_nref (po->shadow_root);

      po->shadow_root_parent = context->shadow_root_parent;
      if (po->shadow_root_parent != MACH_PORT_NULL)
	mach_port_mod_refs (mach_task_self (), po->shadow_root_parent,
			    MACH_PORT_RIGHT_SEND, 1);
    }
  else
    {
      /* No context to inherit; leave the fields well defined rather
	 than uninitialized.  */
      po->root_parent = MACH_PORT_NULL;
      po->shadow_root = NULL;
      po->shadow_root_parent = MACH_PORT_NULL;
    }

  netfs_nref (np);

  return po;
}
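Every port or node copied out of the context above gains one reference, and the final netfs_nref (np) pins the node itself, so a matching teardown must drop exactly those references. The sketch below only illustrates that pairing; it is not the actual libnetfs netfs_release_peropen.

/* Sketch only: a release path that would balance the references
   taken in netfs_make_peropen above.  */
static void
release_peropen_refs (struct peropen *po)
{
  if (po->root_parent != MACH_PORT_NULL)
    mach_port_deallocate (mach_task_self (), po->root_parent);

  if (po->shadow_root)
    netfs_nrele (po->shadow_root);

  if (po->shadow_root_parent != MACH_PORT_NULL)
    mach_port_deallocate (mach_task_self (), po->shadow_root_parent);

  netfs_nrele (po->np);	/* balances the final netfs_nref */
  free (po);
}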
Example #2
kern_return_t
get_privileged_ports (host_priv_t *host_priv_ptr, device_t *device_master_ptr)
{
  if (! _hurd_host_priv)
    {
      error_t err;

      if (_hurd_ports)
	/* We have gotten some initial ports, so perhaps
	   we have a proc server to talk to.  */
	err = __USEPORT (PROC, __proc_getprivports (port,
						    &_hurd_host_priv,
						    &_hurd_device_master));
      else
	return MACH_SEND_INVALID_DEST;

      if (err)
	return err;
    }

  if (host_priv_ptr)
    {
      mach_port_mod_refs (mach_task_self (),
			  _hurd_host_priv, MACH_PORT_RIGHT_SEND, 1);
      *host_priv_ptr = _hurd_host_priv;
    }
  if (device_master_ptr)
    {
      mach_port_mod_refs (mach_task_self (),
			  _hurd_device_master, MACH_PORT_RIGHT_SEND, 1);
      *device_master_ptr = _hurd_device_master;
    }
  return KERN_SUCCESS;
}
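Each out-parameter above carries its own send-right user reference, so every caller owns the right it receives and must eventually release it. A hypothetical caller, as a sketch:

/* Sketch of a caller; error handling abbreviated.  */
host_priv_t host = MACH_PORT_NULL;
if (get_privileged_ports (&host, NULL) == KERN_SUCCESS)
  {
    /* ... use the host-priv port ... */
    mach_port_deallocate (mach_task_self (), host);	/* drop our reference */
  }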
Example #3
static void __CFMessagePortDeallocate(CFTypeRef cf) {
    CFMessagePortRef ms = (CFMessagePortRef)cf;
    __CFMessagePortSetIsDeallocing(ms);
    CFMessagePortInvalidate(ms);
    // Delay cleanup of _replies until here so that invalidation during
    // SendRequest does not cause _replies to disappear out from under that function.
    if (NULL != ms->_replies) {
	CFRelease(ms->_replies);
    }
    if (NULL != ms->_name) {
	CFRelease(ms->_name);
    }
    if (NULL != ms->_port) {
	if (__CFMessagePortExtraMachRef(ms)) {
	    mach_port_mod_refs(mach_task_self(), CFMachPortGetPort(ms->_port), MACH_PORT_RIGHT_SEND, -1);
	    mach_port_mod_refs(mach_task_self(), CFMachPortGetPort(ms->_port), MACH_PORT_RIGHT_RECEIVE, -1);
	}
	CFMachPortInvalidate(ms->_port);
	CFRelease(ms->_port);
    }

    // A remote message port for a local message port in the same process will get the
    // same mach port, and the remote port will keep the mach port from being torn down,
    // thus keeping the remote port from getting any sort of death notification and
    // auto-invalidating; so we manually implement the 'auto-invalidation' here by
    // tickling each remote port to check its state after any message port is destroyed,
    // but most importantly after local message ports are destroyed.
    __CFLock(&__CFAllMessagePortsLock);
    CFMessagePortRef *remotePorts = NULL;
    CFIndex cnt = 0;
    if (NULL != __CFAllRemoteMessagePorts) {
	cnt = CFDictionaryGetCount(__CFAllRemoteMessagePorts);
	remotePorts = CFAllocatorAllocate(kCFAllocatorSystemDefault, cnt * sizeof(CFMessagePortRef), __kCFAllocatorGCScannedMemory);
	CFDictionaryGetKeysAndValues(__CFAllRemoteMessagePorts, NULL, (const void **)remotePorts);
	for (CFIndex idx = 0; idx < cnt; idx++) {
	    CFRetain(remotePorts[idx]);
	}
    }
    __CFUnlock(&__CFAllMessagePortsLock);
    if (remotePorts) {
	for (CFIndex idx = 0; idx < cnt; idx++) {
	    // as a side-effect, this will auto-invalidate the CFMessagePort if the CFMachPort is invalid
	    CFMessagePortIsValid(remotePorts[idx]);
	    CFRelease(remotePorts[idx]);
	}
	CFAllocatorDeallocate(kCFAllocatorSystemDefault, remotePorts);
    }
}
Example #4
static boolean_t k5_ipc_request_demux (mach_msg_header_t *request,
                                       mach_msg_header_t *reply)
{
    boolean_t handled = 0;

    if (!handled) {
        handled = k5_ipc_request_server (request, reply);
    }

    /* Our session has a send right. If that goes away it's time to quit. */
    if (!handled && (request->msgh_id == MACH_NOTIFY_NO_SENDERS &&
                     request->msgh_local_port == g_notify_port)) {
        g_ready_to_quit = 1;
        handled = 1;
    }

    /* Check for a client death; if so, remove the client. */
    if (!handled && request->msgh_id == MACH_NOTIFY_NO_SENDERS) {
        kern_return_t err = KERN_SUCCESS;

        err = k5_ipc_server_remove_client (request->msgh_local_port);

        if (!err) {
            err = mach_port_mod_refs (mach_task_self (),
                                      request->msgh_local_port,
                                      MACH_PORT_RIGHT_RECEIVE, -1);
        }

        if (!err) {
            handled = 1;  /* was a port we are tracking */
        }
    }

    return handled;
}
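This demux only ever sees MACH_NOTIFY_NO_SENDERS if a no-senders notification was armed on the receive right beforehand. A minimal sketch of arming one (client_port is a hypothetical receive right we own):

/* Sketch: arm a no-senders notification on a receive right.  */
mach_port_t previous = MACH_PORT_NULL;
kern_return_t err = mach_port_request_notification (mach_task_self (), client_port,
                                                    MACH_NOTIFY_NO_SENDERS,
                                                    0, /* fire when senders drop to zero */
                                                    client_port,
                                                    MACH_MSG_TYPE_MAKE_SEND_ONCE,
                                                    &previous);
if (!err && previous != MACH_PORT_NULL)
    mach_port_deallocate (mach_task_self (), previous);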
Example #5
void CEeExecutor::AddExceptionHandler()
{
    assert(g_eeExecutor == nullptr);
    g_eeExecutor = this;

#if defined(_WIN32)
    m_handler = AddVectoredExceptionHandler(TRUE, &CEeExecutor::HandleException);
    assert(m_handler != NULL);
#elif defined(__ANDROID__)
    struct sigaction sigAction;
    sigAction.sa_handler	= nullptr;
    sigAction.sa_sigaction	= &HandleException;
    sigAction.sa_flags		= SA_SIGINFO;
    sigemptyset(&sigAction.sa_mask);
    int result = sigaction(SIGSEGV, &sigAction, nullptr);
    assert(result >= 0);
#elif defined(__APPLE__)
    kern_return_t result = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &m_port);
    assert(result == KERN_SUCCESS);

    m_handlerThread = std::thread([this] () {
        HandlerThreadProc();
    });

    result = mach_port_insert_right(mach_task_self(), m_port, m_port, MACH_MSG_TYPE_MAKE_SEND);
    assert(result == KERN_SUCCESS);

    result = thread_set_exception_ports(mach_thread_self(), EXC_MASK_BAD_ACCESS, m_port, EXCEPTION_STATE | MACH_EXCEPTION_CODES, STATE_FLAVOR);
    assert(result == KERN_SUCCESS);

    result = mach_port_mod_refs(mach_task_self(), m_port, MACH_PORT_RIGHT_SEND, -1);
    assert(result == KERN_SUCCESS);
#endif
}
Example #6
bool SharedMemory::createHandle(Handle& handle, Protection protection)
{
    ASSERT(!handle.m_port);
    ASSERT(!handle.m_size);

    mach_vm_address_t address = toVMAddress(m_data);
    memory_object_size_t size = round_page(m_size);

    mach_port_t port;

    if (protection == ReadWrite && m_port) {
        // Just re-use the port we have.
        port = m_port;
        if (mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1) != KERN_SUCCESS)
            return false;
    } else {
        // Create a mach port that represents the shared memory.
        kern_return_t kr = mach_make_memory_entry_64(mach_task_self(), &size, address, machProtection(protection), &port, MACH_PORT_NULL);
        if (kr != KERN_SUCCESS)
            return false;

        ASSERT(size >= round_page(m_size));
    }

    handle.m_port = port;
    handle.m_size = size;

    return true;
}
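On the receiving side, a memory-entry send right such as handle.m_port can be mapped with mach_vm_map. A rough sketch, not WebKit's actual decode path (entryPort and size stand in for the handle's fields):

// Sketch: map a memory-entry port into the current task.
mach_vm_address_t mappedAddress = 0;
kern_return_t kr = mach_vm_map(mach_task_self(), &mappedAddress, size, 0,
                               VM_FLAGS_ANYWHERE, entryPort, 0, FALSE,
                               VM_PROT_READ | VM_PROT_WRITE,
                               VM_PROT_READ | VM_PROT_WRITE,
                               VM_INHERIT_NONE);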
Example #7
/**
 * Initialize a new memory object reference, mapping @a task_addr from @a task into the current process. The mapping
 * will be copy-on-write, and will be checked to ensure a minimum protection value of VM_PROT_READ.
 *
 * @param mobj Memory object to be initialized.
 * @param task The task from which the memory will be mapped.
 * @param task_addr The task-relative address of the memory to be mapped. This is not required to fall on a page boundary.
 * @param length The total size of the mapping to create.
 * @param require_full If false, short mappings will be permitted in the case where a memory object of the requested length
 * does not exist at the target address. It is the caller's responsibility to validate the resulting length of the
 * mapping, e.g., using plcrash_async_mobject_remap_address() and similar. If true, and the entire requested page range is
 * not valid, the mapping request will fail.
 *
 * @return On success, returns PLCRASH_ESUCCESS. On failure, one of the plcrash_error_t error values will be returned, and no
 * mapping will be performed.
 */
plcrash_error_t plcrash_async_mobject_init (plcrash_async_mobject_t *mobj, mach_port_t task, pl_vm_address_t task_addr, pl_vm_size_t length, bool require_full) {
    plcrash_error_t err;

    /* Perform the page mapping */
    err = plcrash_async_mobject_remap_pages_workaround(task, task_addr, length, require_full, &mobj->vm_address, &mobj->vm_length);
    if (err != PLCRASH_ESUCCESS)
        return err;

    /* Determine the offset and length of the actual data */
    mobj->address = mobj->vm_address + (task_addr - mach_vm_trunc_page(task_addr));
    mobj->length = mobj->vm_length - (mobj->address - mobj->vm_address);

    /* Ensure that the length is capped to the user's requested length, rather than the total length once rounded up
     * to a full page. The length might already be smaller than the requested length if require_full is false. */
    if (mobj->length > length)
        mobj->length = length;

    /* Determine the difference between the target and local mappings. Note that this needs to be computed on either two page
     * aligned addresses, or two non-page aligned addresses. Mixing task_addr and vm_address would return an incorrect offset. */
    mobj->vm_slide = task_addr - mobj->address;
    
    /* Save the task-relative address */
    mobj->task_address = task_addr;
    
    /* Save the task reference */
    mobj->task = task;
    mach_port_mod_refs(mach_task_self(), mobj->task, MACH_PORT_RIGHT_SEND, 1);

    return PLCRASH_ESUCCESS;
}
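The +1 send-right reference taken on mobj->task here is exactly what plcrash_async_mobject_free (Example #19 below) drops again, so the two calls pair up in a caller. A sketch (target_task, addr, and len are hypothetical):

/* Sketch: pairing init with free.  */
plcrash_async_mobject_t mobj;
if (plcrash_async_mobject_init (&mobj, target_task, addr, len, true) == PLCRASH_ESUCCESS) {
    /* ... read through the local mapping starting at mobj.address ... */
    plcrash_async_mobject_free (&mobj);	/* releases the task send-right reference */
}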
Example #8
struct i_mem_object *
inode_pager_check_request(
	memory_object_t		mem_obj,
	memory_object_control_t	mem_obj_control)
{
	struct i_mem_object	*imo;
	kern_return_t		kr;

	imo = MO_TO_IMO(mem_obj);

	ASSERT(imo->imo_mem_obj_control == mem_obj_control);
	ASSERT(imo->imo_urefs > 0);

	if (++imo->imo_urefs > inode_pager_max_urefs) {
		/* deallocate excess user refs to the control port */
		kr = mach_port_mod_refs(mach_task_self(),
					mem_obj_control,
					MACH_PORT_RIGHT_SEND,
					- (imo->imo_urefs - 1));
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(0, kr, ("inode_pager_check_request: mach_port_mod_urefs"));
			panic("inode_pager_check_request: mod_refs");
		}
		imo->imo_urefs = 1;
	}

	return imo;
}
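The bulk decrement works because send-right user references are a per-name counter that mach_port_mod_refs adjusts by an arbitrary delta. The counter can be read back with mach_port_get_refs; a sketch reusing the example's mem_obj_control name:

/* Sketch: inspect the send-right user-reference count for a name.  */
mach_port_urefs_t refs = 0;
if (mach_port_get_refs(mach_task_self(), mem_obj_control,
                       MACH_PORT_RIGHT_SEND, &refs) == KERN_SUCCESS)
	printf("send urefs: %u\n", refs);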
Example #9
static void
si_async_workunit_release(si_async_workunit_t *r)
{
	if (r == NULL) return;

	if (OSAtomicDecrement32Barrier(&(r->refcount)) != 0) return;

#ifdef CALL_TRACE
	fprintf(stderr, "** %s freeing worklist item %p\n", __func__, r);
#endif

	si_async_worklist_remove_unit(r);

	if (r->resitem != NULL) si_item_release(r->resitem);
	if (r->reslist != NULL) si_list_release(r->reslist);

	if (r->str1 != NULL) free(r->str1);
	if (r->str2 != NULL) free(r->str2);
	if (r->str3 != NULL) free(r->str3);

	/* release send-once right if it has not been used */
	if (r->send != MACH_PORT_NULL) mach_port_deallocate(mach_task_self(), r->send);

	/* release receive right */
	mach_port_mod_refs(mach_task_self(), r->port, MACH_PORT_RIGHT_RECEIVE, -1);

	free(r);
}
Example #10
void
delete_server(server_t *serverp)
{
	service_t *servicep;
	service_t *next;

	info("Deleting server %s", serverp->cmd);
	ASSERT(serverp->prev->next == serverp);
	ASSERT(serverp->next->prev == serverp);
	serverp->prev->next = serverp->next;
	serverp->next->prev = serverp->prev;

	for (  servicep = FIRST(services)
	     ; !IS_END(servicep, services)
	     ; servicep = next)
	{
		next = NEXT(servicep);
	  	if (serverp == servicep->server)
			delete_service(servicep);
	}

	deallocate_bootstrap(serverp->bootstrap);

	if (serverp->port)
		mach_port_mod_refs(mach_task_self(), serverp->port,
				   MACH_PORT_RIGHT_RECEIVE, -1);

	free(serverp);
}	
Example #11
/**
 * Free all Mach-O binary image resources.
 *
 * @warning This method is not async safe.
 */
void apigee_plcrash_nasync_macho_free (apigee_plcrash_async_macho_t *image) {
    if (image->name != NULL)
        free(image->name);
    
    apigee_plcrash_async_mobject_free(&image->load_cmds);

    mach_port_mod_refs(mach_task_self(), image->task, MACH_PORT_RIGHT_SEND, -1);
}
Example #12
CF_INLINE void _cfmp_mod_refs(mach_port_t const port, const Boolean doSend, const Boolean doReceive) {
    // NOTE: do receive right first per: https://howto.apple.com/wiki/pages/r853A7H2j/Mach_Ports_and_You.html
    if (doReceive) {
        mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
    }
    if (doSend) {
        mach_port_deallocate(mach_task_self(), port);
    }    
}
Example #13
vproc_t vprocmgr_lookup_vproc(const char *label)
{
	struct vproc_s *vp = NULL;
	
	mach_port_t mp = MACH_PORT_NULL;
	kern_return_t kr = vproc_mig_port_for_label(bootstrap_port, (char *)label, &mp);
	if( kr == BOOTSTRAP_SUCCESS ) {
		vp = (struct vproc_s *)calloc(1, sizeof(struct vproc_s));
		if( vp ) {
			vp->refcount = 1;
			/* Take our own send-right reference before storing the name.  */
			mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_SEND, 1);
			vp->j_port = mp;
		}
		/* Drop the reference returned by the MIG call; if vp was
		   allocated, its +1 above keeps the right alive.  */
		mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_SEND, -1);
	}
	
	return vp;
}
Example #14
//
// Subsidiary process management.
// This does not go through the generic securityd-client setup.
//
void ClientSession::childCheckIn(Port serverPort, Port taskPort)
{
	Port securitydPort = findSecurityd();
	mach_port_t originPort = MACH_PORT_NULL;
	IPCN(ucsp_client_verifyPrivileged2(securitydPort.port(), mig_get_reply_port(), &securitydCreds, &rcode, &originPort));
	if (originPort != securitydPort.port())
		CssmError::throwMe(CSSM_ERRCODE_VERIFICATION_FAILURE);
	mach_port_mod_refs(mach_task_self(), originPort, MACH_PORT_RIGHT_SEND, -1);
	check(ucsp_client_childCheckIn(securitydPort, serverPort, taskPort));
}
Example #15
void print_mach_service(launch_data_t obj, const char *key, void *context)
{
	if (launch_data_get_type(obj) == LAUNCH_DATA_MACHPORT) {
		fprintf(stdout, "%s: %d\n", key, launch_data_get_machport(obj));
		mach_port_deallocate(mach_task_self(), launch_data_get_machport(obj));
		mach_port_mod_refs(mach_task_self(), launch_data_get_machport(obj), MACH_PORT_RIGHT_RECEIVE, -1);
	} else {
		fprintf(stdout, "%s: not a mach port\n", key);
	}
}
Example #16
/*ARGSUSED*/
void
mig_dealloc_reply_port(mach_port_t p)
{
	mach_port_t port;

	port = mig_reply_port;
	mig_reply_port = MACH_PORT_NULL;

	(void) mach_port_mod_refs(mach_task_self(), port,
				  MACH_PORT_RIGHT_RECEIVE, -1);
}
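The counterpart that creates the cached right is mig_get_reply_port. Real implementations are per-thread and vary across systems; a simplified sketch of the idea:

/* Simplified sketch of mig_get_reply_port; real versions are per-thread.  */
mach_port_t
mig_get_reply_port(void)
{
	if (mig_reply_port == MACH_PORT_NULL)
		mig_reply_port = mach_reply_port();	/* fresh receive right */
	return mig_reply_port;
}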
Example #17
/* Allocate a reference for the memory object backing the pager
   USER_PAGER with protection PROT and return it.  */
mach_port_t
user_pager_get_filemap (struct user_pager *user_pager, vm_prot_t prot)
{
  error_t err;

  /* Add a reference for each call, the caller will deallocate it.  */
  err = mach_port_mod_refs (mach_task_self (), user_pager->memobj,
                            MACH_PORT_RIGHT_SEND, +1);
  assert_perror (err);

  return user_pager->memobj;
}
Example #18
/* Return a memory object paging on STORE.  [among other reasons,] this may
   fail because store contains non-contiguous regions on the underlying
   object.  In such a case you can try calling some of the routines below to
   get a pager.  */
error_t
store_map (const struct store *store, vm_prot_t prot,
	   mach_port_t *memobj)
{
  error_t (*map) (const struct store *store, vm_prot_t prot,
		  mach_port_t *memobj) =
    store->class->map;
  error_t err = map ? (*map) (store, prot, memobj) : EOPNOTSUPP;

  if (err == EOPNOTSUPP && store->source != MACH_PORT_NULL)
    /* Can't map the store directly, but we know it represents the file
       STORE->source, so we can try mapping that instead.  */
    {
      mach_port_t rd_memobj, wr_memobj;
      int ro = (store->flags & STORE_HARD_READONLY);

      if ((prot & VM_PROT_WRITE) && ro)
	return EACCES;

      err = io_map (store->port, &rd_memobj, &wr_memobj);
      if (! err)
	{
	  *memobj = rd_memobj;

	  if (!ro || wr_memobj != MACH_PORT_NULL)
	    /* If either we or the server think this object is writable, then
	       the write-memory-object must be the same as the read one (if
	       we only care about reading, then it can be null too).  */
	    {
	      if (rd_memobj == wr_memobj)
		{
		  if (rd_memobj != MACH_PORT_NULL)
		    mach_port_mod_refs (mach_task_self (), rd_memobj,
					MACH_PORT_RIGHT_SEND, -1);
		}
	      else
		{
		  if (rd_memobj != MACH_PORT_NULL)
		    mach_port_deallocate (mach_task_self (), rd_memobj);
		  if (wr_memobj != MACH_PORT_NULL)
		    mach_port_deallocate (mach_task_self (), wr_memobj);
		  err = EOPNOTSUPP;
		}
	    }
	}
    }

  return err;
}
Example #19
/**
 * Free the memory mapping.
 *
 * @note Unlike most free() functions in this API, this function is async-safe.
 */
void plcrash_async_mobject_free (plcrash_async_mobject_t *mobj) {
    kern_return_t kt;
    
#ifdef PL_HAVE_MACH_VM
    kt = mach_vm_deallocate(mach_task_self(), mobj->vm_address, mobj->vm_length);
#else
    kt = vm_deallocate(mach_task_self(), mobj->vm_address, mobj->vm_length);
#endif
    
    if (kt != KERN_SUCCESS)
        PLCF_DEBUG("vm_deallocate() failure: %d", kt);

    /* Decrement our task refcount */
    mach_port_mod_refs(mach_task_self(), mobj->task, MACH_PORT_RIGHT_SEND, -1);
}
Example #20
kern_return_t
bootstrap_unprivileged(mach_port_t bp, mach_port_t *unpriv_port)
{
	kern_return_t kr;

	*unpriv_port = MACH_PORT_NULL;

	kr = mach_port_mod_refs(mach_task_self(), bp, MACH_PORT_RIGHT_SEND, 1);

	if (kr == KERN_SUCCESS) {
		*unpriv_port = bp;
	}

	return kr;
}
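Since the call only adds a user reference to the same name, a successful caller owns one extra send right and must drop it later; as a sketch:

/* Sketch of a caller.  */
mach_port_t unpriv = MACH_PORT_NULL;
if (bootstrap_unprivileged(bootstrap_port, &unpriv) == KERN_SUCCESS) {
	/* ... use unpriv ... */
	mach_port_deallocate(mach_task_self(), unpriv);
}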
Example #21
int
_kernelrpc_mach_port_mod_refs_trap(struct _kernelrpc_mach_port_mod_refs_args *args)
{
	task_t task = port_name_to_task(args->target);
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	rv = mach_port_mod_refs(task->itk_space, args->name, args->right, args->delta);
	
done:
	if (task)
		task_deallocate(task);
	return (rv);
}
Example #22
void InstallExceptionHandler()
{
	mach_port_t port;
	CheckKR("mach_port_allocate", mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port));
	std::thread exc_thread(ExceptionThread, port);
	exc_thread.detach();
	// Obtain a send right for thread_set_exception_ports to copy...
	CheckKR("mach_port_insert_right", mach_port_insert_right(mach_task_self(), port, port, MACH_MSG_TYPE_MAKE_SEND));
	// Mach tries the following exception ports in order: thread, task, host.
	// Debuggers set the task port, so we grab the thread port.
	CheckKR("thread_set_exception_ports", thread_set_exception_ports(mach_thread_self(), EXC_MASK_BAD_ACCESS, port, EXCEPTION_STATE | MACH_EXCEPTION_CODES, x86_THREAD_STATE64));
	// ...and get rid of our copy so that MACH_NOTIFY_NO_SENDERS works.
	CheckKR("mach_port_mod_refs", mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, -1));
	mach_port_t previous;
	CheckKR("mach_port_request_notification", mach_port_request_notification(mach_task_self(), port, MACH_NOTIFY_NO_SENDERS, 0, port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous));
}
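ExceptionThread itself is not shown; with a MIG-generated demux it typically reduces to a mach_msg_server() loop. A hedged sketch (mach_exc_server comes from mach_exc.defs; the buffer size is an assumption):

// Sketch: serve exception messages on the receive right.  mach_exc_server
// is the MIG-generated demux; 2048 is an assumed maximum message size.
static void ExceptionThread(mach_port_t port)
{
	mach_msg_server(mach_exc_server, 2048, port, MACH_MSG_OPTION_NONE);
}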
Example #23
 static void cancelHandler(void* source)
 {
     EventSource* eventSource = static_cast<EventSource*>(source);
     
     mach_port_t machPort = dispatch_source_get_handle(eventSource->m_dispatchSource);
     
     switch (eventSource->m_eventType) {
     case MachPortDataAvailable:
         // Release our receive right.
         mach_port_mod_refs(mach_task_self(), machPort, MACH_PORT_RIGHT_RECEIVE, -1);
         break;
     case MachPortDeadNameNotification:
         // Release our send right.
         mach_port_deallocate(mach_task_self(), machPort);
         break;
     }
 }
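For the MachPortDataAvailable case, the source whose cancellation runs this handler would be created along these lines (a sketch; receiveRight and queue are hypothetical):

// Sketch: a MACH_RECV dispatch source wired to cancelHandler above.
dispatch_source_t source = dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV,
                                                  receiveRight, 0, queue);
dispatch_set_context(source, eventSource);	// handed to cancelHandler
dispatch_source_set_cancel_handler_f(source, cancelHandler);
dispatch_resume(source);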
Example #24
kern_return_t
bootstrap_status(mach_port_t bp, name_t service_name, bootstrap_status_t *service_active)
{
	mach_port_t p;

	if (bootstrap_check_in(bp, service_name, &p) == BOOTSTRAP_SUCCESS) {
		mach_port_mod_refs(mach_task_self(), p, MACH_PORT_RIGHT_RECEIVE, -1);
		*service_active = BOOTSTRAP_STATUS_ON_DEMAND;
		return BOOTSTRAP_SUCCESS;
	} else if (bootstrap_look_up(bp, service_name, &p) == BOOTSTRAP_SUCCESS) {
		mach_port_deallocate(mach_task_self(), p);
		*service_active = BOOTSTRAP_STATUS_ACTIVE;
		return BOOTSTRAP_SUCCESS;
	}

	return BOOTSTRAP_UNKNOWN_SERVICE;
}
Example #25
/* Implement proc_getexecdata as described in <hurd/process.defs>. */
kern_return_t
S_proc_getexecdata (struct proc *p,
		    mach_port_t **ports,
		    mach_msg_type_name_t *portspoly,
		    size_t *nports,
		    int **ints,
		    size_t *nints)
{
  int i;
  int ports_allocated = 0;
  /* No need to check P here; we don't use it. */

  if (!std_port_array)
    return ENOENT;

  if (*nports < n_std_ports)
    {
      *ports = mmap (0, round_page (n_std_ports * sizeof (mach_port_t)),
		     PROT_READ|PROT_WRITE, MAP_ANON, 0, 0);
      if (*ports == MAP_FAILED)
        return ENOMEM;
      ports_allocated = 1;
    }
  memcpy (*ports, std_port_array, n_std_ports * sizeof (mach_port_t));
  *nports = n_std_ports;

  if (*nints < n_std_ints)
    {
      *ints = mmap (0, round_page (n_std_ints * sizeof (int)),
		    PROT_READ|PROT_WRITE, MAP_ANON, 0, 0);
      if (*ints == MAP_FAILED)
	{
	  if (ports_allocated)
	    munmap (*ports, round_page (n_std_ports * sizeof (mach_port_t)));
	  return ENOMEM;
	}
    }
  memcpy (*ints, std_int_array, n_std_ints * sizeof (int));
  *nints = n_std_ints;

  for (i = 0; i < n_std_ports; i++)
    mach_port_mod_refs (mach_task_self (), std_port_array[i], MACH_PORT_RIGHT_SEND, 1);
  *portspoly = MACH_MSG_TYPE_MOVE_SEND;

  return 0;
}
Example #26
static void k5_ipc_client_cinfo_free (void *io_cinfo)
{
    if (io_cinfo) {
        k5_ipc_connection_info cinfo = io_cinfo;
        int i;
        
        for (i = 0; i < KIPC_SERVICE_COUNT; i++) {
            if (MACH_PORT_VALID (cinfo->connections[i].port)) {
                mach_port_mod_refs (mach_task_self(), 
                                    cinfo->connections[i].port, 
                                    MACH_PORT_RIGHT_SEND, -1 );
                cinfo->connections[i].port = MACH_PORT_NULL;
            }
        }
        /* reply_stream will always be freed by k5_ipc_send_request() */
        free (cinfo);
    }
}
Example #27
/* Return objects mapping the data underlying this memory object.  If
   the object can be read then memobjrd will be provided; if the
   object can be written then memobjwr will be provided.  For objects
   where read data and write data are the same, these objects will be
   equal, otherwise they will be disjoint.  Servers are permitted to
   implement io_map but not io_map_cntl.  Some objects do not provide
   mapping; they will set none of the ports and return an error.  Such
   objects can still be accessed by io_read and io_write.  */
error_t
trivfs_S_io_map (struct trivfs_protid *cred,
                 mach_port_t reply, mach_msg_type_name_t reply_type,
                 memory_object_t *rd_obj, mach_msg_type_name_t *rd_type,
                 memory_object_t *wr_obj, mach_msg_type_name_t *wr_type)
{
    if (! cred)
        return EOPNOTSUPP;
    else if (! (cred->po->openmodes & (O_READ|O_WRITE)))
        return EBADF;
    else
    {
        mach_port_t memobj;
        int flags = cred->po->openmodes;
        vm_prot_t prot =
            ((flags & O_READ) ? VM_PROT_READ : 0)
            | ((flags & O_WRITE) ? VM_PROT_WRITE : 0);
        struct open *open = (struct open *)cred->po->hook;
        error_t err = dev_get_memory_object (open->dev, prot, &memobj);

        if (!err)
        {
            if (flags & O_READ)
                *rd_obj = memobj;
            else
                *rd_obj = MACH_PORT_NULL;
            if (flags & O_WRITE)
                *wr_obj = memobj;
            else
                *wr_obj = MACH_PORT_NULL;

            if ((flags & (O_READ|O_WRITE)) == (O_READ|O_WRITE)
                    && memobj != MACH_PORT_NULL)
                mach_port_mod_refs (mach_task_self (), memobj,
                                    MACH_PORT_RIGHT_SEND, 1);
        }

        *rd_type = *wr_type = MACH_MSG_TYPE_MOVE_SEND;

        return err;
    }
}
Example #28
__private_extern__
int
server_shutdown()
{
	if (configd_port != NULL) {
		mach_port_t	service_port	= CFMachPortGetPort(configd_port);

		CFMachPortInvalidate(configd_port);
		CFRelease(configd_port);
		configd_port = NULL;

		if (service_port != MACH_PORT_NULL) {
			(void) mach_port_mod_refs(mach_task_self(),
						  service_port,
						  MACH_PORT_RIGHT_RECEIVE,
						  -1);
		}
	}

	return EX_OK;
}
Example #29
void
delete_service(service_t *servicep)
{
	unlink_service(servicep);
	switch (servicep->servicetype) {
	case REGISTERED:
		info("Registered service %s deleted", servicep->name);
		mach_port_deallocate(mach_task_self(), servicep->port);
		break;
	case DECLARED:
		info("Declared service %s now unavailable", servicep->name);
		mach_port_deallocate(mach_task_self(), servicep->port);
		mach_port_mod_refs(mach_task_self(), servicep->port,
				   MACH_PORT_RIGHT_RECEIVE, -1);
		break;
	default:
		error("unknown service type %d\n", servicep->servicetype);
		break;
	}
	free(servicep);
	nservices -= 1;
}
Example #30
void mod_ref_guarded_mach_port()
{
	mach_port_t port;
	mach_port_options_t options = { 0 };	/* zero-init; only flags is set below */
	mach_port_context_t gval = CONTEXT_VALUE1;
	int kret;
	
	printf("mach_port_mod_refs() guarded mach port (Expecting exception)...\n");
	options.flags = (MPO_CONTEXT_AS_GUARD);

	kret = mach_port_construct(mach_task_self(), &options, gval, &port);
	if (kret != KERN_SUCCESS)
		exit(1);
	
	kret = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
	if (kret == KERN_SUCCESS) {
		printf("[FAILED]\n");
		exit(1);
	}

	return;
}
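A guarded right rejects a plain mach_port_mod_refs on its receive right, which is exactly what the test expects; the sanctioned teardown presents the guard via mach_port_destruct. A sketch of the clean-up path for the port above:

/* Sketch: destroying a guarded receive right requires the guard value.  */
kret = mach_port_destruct(mach_task_self(), port, 0 /* srdelta */, gval);
if (kret != KERN_SUCCESS)
	exit(1);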