Example No. 1
int
main (int argc, char **argv)
{
  const task_t my_task = mach_task_self();
  error_t err;
  memory_object_t defpager;

  err = get_privileged_ports (&bootstrap_master_host_port,
			      &bootstrap_master_device_port);
  if (err)
    error (1, err, "cannot get privileged ports");

  defpager = MACH_PORT_NULL;
  err = vm_set_default_memory_manager (bootstrap_master_host_port, &defpager);
  if (err)
    error (1, err, "cannot check current default memory manager");
  if (MACH_PORT_VALID (defpager))
    error (2, 0, "Another default memory manager is already running");

  if (!(argc == 2 && !strcmp (argv[1], "-d")))
    {
      /* We don't use the `daemon' function because we might exit back to the
	 parent before the daemon has completed vm_set_default_memory_manager.
	 Instead, the parent waits for a SIGUSR1 from the child before
	 exiting, and the child sends that signal after it is set up.  */
      sigset_t set;
      signal (SIGUSR1, nohandler);
      sigemptyset (&set);
      sigaddset (&set, SIGUSR1);
      sigprocmask (SIG_BLOCK, &set, 0);
      switch (fork ())
	{
	case -1:
	  error (1, errno, "cannot become daemon");
	case 0:
	  setsid ();
	  chdir ("/");
	  close (0);
	  close (1);
	  close (2);
	  break;
	default:
	  sigemptyset (&set);
	  sigsuspend (&set);
	  _exit (0);
	}
    }

  /* Mark us as important.  */
  mach_port_t proc = getproc ();
  if (proc == MACH_PORT_NULL)
    error (3, err, "cannot get a handle to our process");

  err = proc_mark_important (proc);
  /* This might fail due to permissions or because the old proc server
     is still running, ignore any such errors.  */
  if (err && err != EPERM && err != EMIG_BAD_ID)
    error (3, err, "cannot mark us as important");

  mach_port_deallocate (mach_task_self (), proc);

  printf_init(bootstrap_master_device_port);

  /*
   * Set up the default pager.
   */
  partition_init();

  /*
   * task_set_exception_port and task_set_bootstrap_port
   * both require a send right.
   */
  (void) mach_port_insert_right(my_task, default_pager_exception_port,
				default_pager_exception_port,
				MACH_MSG_TYPE_MAKE_SEND);

  /*
   * Change our exception port.
   */
  if (!debug)
    (void) task_set_exception_port(my_task, default_pager_exception_port);

  default_pager_initialize (bootstrap_master_host_port);

  if (!(argc == 2 && !strcmp (argv[1], "-d")))
    kill (getppid (), SIGUSR1);

  /*
   * Become the default pager
   */
  default_pager();
  /*NOTREACHED*/
  return -1;
}
Example No. 2
/* Arrange for hurd_cancel to be called on RPC's thread if OBJECT gets notified
   that any of the things in COND have happened to PORT.  RPC should be an
   rpc on OBJECT.  */
error_t
ports_interrupt_rpc_on_notification (void *object,
				     struct rpc_info *rpc,
				     mach_port_t port, mach_msg_id_t what)
{
  int req_notify;
  struct ports_notify *pn;
  struct rpc_notify *new_req, *req;
  struct port_info *pi = object;

  pthread_mutex_lock (&_ports_lock);

  if (! MACH_PORT_VALID (port))
    /* PORT is already dead or bogus, so interrupt the rpc immediately.  */
    {
      hurd_thread_cancel (rpc->thread);
      pthread_mutex_unlock (&_ports_lock);
      return 0;
    }

  new_req = _ports_free_rpc_notifies;
  if (new_req)
    /* We got a req off the free list.  */
    _ports_free_rpc_notifies = new_req->next;
  else
    /* No free notify structs, allocate one; it's expected that 99% of the
       time we'll add a new structure, so we malloc while we don't have the
       lock, and free it if we're wrong.  */
    {
      pthread_mutex_unlock (&_ports_lock); /* Don't hold the lock during malloc. */
      new_req = malloc (sizeof (struct rpc_notify));
      if (! new_req)
	return ENOMEM;
      pthread_mutex_lock (&_ports_lock);
    }

  /* Find any existing entry for PORT/WHAT.  */
  for (pn = _ports_notifications; pn; pn = pn->next)
    if (pn->port == port && pn->what == what)
      break;

  if (! pn)
    /* A notification on a new port.  */
    {
      pn = _ports_free_ports_notifies;

      if (pn)
	_ports_free_ports_notifies = pn->next;
      else
	{
	  pn = malloc (sizeof (struct ports_notify));
	  if (! pn)
	    /* sigh.  Free what we've alloced and return.  */
	    {
	      new_req->next = _ports_free_rpc_notifies;
	      _ports_free_rpc_notifies = new_req;
	      pthread_mutex_unlock (&_ports_lock);
	      return ENOMEM;
	    }
	}

      pn->reqs = 0;
      pn->port = port;
      pn->what = what;
      pn->pending = 0;
      pthread_mutex_init (&pn->lock, NULL);

      pn->next = _ports_notifications;
      pn->prevp = &_ports_notifications;
      if (_ports_notifications)
	_ports_notifications->prevp = &pn->next;
      _ports_notifications = pn;
    }

  for (req = rpc->notifies; req; req = req->next)
    if (req->notify == pn)
      break;

  if (req)
    /* REQ is already pending for PORT/WHAT on RPC, so free NEW_REQ.  */
    {
      new_req->next = _ports_free_rpc_notifies;
      _ports_free_rpc_notifies = new_req;
    }
  else
    /* Add a new request for PORT/WHAT on RPC.  */
    {
      req = new_req;

      req->rpc = rpc;
      req->notify = pn;
      req->pending = 0;

      req->next_req = pn->reqs;
      req->prev_req_p = &pn->reqs;
      if (pn->reqs)
	pn->reqs->prev_req_p = &req->next_req;
      pn->reqs = req;

      req->next = rpc->notifies;
      rpc->notifies = req;
    }

  /* Make sure that this request results in an interrupt.  */
  req->pending++;

  /* Find out whether we should request a new notification (after we release
     _PORTS_LOCK) -- PN may be new, or left over after a previous
     notification (in which case our new request is likely to trigger an
     immediate notification).  */
  req_notify = !pn->pending;
  if (req_notify)
    pthread_mutex_lock (&pn->lock);

  pthread_mutex_unlock (&_ports_lock);

  if (req_notify)
    {
      mach_port_t old;
      error_t err =
	mach_port_request_notification (mach_task_self (), port,
					what, 1, pi->port_right,
					MACH_MSG_TYPE_MAKE_SEND_ONCE, &old);

      if (! err && old != MACH_PORT_NULL)
	mach_port_deallocate (mach_task_self (), old);

      pn->pending = 1;
      pthread_mutex_unlock (&pn->lock);

      return err;
    }
  else
    return 0;
}
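
For context, here is a minimal caller-side sketch of how a server's RPC implementation might use ports_interrupt_rpc_on_notification so that a blocking RPC gets cancelled when the caller's reply port dies. The names obj, rpc_info, and reply are illustrative placeholders introduced here, not part of the libports API shown above.

error_t
example_blocking_rpc (struct port_info *obj, struct rpc_info *rpc_info,
                      mach_port_t reply)
{
  /* Cancel this RPC if REPLY becomes a dead name while we block.  */
  error_t err = ports_interrupt_rpc_on_notification (obj, rpc_info, reply,
                                                     MACH_NOTIFY_DEAD_NAME);
  if (err)
    return err;
  /* ... block here; hurd_cancel on this thread unblocks us ...  */
  return 0;
}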
Example No. 3
void
mono_threads_platform_free (MonoThreadInfo *info)
{
	mach_port_deallocate (current_task (), info->native_handle);
}
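
For completeness, a hedged sketch of the registration-time counterpart implied by the free routine above: the native_handle being deallocated is typically the send right returned by mach_thread_self(). The function name below is an assumption for illustration, not necessarily Mono's exact registration code.

void
example_threads_platform_register (MonoThreadInfo *info)
{
	/* mach_thread_self() returns a fresh send right; store it so the
	   matching mach_port_deallocate() in the free path releases it.  */
	info->native_handle = mach_thread_self ();
}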
Example No. 4
int
ethernet_demuxer (mach_msg_header_t *inp,
		  mach_msg_header_t *outp)
{
  struct net_rcv_msg *msg = (struct net_rcv_msg *) inp;
  struct sk_buff *skb;
  int datalen;
  struct ether_device *edev;
  struct device *dev = 0;
  mach_port_t local_port;

  if (inp->msgh_id != NET_RCV_MSG_ID)
    return 0;

  if (MACH_MSGH_BITS_LOCAL (inp->msgh_bits) ==
      MACH_MSG_TYPE_PROTECTED_PAYLOAD)
    {
      struct port_info *pi = ports_lookup_payload (NULL,
						   inp->msgh_protected_payload,
						   NULL);
      if (pi)
	{
	  local_port = pi->port_right;
	  ports_port_deref (pi);
	}
      else
	local_port = MACH_PORT_NULL;
    }
  else
    local_port = inp->msgh_local_port;

  for (edev = ether_dev; edev; edev = edev->next)
    if (local_port == edev->readptname)
      dev = &edev->dev;

  if (! dev)
    {
      if (inp->msgh_remote_port != MACH_PORT_NULL)
	mach_port_deallocate (mach_task_self (), inp->msgh_remote_port);
      return 1;
    }

  datalen = ETH_HLEN
    + msg->packet_type.msgt_number - sizeof (struct packet_header);

  pthread_mutex_lock (&net_bh_lock);
  skb = alloc_skb (datalen, GFP_ATOMIC);
  if (!skb)
    {
      /* Allocation failure: drop the frame instead of crashing in skb_put.  */
      pthread_mutex_unlock (&net_bh_lock);
      return 1;
    }
  skb_put (skb, datalen);
  skb->dev = dev;

  /* Copy the two parts of the frame into the buffer. */
  memcpy (skb->data, msg->header, ETH_HLEN);
  memcpy (skb->data + ETH_HLEN,
	  msg->packet + sizeof (struct packet_header),
	  datalen - ETH_HLEN);

  /* Drop it on the queue. */
  skb->protocol = eth_type_trans (skb, dev);
  netif_rx (skb);
  pthread_mutex_unlock (&net_bh_lock);

  return 1;
}
Example No. 5
File: dir.c Project: lenconda/hurd
/* Implement the diskfs_lookup callback from the diskfs library.  See
   <hurd/diskfs.h> for the interface specification.  */
error_t
diskfs_lookup_hard (struct node *dp, const char *name, enum lookup_type type,
		    struct node **npp, struct dirstat *ds, struct protid *cred)
{
  error_t err;
  ino_t inum;
  int namelen;
  int spec_dotdot;
  struct node *np = 0;
  int retry_dotdot = 0;
  vm_prot_t prot =
    (type == LOOKUP) ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
  memory_object_t memobj;
  vm_address_t buf = 0;
  vm_size_t buflen = 0;
  int blockaddr;
  int idx, lastidx;
  int looped;

  if ((type == REMOVE) || (type == RENAME))
    assert (npp);

  if (npp)
    *npp = 0;

  spec_dotdot = type & SPEC_DOTDOT;
  type &= ~SPEC_DOTDOT;

  namelen = strlen (name);

  if (namelen > FAT_NAME_MAX)
    return ENAMETOOLONG;
  
 try_again:
  if (ds)
    {
      ds->type = LOOKUP;
      ds->mapbuf = 0;
      ds->mapextent = 0;
    }
  if (buf)
    {
      munmap ((caddr_t) buf, buflen);
      buf = 0;
    }
  if (ds && (type == CREATE || type == RENAME))
    ds->stat = LOOKING;

  /* Map in the directory contents. */
  memobj = diskfs_get_filemap (dp, prot);

  if (memobj == MACH_PORT_NULL)
    return errno;

  buf = 0;
  /* We allow extra space in case we have to do an EXTEND.  */
  buflen = round_page (dp->dn_stat.st_size + DIRBLKSIZ);
  err = vm_map (mach_task_self (),
                &buf, buflen, 0, 1, memobj, 0, 0, prot, prot, 0);
  mach_port_deallocate (mach_task_self (), memobj);
  if (err)
    return err;

  inum = 0;

  diskfs_set_node_atime (dp);

  /* Start the lookup at DP->dn->dir_idx.  */
  idx = dp->dn->dir_idx;
  if (idx << LOG2_DIRBLKSIZ > dp->dn_stat.st_size)
    idx = 0;                    /* just in case */
  blockaddr = buf + (idx << LOG2_DIRBLKSIZ);
  looped = (idx == 0);
  lastidx = idx;
  if (lastidx == 0)
    lastidx = dp->dn_stat.st_size >> LOG2_DIRBLKSIZ;

  while (!looped || idx < lastidx)
    {
      err = dirscanblock (blockaddr, dp, idx, name, namelen, type, ds, &inum);
      if (!err)
        {
          dp->dn->dir_idx = idx;
          break;
        }
      if (err != ENOENT)
        {
          munmap ((caddr_t) buf, buflen);
          return err;
        }

      blockaddr += DIRBLKSIZ;
      idx++;
      if (blockaddr - buf >= dp->dn_stat.st_size && !looped)
        {
          /* We've gotten to the end; start back at the beginning.  */
          looped = 1;
          blockaddr = buf;
          idx = 0;
        }
    }

  diskfs_set_node_atime (dp);
  if (diskfs_synchronous)
    diskfs_node_update (dp, 1);

  /* If err is set here, it's ENOENT, and we don't want to
     think about that as an error yet.  */
  err = 0;

  if (inum && npp)
    {
      if (namelen != 2 || name[0] != '.' || name[1] != '.')
        {
          if (inum == dp->cache_id)
            {
              np = dp;
              diskfs_nref (np);
            }
          else
            {
              err = diskfs_cached_lookup_in_dirbuf (inum, &np, buf);
              if (err)
                goto out;
            }
        }

      /* We are looking up "..".  */
      /* Check to see if this is the root of the filesystem.  */
      else if (dp == diskfs_root_node)
        {
          err = EAGAIN;
          goto out;
        }

      /* We can't just do diskfs_cached_lookup, because we would then
         deadlock.  So we do this.  Ick.  */
      else if (retry_dotdot)
        {
          /* Check to see that we got the same answer as last time.  */
          if (inum != retry_dotdot)
            {
              /* Drop what we *thought* was .. (but isn't any more) and
                 try *again*.  */
              diskfs_nput (np);
              pthread_mutex_unlock (&dp->lock);
              err = diskfs_cached_lookup_in_dirbuf (inum, &np, buf);
              pthread_mutex_lock (&dp->lock);
              if (err)
                goto out;
              retry_dotdot = inum;
              goto try_again;
            }
          /* Otherwise, we got it fine and np is already set properly.  */
        }
      else if (!spec_dotdot)
        {
          /* Lock them in the proper order, and then
             repeat the directory scan to see if this is still
             right.  */
          pthread_mutex_unlock (&dp->lock);
          err = diskfs_cached_lookup_in_dirbuf (inum, &np, buf);
          pthread_mutex_lock (&dp->lock);
          if (err)
            goto out;
          retry_dotdot = inum;
          goto try_again;
        }

      /* Here below are the spec dotdot cases.  */
      else if (type == RENAME || type == REMOVE)
        np = diskfs_cached_ifind (inum);

      else if (type == LOOKUP)
        {
          diskfs_nput (dp);
          err = diskfs_cached_lookup_in_dirbuf (inum, &np, buf);
          if (err)
            goto out;
        }
      else
        assert (0);
    }

  if ((type == CREATE || type == RENAME) && !inum && ds && ds->stat == LOOKING)
    {
      /* We didn't find any room, so mark ds to extend the dir.  */
      ds->type = CREATE;
      ds->stat = EXTEND;
      ds->idx = dp->dn_stat.st_size >> LOG2_DIRBLKSIZ;
    }
Example No. 6
void HPMPrivate::run()
{
    mach_port_t masterPort = 0;
    IONotificationPortRef notifyPort = 0;
    io_iterator_t rawAddedIter = 0;
    io_iterator_t rawRemovedIter = 0;

    // Create an IOMasterPort for accessing IOKit
    kern_return_t kr = IOMasterPort(MACH_PORT_NULL, &masterPort);
    if (kr || !masterPort)
    {
        qWarning() << Q_FUNC_INFO << "Unable to create a master I/O Kit port" << (void*) kr;
        return;
    }

    // Create a new dictionary for matching device classes
    CFMutableDictionaryRef matchingDict = IOServiceMatching(kIOUSBDeviceClassName);
    if (!matchingDict)
    {
        qWarning() << Q_FUNC_INFO << "Unable to create a USB matching dictionary";
        mach_port_deallocate(mach_task_self(), masterPort);
        return;
    }

    // Take an extra reference because IOServiceAddMatchingNotification consumes one
    matchingDict = (CFMutableDictionaryRef) CFRetain(matchingDict);

    // Store the thread's run loop context
    loop = CFRunLoopGetCurrent();
    // New notification port
    notifyPort = IONotificationPortCreate(masterPort);

    CFRunLoopSourceRef runLoopSource = IONotificationPortGetRunLoopSource(notifyPort);
    CFRunLoopAddSource(loop, runLoopSource, kCFRunLoopDefaultMode);

    // Listen to device add notifications
    kr = IOServiceAddMatchingNotification(notifyPort,
                                          kIOFirstMatchNotification,
                                          matchingDict,
                                          onHPMPrivateRawDeviceAdded,
                                          (void*) this,
                                          &rawAddedIter);
    if (kr != kIOReturnSuccess)
        qFatal("Unable to add notification for device additions");

    // Iterate over set of matching devices to access already-present devices
    // and to arm the notification.
    onHPMPrivateRawDeviceAdded(this, rawAddedIter);

    // Listen to device removal notifications
    kr = IOServiceAddMatchingNotification(notifyPort,
                                          kIOTerminatedNotification,
                                          matchingDict,
                                          onHPMPrivateRawDeviceRemoved,
                                          (void*) this,
                                          &rawRemovedIter);
    if (kr != kIOReturnSuccess)
        qFatal("Unable to add notification for device termination");

    // Iterate over set of matching devices to release each one and to
    // arm the notification.
    onHPMPrivateRawDeviceRemoved(this, rawRemovedIter);

    // No longer needed
    mach_port_deallocate(mach_task_self(), masterPort);
    masterPort = 0;

    // Start the run loop inside this thread. The thread "stops" here.
    CFRunLoopRun();

    // Destroy the notification port when the thread exits
    IONotificationPortDestroy(notifyPort);
    notifyPort = 0;
}
Example No. 7
int
Ndb_GetRUsage(ndb_rusage* dst)
{
  int res = -1;
#ifdef _WIN32
  FILETIME create_time;
  FILETIME exit_time;
  FILETIME kernel_time;
  FILETIME user_time;

  dst->ru_minflt = 0;
  dst->ru_majflt = 0;
  dst->ru_nvcsw = 0;
  dst->ru_nivcsw = 0;

  /**
   * GetThreadTimes times are updated once per timer interval, so can't
   * be used for microsecond measurements, but it is good enough for
   * keeping track of CPU usage on a second basis.
   */
  bool ret = GetThreadTimes( GetCurrentThread(),
                             &create_time,
                             &exit_time,
                             &kernel_time,
                             &user_time);
  if (ret)
  {
    /* Successful return */
    res = 0;

    Uint64 tmp = user_time.dwHighDateTime;
    tmp <<= 32;
    tmp += user_time.dwLowDateTime;
    /** 
     * Time is reported in microseconds; Windows reports it in
     * 100 ns intervals, so we divide the Windows counter by 10.
     */
    dst->ru_utime = tmp / 10;

    tmp = kernel_time.dwHighDateTime;
    tmp <<= 32;
    tmp += kernel_time.dwLowDateTime;
    dst->ru_stime = tmp / 10;
  }
  else
  {
    res = -1;
  }
#elif defined(HAVE_MAC_OS_X_THREAD_INFO)
  mach_port_t thread_port;
  kern_return_t ret_code;
  mach_msg_type_number_t basic_info_count;
  thread_basic_info_data_t basic_info;

  /**
   * mach_thread_self allocates memory so it needs to be
   * released immediately since we don't want to burden
   * the code with keeping track of this value.
   */
  thread_port = mach_thread_self();
  if (thread_port != MACH_PORT_NULL)
  {
    /* thread_info expects the maximum count on input.  */
    basic_info_count = THREAD_BASIC_INFO_COUNT;
    ret_code = thread_info(thread_port,
                           THREAD_BASIC_INFO,
                           (thread_info_t) &basic_info,
                           &basic_info_count);
  
    mach_port_deallocate(our_mach_task, thread_port);

    if (ret_code == KERN_SUCCESS)
    {
      dst->ru_minflt = 0;
      dst->ru_majflt = 0;
      dst->ru_nvcsw = 0;
      dst->ru_nivcsw = 0;

      Uint64 tmp;
      tmp = basic_info.user_time.seconds * 1000000;
      tmp += basic_info.user_time.microseconds;
      dst->ru_utime = tmp;

      tmp = basic_info.system_time.seconds * 1000000;
      tmp += basic_info.system_time.microseconds;
      dst->ru_stime = tmp;

      res = 0;
    }
    else
    {
      res = -1;
    }
  }
  else
  {
    res = -2; /* Report -2 to distinguish error cases for debugging. */
  }
#else
#ifdef HAVE_GETRUSAGE
  struct rusage tmp;
#ifdef RUSAGE_THREAD
  res = getrusage(RUSAGE_THREAD, &tmp);
#elif defined RUSAGE_LWP
  res = getrusage(RUSAGE_LWP, &tmp);
#endif

  if (res == 0)
  {
    dst->ru_utime = micros(tmp.ru_utime);
    dst->ru_stime = micros(tmp.ru_stime);
    dst->ru_minflt = tmp.ru_minflt;
    dst->ru_majflt = tmp.ru_majflt;
    dst->ru_nvcsw = tmp.ru_nvcsw;
    dst->ru_nivcsw = tmp.ru_nivcsw;
  }
#endif
#endif

  if (res != 0)
  {
    bzero(dst, sizeof(* dst));
  }
  return res;
}
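
The mach_thread_self() note above is worth isolating: unlike mach_task_self(), it returns a freshly allocated send right on every call, so each call must be paired with a deallocation. A minimal sketch of that pattern:

  mach_port_t thread = mach_thread_self ();       /* new send right */
  if (thread != MACH_PORT_NULL)
    {
      /* ... query the thread, e.g. with thread_info() ...  */
      mach_port_deallocate (mach_task_self (), thread);  /* release it */
    }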
Example No. 8
/*
 * kern_return_t
 * bootstrap_subset(mach_port_t bootstrap_port,
 *		    mach_port_t requestor_port,
 *		    mach_port_t *subset_port);
 *
 * Returns a new port to use as a bootstrap port.  This port behaves
 * exactly like the previous bootstrap_port, except that ports dynamically
 * registered via bootstrap_register() are available only to users of this
 * specific subset_port.  Lookups on the subset_port will return ports
 * registered with this port specifically, and ports registered with
 * ancestors of this subset_port.  Duplicates of services already
 * registered with an ancestor port may be registered with the subset
 * port.  Services already advertised may then be effectively removed
 * by registering MACH_PORT_NULL for the service.
 * When the requestor_port is detected to have been destroyed, the subset
 * port and all services advertised through it are destroyed as well.
 *
 * Errors:	Returns appropriate kernel errors on rpc failure.
 */
kern_return_t
x_bootstrap_subset(
	mach_port_t	bootstrap_port,
	mach_port_t	requestor_port,
	mach_port_t	*subset_port)
{
	kern_return_t result;
	bootstrap_info_t *bootstrap;
	bootstrap_info_t *subset;
	mach_port_t new_bootstrap_port;
	mach_port_t previous;

	debug("Subset create attempt: bootstrap %x, requestor: %x",
	      bootstrap_port, requestor_port);

	bootstrap = lookup_bootstrap_by_port(bootstrap_port);
	if (!bootstrap || !active_bootstrap(bootstrap))
		return BOOTSTRAP_NOT_PRIVILEGED;

	result = mach_port_allocate(
				mach_task_self(), 
				MACH_PORT_RIGHT_RECEIVE,
				&new_bootstrap_port);
	if (result != KERN_SUCCESS)
		kern_fatal(result, "mach_port_allocate");

	result = mach_port_insert_right(
				mach_task_self(),
				new_bootstrap_port,
				new_bootstrap_port,
				MACH_MSG_TYPE_MAKE_SEND);
	if (result != KERN_SUCCESS)
		kern_fatal(result, "failed to insert send right");

	result = mach_port_insert_member(
				mach_task_self(),
				new_bootstrap_port,
				bootstrap_port_set);
	if (result != KERN_SUCCESS)
		kern_fatal(result, "port_set_add");

	subset = new_bootstrap(bootstrap, new_bootstrap_port, requestor_port);

	result = mach_port_request_notification(
				mach_task_self(),
				requestor_port,
				MACH_NOTIFY_DEAD_NAME,
				0,
				notify_port,
				MACH_MSG_TYPE_MAKE_SEND_ONCE,
				&previous); 
	if (result != KERN_SUCCESS) {
		kern_error(result, "mach_port_request_notification");
		mach_port_deallocate(mach_task_self(), requestor_port);
		subset->requestor_port = MACH_PORT_NULL;
		deactivate_bootstrap(subset);
	} else if (previous != MACH_PORT_NULL) {
		debug("deallocating old notification port (%x) for requestor %x",
			  previous, requestor_port);
		result = mach_port_deallocate(
				mach_task_self(),
				previous);
		if (result != KERN_SUCCESS)
			kern_fatal(result, "mach_port_deallocate");
	}

	info("Created bootstrap subset %x parent %x requestor %x", 
		new_bootstrap_port, bootstrap_port, requestor_port);
	*subset_port = new_bootstrap_port;
	return BOOTSTRAP_SUCCESS;
}
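
The server-side handler above pairs with a simple client call. A hedged sketch of how a client might create and install a private subset of its current bootstrap port (error handling trimmed; this is illustrative and not taken from the surrounding project):

  mach_port_t bootstrap = MACH_PORT_NULL, subset = MACH_PORT_NULL;
  task_get_bootstrap_port (mach_task_self (), &bootstrap);
  if (bootstrap_subset (bootstrap, mach_task_self (), &subset) == KERN_SUCCESS)
    /* Registrations made through SUBSET vanish when this task's port dies.  */
    task_set_bootstrap_port (mach_task_self (), subset);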
Example No. 9
// ----------------------------------------------------------------------------
// wxHIDDevice::GetCount [static]
//
//  Obtains the number of devices on a system for a given HID Page (nClass)
// and HID Usage (nType).
// ----------------------------------------------------------------------------
size_t wxHIDDevice::GetCount (int nClass, int nType)
{
    //Create the mach port
    mach_port_t             pPort;
    if(IOMasterPort(bootstrap_port, &pPort) != kIOReturnSuccess)
    {
        wxLogSysError(wxT("Could not create mach port"));
        return false;
    }

    //Dictionary that will hold first
    //the matching dictionary for determining which kind of devices we want,
    //then later some registry properties from an iterator (see below)
    CFMutableDictionaryRef pDictionary = IOServiceMatching(kIOHIDDeviceKey);
    if(pDictionary == NULL)
    {
        wxLogSysError( wxT("IOServiceMatching(kIOHIDDeviceKey) failed") );
        return false;
    }

    //Here we'll filter down the services to what we want
    if (nType != -1)
    {
        CFNumberRef pType = CFNumberCreate(kCFAllocatorDefault,
                                    kCFNumberIntType, &nType);
        CFDictionarySetValue(pDictionary, CFSTR(kIOHIDPrimaryUsageKey), pType);
        CFRelease(pType);
    }
    if (nClass != -1)
    {
        CFNumberRef pClass = CFNumberCreate(kCFAllocatorDefault,
                                    kCFNumberIntType, &nClass);
        CFDictionarySetValue(pDictionary, CFSTR(kIOHIDPrimaryUsagePageKey), pClass);
        CFRelease(pClass);
    }

    //Now get the matching services
    io_iterator_t pIterator;
    if( IOServiceGetMatchingServices(pPort,
                                     pDictionary, &pIterator) != kIOReturnSuccess )
    {
        wxLogSysError(wxT("No Matching HID Services"));
        return false;
    }

    //If the iterator doesn't exist there are no devices :)
    if ( !pIterator )
        return 0;

    //Now we iterate through them
    size_t nCount = 0;
    io_object_t pObject;
    while ( (pObject = IOIteratorNext(pIterator)) != 0)
    {
        ++nCount;
        IOObjectRelease(pObject);
    }

    //cleanup
    IOObjectRelease(pIterator);
    mach_port_deallocate(mach_task_self(), pPort);

    return nCount;
}//end GetCount()
Example No. 10
/*
 * kern_return_t
 * bootstrap_check_in(mach_port_t bootstrap_port,
 *	 name_t service_name,
 *	 mach_port_t *service_portp)
 *
 * Returns receive rights to service_port of service named by service_name.
 *
 * Errors:	Returns appropriate kernel errors on rpc failure.
 *		Returns BOOTSTRAP_UNKNOWN_SERVICE, if service does not exist.
 *		Returns BOOTSTRAP_SERVICE_NOT_DECLARED, if service not declared
 *			in /etc/bootstrap.conf.
 *		Returns BOOTSTRAP_SERVICE_ACTIVE, if service has already been
 *			registered or checked-in.
 */
kern_return_t
x_bootstrap_check_in(
	mach_port_t	bootstrap_port,
	name_t		service_name,
	mach_port_t	*service_portp)
{
	kern_return_t result;
	mach_port_t previous;
	service_t *servicep;
	server_t *serverp;
	bootstrap_info_t *bootstrap;

	serverp = lookup_server_by_port(bootstrap_port);
	bootstrap = lookup_bootstrap_by_port(bootstrap_port);
	debug("Service checkin attempt for service %s bootstrap %x",
	      service_name, bootstrap_port);

	servicep = lookup_service_by_name(bootstrap, service_name);
	if (servicep == NULL || servicep->port == MACH_PORT_NULL) {
		debug("bootstrap_check_in service %s unknown%s", service_name,
			forward_ok ? " forwarding" : "");
		return  forward_ok ?
			bootstrap_check_in(
					inherited_bootstrap_port,
					service_name,
					service_portp) :
			BOOTSTRAP_UNKNOWN_SERVICE;
	}
	if (servicep->server != NULL && servicep->server != serverp) {
		debug("bootstrap_check_in service %s not privileged",
			service_name);
		 return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (!canReceive(servicep->port)) {
		ASSERT(servicep->isActive);
		debug("bootstrap_check_in service %s already active",
			service_name);
		return BOOTSTRAP_SERVICE_ACTIVE;
	}
	debug("Checkin service %s for bootstrap %x", service_name,
	      bootstrap->bootstrap_port);
	ASSERT(servicep->isActive == FALSE);
	servicep->isActive = TRUE;

	if (servicep->server != NULL_SERVER) {
		/* registered server - service needs backup */
		serverp->activity++;
		serverp->active_services++;
		result = mach_port_request_notification(
					mach_task_self(),
					servicep->port,
					MACH_NOTIFY_PORT_DESTROYED,
					0,
					backup_port,
					MACH_MSG_TYPE_MAKE_SEND_ONCE,
					&previous);
		if (result != KERN_SUCCESS)
			kern_fatal(result, "mach_port_request_notification");
	} else {
		/* one time use/created service */
		servicep->servicetype = REGISTERED;
		result = mach_port_request_notification(
					mach_task_self(),
					servicep->port,
					MACH_NOTIFY_DEAD_NAME,
					0,
					notify_port,
					MACH_MSG_TYPE_MAKE_SEND_ONCE,
					&previous);
		if (result != KERN_SUCCESS)
			kern_fatal(result, "mach_port_request_notification");
		else if (previous != MACH_PORT_NULL) {
			debug("deallocating old notification port (%x) for checked in service %x",
				previous, servicep->port);
			result = mach_port_deallocate(
						mach_task_self(),
						previous);
			if (result != KERN_SUCCESS)
				kern_fatal(result, "mach_port_deallocate");
		}
	}

	info("Check-in service %x in bootstrap %x: %s",
	      servicep->port, servicep->bootstrap->bootstrap_port, servicep->name);

	*service_portp = servicep->port;
	return BOOTSTRAP_SUCCESS;
}
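
The corresponding client call is straightforward. A hedged sketch of a server claiming the receive right for its declared service; the service name used here is a hypothetical placeholder:

  mach_port_t service = MACH_PORT_NULL;
  kern_return_t kr = bootstrap_check_in (bootstrap_port,
                                         "com.example.service" /* placeholder */,
                                         &service);
  if (kr == KERN_SUCCESS)
    {
      /* SERVICE now holds the receive right; run the server loop on it.  */
    }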
Example No. 11
/*
 * kern_return_t
 * bootstrap_register(mach_port_t bootstrap_port,
 *	name_t service_name,
 *	mach_port_t service_port)
 *
 * Registers send rights for the port service_port for the service named by
 * service_name.  Registering a declared service or registering a service for
 * which bootstrap has receive rights via a port backup notification is
 * allowed.
 * The previous service port will be deallocated.  Restarting services wishing
 * to resume service for previous clients must first attempt to checkin to the
 * service.
 *
 * Errors:	Returns appropriate kernel errors on rpc failure.
 *		Returns BOOTSTRAP_NOT_PRIVILEGED, if request directed to
 *			unprivileged bootstrap port.
 *		Returns BOOTSTRAP_SERVICE_ACTIVE, if service has already been
 *			registered or checked in.
 */
kern_return_t
x_bootstrap_register(
	mach_port_t	bootstrap_port,
	name_t	service_name,
	mach_port_t	service_port)
{
	kern_return_t result;
	service_t *servicep;
	server_t *serverp;
	bootstrap_info_t *bootstrap;
	mach_port_t old_port;

	debug("Register attempt for service %s port %x",
	      service_name, service_port);

	/*
	 * Validate the bootstrap.
	 */
	bootstrap = lookup_bootstrap_by_port(bootstrap_port);
	if (!bootstrap || !active_bootstrap(bootstrap))
		return BOOTSTRAP_NOT_PRIVILEGED;
	  
	/*
	 * If this bootstrap port is for a server, or an unprivileged
	 * bootstrap, it can't register the port.
	 */
	serverp = lookup_server_by_port(bootstrap_port);
	servicep = lookup_service_by_name(bootstrap, service_name);
	if (servicep && servicep->server && servicep->server != serverp)
		return BOOTSTRAP_NOT_PRIVILEGED;

	if (servicep == NULL || servicep->bootstrap != bootstrap) {
		servicep = new_service(bootstrap,
				       service_name,
				       service_port,
				       ACTIVE,
				       REGISTERED,
				       NULL_SERVER);
		debug("Registered new service %s", service_name);
	} else {
		if (servicep->isActive) {
			debug("Register: service %s already active, port %x",
		 	      servicep->name, servicep->port);
			ASSERT(!canReceive(servicep->port));
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		old_port = servicep->port;
		if (servicep->servicetype == DECLARED) {
			servicep->servicetype = REGISTERED;

			if (servicep->server) {
				ASSERT(servicep->server == serverp);
				ASSERT(active_server(serverp));
				servicep->server = NULL_SERVER;
				serverp->activity++;
			}

			result = mach_port_mod_refs(
					mach_task_self(),
					old_port,
					MACH_PORT_RIGHT_RECEIVE, 
					-1);
			if (result != KERN_SUCCESS)
				kern_fatal(result, "mach_port_mod_refs");
		}
		result = mach_port_deallocate(
				mach_task_self(),
				old_port);
		if (result != KERN_SUCCESS)
			kern_fatal(result, "mach_port_deallocate");
		
		servicep->port = service_port;
		servicep->isActive = TRUE;
		debug("Re-registered inactive service %x bootstrap %x: %s",
			servicep->port, servicep->bootstrap->bootstrap_port, service_name);
	}

	/* detect the new service port going dead */
	result = mach_port_request_notification(
			mach_task_self(),
			service_port,
			MACH_NOTIFY_DEAD_NAME,
			0,
			notify_port,
			MACH_MSG_TYPE_MAKE_SEND_ONCE,
			&old_port);
	if (result != KERN_SUCCESS) {
		debug("Can't request notification on service %x bootstrap %x: %s",
		       service_port, servicep->bootstrap->bootstrap_port, "must be dead");
		delete_service(servicep);
		return BOOTSTRAP_SUCCESS;
	} else if (old_port != MACH_PORT_NULL) {
		debug("deallocating old notification port (%x) for service %x",
		      old_port, service_port);
		result = mach_port_deallocate(
				mach_task_self(),
				old_port);
		if (result != KERN_SUCCESS)
			kern_fatal(result, "mach_port_deallocate");
	}
	info("Registered service %x bootstrap %x: %s",
	     servicep->port, servicep->bootstrap->bootstrap_port, servicep->name);
	return BOOTSTRAP_SUCCESS;
}
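
And the client side of registration, sketched under the same caveats: allocate a receive right, make a send right for it, and hand that send right to the bootstrap server. The service name is again a hypothetical placeholder, and error checks are omitted for brevity.

  mach_port_t port = MACH_PORT_NULL;
  kern_return_t kr;
  mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE, &port);
  mach_port_insert_right (mach_task_self (), port, port,
                          MACH_MSG_TYPE_MAKE_SEND);
  /* "com.example.service" is a placeholder name.  */
  kr = bootstrap_register (bootstrap_port, "com.example.service", port);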
Example No. 12
	void shutTimer()
	{
		mach_port_deallocate(mach_task_self(), __clock_rt);
	}
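
This one-liner only makes sense next to the acquisition it undoes. A hedged sketch of how a realtime clock port such as __clock_rt is commonly obtained; the enclosing class and its setup path are not shown in the excerpt, so this is an assumption about where the right comes from.

	clock_serv_t __clock_rt;

	void initTimer()
	{
		/* host_get_clock_service() hands back a send right that must
		   later be released with mach_port_deallocate(), which is
		   exactly what shutTimer() above does.  */
		host_get_clock_service (mach_host_self (), REALTIME_CLOCK,
					&__clock_rt);
	}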
Example No. 13
int
main (int argc, char **argv)
{
	kern_return_t		kr;
	mach_port_name_t	labelHandle, portName;
	char			*textlabel, textbuf[512];
	int			ch, count, dealloc, destroy, getnew, getport;
	int			gettask, reqlabel, i;

	count = 1;
	dealloc = destroy = getnew = gettask = getport = reqlabel = 0;

	/* XXX - add port lh and request lh */
	while ((ch = getopt(argc, argv, "c:dn:prtx")) != -1) {
		switch (ch) {
		case 'c':
			count = atoi(optarg);
			break;

		case 'd':
			dealloc = 1;
			break;

		case 'n':
			getnew = 1;
			textlabel = optarg;
			break;

		case 'p':
			getport = 1;
			break;

		case 'r':
			reqlabel = 1;
			break;

		case 't':
			gettask = 1;
			break;

		case 'x':
			destroy = 1;
			break;

		default:
			usage();
		}
	}

	if (getnew + gettask + getport + reqlabel != 1)
		usage();

	/* Get a new port. */
	if (getport || reqlabel) {
		kr = mach_port_allocate(mach_task_self(),
		    MACH_PORT_RIGHT_RECEIVE, &portName);
		if (kr != KERN_SUCCESS) {
			mach_error("mach_port_allocate():", kr);
			exit(1);
		}
	}

	for (i = 0; i < count; i++) {
		if (getnew) {
			/* Get a new label handle */
			kr = mac_label_new(mach_task_self(), &labelHandle,
			    textlabel);
			if (kr != KERN_SUCCESS) {
				fprintf(stderr, "mac_label_new(%s)", textlabel);
				mach_error(":", kr);
				exit(1);
			}
			printf("new label handle: 0x%x (%s)\n", labelHandle,
			    textlabel);
		}
		if (gettask) {
			/* Get label handle for our task */
			kr = mach_get_task_label(mach_task_self(),
			    &labelHandle);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_get_task_label():", kr);
				exit(1);
			}
			kr = mach_get_task_label_text(mach_task_self(),
			    "sebsd", textbuf);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_get_task_label_text():", kr);
				exit(1);
			}
			printf("task label handle: 0x%x (%s)\n", labelHandle,
			    textbuf);
		}
		if (getport) {
			/* Get a label handle for the new port */
			kr = mach_get_label(mach_task_self(), portName,
			    &labelHandle);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_get_label():", kr);
				exit(1);
			}
			kr = mach_get_label_text(mach_task_self(), labelHandle,
			    "sebsd", textbuf);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_get_label_text():", kr);
				exit(1);
			}
			printf("port label handle: 0x%x (%s)\n", labelHandle,
			    textbuf);
		}
		if (reqlabel) {
			/* Compute label handle based on port and task. */
			kr = mac_request_label(mach_task_self(), portName,
			    mach_task_self(), "mach_task", &labelHandle);
			if (kr != KERN_SUCCESS) {
				mach_error("mac_request_label():", kr);
				exit(1);
			}
			kr = mach_get_label_text(mach_task_self(), labelHandle,
			    "sebsd", textbuf);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_get_label_text():", kr);
				exit(1);
			}
			printf("computed label handle: 0x%x (%s)\n",
			    labelHandle, textbuf);
		}
		if (dealloc) {
			/* Deallocate the label handle */
			kr = mach_port_deallocate(mach_task_self(), labelHandle);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_port_deallocate:", kr);
				exit(1);
			}
			printf("successfully deallocated the label handle\n");
		}
		if (destroy) {
			/* Destroy the label handle */
			kr = mach_port_destroy(mach_task_self(), labelHandle);
			if (kr != KERN_SUCCESS) {
				mach_error("mach_port_destroy:", kr);
				exit(1);
			}
			printf("successfully destroyed the label handle\n");
		}
	}

	exit(0);
}
Example No. 14
int main(int argc, const char *argv[])
{
    time_t current_time = time(NULL);
    char* c_time_string = ctime(&current_time);
    size_t l = strlen(c_time_string);
    if (l > 0)
        c_time_string[l-1] = 0;
    DEBUG_LOG("%s: VoodooPS2Daemon 1.8.9 starting...\n", c_time_string);
    
    // Note: on Snow Leopard, the system is not ready to enumerate USB devices, so we wait a
    // bit before continuing...
    usleep(1000000);

    // first check for trackpad driver
	g_ioservice = IOServiceGetMatchingService(0, IOServiceMatching("ApplePS2SynapticsTouchPad"));
	if (!g_ioservice)
	{
        // otherwise, talk to mouse driver
        g_ioservice = IOServiceGetMatchingService(0, IOServiceMatching("ApplePS2Mouse"));
        if (!g_ioservice)
        {
            DEBUG_LOG("No ApplePS2SynapticsTouchPad or ApplePS2Mouse found\n");
            return -1;
        }
	}
    
    // Set up a signal handler so we can clean up when we're interrupted from the command line
    // or otherwise asked to terminate.
    if (SIG_ERR == signal(SIGINT, SignalHandler1))
        DEBUG_LOG("Could not establish new SIGINT handler\n");
    if (SIG_ERR == signal(SIGTERM, SignalHandler1))
        DEBUG_LOG("Could not establish new SIGTERM handler\n");
    
    // First create a master_port for my task
    mach_port_t masterPort;
    kern_return_t kr = IOMasterPort(MACH_PORT_NULL, &masterPort);
    if (kr || !masterPort)
    {
        DEBUG_LOG("ERR: Couldn't create a master IOKit Port(%08x)\n", kr);
        return -1;
    }
    
    // Create dictionary to match all USB devices
    CFMutableDictionaryRef matchingDict = IOServiceMatching(kIOUSBDeviceClassName);
    if (!matchingDict)
    {
        DEBUG_LOG("Can't create a USB matching dictionary\n");
        mach_port_deallocate(mach_task_self(), masterPort);
        return -1;
    }
    
    // Create a notification port and add its run loop event source to our run loop
    // This is how async notifications get set up.
    g_NotifyPort = IONotificationPortCreate(masterPort);
    CFRunLoopSourceRef runLoopSource = IONotificationPortGetRunLoopSource(g_NotifyPort);
    CFRunLoopRef runLoop = CFRunLoopGetCurrent();
    CFRunLoopAddSource(runLoop, runLoopSource, kCFRunLoopDefaultMode);
    
    // Now set up a notification to be called when a device is first matched by I/O Kit.
    // Note that this will not catch any devices that were already plugged in so we take
    // care of those later.
    kr = IOServiceAddMatchingNotification(g_NotifyPort, kIOFirstMatchNotification, matchingDict, DeviceAdded, NULL, &g_AddedIter);
    if (KERN_SUCCESS != kr)
    {
        DEBUG_LOG("IOServiceAddMatchingNotification failed(%08x)\n", kr);
        return -1;
    }

    // Iterate once to get already-present devices and arm the notification
    DeviceAdded(NULL, g_AddedIter);

    // Now done with the master_port
    mach_port_deallocate(mach_task_self(), masterPort);
    masterPort = 0;
    
    // Start the run loop. Now we'll receive notifications.
    CFRunLoopRun();
    
    // We should never get here
    DEBUG_LOG("Unexpectedly back from CFRunLoopRun()!\n");
    
    return 0;
}
Example No. 15
static gint64
get_process_stat_item (int pid, int pos, int sum, MonoProcessError *error)
{
#if defined(__APPLE__) 
	double process_user_time = 0, process_system_time = 0;//, process_percent = 0;
	task_t task;
	struct task_basic_info t_info;
	mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT, th_count;
	thread_array_t th_array;
	size_t i;

	if (pid == getpid ()) {
		/* task_for_pid () doesn't work on ios, even for the current process */
		task = mach_task_self ();
	} else {
		if (task_for_pid (mach_task_self (), pid, &task) != KERN_SUCCESS)
			RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND);
	}

	if (task_info (task, TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count) != KERN_SUCCESS) {
		if (pid != getpid ())
			mach_port_deallocate (mach_task_self (), task);
		RET_ERROR (MONO_PROCESS_ERROR_OTHER);
	}
	
	if (task_threads(task, &th_array, &th_count) != KERN_SUCCESS) {
		if (pid != getpid ())
			mach_port_deallocate (mach_task_self (), task);
		RET_ERROR (MONO_PROCESS_ERROR_OTHER);
	}
		
	for (i = 0; i < th_count; i++) {
		double thread_user_time, thread_system_time;//, thread_percent;
		
		struct thread_basic_info th_info;
		mach_msg_type_number_t th_info_count = THREAD_BASIC_INFO_COUNT;
		if (thread_info(th_array[i], THREAD_BASIC_INFO, (thread_info_t)&th_info, &th_info_count) == KERN_SUCCESS) {
			thread_user_time = th_info.user_time.seconds + th_info.user_time.microseconds / 1e6;
			thread_system_time = th_info.system_time.seconds + th_info.system_time.microseconds / 1e6;
			//thread_percent = (double)th_info.cpu_usage / TH_USAGE_SCALE;
			
			process_user_time += thread_user_time;
			process_system_time += thread_system_time;
			//process_percent += th_percent;
		}
	}
	
	for (i = 0; i < th_count; i++)
		mach_port_deallocate(task, th_array[i]);

	if (pid != getpid ())
		mach_port_deallocate (mach_task_self (), task);

	process_user_time += t_info.user_time.seconds + t_info.user_time.microseconds / 1e6;
	process_system_time += t_info.system_time.seconds + t_info.system_time.microseconds / 1e6;
    
	if (pos == 10 && sum == TRUE)
		return (gint64)((process_user_time + process_system_time) * 10000000);
	else if (pos == 10)
		return (gint64)(process_user_time * 10000000);
	else if (pos == 11)
		return (gint64)(process_system_time * 10000000);
		
	return 0;
#else
	char buf [512];
	char *s, *end;
	FILE *f;
	int len, i;
	gint64 value;

	g_snprintf (buf, sizeof (buf), "/proc/%d/stat", pid);
	f = fopen (buf, "r");
	if (!f)
		RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND);
	len = fread (buf, 1, sizeof (buf), f);
	fclose (f);
	if (len <= 0)
		RET_ERROR (MONO_PROCESS_ERROR_OTHER);
	s = strchr (buf, ')');
	if (!s)
		RET_ERROR (MONO_PROCESS_ERROR_OTHER);
	s++;
	while (g_ascii_isspace (*s)) s++;
	if (!*s)
		RET_ERROR (MONO_PROCESS_ERROR_OTHER);
	/* skip the status char */
	while (*s && !g_ascii_isspace (*s)) s++;
	if (!*s)
		RET_ERROR (MONO_PROCESS_ERROR_OTHER);
	for (i = 0; i < pos; ++i) {
		while (g_ascii_isspace (*s)) s++;
		if (!*s)
			RET_ERROR (MONO_PROCESS_ERROR_OTHER);
		while (*s && !g_ascii_isspace (*s)) s++;
		if (!*s)
			RET_ERROR (MONO_PROCESS_ERROR_OTHER);
	}
	/* we are finally at the needed item */
	value = strtoul (s, &end, 0);
	/* add also the following value */
	if (sum) {
		while (g_ascii_isspace (*s)) s++;
		if (!*s)
			RET_ERROR (MONO_PROCESS_ERROR_OTHER);
		value += strtoul (s, &end, 0);
	}
	if (error)
		*error = MONO_PROCESS_ERROR_NONE;
	return value;
#endif
}
Example No. 16
kern_return_t
S_exec_init (struct trivfs_protid *protid,
	     auth_t auth, process_t proc)
{
  mach_port_t host_priv, device_master, startup;
  error_t err;

  if (! protid || ! protid->isroot)
    return EPERM;

  _hurd_port_set (&_hurd_ports[INIT_PORT_PROC], proc); /* Consume.  */
  _hurd_port_set (&_hurd_ports[INIT_PORT_AUTH], auth); /* Consume.  */

  /* Do initial setup with the proc server.  */
  _hurd_proc_init (save_argv, NULL, 0);

  procserver = getproc ();

  /* Have the proc server notify us when the canonical ints and ports
     change.  This will generate an immediate callback giving us the
     initial boot-time canonical sets.  */
  {
    struct iouser *user;
    struct trivfs_protid *cred;
    mach_port_t right;

    err = iohelp_create_empty_iouser (&user);
    assert_perror (err);
    err = trivfs_open (fsys, user, 0, MACH_PORT_NULL, &cred);
    assert_perror (err);

    right = ports_get_send_right (cred);
    proc_execdata_notify (procserver, right, MACH_MSG_TYPE_COPY_SEND);
    mach_port_deallocate (mach_task_self (), right);
  }

  err = get_privileged_ports (&host_priv, &device_master);
  assert_perror (err);

  err = open_console (device_master);
  assert_perror (err);
  mach_port_deallocate (mach_task_self (), device_master);

  proc_register_version (procserver, host_priv, "exec", "", HURD_VERSION);

  startup = file_name_lookup (_SERVERS_STARTUP, 0, 0);
  if (startup == MACH_PORT_NULL)
    {
      error (0, errno, "%s", _SERVERS_STARTUP);

      /* Fall back to abusing the message port lookup.  */
      err = proc_getmsgport (procserver, HURD_PID_STARTUP, &startup);
      assert_perror (err);
    }
  mach_port_deallocate (mach_task_self (), procserver);

  /* Call startup_essential task last; init assumes we are ready to
     run once we call it. */
  err = startup_essential_task (startup, mach_task_self (), MACH_PORT_NULL,
				"exec", host_priv);
  assert_perror (err);
  mach_port_deallocate (mach_task_self (), startup);

  mach_port_deallocate (mach_task_self (), host_priv);

  return 0;
}
Example No. 17
void
vma_iterate (vma_iterate_callback_fn callback, void *data)
{
#if defined __linux__ /* || defined __CYGWIN__ */

  struct rofile rof;
  int c;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/self/maps") < 0)
    return;

  for (;;)
    {
      unsigned long start, end;
      unsigned int flags;

      /* Parse one line.  First start and end.  */
      if (!(rof_scanf_lx (&rof, &start) >= 0
            && rof_getchar (&rof) == '-'
            && rof_scanf_lx (&rof, &end) >= 0))
        break;
      /* Then the flags.  */
      do
        c = rof_getchar (&rof);
      while (c == ' ');
      flags = 0;
      if (c == 'r')
        flags |= VMA_PROT_READ;
      c = rof_getchar (&rof);
      if (c == 'w')
        flags |= VMA_PROT_WRITE;
      c = rof_getchar (&rof);
      if (c == 'x')
        flags |= VMA_PROT_EXECUTE;
      while (c = rof_getchar (&rof), c != -1 && c != '\n')
        ;

      if (callback (data, start, end, flags))
        break;
    }
  rof_close (&rof);

#elif defined __FreeBSD__ || defined __NetBSD__

  struct rofile rof;
  int c;

  /* Open the current process' maps file.  It describes one VMA per line.  */
  if (rof_open (&rof, "/proc/curproc/map") < 0)
    return;

  for (;;)
    {
      unsigned long start, end;
      unsigned int flags;

      /* Parse one line.  First start.  */
      if (!(rof_getchar (&rof) == '0'
            && rof_getchar (&rof) == 'x'
            && rof_scanf_lx (&rof, &start) >= 0))
        break;
      while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
        rof_getchar (&rof);
      /* Then end.  */
      if (!(rof_getchar (&rof) == '0'
            && rof_getchar (&rof) == 'x'
            && rof_scanf_lx (&rof, &end) >= 0))
        break;
      /* Then the flags.  */
      do
        c = rof_getchar (&rof);
      while (c == ' ');
      flags = 0;
      if (c == 'r')
        flags |= VMA_PROT_READ;
      c = rof_getchar (&rof);
      if (c == 'w')
        flags |= VMA_PROT_WRITE;
      c = rof_getchar (&rof);
      if (c == 'x')
        flags |= VMA_PROT_EXECUTE;
      while (c = rof_getchar (&rof), c != -1 && c != '\n')
        ;

      if (callback (data, start, end, flags))
        break;
    }
  rof_close (&rof);

#elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */

  size_t pagesize;
  char fnamebuf[6+10+1];
  char *fname;
  int fd;
  int nmaps;
  size_t memneed;
# if HAVE_MAP_ANONYMOUS
#  define zero_fd -1
#  define map_flags MAP_ANONYMOUS
# else
  int zero_fd;
#  define map_flags 0
# endif
  void *auxmap;
  unsigned long auxmap_start;
  unsigned long auxmap_end;
  prmap_t* maps;
  prmap_t* mp;

  pagesize = getpagesize ();

  /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
  fname = fnamebuf + sizeof (fnamebuf) - 1;
  *fname = '\0';
  {
    unsigned int value = getpid ();
    do
      *--fname = (value % 10) + '0';
    while ((value = value / 10) > 0);
  }
  fname -= 6;
  memcpy (fname, "/proc/", 6);

  fd = open (fname, O_RDONLY);
  if (fd < 0)
    return;

  if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    goto fail2;

  memneed = (nmaps + 10) * sizeof (prmap_t);
  /* Allocate memneed bytes of memory.
     We cannot use alloca here, because not much stack space is guaranteed.
     We also cannot use malloc here, because a malloc() call may call mmap()
     and thus pre-allocate available memory.
     So use mmap(), and ignore the resulting VMA.  */
  memneed = ((memneed - 1) / pagesize + 1) * pagesize;
# if !HAVE_MAP_ANONYMOUS
  zero_fd = open ("/dev/zero", O_RDONLY, 0644);
  if (zero_fd < 0)
    goto fail2;
# endif
  auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
                          map_flags | MAP_PRIVATE, zero_fd, 0);
# if !HAVE_MAP_ANONYMOUS
  close (zero_fd);
# endif
  if (auxmap == (void *) -1)
    goto fail2;
  auxmap_start = (unsigned long) auxmap;
  auxmap_end = auxmap_start + memneed;
  maps = (prmap_t *) auxmap;

  if (ioctl (fd, PIOCMAP, maps) < 0)
    goto fail1;

  for (mp = maps;;)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) mp->pr_vaddr;
      end = start + mp->pr_size;
      if (start == 0 && end == 0)
        break;
      flags = 0;
      if (mp->pr_mflags & MA_READ)
        flags |= VMA_PROT_READ;
      if (mp->pr_mflags & MA_WRITE)
        flags |= VMA_PROT_WRITE;
      if (mp->pr_mflags & MA_EXEC)
        flags |= VMA_PROT_EXECUTE;
      mp++;
      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
        {
          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
          if (start < auxmap_start)
            if (callback (data, start, auxmap_start, flags))
              break;
          if (auxmap_end - 1 < end - 1)
            if (callback (data, auxmap_end, end, flags))
              break;
        }
      else
        {
          if (callback (data, start, end, flags))
            break;
        }
    }
  munmap (auxmap, memneed);
  close (fd);
  return;

 fail1:
  munmap (auxmap, memneed);
 fail2:
  close (fd);
  return;

#elif defined __APPLE__ && defined __MACH__ /* Mac OS X */

  task_t task = mach_task_self ();
  vm_address_t address;
  vm_size_t size;

  for (address = VM_MIN_ADDRESS;; address += size)
    {
      int more;
      mach_port_t object_name;
      unsigned int flags;
      /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
         32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
         mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         Mac OS X 10.5 has three vm_region like methods:
           - vm_region. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. When linking dynamically, this
             function exists only in 32-bit processes. Therefore we use it only
             in 32-bit processes.
           - vm_region_64. It has arguments that depend on whether the current
             process is 32-bit or 64-bit. It interprets a flavor
             VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
             dangerous since 'struct vm_region_basic_info_64' is larger than
             'struct vm_region_basic_info'; therefore let's write
             VM_REGION_BASIC_INFO_64 explicitly.
           - mach_vm_region. It has arguments that are 64-bit always. This
             function is useful when you want to access the VM of a process
             other than the current process.
         In 64-bit processes, we could use vm_region_64 or mach_vm_region.
         I choose vm_region_64 because it uses the same types as vm_region,
         resulting in less conditional code.  */
# if defined __ppc64__ || defined __x86_64__
      struct vm_region_basic_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;

      more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# else
      struct vm_region_basic_info info;
      mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;

      more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                         (vm_region_info_t)&info, &info_count, &object_name)
              == KERN_SUCCESS);
# endif
      if (object_name != MACH_PORT_NULL)
        mach_port_deallocate (mach_task_self (), object_name);
      if (!more)
        break;
      flags = 0;
      if (info.protection & VM_PROT_READ)
        flags |= VMA_PROT_READ;
      if (info.protection & VM_PROT_WRITE)
        flags |= VMA_PROT_WRITE;
      if (info.protection & VM_PROT_EXECUTE)
        flags |= VMA_PROT_EXECUTE;
      if (callback (data, address, address + size, flags))
        break;
    }

#elif (defined _WIN32 || defined __WIN32__) || defined __CYGWIN__
  /* Windows platform.  Use the native Windows API.  */

  MEMORY_BASIC_INFORMATION info;
  unsigned long address = 0;

  while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    {
      if (info.State != MEM_FREE)
        /* Ignore areas where info.State has the value MEM_RESERVE or,
           equivalently, info.Protect has the undocumented value 0.
           This is needed, so that on Cygwin, areas used by malloc() are
           distinguished from areas reserved for future malloc().  */
        if (info.State != MEM_RESERVE)
          {
            unsigned long start, end;
            unsigned int flags;

            start = (unsigned long)info.BaseAddress;
            end = start + info.RegionSize;
            switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
              {
              case PAGE_READONLY:
                flags = VMA_PROT_READ;
                break;
              case PAGE_READWRITE:
              case PAGE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE;
                break;
              case PAGE_EXECUTE:
                flags = VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READ:
                flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
                break;
              case PAGE_EXECUTE_READWRITE:
              case PAGE_EXECUTE_WRITECOPY:
                flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
                break;
              case PAGE_NOACCESS:
              default:
                flags = 0;
                break;
              }

            if (callback (data, start, end, flags))
              break;
          }
      address = (unsigned long)info.BaseAddress + info.RegionSize;
    }

#elif defined __BEOS__ || defined __HAIKU__
  /* Use the BeOS specific API.  */

  area_info info;
  int32 cookie;

  cookie = 0;
  while (get_next_area_info (0, &cookie, &info) == B_OK)
    {
      unsigned long start, end;
      unsigned int flags;

      start = (unsigned long) info.address;
      end = start + info.size;
      flags = 0;
      if (info.protection & B_READ_AREA)
        flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
      if (info.protection & B_WRITE_AREA)
        flags |= VMA_PROT_WRITE;

      if (callback (data, start, end, flags))
        break;
    }

#elif HAVE_MQUERY /* OpenBSD */

  uintptr_t pagesize;
  uintptr_t address;
  int /*bool*/ address_known_mapped;

  pagesize = getpagesize ();
  /* Avoid calling mquery with a NULL first argument, because this argument
     value has a specific meaning.  We know the NULL page is unmapped.  */
  address = pagesize;
  address_known_mapped = 0;
  for (;;)
    {
      /* Test whether the page at address is mapped.  */
      if (address_known_mapped
          || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
             == (void *) -1)
        {
          /* The page at address is mapped.
             This is the start of an interval.  */
          uintptr_t start = address;
          uintptr_t end;

          /* Find the end of the interval.  */
          end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
          if (end == (uintptr_t) (void *) -1)
            end = 0; /* wrap around */
          address = end;

          /* It's too complicated to find out about the flags.  Just pass 0.  */
          if (callback (data, start, end, 0))
            break;

          if (address < pagesize) /* wrap around? */
            break;
        }
      /* Here we know that the page at address is unmapped.  */
      {
        uintptr_t query_size = pagesize;

        address += pagesize;

        /* Query larger and larger blocks, to get through the unmapped address
           range with few mquery() calls.  */
        for (;;)
          {
            if (2 * query_size > query_size)
              query_size = 2 * query_size;
            if (address + query_size - 1 < query_size) /* wrap around? */
              {
                address_known_mapped = 0;
                break;
              }
            if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                == (void *) -1)
              {
                /* Not all the interval [address .. address + query_size - 1]
                   is unmapped.  */
                address_known_mapped = (query_size == pagesize);
                break;
              }
            /* The interval [address .. address + query_size - 1] is
               unmapped.  */
            address += query_size;
          }
        /* Reduce the query size again, to determine the precise size of the
           unmapped interval that starts at address.  */
        while (query_size > pagesize)
          {
            query_size = query_size / 2;
            if (address + query_size - 1 >= query_size)
              {
                if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
                    != (void *) -1)
                  {
                    /* The interval [address .. address + query_size - 1] is
                       unmapped.  */
                    address += query_size;
                    address_known_mapped = 0;
                  }
                else
                  address_known_mapped = (query_size == pagesize);
              }
          }
        /* Here again query_size = pagesize, and
           either address + pagesize - 1 < pagesize, or
           mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
           So, the unmapped area ends at address.  */
      }
      if (address + pagesize - 1 < pagesize) /* wrap around? */
        break;
    }

#endif
}
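
The OpenBSD branch above leans on a property of mquery(): with MAP_FIXED it fails when the requested range is already occupied, and without MAP_FIXED it returns the next free address at or above the hint. As a minimal sketch of that probe in isolation (OpenBSD only; the helper name page_is_mapped is ours, not part of the original source):

/* OpenBSD-only sketch: probe whether the page containing ADDR is mapped,
   using the same mquery() trick as the iterator above.  Illustrative helper,
   not part of the original code. */
#include <sys/types.h>
#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int
page_is_mapped (uintptr_t addr)
{
  uintptr_t pagesize = getpagesize ();
  uintptr_t page = addr & ~(pagesize - 1);

  /* With MAP_FIXED, mquery() returns MAP_FAILED iff the fixed placement is
     impossible, i.e. the page is already mapped.  */
  return mquery ((void *) page, pagesize, 0, MAP_FIXED, -1, 0) == MAP_FAILED;
}

int
main (void)
{
  int on_stack;
  printf ("stack page mapped: %d\n", page_is_mapped ((uintptr_t) &on_stack));
  printf ("page 0x1000 mapped: %d\n", page_is_mapped ((uintptr_t) 0x1000));
  return 0;
}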
Example #18
int
grub_util_hurd_get_disk_info (const char *dev, grub_uint32_t *secsize, grub_disk_addr_t *offset,
			      grub_disk_addr_t *size, char **parent)
{
  file_t file;
  mach_port_t *ports;
  int *ints;
  loff_t *offsets;
  char *data;
  error_t err;
  mach_msg_type_number_t num_ports = 0, num_ints = 0, num_offsets = 0, data_len = 0;

  file = file_name_lookup (dev, 0, 0);
  if (file == MACH_PORT_NULL)
    return 0;

  err = file_get_storage_info (file,
			       &ports, &num_ports,
			       &ints, &num_ints,
			       &offsets, &num_offsets,
			       &data, &data_len);
  if (err)
    grub_util_error (_("cannot get storage info for `%s'"), dev);

  if (num_ints < 1)
    grub_util_error (_("Storage info for `%s' does not include type"), dev);
  if (ints[0] != STORAGE_DEVICE)
    grub_util_error (_("`%s' is not a local disk"), dev);

  if (num_offsets != 2)
    grub_util_error (_("Storage info for `%s' does indicate neither plain partition nor plain disk"), dev);
  if (parent)
    {
      *parent = NULL;
      if (num_ints >= 5)
	{
	  size_t len = ints[4];
	  if (len > data_len)
	    len = data_len;
	  *parent = xmalloc (len+1);
	  memcpy (*parent, data, len);
	  (*parent)[len] = '\0';
	}
    }
  if (offset)
    *offset = offsets[0];
  if (size)
    *size = offsets[1];
  if (secsize)
    *secsize = ints[2];
  if (ports && num_ports > 0)
    {
      mach_msg_type_number_t i;
      for (i = 0; i < num_ports; i++)
        {
	  mach_port_t port = ports[i];
	  if (port != MACH_PORT_NULL)
	    mach_port_deallocate (mach_task_self(), port);
        }
      munmap ((caddr_t) ports, num_ports * sizeof (*ports));
    }

  if (ints && num_ints > 0)
    munmap ((caddr_t) ints, num_ints * sizeof (*ints));
  if (offsets && num_offsets > 0)
    munmap ((caddr_t) offsets, num_offsets * sizeof (*offsets));
  if (data && data_len > 0)
    munmap (data, data_len);
  mach_port_deallocate (mach_task_self (), file);

  return 1;
}
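
A hedged usage sketch of the function above, for GNU/Hurd builds only. It assumes the GRUB utility headers are on the include path (the exact headers and the device path "/dev/hd0" are illustrative assumptions, not taken from the original):

/* Sketch: query storage info for a device and print it.  Assumes the GRUB
   util build environment; <grub/types.h> and <grub/disk.h> are assumed to
   provide grub_uint32_t and grub_disk_addr_t. */
#include <grub/types.h>   /* assumed */
#include <grub/disk.h>    /* assumed */
#include <stdio.h>
#include <stdlib.h>

/* Prototype copied from the example above. */
int grub_util_hurd_get_disk_info (const char *dev, grub_uint32_t *secsize,
				  grub_disk_addr_t *offset,
				  grub_disk_addr_t *size, char **parent);

static void
print_disk_info (const char *dev)
{
  grub_uint32_t secsize;
  grub_disk_addr_t offset, size;
  char *parent = NULL;

  if (!grub_util_hurd_get_disk_info (dev, &secsize, &offset, &size, &parent))
    {
      fprintf (stderr, "%s: not a storage node\n", dev);
      return;
    }

  printf ("%s: sector size %u, offset %llu, size %llu\n",
	  dev, (unsigned) secsize,
	  (unsigned long long) offset, (unsigned long long) size);
  if (parent)
    {
      printf ("  parent device: %s\n", parent);
      free (parent);
    }
}

int
main (void)
{
  print_disk_info ("/dev/hd0");   /* example path only */
  return 0;
}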
Example #19
int
mono_sgen_thread_handshake (int signum)
{
	task_t task = current_task ();
	thread_port_t cur_thread = mach_thread_self ();
	thread_act_array_t thread_list;
	mach_msg_type_number_t num_threads;
	mach_msg_type_number_t num_state;
	thread_state_t state;
	kern_return_t ret;
	ucontext_t ctx;
	mcontext_t mctx;
	pthread_t exception_thread = mono_gc_get_mach_exception_thread ();

	SgenThreadInfo *info;
	gpointer regs [ARCH_NUM_REGS];
	gpointer stack_start;

	int count, i;

	mono_mach_get_threads (&thread_list, &num_threads);

	for (i = 0, count = 0; i < num_threads; i++) {
		thread_port_t t = thread_list [i];
		pthread_t pt = pthread_from_mach_thread_np (t);
		if (t != cur_thread && pt != exception_thread && !mono_sgen_is_worker_thread (pt)) {
			if (signum == suspend_signal_num) {
				ret = thread_suspend (t);
				if (ret != KERN_SUCCESS) {
					mach_port_deallocate (task, t);
					continue;
				}

				state = (thread_state_t) alloca (mono_mach_arch_get_thread_state_size ());
				ret = mono_mach_arch_get_thread_state (t, state, &num_state);
				if (ret != KERN_SUCCESS) {
					mach_port_deallocate (task, t);
					continue;
				}


				info = mono_sgen_thread_info_lookup (pt);

				/* Ensure that the runtime is aware of this thread */
				if (info != NULL) {
					mctx = (mcontext_t) alloca (mono_mach_arch_get_mcontext_size ());
					mono_mach_arch_thread_state_to_mcontext (state, mctx);
					ctx.uc_mcontext = mctx;

					info->stopped_domain = mono_mach_arch_get_tls_value_from_thread (t, mono_pthread_key_for_tls (mono_domain_get_tls_key ()));
					info->stopped_ip = (gpointer) mono_mach_arch_get_ip (state);
					stack_start = (char*) mono_mach_arch_get_sp (state) - REDZONE_SIZE;
					/* If stack_start is not within the limits, then don't set it in info and we will be restarted. */
					if (stack_start >= info->stack_start_limit && stack_start <= info->stack_end) {
						info->stack_start = stack_start;

						ARCH_COPY_SIGCTX_REGS (regs, &ctx);
						info->stopped_regs = regs;
					} else {
						g_assert (!info->stack_start);
					}

					/* Notify the JIT */
					if (mono_gc_get_gc_callbacks ()->thread_suspend_func)
						mono_gc_get_gc_callbacks ()->thread_suspend_func (info->runtime_data, &ctx);
				}
			} else {
				ret = thread_resume (t);
				if (ret != KERN_SUCCESS) {
					mach_port_deallocate (task, t);
					continue;
				}
			}
			count ++;

			mach_port_deallocate (task, t);
		}
	}

	mach_port_deallocate (task, cur_thread);

	return count;
}
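
The handshake above is built on the Mach thread_suspend()/thread_resume() primitives. Stripped of the Mono bookkeeping, a minimal self-contained sketch of that core pattern on a Mach system (suspend every other thread of the current task, then resume it, releasing every port right obtained along the way) might look like this; the idle_thread helper is only there to give the loop something to walk over:

/* Sketch: suspend and immediately resume all other threads of the current
   task, releasing every right we obtain.  Mach/macOS only. */
#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
idle_thread (void *arg)
{
  (void) arg;
  sleep (2);
  return NULL;
}

int
main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, idle_thread, NULL);

  thread_act_array_t threads;
  mach_msg_type_number_t count;
  thread_port_t self = mach_thread_self ();

  if (task_threads (mach_task_self (), &threads, &count) != KERN_SUCCESS)
    return 1;

  for (mach_msg_type_number_t i = 0; i < count; i++)
    {
      if (threads[i] != self
	  && thread_suspend (threads[i]) == KERN_SUCCESS)
	{
	  /* ...a real client would inspect the thread's state here...  */
	  thread_resume (threads[i]);
	}
      mach_port_deallocate (mach_task_self (), threads[i]);
    }

  /* task_threads() returns the array out-of-line; unmap it.  */
  vm_deallocate (mach_task_self (), (vm_address_t) threads,
		 count * sizeof (thread_act_t));
  mach_port_deallocate (mach_task_self (), self);

  pthread_join (t, NULL);
  return 0;
}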
Example #20
/* Send data over a socket, possibly including Mach ports.  */
error_t
S_socket_send (struct sock_user *user, struct addr *dest_addr, int flags,
               char *data, size_t data_len,
               mach_port_t *ports, size_t num_ports,
               char *control, size_t control_len,
               size_t *amount)
{
    error_t err = 0;
    struct pipe *pipe;
    struct sock *sock, *dest_sock;
    struct addr *source_addr;

    if (!user)
        return EOPNOTSUPP;

    sock = user->sock;

    if (flags & MSG_OOB)
        /* BSD local sockets don't support OOB data.  */
        return EOPNOTSUPP;

    if (dest_addr)
    {
        err = addr_get_sock (dest_addr, &dest_sock);
        if (err == EADDRNOTAVAIL)
            /* The server went away.  */
            err = ECONNREFUSED;
        if (err)
            return err;
        if (sock->pipe_class != dest_sock->pipe_class)
            /* Sending to a different type of socket!  */
            err = EINVAL;		/* ? XXX */
    }
    else
        dest_sock = 0;

    /* We could provide a source address for all writes, but we
       only do so for connectionless sockets because that's the
       only place it's required, and it's more efficient not to.  */
    if (!err && sock->pipe_class->flags & PIPE_CLASS_CONNECTIONLESS)
        err = sock_get_addr (sock, &source_addr);
    else
        source_addr = NULL;

    if (!err)
    {
        if (dest_sock)
            /* Grab the destination socket's read pipe directly, and stuff data
               into it.  This is not quite the usage sock_acquire_read_pipe was
               intended for, but it will work, as the only inappropriate errors
               occur on a broken pipe, which shouldn't be possible with the sort of
               sockets with which we can use socket_send...  XXXX */
            err = sock_acquire_read_pipe (dest_sock, &pipe);
        else
            /* No address, must be a connected socket...  */
            err = sock_acquire_write_pipe (sock, &pipe);

        if (!err)
        {
            err = pipe_send (pipe, sock->flags & PFLOCAL_SOCK_NONBLOCK,
                             source_addr, data, data_len,
                             control, control_len, ports, num_ports,
                             amount);
            if (dest_sock)
                pipe_release_reader (pipe);
            else
                pipe_release_writer (pipe);
        }

        if (err)
            /* The send failed, so free any resources it would have consumed
               (mig gets rid of memory, but we have to do everything else). */
        {
            if (source_addr)
                ports_port_deref (source_addr);
            while (num_ports-- > 0)
                mach_port_deallocate (mach_task_self (), *ports++);
        }
    }

    if (dest_sock)
        sock_deref (dest_sock);

    return err;
}
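
On the Hurd, S_socket_send can carry Mach ports across a local socket; on other Unix systems the closest user-visible facility is descriptor passing with sendmsg() and SCM_RIGHTS. Purely for comparison (this is the POSIX analogue, not the Hurd mechanism; send_fd is our own helper name), a minimal sketch of passing one file descriptor over an AF_UNIX socketpair:

/* Sketch: pass a file descriptor over an AF_UNIX socket with SCM_RIGHTS. */
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static int
send_fd (int sock, int fd)
{
  char byte = 'x';
  struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
  union { struct cmsghdr hdr; char buf[CMSG_SPACE (sizeof (int))]; } u;
  struct msghdr msg;

  memset (&msg, 0, sizeof msg);
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = u.buf;
  msg.msg_controllen = sizeof u.buf;

  struct cmsghdr *cm = CMSG_FIRSTHDR (&msg);
  cm->cmsg_level = SOL_SOCKET;
  cm->cmsg_type = SCM_RIGHTS;
  cm->cmsg_len = CMSG_LEN (sizeof (int));
  memcpy (CMSG_DATA (cm), &fd, sizeof (int));

  return sendmsg (sock, &msg, 0) == 1 ? 0 : -1;
}

int
main (void)
{
  int sv[2];
  if (socketpair (AF_UNIX, SOCK_STREAM, 0, sv) < 0)
    return 1;
  if (send_fd (sv[0], STDOUT_FILENO) == 0)
    printf ("descriptor sent\n");
  close (sv[0]);
  close (sv[1]);
  return 0;
}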
Example #21
int
main (int argc, char **argv, char **envp)
{
  mach_port_t boot;
  error_t err;
  mach_port_t pset, psetcntl;
  void *genport;
  process_t startup_port;
  struct argp argp = { 0, 0, 0, "Hurd process server" };

  argp_parse (&argp, argc, argv, 0, 0, 0);

  initialize_version_info ();

  err = task_get_bootstrap_port (mach_task_self (), &boot);
  assert_perror (err);
  if (boot == MACH_PORT_NULL)
    error (2, 0, "proc server can only be run by init during boot");

  proc_bucket = ports_create_bucket ();
  proc_class = ports_create_class (0, 0);
  generic_port_class = ports_create_class (0, 0);
  exc_class = ports_create_class (exc_clean, 0);
  ports_create_port (generic_port_class, proc_bucket,
		     sizeof (struct port_info), &genport);
  generic_port = ports_get_right (genport);

  /* Create the initial proc object for init (PID 1).  */
  startup_proc = create_startup_proc ();

  /* Create our own proc object (we are PID 0).  */
  self_proc = allocate_proc (mach_task_self ());
  assert (self_proc);

  complete_proc (self_proc, 0);

  startup_port = ports_get_send_right (startup_proc);
  err = startup_procinit (boot, startup_port, &startup_proc->p_task,
			  &authserver, &master_host_port, &master_device_port);
  assert_perror (err);
  mach_port_deallocate (mach_task_self (), startup_port);

  mach_port_mod_refs (mach_task_self (), authserver, MACH_PORT_RIGHT_SEND, 1);
  _hurd_port_set (&_hurd_ports[INIT_PORT_AUTH], authserver);
  mach_port_deallocate (mach_task_self (), boot);

  proc_death_notify (startup_proc);
  add_proc_to_hash (startup_proc); /* Now that we have the task port.  */

  /* Set our own argv and envp locations.  */
  self_proc->p_argv = (vm_address_t) argv;
  self_proc->p_envp = (vm_address_t) envp;

  /* Give ourselves good scheduling performance, because we are so
     important. */
  err = thread_get_assignment (mach_thread_self (), &pset);
  assert_perror (err);
  err = host_processor_set_priv (master_host_port, pset, &psetcntl);
  assert_perror (err);
  err = thread_max_priority (mach_thread_self (), psetcntl, 0);
  assert_perror (err);
  err = task_priority (mach_task_self (), 2, 1);
  assert_perror (err);

  mach_port_deallocate (mach_task_self (), pset);
  mach_port_deallocate (mach_task_self (), psetcntl);

  {
    /* Get our stderr set up to print on the console, in case we have
       to panic or something.  */
    mach_port_t cons;
    error_t err;
    err = device_open (master_device_port, D_READ|D_WRITE, "console", &cons);
    assert_perror (err);
    stdin = mach_open_devstream (cons, "r");
    stdout = stderr = mach_open_devstream (cons, "w");
    mach_port_deallocate (mach_task_self (), cons);
  }

  while (1)
    ports_manage_port_operations_multithread (proc_bucket,
					      message_demuxer,
					      0, 0, 0);
}
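
The first thing the server does above is fetch its bootstrap port and refuse to run without one. That check in isolation works on any Mach system (including macOS); a minimal sketch:

/* Sketch: fetch and inspect the task's bootstrap port, then release the
   send right we were given. */
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int
main (void)
{
  mach_port_t boot = MACH_PORT_NULL;
  kern_return_t kr = task_get_bootstrap_port (mach_task_self (), &boot);

  if (kr != KERN_SUCCESS)
    {
      fprintf (stderr, "task_get_bootstrap_port: %s\n", mach_error_string (kr));
      return 1;
    }

  printf ("bootstrap port: 0x%x (%s)\n", (unsigned) boot,
	  boot == MACH_PORT_NULL ? "none" : "valid");

  if (boot != MACH_PORT_NULL)
    mach_port_deallocate (mach_task_self (), boot);
  return 0;
}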
Example #22
error_t
S_socket_connect (struct sock_user *user, struct addr *addr)
{
    error_t err;
    struct sock *peer;

    if (! addr)
        return ECONNREFUSED;

    /* Deallocate ADDR's send right, which we get as a side effect of the rpc. */
    mach_port_deallocate (mach_task_self (),
                          ((struct port_info *)addr)->port_right);

    if (! user)
        return EOPNOTSUPP;

    err = addr_get_sock (addr, &peer);
    if (err == EADDRNOTAVAIL)
        /* The server went away.  */
        err = ECONNREFUSED;
    else if (!err)
    {
        struct sock *sock = user->sock;
        struct connq *cq = peer->listen_queue;

        if (sock->pipe_class->flags & PIPE_CLASS_CONNECTIONLESS)
            /* For connectionless protocols, connect() just sets where writes
               will go, so the destination need not be doing an accept.  */
            err = sock_connect (sock, peer);
        else if (cq)
            /* For connection-oriented protocols, only connect with sockets that
                   are actually listening.  */
        {
            pthread_mutex_lock (&sock->lock);
            if (sock->connect_queue)
                /* SOCK is already doing a connect.  */
                err = EALREADY;
            else if (sock->flags & PFLOCAL_SOCK_CONNECTED)
                /* PFLOCAL_SOCK_CONNECTED is only set for connection-oriented sockets,
                   which can only ever connect once.  [If we didn't do this test
                   here, it would eventually fail when the listening socket
                   tried to accept our connection request.]  */
                err = EISCONN;
            else
            {
                /* Assert that we're trying to connect, so anyone else trying
                   to do so will fail with EALREADY.  */
                sock->connect_queue = cq;
                /* Unlock SOCK while waiting.  */
                pthread_mutex_unlock (&sock->lock);

                err = connq_connect (peer->listen_queue,
                                     sock->flags & PFLOCAL_SOCK_NONBLOCK);
                if (!err)
                {
                    struct sock *server;

                    err = sock_clone (peer, &server);
                    if (!err)
                    {
                        err = sock_connect (sock, server);
                        if (!err)
                            connq_connect_complete (peer->listen_queue, server);
                        else
                            sock_free (server);
                    }

                    if (err)
                        connq_connect_cancel (peer->listen_queue);
                }

                pthread_mutex_lock (&sock->lock);
                /* We must set CONNECT_QUEUE to NULL, as no one else can
                set it until we've done so.  */
                sock->connect_queue = NULL;
            }

            pthread_mutex_unlock (&sock->lock);
        }
        else
            err = ECONNREFUSED;

        sock_deref (peer);
    }

    return err;
}
Example #23
void
init_mapped_time(void)
{
	kern_return_t	kr;
	mach_port_t	pager;
	int 		new_res;
	tvalspec_t	rtc_time;

	kr = host_get_clock_service(host_port,
				    REALTIME_CLOCK,
				    &rt_clock);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("init_mapped_time: "
			     "host_get_clock_service(REALTIME_CLOCK)"));
		panic("unable to get real time clock");
	}

	kr = host_get_clock_control(privileged_host_port,
				    REALTIME_CLOCK,
				    &rt_clock_ctrl);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("init_mapped_time: "
			     "host_get_clock_control(REALTIME_CLOCK)"));
	} else {
		/* ask for 500 microsecond resolution */
		new_res = 500000;
#if 0
		kr = clock_set_attributes(rt_clock_ctrl,
					  CLOCK_ALARM_CURRES,
					  (clock_attr_t) &new_res,
					  1);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(2, kr,
				    ("init_mapped_time: "
				     "clock_set_attributes(%d nsec)",
				     new_res));
		}
#endif
	}

	kr = clock_map_time(rt_clock, &pager);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr, ("init_mapped_time: clock_map_time"));
		panic("unable to map real time clock");
	}

	kr = vm_map(mach_task_self(),
		    (vm_address_t *)&serv_mtime,
		    sizeof(mapped_tvalspec_t),
		    0,
		    TRUE,
		    pager,
		    0,
		    0,
		    VM_PROT_READ,
		    VM_PROT_READ,
		    VM_INHERIT_NONE);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr, ("init_mapped_time: vm_map"));
		panic("unable to vm_map real time clock");
	}

	kr = mach_port_deallocate(mach_task_self(), pager);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr, ("init_mapped_time: mach_port_deallocate"));
		panic("unable to deallocate pager");
	}

	/* calculate origin of rtclock (ie. time of boot) so that we
	 * can use rtclock to generate the current time
	 */
	kr = host_get_clock_service(host_port,
				    BATTERY_CLOCK,
				    &bb_clock);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("init_mapped_time: "
			     "host_get_clock_service(BATTERY_CLOCK)"));
		panic("unable to get battery backed clock");
	}

	kr = host_get_clock_control(privileged_host_port,
				    BATTERY_CLOCK,
				    &bb_clock_ctrl);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("init_mapped_time: "
			     "host_get_clock_control(BATTERY_CLOCK)"));
		return;
	}

	kr = clock_get_time(bb_clock, &base_time);

	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr, ("init_mapped_time: clock_get_time"));
	}

	MTS_TO_TS(serv_mtime, &rtc_time);

	SUB_TVALSPEC(&base_time, &rtc_time);

}
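
The same clock services can also be read without mapping any memory: host_get_clock_service() plus clock_get_time() reads a Mach clock directly (the type is spelled mach_timespec_t in current headers; older Mach called it tvalspec_t). A minimal sketch:

/* Sketch: read the Mach SYSTEM_CLOCK directly instead of mapping it. */
#include <mach/clock.h>
#include <mach/mach.h>
#include <stdio.h>

int
main (void)
{
  clock_serv_t clk;
  mach_timespec_t ts;
  kern_return_t kr;

  kr = host_get_clock_service (mach_host_self (), SYSTEM_CLOCK, &clk);
  if (kr != KERN_SUCCESS)
    return 1;

  kr = clock_get_time (clk, &ts);
  mach_port_deallocate (mach_task_self (), clk);
  if (kr != KERN_SUCCESS)
    return 1;

  printf ("system clock: %u s + %d ns since boot\n", ts.tv_sec, ts.tv_nsec);
  return 0;
}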
Example #24
File: disk.c  Project: he32/collectd
static int disk_init (void)
{
#if HAVE_IOKIT_IOKITLIB_H
	kern_return_t status;

	if (io_master_port != MACH_PORT_NULL)
	{
		mach_port_deallocate (mach_task_self (),
				io_master_port);
		io_master_port = MACH_PORT_NULL;
	}

	status = IOMasterPort (MACH_PORT_NULL, &io_master_port);
	if (status != kIOReturnSuccess)
	{
		ERROR ("IOMasterPort failed: %s",
				mach_error_string (status));
		io_master_port = MACH_PORT_NULL;
		return (-1);
	}
/* #endif HAVE_IOKIT_IOKITLIB_H */

#elif KERNEL_LINUX
	/* do nothing */
/* #endif KERNEL_LINUX */

#elif KERNEL_FREEBSD
	int rv;

	rv = geom_gettree(&geom_tree);
	if (rv != 0) {
		ERROR ("geom_gettree() failed, returned %d", rv);
		return (-1);
	}
	rv = geom_stats_open();
	if (rv != 0) {
		ERROR ("geom_stats_open() failed, returned %d", rv);
		return (-1);
	}
/* #endif KERNEL_FREEBSD */

#elif HAVE_LIBKSTAT
	kstat_t *ksp_chain;

	numdisk = 0;

	if (kc == NULL)
		return (-1);

	for (numdisk = 0, ksp_chain = kc->kc_chain;
			(numdisk < MAX_NUMDISK) && (ksp_chain != NULL);
			ksp_chain = ksp_chain->ks_next)
	{
		if (strncmp (ksp_chain->ks_class, "disk", 4)
				&& strncmp (ksp_chain->ks_class, "partition", 9))
			continue;
		if (ksp_chain->ks_type != KSTAT_TYPE_IO)
			continue;
		ksp[numdisk++] = ksp_chain;
	}
/* #endif HAVE_LIBKSTAT */

#elif HAVE_SYSCTL && KERNEL_NETBSD
	int mib[3];
	size_t size;

	/* figure out number of drives */
	mib[0] = CTL_HW;
	mib[1] = HW_IOSTATS;
	mib[2] = sizeof(struct io_sysctl);
	if (sysctl(mib, 3, NULL, &size, NULL, 0) == -1) {
		ERROR ("disk plugin: sysctl for ndrives failed");
		return -1;
	}
	ndrive = size / sizeof(struct io_sysctl);

	if (size == 0 ) {
		ERROR ("disk plugin: no drives found");
		return -1;
	}
	drives = (struct io_sysctl *)malloc(size);
	if (drives == NULL) {
		ERROR ("disk plugin: memory allocation failure");
		return -1;
	}

#endif	/* HAVE_SYSCTL && KERNEL_NETBSD */


	return (0);
} /* int disk_init */
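
On the IOKit branch, the master port obtained here is what the plugin's read path (not shown) later matches services against. A stand-alone sketch of that same starting point, listing IOMedia objects (macOS only, purely illustrative; build with -framework IOKit -framework CoreFoundation):

/* Sketch: obtain the IOKit master port and iterate over IOMedia services. */
#include <IOKit/IOKitLib.h>
#include <stdio.h>

int
main (void)
{
  mach_port_t master = MACH_PORT_NULL;
  io_iterator_t iter = IO_OBJECT_NULL;
  io_object_t disk;

  if (IOMasterPort (MACH_PORT_NULL, &master) != kIOReturnSuccess)
    return 1;

  if (IOServiceGetMatchingServices (master, IOServiceMatching ("IOMedia"),
				    &iter) != kIOReturnSuccess)
    return 1;

  while ((disk = IOIteratorNext (iter)) != IO_OBJECT_NULL)
    {
      io_name_t name;
      if (IORegistryEntryGetName (disk, name) == KERN_SUCCESS)
	printf ("media: %s\n", name);
      IOObjectRelease (disk);
    }

  IOObjectRelease (iter);
  return 0;
}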
Example #25
static void
gum_kernel_do_deinit (void)
{
  mach_port_deallocate (mach_task_self (), gum_kernel_get_task ());
}
Example #26
/* -----------------------------------------------------------------------------
----------------------------------------------------------------------------- */
__private_extern__
kern_return_t
_pppcontroller_attach_proxy(mach_port_t server,
							xmlData_t nameRef,		/* raw XML bytes */
							mach_msg_type_number_t nameLen,
							mach_port_t bootstrap,
							mach_port_t notify,
							mach_port_t au_session,
							int uid,
							int gid,
							int pid,
							mach_port_t *session,
							int * result,
							audit_token_t audit_token)
{
	CFStringRef			serviceID = NULL;
	CFMachPortRef		port = NULL;
	CFRunLoopSourceRef  rls = NULL;
	struct client		*client = NULL;
	mach_port_t			oldport;
	uid_t				audit_euid = -1;
	gid_t				audit_egid = -1;
	pid_t				audit_pid = -1;
	
	*session = MACH_PORT_NULL;
	/* un-serialize the serviceID */
	if (!_SCUnserializeString(&serviceID, NULL, (void *)nameRef, nameLen)) {
		*result = kSCStatusFailed;
		goto failed;
	}

	if (!isA_CFString(serviceID)) {
		*result = kSCStatusInvalidArgument;
		goto failed;
	}

	/* only allow "root" callers to change the client uid/gid/pid */
	audit_token_to_au32(audit_token,
						NULL,			// auidp
						&audit_euid,	// euid
						&audit_egid,	// egid
						NULL,			// ruid
						NULL,			// rgid
						&audit_pid,		// pid
						NULL,			// asid
						NULL);			// tid

    if ((audit_euid != 0) &&
        ((uid != audit_euid) || (gid != audit_egid) || (pid != audit_pid))) {
        /*
         * the caller is NOT "root" and is trying to masquerade
         * as some other user/process.
         */
        
        /* does the caller have the right entitlement? */
        if (!(hasEntitlement(audit_token, kSCVPNConnectionEntitlementName, NULL))) {
            *result = kSCStatusAccessError;
            goto failed;
        }
    }
    
	
	//if ((findbyserviceID(serviceID)) == 0) {
	//	*result = kSCStatusInvalidArgument;
	//	goto failed;
	//}

	/* allocate session port */
	(void) mach_port_allocate(mach_task_self(),
							  MACH_PORT_RIGHT_RECEIVE,
							  session);

    /*
     * Note: we create the CFMachPort *before* we insert the send
     *       right to ensure that CF does not establish its own
     *       dead-name notification.
     */
	port = _SC_CFMachPortCreateWithPort("PPPController/PPP", *session, server_handle_request, NULL);

    /* insert send right that will be moved to the client */
	(void) mach_port_insert_right(mach_task_self(),
								  *session,
								  *session,
								  MACH_MSG_TYPE_MAKE_SEND);

	/* Request a notification when/if the client dies */
	(void) mach_port_request_notification(mach_task_self(),
										  *session,
										  MACH_NOTIFY_NO_SENDERS,
										  1,
										  *session,
										  MACH_MSG_TYPE_MAKE_SEND_ONCE,
										  &oldport);

	/* add to runloop */
	rls = CFMachPortCreateRunLoopSource(NULL, port, 0);
	CFRunLoopAddSource(CFRunLoopGetCurrent(), rls, kCFRunLoopDefaultMode);

	if (au_session != MACH_PORT_NULL) {
		if ((audit_session_join(au_session)) == AU_DEFAUDITSID) {
			SCLog(TRUE, LOG_ERR, CFSTR("_pppcontroller_attach audit_session_join fails"));
		}
	} else {
		SCLog(TRUE, LOG_ERR, CFSTR("_pppcontroller_attach au_session == NULL"));
	}

	client = client_new_mach(port, rls, serviceID, uid, gid, pid, bootstrap, notify, au_session);
	if (client == 0) {
		*result = kSCStatusFailed;
		goto failed;
	}

	*result = kSCStatusOK;
	
	my_CFRelease(&serviceID);
	my_CFRelease(&port);
	my_CFRelease(&rls);
    return KERN_SUCCESS;
	
 failed:
	my_CFRelease(&serviceID);
	if (port) {
		CFMachPortInvalidate(port);
		my_CFRelease(&port);
	}
	if (rls) {
		CFRunLoopRemoveSource(CFRunLoopGetCurrent(), rls, kCFRunLoopDefaultMode);
		my_CFRelease(&rls);
	}
	if (*session != MACH_PORT_NULL) {
		mach_port_mod_refs(mach_task_self(), *session, MACH_PORT_RIGHT_SEND   , -1);
		mach_port_mod_refs(mach_task_self(), *session, MACH_PORT_RIGHT_RECEIVE, -1);
		*session = MACH_PORT_NULL;
	}
	if (client) {
		client_dispose(client);
	} else {
		if (bootstrap != MACH_PORT_NULL)
			mach_port_deallocate(mach_task_self(), bootstrap);
		if (notify != MACH_PORT_NULL)
			mach_port_deallocate(mach_task_self(), notify);
	}
    return KERN_SUCCESS;
}
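
Stripped of the SystemConfiguration specifics, the session-port setup above is the standard three-step Mach idiom: allocate a receive right, insert a send right under the same name so it can be moved to the client, and request a no-senders notification so the server learns when the last client send right goes away. A minimal sketch of just that idiom:

/* Sketch: the bare receive-right / send-right / no-senders setup used for
   each client session above, without the CFMachPort and run-loop wiring. */
#include <mach/mach.h>
#include <stdio.h>

int
main (void)
{
  mach_port_t session = MACH_PORT_NULL;
  mach_port_t previous = MACH_PORT_NULL;
  kern_return_t kr;

  /* 1. Receive right for the new session port. */
  kr = mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE,
			   &session);
  if (kr != KERN_SUCCESS)
    return 1;

  /* 2. Send right under the same name, to be moved to the client. */
  kr = mach_port_insert_right (mach_task_self (), session, session,
			       MACH_MSG_TYPE_MAKE_SEND);
  if (kr != KERN_SUCCESS)
    return 1;

  /* 3. Deliver MACH_NOTIFY_NO_SENDERS to the port itself once the last
	send right disappears (same sync value as the example above). */
  kr = mach_port_request_notification (mach_task_self (), session,
				       MACH_NOTIFY_NO_SENDERS, 1, session,
				       MACH_MSG_TYPE_MAKE_SEND_ONCE,
				       &previous);
  if (kr != KERN_SUCCESS)
    return 1;

  printf ("session port 0x%x ready\n", (unsigned) session);

  /* Tear down: dropping both rights destroys the port. */
  mach_port_mod_refs (mach_task_self (), session, MACH_PORT_RIGHT_SEND, -1);
  mach_port_mod_refs (mach_task_self (), session, MACH_PORT_RIGHT_RECEIVE, -1);
  return 0;
}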
Example #27
nsresult
nsPerformanceStatsService::GetResources(uint64_t* userTime,
                                        uint64_t* systemTime) const {
  MOZ_ASSERT(userTime);
  MOZ_ASSERT(systemTime);

#if defined(XP_MACOSX)
  // On Mac OS X, to get per-thread data, we need to
  // reach into the kernel.

  mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
  thread_basic_info_data_t info;
  mach_port_t port = mach_thread_self();
  kern_return_t err =
    thread_info(/* [in] targeted thread*/ port,
                /* [in] nature of information*/ THREAD_BASIC_INFO,
                /* [out] thread information */  (thread_info_t)&info,
                /* [inout] number of items */   &count);

  // We do not need ability to communicate with the thread, so
  // let's release the port.
  mach_port_deallocate(mach_task_self(), port);

  if (err != KERN_SUCCESS)
    return NS_ERROR_FAILURE;

  *userTime = info.user_time.microseconds + info.user_time.seconds * 1000000;
  *systemTime = info.system_time.microseconds + info.system_time.seconds * 1000000;

#elif defined(XP_UNIX)
  struct rusage rusage;
#if defined(RUSAGE_THREAD)
  // Under Linux, we can obtain per-thread statistics
  int err = getrusage(RUSAGE_THREAD, &rusage);
#else
  // Under other Unices, we have to make do with the noisier
  // per-process statistics.
  int err = getrusage(RUSAGE_SELF, &rusage);
#endif // defined(RUSAGE_THREAD)

  if (err)
    return NS_ERROR_FAILURE;

  *userTime = rusage.ru_utime.tv_usec + rusage.ru_utime.tv_sec * 1000000;
  *systemTime = rusage.ru_stime.tv_usec + rusage.ru_stime.tv_sec * 1000000;

#elif defined(XP_WIN)
  // Under Windows, we can obtain per-thread statistics. Experience
  // seems to suggest that they are not very accurate under Windows
  // XP, though.
  FILETIME creationFileTime; // Ignored
  FILETIME exitFileTime; // Ignored
  FILETIME kernelFileTime;
  FILETIME userFileTime;
  BOOL success = GetThreadTimes(GetCurrentThread(),
                                &creationFileTime, &exitFileTime,
                                &kernelFileTime, &userFileTime);

  if (!success)
    return NS_ERROR_FAILURE;

  ULARGE_INTEGER kernelTimeInt;
  kernelTimeInt.LowPart = kernelFileTime.dwLowDateTime;
  kernelTimeInt.HighPart = kernelFileTime.dwHighDateTime;
  // Convert 100 ns to 1 us.
  *systemTime = kernelTimeInt.QuadPart / 10;

  ULARGE_INTEGER userTimeInt;
  userTimeInt.LowPart = userFileTime.dwLowDateTime;
  userTimeInt.HighPart = userFileTime.dwHighDateTime;
  // Convert 100 ns to 1 us.
  *userTime = userTimeInt.QuadPart / 10;

#endif // defined(XP_MACOSX) || defined(XP_UNIX) || defined(XP_WIN)

  return NS_OK;
}
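
The Unix fallback above is just getrusage() plus a microsecond conversion. A tiny stand-alone version of that branch, usable on any POSIX system (cpu_times_us is our own helper name):

/* Sketch: per-process (or, where RUSAGE_THREAD exists, per-thread) CPU
   times in microseconds via getrusage(). */
#include <sys/resource.h>
#include <stdint.h>
#include <stdio.h>

static int
cpu_times_us (uint64_t *user_us, uint64_t *system_us)
{
  struct rusage ru;
#if defined(RUSAGE_THREAD)
  int who = RUSAGE_THREAD;   /* Linux: current thread only */
#else
  int who = RUSAGE_SELF;     /* elsewhere: whole process */
#endif

  if (getrusage (who, &ru) != 0)
    return -1;

  *user_us = (uint64_t) ru.ru_utime.tv_sec * 1000000 + ru.ru_utime.tv_usec;
  *system_us = (uint64_t) ru.ru_stime.tv_sec * 1000000 + ru.ru_stime.tv_usec;
  return 0;
}

int
main (void)
{
  uint64_t u, s;
  volatile double x = 0;            /* burn a little CPU */
  for (int i = 0; i < 10000000; i++)
    x += i * 0.5;

  if (cpu_times_us (&u, &s) == 0)
    printf ("user %llu us, system %llu us\n",
	    (unsigned long long) u, (unsigned long long) s);
  return 0;
}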
Example #28
int yr_process_get_memory(
    pid_t pid,
    YR_MEMORY_BLOCK** first_block)
{
  task_t task;
  kern_return_t kr;

  vm_size_t size = 0;
  vm_address_t address = 0;
  vm_region_basic_info_data_64_t info;
  mach_msg_type_number_t info_count;
  mach_port_t object;

  unsigned char* data;

  YR_MEMORY_BLOCK* new_block;
  YR_MEMORY_BLOCK* current_block = NULL;

  *first_block = NULL;

  if ((kr = task_for_pid(mach_task_self(), pid, &task)) != KERN_SUCCESS)
    return ERROR_COULD_NOT_ATTACH_TO_PROCESS;

  do {

    info_count = VM_REGION_BASIC_INFO_COUNT_64;

    kr = vm_region_64(
        task,
        &address,
        &size,
        VM_REGION_BASIC_INFO,
        (vm_region_info_t) &info,
        &info_count,
        &object);

    if (kr == KERN_SUCCESS)
    {
      data = (unsigned char*) yr_malloc(size);

      if (data == NULL)
        return ERROR_INSUFICIENT_MEMORY;

      if (vm_read_overwrite(
              task,
              address,
              size,
              (vm_address_t)
              data,
              &size) == KERN_SUCCESS)
      {
        new_block = (YR_MEMORY_BLOCK*) yr_malloc(sizeof(YR_MEMORY_BLOCK));

        if (new_block == NULL)
        {
          yr_free(data);
          return ERROR_INSUFICIENT_MEMORY;
        }

        if (*first_block == NULL)
          *first_block = new_block;

        new_block->base = address;
        new_block->size = size;
        new_block->data = data;
        new_block->next = NULL;

        if (current_block != NULL)
          current_block->next = new_block;

        current_block = new_block;
      }
      else
      {
        yr_free(data);
      }

      address += size;
    }


  } while (kr != KERN_INVALID_ADDRESS);

  if (task != MACH_PORT_NULL)
    mach_port_deallocate(mach_task_self(), task);

  return ERROR_SUCCESS;
}
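
The same vm_region_64() walk can be pointed at the current task, which needs no task_for_pid() privilege and makes a convenient self-contained test of the loop structure (macOS/Mach only; this sketch only prints the map, it does not copy memory the way the example does):

/* Sketch: enumerate the memory regions of the current task with
   vm_region_64(), printing address, size and protection. */
#include <mach/mach.h>
#include <stdio.h>

int
main (void)
{
  vm_address_t address = 0;
  vm_size_t size = 0;

  for (;;)
    {
      vm_region_basic_info_data_64_t info;
      mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
      mach_port_t object;

      kern_return_t kr = vm_region_64 (mach_task_self (), &address, &size,
				       VM_REGION_BASIC_INFO,
				       (vm_region_info_t) &info,
				       &count, &object);
      if (kr != KERN_SUCCESS)
	break;                      /* KERN_INVALID_ADDRESS: end of map */

      if (object != MACH_PORT_NULL)
	mach_port_deallocate (mach_task_self (), object);

      printf ("%#018lx-%#018lx %c%c%c\n",
	      (unsigned long) address, (unsigned long) (address + size),
	      (info.protection & VM_PROT_READ) ? 'r' : '-',
	      (info.protection & VM_PROT_WRITE) ? 'w' : '-',
	      (info.protection & VM_PROT_EXECUTE) ? 'x' : '-');

      address += size;
    }

  return 0;
}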
Example #29
SharedMemory::Handle::~Handle()
{
    if (m_port)
        mach_port_deallocate(mach_task_self(), m_port);
}
Example #30
int
main()
{
    kern_return_t     kr;
    clock_serv_t      clk_system;
    mach_timespec_t   alarm_time;
    clock_reply_t     alarm_port;
    struct timeval    t1, t2;
    msg_format_recv_t message;
    mach_port_t       mytask;
   
    // The C library optimizes this call by returning the task port's value
    // that it caches in the mach_task_self_ variable.
    mytask = mach_task_self();
   
    kr = host_get_clock_service(mach_host_self(), SYSTEM_CLOCK,
                                (clock_serv_t *)&clk_system);
    OUT_ON_MACH_ERROR("host_get_clock_service", kr);
   
    // Let us set the alarm to ring after 2.5 seconds
    alarm_time.tv_sec = 2;
    alarm_time.tv_nsec = 50000000;
   
    // Allocate a port (specifically, get receive right for the new port)
    // We will use this port to receive the alarm message from the clock
    kr = mach_port_allocate(
             mytask,                  // the task acquiring the port right
             MACH_PORT_RIGHT_RECEIVE, // type of right
             &alarm_port);            // task's name for the port right
    OUT_ON_MACH_ERROR("mach_port_allocate", kr);
   
    gettimeofday(&t1, NULL);
   
    // Set the alarm
    kr = clock_alarm(clk_system,    // the clock to use
                     TIME_RELATIVE, // how to interpret alarm time
                     alarm_time,    // the alarm time
                     alarm_port);   // this port will receive the alarm message
    OUT_ON_MACH_ERROR("clock_alarm", kr);
   
    printf("Current time %ld s + %d us\n"
           "Setting alarm to ring after %d s + %d ns\n",
           t1.tv_sec, t1.tv_usec, alarm_time.tv_sec, alarm_time.tv_nsec);
   
    // Wait to receive the alarm message (we will block here)
    kr = mach_msg(&(message.header),       // the message buffer
                  MACH_RCV_MSG,            // message option bits
                  0,                       // send size (we are receiving, so 0)
                  message.header.msgh_size,// receive limit
                  alarm_port,              // receive right
                  MACH_MSG_TIMEOUT_NONE,   // no timeout
                  MACH_PORT_NULL);         // no timeout notification port
    // We should have received an alarm message at this point
    gettimeofday(&t2, NULL);
    OUT_ON_MACH_ERROR("mach_msg", kr);
   
    if (t2.tv_usec < t1.tv_usec) {
        t1.tv_sec += 1;
        t1.tv_usec -= 1000000;
    }
   
    printf("\nCurrent time %ld s + %d us\n", t2.tv_sec, t2.tv_usec);
    printf("Alarm rang after %ld s + %d us\n", (t2.tv_sec - t1.tv_sec),
          (t2.tv_usec - t1.tv_usec));
   
out:
    mach_port_deallocate(mytask, clk_system);
   
    // Release user reference for the receive right we created
    mach_port_deallocate(mytask, alarm_port);
   
    exit(0);
}