/* Implement fsys_set_options as described in <hurd/fsys.defs>. */
kern_return_t
diskfs_S_fsys_set_options (fsys_t fsys,
			   mach_port_t reply,
			   mach_msg_type_name_t replytype,
			   char *data, mach_msg_type_number_t len,
			   int do_children)
{
  error_t err = 0;
  struct port_info *pt =
    ports_lookup_port (diskfs_port_bucket, fsys, diskfs_control_class);

  error_t
    helper (struct node *np)
      {
	error_t error;
	mach_port_t control;

	error = fshelp_fetch_control (&np->transbox, &control);
	mutex_unlock (&np->lock);
	if (!error && (control != MACH_PORT_NULL))
	  {
	    error = fsys_set_options (control, data, len, do_children);
	    mach_port_deallocate (mach_task_self (), control);
	  }
	else
	  error = 0;
	mutex_lock (&np->lock);

	if ((error == MIG_SERVER_DIED) || (error == MACH_SEND_INVALID_DEST))
	  error = 0;
	return error;
      }

  if (!pt)
    return EOPNOTSUPP;

  /* Propagate the options to child translators first (if requested),
     then apply them to this filesystem itself.  */
  if (do_children)
    err = diskfs_node_iterate (helper);

  if (!err)
    err = diskfs_set_options (data, len);

  ports_port_deref (pt);
  return err;
}
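/* Usage sketch, in the spirit of fsysopts(1): pack option strings into an
   argz vector and send them to a translator's control port.  CONTROL is
   assumed to have been obtained elsewhere (e.g. via file_get_translator_cntl);
   the fsys_set_options user stub is generated from <hurd/fsys.defs>.  */
#include <argz.h>
#include <stdlib.h>

static error_t
set_options_example (fsys_t control)
{
  char *argz = NULL;
  size_t argz_len = 0;
  error_t err = argz_create_sep ("--readonly", ' ', &argz, &argz_len);
  if (!err)
    err = fsys_set_options (control, argz, argz_len, 1 /* do_children */);
  free (argz);
  return err;
}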
void
ports_dead_name (void *notify, mach_port_t dead_name)
{
  struct protid *pi = ports_lookup_port (diskfs_port_bucket, dead_name,
					 diskfs_protid_class);
  struct node *np;
  
  if (pi)
    {
      np = pi->po->np;
      pthread_mutex_lock (&np->lock);
      if (dead_name == np->sockaddr)
	{
	  mach_port_deallocate (mach_task_self (), np->sockaddr);
	  np->sockaddr = MACH_PORT_NULL;
	  diskfs_nput (np);
	}
      else
	pthread_mutex_unlock (&np->lock);
    }

  fshelp_remove_active_translator (dead_name);

  ports_interrupt_notified_rpcs (notify, dead_name, MACH_NOTIFY_DEAD_NAME);
}
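/* Context sketch: ports_dead_name runs when a dead-name notification is
   delivered for a name the server cached (NP->sockaddr above).  The
   notification itself is requested from the kernel roughly as follows;
   NOTIFY_PORT and the function name are illustrative.  */
#include <mach.h>
#include <mach/notify.h>

static kern_return_t
request_dead_name_notification (mach_port_t name, mach_port_t notify_port)
{
  mach_port_t previous;
  kern_return_t err =
    mach_port_request_notification (mach_task_self (), name,
				    MACH_NOTIFY_DEAD_NAME, 0, notify_port,
				    MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
  if (!err && previous != MACH_PORT_NULL)
    mach_port_deallocate (mach_task_self (), previous);
  return err;
}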
static error_t
allowed (mach_port_t port, int mode)
{
  struct trivfs_protid *cred = ports_lookup_port
    (0, port, trivfs_protid_portclasses[0]);
  if (!cred)
    return MIG_BAD_ID;
  error_t result = (cred->po->openmodes & mode) ? 0 : EACCES;
  ports_port_deref (cred);
  return result;
}
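/* Usage sketch: a hypothetical trivfs RPC handler gating a write-style
   operation on the caller's open modes via allowed() above.
   S_example_store is illustrative, not part of any real interface.  */
#include <fcntl.h>

kern_return_t
S_example_store (mach_port_t port, char *data, mach_msg_type_number_t len)
{
  error_t err = allowed (port, O_WRITE);
  if (err)
    return err;
  /* ... act on DATA now that write access has been verified ...  */
  return 0;
}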
int
pfinet_demuxer (mach_msg_header_t *inp,
                mach_msg_header_t *outp)
{
    struct port_info *pi;

    /* We have several classes in one bucket, which need to be demuxed
       differently.  */
    if (MACH_MSGH_BITS_LOCAL (inp->msgh_bits) ==
            MACH_MSG_TYPE_PROTECTED_PAYLOAD)
        pi = ports_lookup_payload (pfinet_bucket,
                                   inp->msgh_protected_payload,
                                   socketport_class);
    else
        pi = ports_lookup_port (pfinet_bucket,
                                inp->msgh_local_port,
                                socketport_class);

    if (pi)
    {
        ports_port_deref (pi);

        mig_routine_t routine;
        if ((routine = io_server_routine (inp)) ||
                (routine = socket_server_routine (inp)) ||
                (routine = pfinet_server_routine (inp)) ||
                (routine = iioctl_server_routine (inp)) ||
                (routine = NULL, trivfs_demuxer (inp, outp)) ||
                (routine = startup_notify_server_routine (inp)))
        {
            if (routine)
                (*routine) (inp, outp);
            return TRUE;
        }
        else
            return FALSE;
    }
    else
    {
        mig_routine_t routine;
        if ((routine = socket_server_routine (inp)) ||
                (routine = pfinet_server_routine (inp)) ||
                (routine = iioctl_server_routine (inp)) ||
                (routine = NULL, trivfs_demuxer (inp, outp)) ||
                (routine = startup_notify_server_routine (inp)))
        {
            if (routine)
                (*routine) (inp, outp);
            return TRUE;
        }
        else
            return FALSE;
    }
}
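/* Usage sketch: a demuxer like the one above is handed to the libports
   server loop; pfinet's main() does essentially the following (the timeout
   values are illustrative).  */
ports_manage_port_operations_multithread (pfinet_bucket, pfinet_demuxer,
                                          30 * 1000,     /* thread timeout */
                                          2 * 60 * 1000, /* global timeout */
                                          0);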
/* The system is going down; destroy all the extant port rights.  That
   will cause net channels and such to close promptly.  */
error_t
S_startup_dosync (mach_port_t handle)
{
    struct port_info *inpi = ports_lookup_port (pfinet_bucket, handle,
                             shutdown_notify_class);

    if (!inpi)
        return EOPNOTSUPP;
    ports_port_deref (inpi);

    ports_class_iterate (socketport_class, ports_destroy_right);
    return 0;
}
/* Cause a pending request on this object to immediately return.  The
   exact semantics are dependent on the specific object.  */
kern_return_t
ports_S_interrupt_operation (mach_port_t port,
			     mach_port_seqno_t seqno)
{
  struct port_info *pi = ports_lookup_port (0, port, 0);
  if (!pi)
    return EOPNOTSUPP;
  mutex_lock (&_ports_lock);
  if (pi->cancel_threshold < seqno)
    pi->cancel_threshold = seqno;
  mutex_unlock (&_ports_lock);
  ports_interrupt_rpcs (pi);
  ports_port_deref (pi);
  return 0;
}
error_t
netfs_S_fsys_getroot (mach_port_t cntl,
		      mach_port_t reply,
		      mach_msg_type_name_t reply_type,
		      mach_port_t dotdot,
		      uid_t *uids, mach_msg_type_number_t nuids,
		      gid_t *gids, mach_msg_type_number_t ngids,
		      int flags,
		      retry_type *do_retry,
		      char *retry_name,
		      mach_port_t *retry_port,
		      mach_msg_type_name_t *retry_port_type)
{
  struct port_info *pt = ports_lookup_port (netfs_port_bucket, cntl,
					    netfs_control_class);
  struct iouser *cred;
  error_t err;
  struct protid *newpi;
  mode_t type;
  struct peropen peropen_context = { root_parent: dotdot };
error_t
S_ifsock_getsockaddr (file_t sockfile,
		      mach_port_t *address)
{
  struct trivfs_protid *cred = ports_lookup_port (port_bucket, sockfile,
						  node_class);
  int perms;
  error_t err;

  if (!cred)
    return EOPNOTSUPP;

  err = file_check_access (cred->realnode, &perms);
  if (!err && !(perms & O_READ))
    err = EACCES;

  if (!err)
    *address = address_port;
  ports_port_deref (cred);
  return err;
}
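/* Usage sketch: how a client might fetch the address port of a socket node
   through the <hurd/ifsock.defs> user stub; PATH and the function name are
   illustrative.  */
#include <hurd.h>
#include <fcntl.h>

static error_t
get_sockaddr_example (const char *path, mach_port_t *addr)
{
  error_t err;
  file_t node = file_name_lookup (path, O_READ, 0);
  if (node == MACH_PORT_NULL)
    return errno;
  err = ifsock_getsockaddr (node, addr);
  mach_port_deallocate (mach_task_self (), node);
  return err;
}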
/* Return in FILE & FILE_TYPE the file in FSYS corresponding to the NFS file
   handle HANDLE & HANDLE_LEN.  */
error_t
diskfs_S_fsys_getfile (mach_port_t fsys,
		       mach_port_t reply, mach_msg_type_name_t reply_type,
		       uid_t *uids, mach_msg_type_number_t nuids,
		       gid_t *gids, mach_msg_type_number_t ngids,
		       char *handle, mach_msg_type_number_t handle_len,
		       mach_port_t *file, mach_msg_type_name_t *file_type)
{
  int flags;
  error_t err;
  struct node *node;
  const union diskfs_fhandle *f;
  struct protid *new_cred;
  struct peropen *new_po;
  struct iouser *user;
  struct port_info *pt =
    ports_lookup_port (diskfs_port_bucket, fsys, diskfs_control_class);

  if (!pt)
    return EOPNOTSUPP;

  if (handle_len != sizeof *f)
    {
      ports_port_deref (pt);
      return EINVAL;
    }

  f = (const union diskfs_fhandle *) handle;

  err = diskfs_cached_lookup (f->data.cache_id, &node);
  if (err)
    {
      ports_port_deref (pt);
      return err;
    }

  if (node->dn_stat.st_gen != f->data.gen)
    {
      diskfs_nput (node);
      ports_port_deref (pt);
      return ESTALE;
    }

  err = iohelp_create_complex_iouser (&user, uids, nuids, gids, ngids);
  if (err)
    {
      diskfs_nput (node);
      ports_port_deref (pt);
      return err;
    }

  flags = 0;
  if (! fshelp_access (&node->dn_stat, S_IREAD, user))
    flags |= O_READ;
  if (! fshelp_access (&node->dn_stat, S_IEXEC, user))
    flags |= O_EXEC;
  if (! fshelp_access (&node->dn_stat, S_IWRITE, user)
      && ! S_ISDIR (node->dn_stat.st_mode)
      && ! diskfs_check_readonly ())
    flags |= O_WRITE;

  err = diskfs_make_peropen (node, flags, 0, &new_po);
  if (! err)
    {
      err = diskfs_create_protid (new_po, user, &new_cred);
      if (err)
	diskfs_release_peropen (new_po);
    }

  iohelp_free_iouser (user);

  diskfs_nput (node);
  ports_port_deref (pt);

  if (! err)
    {
      *file = ports_get_right (new_cred);
      *file_type = MACH_MSG_TYPE_MAKE_SEND;
    }

  return err;
}
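/* For reference, a plausible shape for the NFS-style handle decoded above.
   The authoritative definition belongs to libdiskfs; the layout shown here
   is an assumption, consistent only with what the code actually relies on
   (data.cache_id, data.gen, and a fixed sizeof (union diskfs_fhandle)).  */
union diskfs_fhandle
{
  char bytes[28];
  struct
  {
    int filler;
    unsigned int cache_id;
    unsigned int gen;
  } data;
};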
void
ports_manage_port_operations_one_thread (struct port_bucket *bucket,
					 ports_demuxer_type demuxer,
					 int timeout)
{
  error_t err;

  int 
  internal_demuxer (mach_msg_header_t *inp,
		    mach_msg_header_t *outheadp)
    {
      struct port_info *pi;
      struct rpc_info link;
      int status;
      error_t err;
      register mig_reply_header_t *outp = (mig_reply_header_t *) outheadp;
      static const mach_msg_type_t RetCodeType = {
		/* msgt_name = */		MACH_MSG_TYPE_INTEGER_32,
		/* msgt_size = */		32,
		/* msgt_number = */		1,
		/* msgt_inline = */		TRUE,
		/* msgt_longform = */		FALSE,
		/* msgt_deallocate = */		FALSE,
		/* msgt_unused = */		0
	};

      /* Fill in default response. */
      outp->Head.msgh_bits 
	= MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(inp->msgh_bits), 0);
      outp->Head.msgh_size = sizeof *outp;
      outp->Head.msgh_remote_port = inp->msgh_remote_port;
      outp->Head.msgh_local_port = MACH_PORT_NULL;
      outp->Head.msgh_seqno = 0;
      outp->Head.msgh_id = inp->msgh_id + 100;
      outp->RetCodeType = RetCodeType;
      outp->RetCode = MIG_BAD_ID;

      pi = ports_lookup_port (bucket, inp->msgh_local_port, 0);
      if (pi)
	{
	  err = ports_begin_rpc (pi, inp->msgh_id, &link);
	  if (err)
	    {
	      mach_port_deallocate (mach_task_self (), inp->msgh_remote_port);
	      outp->RetCode = err;
	      status = 1;
	    }
	  else
	    {
	      /* No need to check cancel threshold here, because
		 in a single threaded server the cancel is always
		 handled in order. */
	      status = demuxer (inp, outheadp);
	      ports_end_rpc (pi, &link);
	    }
	  ports_port_deref (pi);
	}
      else
	{
	  outp->RetCode = EOPNOTSUPP;
	  status = 1;
	}

      return status;
    }
  
  do
    err = mach_msg_server_timeout (internal_demuxer, 0, bucket->portset, 
				   timeout ? MACH_RCV_TIMEOUT : 0, timeout);
  while (err != MACH_RCV_TIMED_OUT);
}
void
ports_manage_port_operations_one_thread (struct port_bucket *bucket,
					 ports_demuxer_type demuxer,
					 int timeout)
{
  struct ports_thread thread;
  error_t err;

  int 
  internal_demuxer (mach_msg_header_t *inp,
		    mach_msg_header_t *outheadp)
    {
      struct port_info *pi;
      struct rpc_info link;
      int status;
      error_t err;
      register mig_reply_header_t *outp = (mig_reply_header_t *) outheadp;
      static const mach_msg_type_t RetCodeType = {
		/* msgt_name = */		MACH_MSG_TYPE_INTEGER_32,
		/* msgt_size = */		32,
		/* msgt_number = */		1,
		/* msgt_inline = */		TRUE,
		/* msgt_longform = */		FALSE,
		/* msgt_deallocate = */		FALSE,
		/* msgt_unused = */		0
	};

      /* Fill in default response. */
      outp->Head.msgh_bits 
	= MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(inp->msgh_bits), 0);
      outp->Head.msgh_size = sizeof *outp;
      outp->Head.msgh_remote_port = inp->msgh_remote_port;
      outp->Head.msgh_local_port = MACH_PORT_NULL;
      outp->Head.msgh_seqno = 0;
      outp->Head.msgh_id = inp->msgh_id + 100;
      outp->RetCodeType = RetCodeType;
      outp->RetCode = MIG_BAD_ID;

      if (MACH_MSGH_BITS_LOCAL (inp->msgh_bits) ==
	  MACH_MSG_TYPE_PROTECTED_PAYLOAD)
	pi = ports_lookup_payload (bucket, inp->msgh_protected_payload, NULL);
      else
	{
	  pi = ports_lookup_port (bucket, inp->msgh_local_port, 0);
	  if (pi)
	    {
	      /* Store the objects address as the payload and set the
		 message type accordingly.  This prevents us from
		 having to do another hash table lookup in the intran
		 functions if protected payloads are not supported by
		 the kernel.  */
	      inp->msgh_bits =
		MACH_MSGH_BITS_OTHER (inp->msgh_bits)
		| MACH_MSGH_BITS (MACH_MSGH_BITS_REMOTE (inp->msgh_bits),
				  MACH_MSG_TYPE_PROTECTED_PAYLOAD);
	      inp->msgh_protected_payload = (unsigned long) pi;
	    }
	}

      if (pi)
	{
	  err = ports_begin_rpc (pi, inp->msgh_id, &link);
	  if (err)
	    {
	      mach_port_deallocate (mach_task_self (), inp->msgh_remote_port);
	      outp->RetCode = err;
	      status = 1;
	    }
	  else
	    {
	      /* No need to check cancel threshold here, because
		 in a single threaded server the cancel is always
		 handled in order. */
	      status = demuxer (inp, outheadp);
	      ports_end_rpc (pi, &link);
	    }
	  ports_port_deref (pi);
	}
      else
	{
	  outp->RetCode = EOPNOTSUPP;
	  status = 1;
	}

      _ports_thread_quiescent (&bucket->threadpool, &thread);
      return status;
    }

  /* XXX It is currently unsafe for most servers to terminate based on
     inactivity because a request may arrive after a server has
     started shutting down, causing the client to receive an error.
     Prevent the service loop from terminating by setting TIMEOUT to
     zero.  */
  timeout = 0;

  _ports_thread_online (&bucket->threadpool, &thread);
  do
    err = mach_msg_server_timeout (internal_demuxer, 0, bucket->portset, 
				   timeout ? MACH_RCV_TIMEOUT : 0, timeout);
  while (err != MACH_RCV_TIMED_OUT);
  _ports_thread_offline (&bucket->threadpool, &thread);
}
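/* Usage sketch: a single-threaded translator's server loop reduces to one
   call (BUCKET and DEMUXER are illustrative).  A TIMEOUT of 0 means the
   loop never times out, so this call does not return.  */
ports_manage_port_operations_one_thread (bucket, demuxer, 0);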
/* Implement the object termination call from the kernel as described
   in <mach/memory_object.defs>. */
kern_return_t
_pager_seqnos_memory_object_terminate (mach_port_t object, 
				       mach_port_seqno_t seqno,
				       mach_port_t control,
				       mach_port_t name)
{
  struct pager *p;
  
  p = ports_lookup_port (0, object, _pager_class);
  if (!p)
    return EOPNOTSUPP;

  mutex_lock (&p->interlock);
  _pager_wait_for_seqno (p, seqno);
  
  if (control != p->memobjcntl)
    {
      printf ("incg terminate: wrong control port");
      goto out;
    }
  if (name != p->memobjname)
    {
      printf ("incg terminate: wrong name port");
      goto out;
    }

  while (p->noterm)
    {
      p->termwaiting = 1;
      condition_wait (&p->wakeup, &p->interlock);
    }

  /* Destroy the ports we received; mark that in P so that it doesn't bother
     doing it again. */
  mach_port_destroy (mach_task_self (), control);
  mach_port_destroy (mach_task_self (), name);
  p->memobjcntl = p->memobjname = MACH_PORT_NULL;

  _pager_free_structure (p);

#ifdef KERNEL_INIT_RACE
  if (p->init_head)
    {
      struct pending_init *i = p->init_head;
      p->init_head = i->next;
      if (!i->next)
	p->init_tail = 0;
      p->memobjcntl = i->control;
      p->memobjname = i->name;
      memory_object_ready (i->control, p->may_cache, p->copy_strategy);
      p->pager_state = NORMAL;
      free (i);
    }
#endif

 out:
  _pager_release_seqno (p, seqno);
  mutex_unlock (&p->interlock);
  ports_port_deref (p);

  return 0;
}
/* Implement pagein callback as described in <mach/memory_object.defs>. */
kern_return_t
_pager_seqnos_memory_object_data_request (mach_port_t object,
					  mach_port_seqno_t seqno,
					  mach_port_t control,
					  vm_offset_t offset,
					  vm_size_t length,
					  vm_prot_t access)
{
  struct pager *p;
  short *pm_entry;
  int doread, doerror;
  error_t err;
  vm_address_t page;
  int write_lock;

  p = ports_lookup_port (0, object, _pager_class);
  if (!p)
    return EOPNOTSUPP;

  /* Acquire the right to meddle with the pagemap */
  mutex_lock (&p->interlock);
  _pager_wait_for_seqno (p, seqno);

  /* sanity checks -- we don't do multi-page requests yet.  */
  if (control != p->memobjcntl)
    {
      printf ("incg data request: wrong control port\n");
      goto release_out;
    }
  if (length != __vm_page_size)
    {
      printf ("incg data request: bad length size %zd\n", length);
      goto release_out;
    }
  if (offset % __vm_page_size)
    {
      printf ("incg data request: misaligned request\n");
      goto release_out;
    }

  _pager_block_termination (p);	/* prevent termination until
				   mark_object_error is done */

  if (p->pager_state != NORMAL)
    {
      printf ("pager in wrong state for read\n");
      goto allow_release_out;
    }

  err = _pager_pagemap_resize (p, offset + length);
  if (err)
    goto allow_release_out;	/* Can't do much about the actual error.  */

  /* If someone is paging this out right now, the disk contents are
     unreliable, so we have to wait.  It is too expensive (right now) to
     find the data and return it, and then interrupt the write, so we just
     mark the page and have the writing thread do m_o_data_supply when it
     gets around to it.  */
  pm_entry = &p->pagemap[offset / __vm_page_size];
  if (*pm_entry & PM_PAGINGOUT)
    {
      doread = 0;
      *pm_entry |= PM_PAGEINWAIT;
    }
  else
    doread = 1;

  if (*pm_entry & PM_INVALID)
    doerror = 1;
  else
    doerror = 0;

  *pm_entry |= PM_INCORE;

  if (PM_NEXTERROR (*pm_entry) != PAGE_NOERR && (access & VM_PROT_WRITE))
    {
      memory_object_data_error (control, offset, length,
				_pager_page_errors[PM_NEXTERROR (*pm_entry)]);
      _pager_mark_object_error (p, offset, length,
				_pager_page_errors[PM_NEXTERROR (*pm_entry)]);
      *pm_entry = SET_PM_NEXTERROR (*pm_entry, PAGE_NOERR);
      doread = 0;
    }

  /* Let someone else in.  */
  _pager_release_seqno (p, seqno);
  mutex_unlock (&p->interlock);

  if (!doread)
    goto allow_term_out;
  if (doerror)
    goto error_read;

  err = pager_read_page (p->upi, offset, &page, &write_lock);
  if (err)
    goto error_read;

  memory_object_data_supply (p->memobjcntl, offset, page, length, 1,
			     write_lock ? VM_PROT_WRITE : VM_PROT_NONE, 0,
			     MACH_PORT_NULL);
  mutex_lock (&p->interlock);
  _pager_mark_object_error (p, offset, length, 0);
  _pager_allow_termination (p);
  mutex_unlock (&p->interlock);
  ports_port_deref (p);
  return 0;

 error_read:
  memory_object_data_error (p->memobjcntl, offset, length, EIO);
  _pager_mark_object_error (p, offset, length, EIO);
 allow_term_out:
  mutex_lock (&p->interlock);
  _pager_allow_termination (p);
  mutex_unlock (&p->interlock);
  ports_port_deref (p);
  return 0;

 allow_release_out:
  _pager_allow_termination (p);
 release_out:
  _pager_release_seqno (p, seqno);
  mutex_unlock (&p->interlock);
  ports_port_deref (p);
  return 0;
}
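/* Context sketch: pager_read_page, called above, is the user-supplied
   libpager callback that produces page contents.  A minimal version backed
   by a hypothetical in-core BACKING_STORE buffer might read as follows;
   the page handed back is consumed by the data_supply above.  */
#include <string.h>

error_t
pager_read_page (struct user_pager_info *upi, vm_offset_t page,
		 vm_address_t *buf, int *write_lock)
{
  /* Hand back a freshly allocated page filled from backing store.  */
  error_t err = vm_allocate (mach_task_self (), buf, vm_page_size, 1);
  if (err)
    return err;
  memcpy ((void *) *buf, backing_store + page, vm_page_size);
  *write_lock = 0;	/* Allow the kernel to map the page writable.  */
  return 0;
}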