Example #1
static inline int scull_w_available(void)
{
	return scull_w_count == 0 ||
		scull_w_owner == __kuid_val(CURRENT_UID) ||
		scull_w_owner == __kuid_val(CURRENT_EUID) ||
		capable(CAP_DAC_OVERRIDE);
}
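The CURRENT_UID and CURRENT_EUID macros are not defined in this snippet; in LDD3-derived trees they are typically thin wrappers around the kernel's credential accessors. A minimal sketch of the assumed definitions (the macro names match the snippet, the bodies are an assumption):

#include <linux/cred.h>

#define CURRENT_UID	current_uid()	/* kuid_t: real uid of the current task */
#define CURRENT_EUID	current_euid()	/* kuid_t: effective uid of the current task */

Both accessors return a kuid_t, which is why the comparisons above go through __kuid_val().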
Example #2
static int scull_u_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *dev = &scull_u_device; /* device information */

	spin_lock(&scull_u_lock);
	if (scull_u_count && 
			(scull_u_owner != __kuid_val(CURRENT_UID)) &&  /* allow user */
			(scull_u_owner != __kuid_val(CURRENT_EUID)) && /* allow whoever did su */
			!capable(CAP_DAC_OVERRIDE)) { /* still allow root */
		spin_unlock(&scull_u_lock);
		return -EBUSY;   /* -EPERM would confuse the user */
	}

	if (scull_u_count == 0)
		scull_u_owner = __kuid_val(CURRENT_UID); /* grab it */

	scull_u_count++;
	spin_unlock(&scull_u_lock);

	/* then, everything else is copied from the bare scull device */

	if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
		scull_trim(dev);
	filp->private_data = dev;
	return 0;          /* success */
}
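The open path only works together with a release path that drops the count under the same lock; a minimal sketch of the matching release, assuming the usual LDD3 single-open pattern:

static int scull_u_release(struct inode *inode, struct file *filp)
{
	spin_lock(&scull_u_lock);
	scull_u_count--;	/* nothing else to do: ownership lapses at zero */
	spin_unlock(&scull_u_lock);
	return 0;
}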
Example #3
static void bus1_test_user(void)
{
	struct bus1_user *user1, *user2;
	kuid_t uid1 = KUIDT_INIT(1), uid2 = KUIDT_INIT(2);

	/* drop the NULL user */
	bus1_user_unref(NULL);

	/* create a user */
	user1 = bus1_user_ref_by_uid(uid1);
	WARN_ON(!user1);
	WARN_ON(__kuid_val(user1->uid) != 1);
	WARN_ON(user1->id != 0);
	WARN_ON(atomic_read(&user1->n_slices) !=
					atomic_read(&user1->max_slices));
	WARN_ON(atomic_read(&user1->n_handles) !=
					atomic_read(&user1->max_handles));
	WARN_ON(atomic_read(&user1->n_inflight_bytes) !=
					atomic_read(&user1->max_bytes));
	WARN_ON(atomic_read(&user1->n_inflight_fds) !=
					atomic_read(&user1->max_fds));

	/* create a different user */
	user2 = bus1_user_ref_by_uid(uid2);
	WARN_ON(!user2);
	WARN_ON(user1 == user2);
	WARN_ON(__kuid_val(user2->uid) != 2);
	WARN_ON(user2->id != 1);
	WARN_ON(atomic_read(&user2->n_slices) !=
					atomic_read(&user2->max_slices));
	WARN_ON(atomic_read(&user2->n_handles) !=
					atomic_read(&user2->max_handles));
	WARN_ON(atomic_read(&user2->n_inflight_bytes) !=
					atomic_read(&user2->max_bytes));
	WARN_ON(atomic_read(&user2->n_inflight_fds) !=
					atomic_read(&user2->max_fds));

	/* drop the second user */
	user2 = bus1_user_unref(user2);
	WARN_ON(user2);

	/* take another ref on the first user */
	user2 = bus1_user_ref(user1);
	WARN_ON(user1 != user2);

	/* drop the ref again */
	user2 = bus1_user_unref(user2);
	WARN_ON(user2);

	/* look up the first user again by uid */
	user2 = bus1_user_ref_by_uid(uid1);
	WARN_ON(user1 != user2);

	WARN_ON(bus1_user_unref(user1));
	WARN_ON(bus1_user_unref(user2));
}
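The test leans on the convention that bus1_user_unref() accepts NULL and always returns NULL, so callers can drop a reference and clear their pointer in a single statement. A sketch of that idiom, consistent with the other snippets but assumed rather than quoted from bus1:

static struct bus1_user *bus1_user_unref(struct bus1_user *user)
{
	if (user)	/* unref of NULL is a no-op, as the test exercises first */
		kref_put(&user->ref, bus1_user_free);
	return NULL;	/* enables: user = bus1_user_unref(user); */
}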
Example #4
/*
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
	Function	:dbgPrintVfsInode
	Input		:struct inode *inode
				 < vfs inode >
	Output		:void
	Return		:void

	Description	:print vfs inode debug information
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
*/
void dbgPrintVfsInode( struct inode *inode )
{
	
	DBGPRINT( "<ME2FS>[vfs inode information]\n" );
	DBGPRINT( "<ME2FS>i_mode = %4X\n", inode->i_mode );
	DBGPRINT( "<ME2FS>i_opflags = %u\n", inode->i_opflags );
	DBGPRINT( "<ME2FS>i_uid = %u\n", __kuid_val( inode->i_uid ) );
	DBGPRINT( "<ME2FS>i_gid = %u\n", __kgid_val( inode->i_gid ) );
	DBGPRINT( "<ME2FS>i_flags = %u\n", inode->i_flags );
	DBGPRINT( "<ME2FS>i_nlink = %u\n", inode->i_nlink );
	DBGPRINT( "<ME2FS>i_rdev = %u\n", inode->i_rdev );
	DBGPRINT( "<ME2FS>i_size = %llu\n", inode->i_size );
	DBGPRINT( "<ME2FS>i_atime.tv_sec = %lu\n", inode->i_atime.tv_sec );
	DBGPRINT( "<ME2FS>i_atime.tv_nsec = %lu\n", inode->i_atime.tv_nsec );
	DBGPRINT( "<ME2FS>i_mtime.tv_sec = %lu\n", inode->i_mtime.tv_sec );
	DBGPRINT( "<ME2FS>i_mtime.tv_nsec = %lu\n", inode->i_mtime.tv_nsec );
	DBGPRINT( "<ME2FS>i_ctime.tv_sec = %lu\n", inode->i_ctime.tv_sec );
	DBGPRINT( "<ME2FS>i_ctime.tv_nsec = %lu\n", inode->i_ctime.tv_nsec );
	DBGPRINT( "<ME2FS>i_bytes = %u\n", inode->i_bytes );
	DBGPRINT( "<ME2FS>i_blkbits = %u\n", inode->i_blkbits );
	DBGPRINT( "<ME2FS>i_blocks = %lu\n", inode->i_blocks );
	DBGPRINT( "<ME2FS>i_state = %lu\n", inode->i_state );
	DBGPRINT( "<ME2FS>dirtied_when = %lu\n", inode->dirtied_when );
	DBGPRINT( "<ME2FS>i_version = %llu\n", inode->i_version );
	DBGPRINT( "<ME2FS>i_count = %d\n", atomic_read( &inode->i_count ) );
	DBGPRINT( "<ME2FS>i_dio_count = %d\n", atomic_read( &inode->i_dio_count ) );
	DBGPRINT( "<ME2FS>i_writecount = %d\n", atomic_read( &inode->i_writecount ) );
	DBGPRINT( "<ME2FS>i_generation = %u\n", inode->i_generation );
}
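DBGPRINT is ME2FS's own logging macro and is not defined in this excerpt; a plausible minimal definition (an assumption, shown only to make the example self-contained):

#define DBGPRINT( fmt, ... )	printk( KERN_DEBUG fmt, ##__VA_ARGS__ )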
Example #5
File: user.c Project: teg/bus1
static struct bus1_user *
bus1_user_get(struct bus1_domain_info *domain_info, kuid_t uid)
{
	struct bus1_user *user;

	rcu_read_lock();
	user = idr_find(&domain_info->user_idr, __kuid_val(uid));
	if (user && !kref_get_unless_zero(&user->ref))
		/* the user is about to be destroyed, ignore it */
		user = NULL;
	rcu_read_unlock();

	return user;
}
Example #6
File: user.c Project: teg/bus1
static void bus1_user_free(struct kref *ref)
{
	struct bus1_user *user = container_of(ref, struct bus1_user, ref);

	WARN_ON(atomic_read(&user->fds_inflight));

	/* drop the id from the ida if it was initialized */
	if (user->id != BUS1_INTERNAL_UID_INVALID)
		ida_simple_remove(&user->domain_info->user_ida, user->id);

	mutex_lock(&user->domain_info->lock);
	if (uid_valid(user->uid)) /* if already dropped, it's set to invalid */
		idr_remove(&user->domain_info->user_idr,
			   __kuid_val(user->uid));
	mutex_unlock(&user->domain_info->lock);

	kfree_rcu(user, rcu);
}
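bus1_user_free() runs only once the last reference is gone, and it frees the object with kfree_rcu() so the lockless lookup in Example #5 never dereferences freed memory. The struct layout implied by these snippets is roughly the following; field order and exact types are a guess, not the bus1 source:

struct bus1_user {
	struct kref ref;		/* released via bus1_user_free() */
	struct rcu_head rcu;		/* required by kfree_rcu() */
	kuid_t uid;			/* set to INVALID_UID once unlinked from the idr */
	unsigned int id;		/* small internal id from the domain's ida */
	atomic_t fds_inflight;		/* must have drained to 0 at free time */
	struct bus1_domain_info *domain_info;
	/* plus the n_ and max_ quota counters exercised in Example #3 */
};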
Example #7
static int scull_w_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *dev = &scull_w_device; /* device information */

	spin_lock(&scull_w_lock);
	while (! scull_w_available()) {
		spin_unlock(&scull_w_lock);
		if (filp->f_flags & O_NONBLOCK) return -EAGAIN;
		if (wait_event_interruptible (scull_w_wait, scull_w_available()))
			return -ERESTARTSYS; /* tell the fs layer to handle it */
		spin_lock(&scull_w_lock);
	}
	if (scull_w_count == 0)
		scull_w_owner = __kuid_val(CURRENT_UID); /* grab it */
	scull_w_count++;
	spin_unlock(&scull_w_lock);

	/* then, everything else is copied from the bare scull device */
	if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
		scull_trim(dev);
	filp->private_data = dev;
	return 0;          /* success */
}
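Sleepers in the while loop are only woken if someone signals scull_w_wait; in LDD3 that happens in the release path once the last holder closes the device. A sketch of the counterpart, assumed from the LDD3 pattern:

static int scull_w_release(struct inode *inode, struct file *filp)
{
	int temp;

	spin_lock(&scull_w_lock);
	scull_w_count--;
	temp = scull_w_count;
	spin_unlock(&scull_w_lock);

	if (temp == 0)
		wake_up_interruptible_sync(&scull_w_wait); /* awake other uid's */
	return 0;
}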
Example #8
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
	/* Map the uid from a global kernel uid */
	return __kuid_val(kuid);
}
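This is the identity-mapping fallback compiled when user namespaces are disabled; with CONFIG_USER_NS enabled, the real from_kuid() walks the target namespace's uid map, so the result can differ from __kuid_val(). A typical caller pattern (the helper name is illustrative):

/* Report an inode's owner as seen from the calling task's user namespace. */
static uid_t owner_uid_in_current_ns(const struct inode *inode)
{
	return from_kuid(current_user_ns(), inode->i_uid);
}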
Example #9
File: user.c Project: teg/bus1
/**
 * bus1_user_acquire_by_uid() - get a user object for a uid in the given domain
 * @domain:		domain of the user
 * @uid:		uid of the user
 *
 * Find and return the user object for the given uid if it exists; otherwise
 * create it first. The caller is responsible for releasing the reference (and
 * all derived references) before the parent domain is deactivated!
 *
 * Return: A user object for the given uid, ERR_PTR on failure.
 */
struct bus1_user *
bus1_user_acquire_by_uid(struct bus1_domain *domain, kuid_t uid)
{
	struct bus1_user *user, *old_user, *new_user;
	int r = 0;

	WARN_ON(!uid_valid(uid));

	lockdep_assert_held(&domain->active);

	/* try to get the user without taking a lock */
	user = bus1_user_get(domain->info, uid);
	if (user)
		return user;

	/* didn't exist, allocate a new one */
	new_user = bus1_user_new(domain->info, uid);
	if (IS_ERR(new_user))
		return new_user;

	/*
	 * Allocate the smallest possible internal id for this user; used in
	 * arrays for accounting user quota in receiver pools.
	 */
	r = ida_simple_get(&domain->info->user_ida, 0, 0, GFP_KERNEL);
	if (r < 0)
		goto exit;

	new_user->id = r;

	mutex_lock(&domain->info->lock);
	/*
	 * Someone else might have raced us outside the lock, so check if the
	 * user still does not exist.
	 */
	old_user = idr_find(&domain->info->user_idr, __kuid_val(uid));
	if (likely(!old_user)) {
		/* user does not exist, link the newly created one */
		r = idr_alloc(&domain->info->user_idr, new_user,
			      __kuid_val(uid), __kuid_val(uid) + 1, GFP_KERNEL);
		if (r < 0)
			goto exit;
	} else {
		/* another allocation raced us, try re-using that one */
		if (likely(kref_get_unless_zero(&old_user->ref))) {
			user = old_user;
			goto exit;
		} else {
			/* the other one is getting destroyed, replace it */
			idr_replace(&domain->info->user_idr, new_user,
				    __kuid_val(uid));
			old_user->uid = INVALID_UID; /* mark old as removed */
		}
	}

	user = new_user;
	new_user = NULL;

exit:
	mutex_unlock(&domain->info->lock);
	bus1_user_release(new_user);
	if (r < 0)
		return ERR_PTR(r);
	return user;
}
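A hypothetical caller, just to show the calling convention; the function name and the quota step are illustrative, and the caller must already hold the domain's active reference (see the lockdep assertion above):

static int bus1_example_charge(struct bus1_domain *domain)
{
	struct bus1_user *user;

	user = bus1_user_acquire_by_uid(domain, current_uid());
	if (IS_ERR(user))
		return PTR_ERR(user);

	/* ... charge slices/handles against the per-user quota ... */

	bus1_user_release(user);	/* drop the acquired reference */
	return 0;
}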
Example #10
static int
efab_tcp_helper_sock_attach(ci_private_t* priv, void *arg)
{
  oo_sock_attach_t* op = arg;
  tcp_helper_resource_t* trs = priv->thr;
  tcp_helper_endpoint_t* ep = NULL;
  citp_waitable_obj *wo;
  int rc, flags, type = op->type;

/* SOCK_CLOEXEC and SOCK_NONBLOCK both exist since 2.6.27 */
#ifdef SOCK_TYPE_MASK
  BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
  flags = type & (SOCK_CLOEXEC | SOCK_NONBLOCK);
  type &= SOCK_TYPE_MASK;
# ifdef SOCK_NONBLOCK
    if( SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK) )
      flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
# endif
#else
  flags = 0;
#endif

  OO_DEBUG_TCPH(ci_log("%s: ep_id=%d", __FUNCTION__, op->ep_id));
  if( trs == NULL ) {
    LOG_E(ci_log("%s: ERROR: not attached to a stack", __FUNCTION__));
    return -EINVAL;
  }

  /* Validate and find the endpoint. */
  if( ! IS_VALID_SOCK_P(&trs->netif, op->ep_id) )
    return -EINVAL;
  ep = ci_trs_get_valid_ep(trs, op->ep_id);
  if( tcp_helper_endpoint_set_aflags(ep, OO_THR_EP_AFLAG_ATTACHED) &
      OO_THR_EP_AFLAG_ATTACHED )
    return -EBUSY;
  wo = SP_TO_WAITABLE_OBJ(&trs->netif, ep->id);

  /* create OS socket */
  if( op->domain != AF_UNSPEC ) {
    struct socket *sock;
    struct file *os_file;

    rc = sock_create(op->domain, type, 0, &sock);
    if( rc < 0 ) {
      LOG_E(ci_log("%s: ERROR: sock_create(%d, %d, 0) failed (%d)",
                   __FUNCTION__, op->domain, type, rc));
      tcp_helper_endpoint_clear_aflags(ep, OO_THR_EP_AFLAG_ATTACHED);
      return rc;
    }
    os_file = sock_alloc_file(sock, flags, NULL);
    if( IS_ERR(os_file) ) {
      LOG_E(ci_log("%s: ERROR: sock_alloc_file failed (%ld)",
                   __FUNCTION__, PTR_ERR(os_file)));
      sock_release(sock);
      tcp_helper_endpoint_clear_aflags(ep, OO_THR_EP_AFLAG_ATTACHED);
      return PTR_ERR(os_file);
    }
    rc = efab_attach_os_socket(ep, os_file);
    if( rc < 0 ) {
      LOG_E(ci_log("%s: ERROR: efab_attach_os_socket failed (%d)",
                   __FUNCTION__, rc));
      /* NB. efab_attach_os_socket() consumes [os_file] even on error. */
      tcp_helper_endpoint_clear_aflags(ep, OO_THR_EP_AFLAG_ATTACHED);
      return rc;
    }
    wo->sock.domain = op->domain;
    wo->sock.ino = ep->os_socket->file->f_dentry->d_inode->i_ino;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    wo->sock.uid = ep->os_socket->file->f_dentry->d_inode->i_uid;
#else
    wo->sock.uid = __kuid_val(ep->os_socket->file->f_dentry->d_inode->i_uid);
#endif
  }

  /* Create a new file descriptor to attach the stack to. */
  ci_assert((wo->waitable.state & CI_TCP_STATE_TCP) ||
            wo->waitable.state == CI_TCP_STATE_UDP);
  rc = oo_create_fd(ep, flags,
                    (wo->waitable.state & CI_TCP_STATE_TCP) ?
                    CI_PRIV_TYPE_TCP_EP : CI_PRIV_TYPE_UDP_EP);
  if( rc < 0 ) {
    ci_irqlock_state_t lock_flags;
    struct oo_file_ref* os_socket;
    ci_irqlock_lock(&ep->thr->lock, &lock_flags);
    os_socket = ep->os_socket;
    ep->os_socket = NULL;
    ci_irqlock_unlock(&ep->thr->lock, &lock_flags);
    if( os_socket != NULL )
      oo_file_ref_drop(os_socket);
    tcp_helper_endpoint_clear_aflags(ep, OO_THR_EP_AFLAG_ATTACHED);
    return rc;
  }

  op->fd = rc;
#ifdef SOCK_NONBLOCK
  if( op->type & SOCK_NONBLOCK )
    ci_bit_mask_set(&wo->waitable.sb_aflags, CI_SB_AFLAG_O_NONBLOCK);
#endif

  /* Re-read the OS socket buffer size settings.  This ensures we'll use
   * up-to-date values for this new socket.
   */
  efab_get_os_settings(&NI_OPTS_TRS(trs));
  return 0;
}
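The #ifdef block at the top exists because SOCK_NONBLOCK is usually, but not always, defined as O_NONBLOCK, so the socket-type flag has to be rewritten into the f_flags encoding before it is stored. A quick userspace probe of that assumption (illustrative, not part of the driver):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	/* On most architectures these pairs match; the kernel code above
	 * must not rely on it, hence the explicit translation. */
	printf("SOCK_NONBLOCK=%#x O_NONBLOCK=%#x\n", SOCK_NONBLOCK, O_NONBLOCK);
	printf("SOCK_CLOEXEC=%#x O_CLOEXEC=%#x\n", SOCK_CLOEXEC, O_CLOEXEC);
	return 0;
}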