/*
 * 32-bit compat entry point for fcntl64(): converts compat struct flock
 * layouts to the native layout, forwards to sys_fcntl(), and converts
 * results back, enforcing the compat offset limits.
 */
asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
		unsigned long arg)
{
	mm_segment_t old_fs;
	struct flock f;
	long ret;

	switch (cmd) {
	case F_GETLK:
	case F_SETLK:
	case F_SETLKW:
		/* Convert the 32-bit compat flock into a native struct flock. */
		ret = get_compat_flock(&f, compat_ptr(arg));
		if (ret != 0)
			break;
		/* Widen the address limit so sys_fcntl() accepts a pointer
		 * to the kernel-stack copy of the lock. */
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = sys_fcntl(fd, cmd, (unsigned long)&f);
		set_fs(old_fs);
		if (cmd == F_GETLK && ret == 0) {
			/* GETLK succeeded: the result must fit the compat
			 * structure.  An l_start beyond COMPAT_OFF_T_MAX is
			 * an error (-EOVERFLOW); an oversized l_len is merely
			 * truncated so the app still sees the representable
			 * part of the conflicting lock. */
			if (f.l_start > COMPAT_OFF_T_MAX)
				ret = -EOVERFLOW;
			if (f.l_len > COMPAT_OFF_T_MAX)
				f.l_len = COMPAT_OFF_T_MAX;
			if (ret == 0)
				ret = put_compat_flock(&f, compat_ptr(arg));
		}
		break;

	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
		ret = get_compat_flock64(&f, compat_ptr(arg));
		if (ret != 0)
			break;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		/* Map the *64 lock commands onto their native counterparts
		 * before calling sys_fcntl(). */
		ret = sys_fcntl(fd, (cmd == F_GETLK64) ? F_GETLK :
				((cmd == F_SETLK64) ? F_SETLK : F_SETLKW),
				(unsigned long)&f);
		set_fs(old_fs);
		if (cmd == F_GETLK64 && ret == 0) {
			/* Same overflow/truncation policy as above, against
			 * the 64-bit compat limits. */
			if (f.l_start > COMPAT_LOFF_T_MAX)
				ret = -EOVERFLOW;
			if (f.l_len > COMPAT_LOFF_T_MAX)
				f.l_len = COMPAT_LOFF_T_MAX;
			if (ret == 0)
				ret = put_compat_flock64(&f, compat_ptr(arg));
		}
		break;

	default:
		/* Every other command takes a scalar argument and can be
		 * forwarded unchanged. */
		ret = sys_fcntl(fd, cmd, arg);
		break;
	}
	return ret;
}
Beispiel #2
0
/*
 * Send nr_fds descriptors from fds[] to the peer at saddr over the
 * AF_UNIX socket sock, via SCM_RIGHTS ancillary data, in chunks of at
 * most CR_SCM_MAX_FD descriptors per sendmsg().  When with_flags is
 * true, each descriptor's close-on-exec flag and file-owner settings
 * are gathered into fdset.opts so the receiver can restore them.
 *
 * Returns 0 on success; -1 (or sys_sendmsg's error) on failure.
 */
int send_fds(int sock, struct sockaddr_un *saddr, int len,
		int *fds, int nr_fds, bool with_flags)
{
	struct scm_fdset fdset;
	int *cmsg_data;
	int i, min_fd, ret;

	/* cmsg_data points into fdset's control buffer where the
	 * SCM_RIGHTS fd array payload lives. */
	cmsg_data = scm_fdset_init(&fdset, saddr, len, with_flags);
	for (i = 0; i < nr_fds; i += min_fd) {
		/* Chunk size: whatever remains, capped per message. */
		min_fd = min(CR_SCM_MAX_FD, nr_fds - i);
		scm_fdset_init_chunk(&fdset, min_fd);
		builtin_memcpy(cmsg_data, &fds[i], sizeof(int) * min_fd);

		if (with_flags) {
			int j;

			for (j = 0; j < min_fd; j++) {
				int flags, fd = fds[i + j];
				struct fd_opts *p = fdset.opts + j;
				struct f_owner_ex owner_ex;
				u32 v[2];

				/* Capture FD_CLOEXEC state of the fd. */
				flags = sys_fcntl(fd, F_GETFD, 0);
				if (flags < 0)
					return -1;

				p->flags = (char)flags;

				if (sys_fcntl(fd, F_GETOWN_EX, (long)&owner_ex))
					return -1;

				/*
				 * Simple case -- nothing is changed.
				 */
				if (owner_ex.pid == 0) {
					p->fown.pid = 0;
					continue;
				}

				/* NOTE(review): F_GETOWNER_UIDS appears to fill
				 * v with {uid, euid} of the owner -- confirm
				 * against the kernel providing this command. */
				if (sys_fcntl(fd, F_GETOWNER_UIDS, (long)&v))
					return -1;

				p->fown.uid	 = v[0];
				p->fown.euid	 = v[1];
				p->fown.pid_type = owner_ex.type;
				p->fown.pid	 = owner_ex.pid;
			}
		}

		/* Ship this chunk; a zero return is also treated as failure. */
		ret = sys_sendmsg(sock, &fdset.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;
	}

	return 0;
}
Beispiel #3
0
Datei: dev9.c Projekt: kyuba/dev9
static void connect_to_netlink(struct dfs *fs)
{
    struct sockaddr_nl nls = { 0, 0, 0, 0 };
    int fd;
    struct io *io;
    int newlength = NETLINK_BUFFER;
    struct exec_context *context;

    nls.nl_family = AF_NETLINK;
    nls.nl_pid = sys_getpid();
    nls.nl_groups = -1;

    fd = sys_socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

    if (fd < 0) { cexit (17); }

    if (sys_bind(fd, (void *)&nls, sizeof(struct sockaddr_nl)) < 0) {
        cexit (18);
    }

    if (sys_setsockopt (fd, SOL_SOCKET, SO_RCVBUF, (char *)&newlength,
                        sizeof (int)) < 0) {
        cexit(19);
    }

    if (sys_fcntl (fd, F_SETFD, FD_CLOEXEC) < 0) {
        cexit(20);
    }

    if (sys_fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
        cexit(21);
    }

    io = io_open (fd);
    io->type = iot_read;

    multiplex_add_io (io, on_netlink_read, on_netlink_close, (void *)fs);

    context = execute(EXEC_CALL_NO_IO, (char **)0, (char **)0);
    switch (context->pid)
    {
        case -1:
            cexit (25);
        case 0:
            ping_for_uevents ("/sys/(bus|class|block)/.+/.+/uevent");
            cexit (0);
        default:
            multiplex_add_process(context, mx_on_subprocess_death, (void *)0);
    }
}
/*
 * Ultrix fcntl(2) emulation.  The record-locking commands carry a
 * struct flock whose layout differs from the native one, so they are
 * translated in both directions; every other command is forwarded
 * verbatim to the native sys_fcntl().
 */
int
ultrix_sys_fcntl(struct lwp *l, const struct ultrix_sys_fcntl_args *uap, register_t *retval)
{
	struct ultrix_flock ufl;
	struct flock fl;
	int error;

	if (SCARG(uap, cmd) == F_GETLK || SCARG(uap, cmd) == F_SETLK ||
	    SCARG(uap, cmd) == F_SETLKW) {
		/* Copy in the Ultrix lock and convert to the native layout. */
		error = copyin(SCARG(uap, arg), &ufl, sizeof(ufl));
		if (error)
			return error;
		error = ultrix_to_bsd_flock(&ufl, &fl);
		if (error)
			return error;
		error = do_fcntl_lock(SCARG(uap, fd), SCARG(uap, cmd), &fl);
		/* Only a successful F_GETLK has a result to copy back out. */
		if (SCARG(uap, cmd) != F_GETLK || error != 0)
			return error;
		bsd_to_ultrix_flock(&fl, &ufl);
		return copyout(&ufl, SCARG(uap, arg), sizeof(ufl));
	}

	/* Non-locking commands are argument-compatible as-is. */
	return sys_fcntl(l, (const void *)uap, retval);
}
/*****************************************************************************
 Function    : acm_async_write
 Description : non-blocking write to an ACM device
 Input       : void *acm_dev        : target device context (struct acm_ctx *)
               char *pVirAddr       : virtual address of the source buffer
               char *pPhyAddr       : physical address of the source buffer
                                      (may be NULL; mapped here when needed)
               unsigned int size    : number of bytes to write
 Output      : none
 Return      : int - bytes accepted on success, ADP_ERROR on failure
*****************************************************************************/
int acm_async_write(void *acm_dev, char *pVirAddr, char *pPhyAddr, unsigned int size)
{
    long fd;
    int len = (int)ADP_ERROR;
    mm_segment_t   fs         = 0;
    struct acm_ctx *p_acm_ctx = (struct acm_ctx *)acm_dev;
    struct acm_ncopy_ctx * acm_ncpy = (struct acm_ncopy_ctx *)p_acm_ctx->acm_ncpy;
    int rc = ADP_OK;

    ACM_ADP_DBG(" acm[%s] pVirAddr[%p] pPhyAddr[%p]size[%d]\n",
                p_acm_ctx->tty_name, pVirAddr, pPhyAddr, size);

    /* The device must have been opened first. */
    if (!p_acm_ctx->bopen) {
        ACM_ADP_ERR("acm[%s] device is not opened\n", p_acm_ctx->tty_name);
        return ADP_ERROR;
    }

    if ((NULL == pVirAddr) || (size == 0)) {
        ACM_ADP_ERR("acm[%s] para is invalid\n", p_acm_ctx->tty_name);
        return ADP_ERROR;
    }

    if (p_acm_ctx->ncopy) {
        /* Zero-copy path: hand the buffer straight to the ncopy engine. */
        #ifndef _DRV_LLT_
        /* If the caller did not supply a physical address, DMA-map the
         * virtual buffer here. */
        if((0 == p_acm_ctx->phyaddr_from_app)||(NULL == pPhyAddr)){
            pPhyAddr = (char *)dma_map_single(acm_ncpy->tty->dev,(void *)pVirAddr,size,DMA_TO_DEVICE);
        }
        #endif
        rc = acm_ncopy_start_tx((void *)p_acm_ctx, pVirAddr, pPhyAddr, size);
        if (ADP_OK == rc) {
            len = size;
        }
    } else {
        /* Look up the tty fd associated with this ACM device. */
        fd = p_acm_ctx->fd;

        /* Widen the address limit so sys_write() accepts a kernel buffer. */
        fs = get_fs();
        set_fs(KERNEL_DS);

        /* Set O_NONBLOCK so the write below cannot sleep. */
        sys_fcntl(fd, F_SETFL, O_NONBLOCK);

        /* Write the data; len is the number of bytes actually written. */
        len = sys_write(fd, pVirAddr, size);

        set_fs(fs);

        /* Report completion only when the whole buffer was accepted. */
        if (len == size) {
            if (p_acm_ctx->writeDoneCB) {
                ACM_ADP_DBG(" acm[%s] cb len[%d]\n", p_acm_ctx->tty_name, len);
                p_acm_ctx->writeDoneCB(pVirAddr, pPhyAddr, len);
            }
        }

    }
    ACM_ADP_DBG(" acm[%s] len[0x%x]\n", p_acm_ctx->tty_name, len);

    return len;
}
/*****************************************************************************
 Function    : udi_acm_write
 Description : blocking write to an ACM device
 Input       : void *acm_dev          : target device context (struct acm_ctx *)
               unsigned char *pbuff   : source buffer
               unsigned int size      : number of bytes to write
 Output      : none
 Return      : int - number of bytes written, or ADP_ERROR on bad arguments
*****************************************************************************/
int udi_acm_write(void *acm_dev, unsigned char *pbuff, unsigned int size)
{
    long fd;
    int  len;
    long flag = 0;
    mm_segment_t   fs         = 0;
    struct acm_ctx *p_acm_ctx = (struct acm_ctx *)acm_dev;

    ACM_ADP_DBG(" acm[%s] size[%d]\n", p_acm_ctx->tty_name, size);
    /* The device must have been opened first. */
    if (!p_acm_ctx->bopen) {
        ACM_ADP_ERR("acm[%s] is not opened\n", p_acm_ctx->tty_name);
        return ADP_ERROR;
    }

    if ((NULL == pbuff)||(size == 0)) {
        ACM_ADP_ERR("acm[%s] para is invalid\n", p_acm_ctx->tty_name);
        return ADP_ERROR;
    }

    /* Look up the tty fd associated with this ACM device. */
    fd = p_acm_ctx->fd;

    /* Widen the address limit so sys_write() accepts a kernel buffer. */
    fs = get_fs();
    set_fs(KERNEL_DS);

    /*
     * Clear O_NONBLOCK so the write below blocks until completion.
     *
     * BUG FIX: F_GETFL returns the file status flags as the syscall's
     * return value -- the third argument is ignored.  The previous code
     *     sys_fcntl(fd, F_GETFL, flag);
     * discarded that return, left "flag" at 0, and the following F_SETFL
     * then cleared EVERY file status flag, not just O_NONBLOCK.  Capture
     * the returned flags instead, and only write them back when F_GETFL
     * succeeded.
     */
    flag = sys_fcntl(fd, F_GETFL, 0);
    if (flag >= 0) {
        flag &= ~O_NONBLOCK;
        sys_fcntl(fd, F_SETFL, flag);
    }

    /* Write the data; len is the number of bytes actually written. */
    len = sys_write(fd, pbuff, size);

    set_fs(fs);
    /* This interface is no longer used by real callers; invoke the
     * completion callback here as a temporary stub. */
    if (p_acm_ctx->writeDoneCB) {
        p_acm_ctx->writeDoneCB(pbuff,NULL,len);
    }
    ACM_ADP_DBG(" acm[%s] writeCB[%p] len[%d]\n",
        p_acm_ctx->tty_name, p_acm_ctx->writeDoneCB, size);

    return len;
}
Beispiel #7
0
/*
 * Execute "cmd1 | cmd2" from the AST.
 *
 * NOTE(review): sys_fcntl() is used here in a 4-argument form where
 * cmd 0 appears to behave like F_DUPFD (duplicate the fd onto the
 * lowest free slot >= third argument) -- confirm against this kernel's
 * sys_fcntl implementation before relying on these semantics.
 */
static void interpret_pipe(struct interpreter_context *context, struct ast_node *node)
{
    struct gsh_command *cmd1 = node->n_children;
    struct gsh_command *cmd2 = node->n_children->next;
    
    /* Save duplicates of the current stdin/stdout/stderr. */
    int in = sys_fcntl(0, 0, 0, 0);
    int out = sys_fcntl(1, 0, 0, 0);
    int err = sys_fcntl(2, 0, 0, 0);
    
    int pipe_des[2];
    sys_pipe(pipe_des);
    
    /* Make the pipe's read end fd 0, then launch the consumer (cmd2). */
    close(0);
    sys_fcntl(pipe_des[0], 0, 0, 0);
    int pid = interpret_cmd(context, cmd2, P_NOWAIT);
    
    close(0);
    close(1);
    close(2);
    
    /* Restore stdin; route stdout and stderr into the pipe's write end
     * for the producer (cmd1). */
    sys_fcntl(in, 0, 0, 0);
    sys_fcntl(pipe_des[1], 0, 1, 0);
    sys_fcntl(pipe_des[1], 0, 2, 0);
    
    interpret_cmd(context, cmd1, P_WAIT);
    
    
    close(1);
    close(2);
    
    /* Put the saved stdout/stderr back on fds 1 and 2. */
    sys_fcntl(out, 0, 1, 0);
    sys_fcntl(err, 0, 2, 0);
    

    close(pipe_des[0]);
    close(pipe_des[1]);
    
    /* Reap the consumer before returning. */
    int status;
    sys_waitpid(pid, &status);
    
    close(out);
    close(err);
}
Beispiel #8
0
/*
 * Emulate the SVR4 record-locking fcntl commands: translate the SVR4
 * flock layout to the native one, issue the lock call with the address
 * limit widened (the converted lock lives on the kernel stack), and
 * translate the result back for queries.
 */
int svr4_fcntl_flock(int fd, unsigned int cmd, unsigned long arg)
{
	struct svr4_flock fl, *flp = (struct svr4_flock *)arg;
	struct flock l_fl;
	mm_segment_t fs;
	int rval;

	/*
	 * We are not supposed to fail once the lock is set,
	 * thus we check the userspace pointer for writeaccess now.
	 */
	rval = verify_area(VERIFY_WRITE, flp, sizeof(struct svr4_flock));
	if (rval)
		return -EFAULT;

	rval = copy_from_user(&fl, flp, sizeof(struct svr4_flock));
	if (rval)
		return -EFAULT;

	/* SVR4 lock type codes are the native codes offset by one. */
	l_fl.l_type = fl.l_type - 1;
	l_fl.l_whence = fl.l_whence;
	l_fl.l_start = fl.l_start;
	l_fl.l_len = fl.l_len;
	l_fl.l_pid = fl.l_pid;

	abi_trace(ABI_TRACE_API,
		"lock l_type: %d l_whence: %d "
		"l_start: %u l_len: %u "
		"l_sysid: %d l_pid: %d\n",
		fl.l_type, fl.l_whence,
		fl.l_start, fl.l_len,
		fl.l_sysid, fl.l_pid);

	/* Widen the address limit: sys_fcntl() gets a kernel pointer. */
	fs = get_fs();
	set_fs(get_ds());
	rval = sys_fcntl(fd, cmd, (unsigned long)&l_fl);
	set_fs(fs);

	if (rval)
		return rval;

	/* Convert the (possibly updated) lock back to the SVR4 layout. */
	fl.l_type = l_fl.l_type + 1;
	fl.l_whence = l_fl.l_whence;
	fl.l_start = l_fl.l_start;
	fl.l_len = l_fl.l_len;
	fl.l_sysid = 0;
	fl.l_pid = l_fl.l_pid;

	/* Return value deliberately ignored: the verify_area() above
	 * already guaranteed the destination is writable. */
	__copy_to_user(flp, &fl, sizeof(struct svr4_flock));
	return 0;
}
Beispiel #9
0
/*
 * fcntl64() entry point: the 64-bit lock commands are unsupported on
 * this kernel (ENOSYS); everything else is handled by plain fcntl().
 * Returns -EBADF for an out-of-range or unopened descriptor.
 */
int sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
    struct file *filp;

    /* Validate the descriptor before dispatching. */
    if (fd >= NR_OPEN)
	return -EBADF;
    filp = current->filp[fd];
    if (!filp)
	return -EBADF;

    if (cmd == F_GETLK64 || cmd == F_SETLK64 || cmd == F_SETLKW64)
	return -ENOSYS;

    return sys_fcntl(fd, cmd, arg);
}
Beispiel #10
0
/** Handler for doing an FCNTL in a FAF open file.
 *  @author Renaud Lottiaux
 *
 *  Runs sys_fcntl() on behalf of a remote node under the requester's
 *  credentials and packs the result (plus, for F_GETLK, the updated
 *  flock) back over the RPC descriptor.  Any failure cancels the RPC.
 *
 *  @param from    Node sending the request
 *  @param msgIn   Request message
 */
void handle_faf_fcntl (struct rpc_desc* desc,
		       void *msgIn, size_t size)
{
	struct faf_ctl_msg *msg = msgIn;
	const struct cred *old_cred;
	unsigned long arg;
	long r;
	int err;

	/* Lock commands take a pointer argument: point it at the flock
	 * embedded in the request message instead of the raw arg. */
	if (msg->cmd == F_GETLK || msg->cmd == F_SETLK || msg->cmd == F_SETLKW)
		arg = (unsigned long) &msg->flock;
	else
		arg = msg->arg;

	/* Impersonate the requester for the duration of the call. */
	old_cred = unpack_override_creds(desc);
	if (IS_ERR(old_cred))
		goto cancel;
	err = remote_sleep_prepare(desc);
	if (err) {
		revert_creds(old_cred);
		goto cancel;
	}

	r = sys_fcntl (msg->server_fd, msg->cmd, arg);

	remote_sleep_finish();
	revert_creds(old_cred);

	/* Send the syscall result back to the requester. */
	err = rpc_pack_type(desc, r);
	if (unlikely(err))
		goto cancel;

	/* A successful F_GETLK also returns the queried lock data. */
	if (!r && msg->cmd == F_GETLK) {
		err = rpc_pack_type(desc, msg->flock);
		if (unlikely(err))
			goto cancel;
	}

	return;
cancel:
	rpc_cancel(desc);
}
Beispiel #11
0
/*
 * Execute "cmd > file" from the AST: temporarily rebind stdout and
 * stderr to the opened target file, run the command, then restore them.
 * NOTE(review): sys_fcntl() is used in a 4-argument form where cmd 0
 * appears to act like F_DUPFD -- confirm against this kernel.
 */
static void interpret_rdir(struct interpreter_context *context, struct ast_node *node)
{
    struct gsh_command *redirect = node->n_children->next;
    int target_fd = sys_open(redirect->c_args[0], O_WRONLY | O_CREAT);
    /* Save duplicates of the current stdout/stderr. */
    int saved_out = sys_fcntl(1, 0, 0, 0);
    int saved_err = sys_fcntl(2, 0, 0, 0);
    /* Point fds 1 and 2 at the target file. */
    close(1);
    close(2);
    sys_fcntl(target_fd, 0, 1, 0);
    sys_fcntl(target_fd, 0, 2, 0);
    interpret_cmd(context, node->n_children, P_WAIT);
    /* Restore the original stdout/stderr and release everything. */
    close(target_fd);
    close(1);
    close(2);
    sys_fcntl(saved_out, 0, 1, 0);
    sys_fcntl(saved_err, 0, 2, 0);
    close(saved_out);
    close(saved_err);
}
Beispiel #12
0
/*
 * iBCS2 fcntl(2) emulation: translate iBCS2 command numbers and flag
 * encodings onto the native sys_fcntl(), converting the iBCS2 flock
 * layout for the record-locking commands.
 */
int
ibcs2_sys_fcntl(struct lwp *l, const struct ibcs2_sys_fcntl_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(int) cmd;
		syscallarg(char *) arg;
	} */
	struct sys_fcntl_args fa;
	struct flock fl;
	struct ibcs2_flock ifl;
	int error;
	int cmd;

	/* fd and arg are forwarded unchanged for the simple commands;
	 * set them up once. */
	SCARG(&fa, fd) = SCARG(uap, fd);
	SCARG(&fa, arg) = SCARG(uap, arg);

	switch(SCARG(uap, cmd)) {
	case IBCS2_F_DUPFD:
		SCARG(&fa, cmd) = F_DUPFD;
		return sys_fcntl(l, &fa, retval);
	case IBCS2_F_GETFD:
		SCARG(&fa, cmd) = F_GETFD;
		return sys_fcntl(l, &fa, retval);
	case IBCS2_F_SETFD:
		SCARG(&fa, cmd) = F_SETFD;
		return sys_fcntl(l, &fa, retval);
	case IBCS2_F_GETFL:
		SCARG(&fa, cmd) = F_GETFL;
		error = sys_fcntl(l, &fa, retval);
		if (error)
			return error;
		/* Translate the native flag bits to the iBCS2 encoding. */
		*retval = oflags2ioflags(*retval);
		return error;
	case IBCS2_F_SETFL:
		/* Translate iBCS2 flag bits to the native encoding. */
		SCARG(&fa, cmd) = F_SETFL;
		SCARG(&fa, arg) = (void *)ioflags2oflags((int) SCARG(uap, arg));
		return sys_fcntl(l, &fa, retval);

	case IBCS2_F_GETLK:
	case IBCS2_F_SETLK:
	case IBCS2_F_SETLKW:
		/* Map the iBCS2 lock command to its native counterpart. */
		if (SCARG(uap, cmd) == IBCS2_F_GETLK)
			cmd = F_GETLK;
		else if (SCARG(uap, cmd) == IBCS2_F_SETLK)
			cmd = F_SETLK;
		else
			cmd = F_SETLKW;
		error = copyin(SCARG(uap, arg), &ifl, ibcs2_flock_len);
		if (error)
			return error;
		cvt_iflock2flock(&ifl, &fl);
		error = do_fcntl_lock(SCARG(uap, fd), cmd, &fl);
		/* Only a successful F_GETLK has a result to copy back. */
		if (cmd != F_GETLK || error != 0)
			return error;
		cvt_flock2iflock(&fl, &ifl);
		return copyout(&ifl, SCARG(uap, arg), ibcs2_flock_len);

	default:
		return ENOSYS;
	}
}
Beispiel #13
0
/*
 * locking() requires mandatory locking. Processes that attempt to
 * read or write a region locked with locking() are required to block.
 * You need to build a kernel with mandatory locking support and set
 * the permissions on the required file to setgid, no group execute.
 */
int
xnx_locking(int fd, int mode, unsigned long size)
{
	struct flock fl;
	mm_segment_t old_fs;
	int error;

	/* Valid modes are 0..7 plus the special value 20. */
	if ((mode < 0 || mode > 7) && mode != 20) {
#if defined(CONFIG_ABI_TRACE)
		abi_trace(ABI_TRACE_API,
				"unsupported locking() mode=0x%x\n", mode);
#endif
		return -EINVAL;
	}

	/*
	 * Modes 5, 6 & 7 are very like the fcntl mechanism but
	 * we can't just punt to that because the type values are
	 * different.
	 */
	if (mode > 4 && mode < 8) {
		struct ibcs_flock *ifl = (struct ibcs_flock *)size;
		short t;

		error = verify_area(VERIFY_READ, ifl, sizeof(*ifl));
		if (error)
			return error;

		/* Rewrite l_type in place: Xenix code -> native code. */
		get_user(t, &ifl->l_type);
		switch (t) {
			case XF_UNLCK:	t = F_UNLCK; break;
			case XF_WRLCK:	t = F_WRLCK; break;
			case XF_RDLCK:	t = F_RDLCK; break;
			default:	return -EINVAL;
		}
		put_user(t, &ifl->l_type);

		error = sys_fcntl(fd, mode, (u_long)ifl);

		/* Translate the (possibly updated) type back to Xenix. */
		get_user(t, &ifl->l_type);
		switch (t) {
			case F_UNLCK:	t = XF_UNLCK; break;
			case F_WRLCK:	t = XF_WRLCK; break;
			case F_RDLCK:	t = XF_RDLCK; break;
		}
		put_user(t, &ifl->l_type);

		/* NOTE(review): this copies l_sysid into l_pid and then
		 * zeroes l_sysid -- presumably matching Xenix's field
		 * expectations; confirm against the ibcs_flock layout. */
		get_user(t, &ifl->l_sysid);
		put_user(t, &ifl->l_pid);
		put_user(0, &ifl->l_sysid);
		return error;
	}

	/* Modes 0-4 and 20: build a native flock.  0 unlocks, 1/2 and 20
	 * take write locks, 3/4 take read locks. */
	fl.l_type = (mode == 0 ? F_UNLCK
			: ((mode <= 2 || mode == 20) ? F_WRLCK
			: F_RDLCK));
	fl.l_whence = 1;	/* relative to current file position */
	fl.l_start = 0;
	fl.l_len = size;

	/* Widen the address limit: the flock is on the kernel stack. */
	old_fs = get_fs();
	set_fs (get_ds());
	/* Even modes are non-blocking (F_SETLK), odd ones block (F_SETLKW). */
	error = sys_fcntl(fd, (mode == 5) ? F_GETLK
			: (!(mode % 2) ? F_SETLK : F_SETLKW), (u_long)&fl);
	set_fs(old_fs);
	return error;
}
Beispiel #14
0
/*
 * SVR4 fcntl(2) emulation front-end: map SVR4 command numbers and flag
 * encodings onto the native sys_fcntl(), delegating the record-locking
 * commands to svr4_fcntl_flock().  Unsupported commands return -EINVAL.
 */
int svr4_fcntl(int fd, unsigned int cmd, unsigned long arg)
{
	int rval;

	switch (cmd) {
	case 0: /* F_DUPFD */
	case 1: /* F_GETFD */
	case 2: /* F_SETFD */
		return sys_fcntl(fd, cmd, arg);
	case 3: /* F_GETFL */
		/* Flag bits differ: convert the result to SVR4 encoding. */
		rval = sys_fcntl(fd, cmd, arg);
		return map_flags(rval, fl_linux_to_svr4);
	case 4: /* F_SETFL */
		/* Convert the SVR4 flag bits to the native encoding first. */
		arg = map_flags(arg, fl_svr4_to_linux);
		return sys_fcntl(fd, cmd, arg);
	case 14: /* F_GETLK SVR4 */
		cmd = 5;
		/*FALLTHROUGH*/
	case 5: /* F_GETLK */
	case 6: /* F_SETLK */
	case 7: /* F_SETLKW */
		return svr4_fcntl_flock(fd, cmd, arg);
	case 10: /* F_ALLOCSP */
		/* Extend allocation for specified portion of file. */
		return 0;
	case 11: /* F_FREESP */
		/* Free a portion of a file. */
		return 0;

	/*
	 * These are intended to support the Xenix chsize() and
	 * rdchk() system calls. I don't know if these may be
	 * generated by applications or not.
	 */
	case 0x6000: /* F_CHSIZE */
		return sys_ftruncate(fd, arg);
	case 0x6001: /* F_RDCHK */
	    {
		mm_segment_t fs;
		int nbytes;

		/* FIONREAD wants a kernel pointer here, hence set_fs(). */
		fs = get_fs();
		set_fs(get_ds());
		rval = sys_ioctl(fd, FIONREAD, &nbytes);
		set_fs(fs);

		if (rval < 0)
			return rval;
		/* rdchk() reports readability, not the byte count. */
		return (nbytes ? 1 : 0);
	    }

	case  8: /* F_CHKFL */
	    /*FALLTHROUGH*/

	/*
	 * These are made from the Xenix locking() system call.
	 * According to available documentation these would
	 * never be generated by an application - only by the
	 * kernel Xenix support.
	 */
	case 0x6300: /* F_LK_UNLCK */
	case 0x7200: /* F_LK_LOCK */
	case 0x6200: /* F_LK_NBLCK */
	case 0x7100: /* F_LK_RLCK */
	case 0x6100: /* F_LK_NBRLCK */
	    /*FALLTHROUGH*/

	default:
		abi_trace(ABI_TRACE_API,
			"unsupported fcntl 0x%x, arg 0x%lx\n", cmd, arg);
		return -EINVAL;
	}
}
Beispiel #15
0
/*
 * Most actions in the fcntl() call are straightforward; simply
 * pass control to the NetBSD system call. A few commands need
 * conversions after the actual system call has done its work,
 * because the flag values and lock structure are different.
 */
int
linux_sys_fcntl(struct lwp *l, const struct linux_sys_fcntl_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(int) cmd;
		syscallarg(void *) arg;
	} */
	struct proc *p = l->l_proc;
	int fd, cmd, error;
	u_long val;
	void *arg;
	struct sys_fcntl_args fca;
	file_t *fp;
	struct vnode *vp;
	struct vattr va;
	long pgid;
	struct pgrp *pgrp;
	struct tty *tp;

	fd = SCARG(uap, fd);
	cmd = SCARG(uap, cmd);
	arg = SCARG(uap, arg);

	switch (cmd) {

	/* Simple cases: just remap the command number and fall out of the
	 * switch to the common sys_fcntl() call at the bottom. */
	case LINUX_F_DUPFD:
		cmd = F_DUPFD;
		break;

	case LINUX_F_GETFD:
		cmd = F_GETFD;
		break;

	case LINUX_F_SETFD:
		cmd = F_SETFD;
		break;

	case LINUX_F_GETFL:
		/* Flag encodings differ: convert the result to Linux bits. */
		SCARG(&fca, fd) = fd;
		SCARG(&fca, cmd) = F_GETFL;
		SCARG(&fca, arg) = arg;
		if ((error = sys_fcntl(l, &fca, retval)))
			return error;
		retval[0] = bsd_to_linux_ioflags(retval[0]);
		return 0;

	case LINUX_F_SETFL: {
		file_t	*fp1 = NULL;

		val = linux_to_bsd_ioflags((unsigned long)SCARG(uap, arg));
		/*
		 * Linux seems to have same semantics for sending SIGIO to the
		 * read side of socket, but slightly different semantics
		 * for SIGIO to the write side.  Rather than sending the SIGIO
		 * every time it's possible to write (directly) more data, it
		 * only sends SIGIO if last write(2) failed due to insufficient
		 * memory to hold the data. This is compatible enough
		 * with NetBSD semantics to not do anything about the
		 * difference.
		 *
		 * Linux does NOT send SIGIO for pipes. Deal with socketpair
		 * ones and DTYPE_PIPE ones. For these, we don't set
		 * the underlying flags (we don't pass O_ASYNC flag down
		 * to sys_fcntl()), but set the FASYNC flag for file descriptor,
		 * so that F_GETFL would report the ASYNC i/o is on.
		 */
		if (val & O_ASYNC) {
			if (((fp1 = fd_getfile(fd)) == NULL))
			    return (EBADF);
			if (((fp1->f_type == DTYPE_SOCKET) && fp1->f_data
			      && ((struct socket *)fp1->f_data)->so_state & SS_ISAPIPE)
			    || (fp1->f_type == DTYPE_PIPE))
				val &= ~O_ASYNC;
			else {
				/* not a pipe, do not modify anything */
				fd_putfile(fd);
				fp1 = NULL;
			}
		}

		SCARG(&fca, fd) = fd;
		SCARG(&fca, cmd) = F_SETFL;
		SCARG(&fca, arg) = (void *) val;

		error = sys_fcntl(l, &fca, retval);

		/* Now set the FASYNC flag for pipes */
		if (fp1) {
			if (!error) {
				mutex_enter(&fp1->f_lock);
				fp1->f_flag |= FASYNC;
				mutex_exit(&fp1->f_lock);
			}
			fd_putfile(fd);
		}

		return (error);
	    }

	/* NOTE: these macros expand to full statements that return from
	 * this function; control does not fall through to the next case. */
	case LINUX_F_GETLK:
		do_linux_getlk(fd, cmd, arg, linux, flock);

	case LINUX_F_SETLK:
	case LINUX_F_SETLKW:
		do_linux_setlk(fd, cmd, arg, linux, flock, LINUX_F_SETLK);

	case LINUX_F_SETOWN:
	case LINUX_F_GETOWN:
		/*
		 * We need to route fcntl() for tty descriptors around normal
		 * fcntl(), since NetBSD tty TIOC{G,S}PGRP semantics is too
		 * restrictive for Linux F_{G,S}ETOWN. For non-tty descriptors,
		 * this is not a problem.
		 */
		if ((fp = fd_getfile(fd)) == NULL)
			return EBADF;

		/* Check it's a character device vnode */
		if (fp->f_type != DTYPE_VNODE
		    || (vp = (struct vnode *)fp->f_data) == NULL
		    || vp->v_type != VCHR) {
			fd_putfile(fd);

	    not_tty:
			/* Not a tty, proceed with common fcntl() */
			cmd = cmd == LINUX_F_SETOWN ? F_SETOWN : F_GETOWN;
			break;
		}

		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &va, l->l_cred);
		VOP_UNLOCK(vp);

		fd_putfile(fd);

		if (error)
			return error;

		if ((tp = cdev_tty(va.va_rdev)) == NULL)
			goto not_tty;

		/* set tty pg_id appropriately */
		mutex_enter(proc_lock);
		if (cmd == LINUX_F_GETOWN) {
			retval[0] = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PGID;
			mutex_exit(proc_lock);
			return 0;
		}
		/* F_SETOWN: a non-positive arg names a process group
		 * directly; a positive arg names a process whose group
		 * is used. */
		if ((long)arg <= 0) {
			pgid = -(long)arg;
		} else {
			struct proc *p1 = proc_find((long)arg);
			if (p1 == NULL) {
				mutex_exit(proc_lock);
				return (ESRCH);
			}
			pgid = (long)p1->p_pgrp->pg_id;
		}
		/* The target group must exist and share our session. */
		pgrp = pgrp_find(pgid);
		if (pgrp == NULL || pgrp->pg_session != p->p_session) {
			mutex_exit(proc_lock);
			return EPERM;
		}
		tp->t_pgrp = pgrp;
		mutex_exit(proc_lock);
		return 0;

	default:
		return EOPNOTSUPP;
	}

	/* Common exit for the simple remapped commands. */
	SCARG(&fca, fd) = fd;
	SCARG(&fca, cmd) = cmd;
	SCARG(&fca, arg) = arg;

	return sys_fcntl(l, &fca, retval);
}
Beispiel #16
0
/* Set the owner of fd via F_SETOWN; the sys_fcntl() result is
 * deliberately discarded (best-effort call). */
void sys_fcntl_f_setown(int fd, pid_t pid)
{
	sys_fcntl(fd, F_SETOWN, pid);
}
/*
 * 32-bit compat entry point for fcntl64(): converts compat struct flock
 * layouts to the native layout, forwards to sys_fcntl(), and converts
 * results back, enforcing the compat offset limits.
 */
asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
		unsigned long arg)
{
	mm_segment_t old_fs;
	struct flock f;
	long ret;

	switch (cmd) {
	case F_GETLK:
	case F_SETLK:
	case F_SETLKW:
		ret = get_compat_flock(&f, compat_ptr(arg));
		if (ret != 0)
			break;
		/* Widen the address limit so sys_fcntl() accepts a pointer
		 * to the kernel-stack copy of the lock. */
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = sys_fcntl(fd, cmd, (unsigned long)&f);
		set_fs(old_fs);
		if (cmd == F_GETLK && ret == 0) {
			/* GETLK was successful and we need to return the data...
			 * but it needs to fit in the compat structure.
			 * l_start shouldn't be too big, unless the original
			 * start + end is greater than COMPAT_OFF_T_MAX, in which
			 * case the app was asking for trouble, so we return
			 * -EOVERFLOW in that case.
			 * l_len could be too big, in which case we just truncate it,
			 * and only allow the app to see that part of the conflicting
			 * lock that might make sense to it anyway
			 */

			if (f.l_start > COMPAT_OFF_T_MAX)
				ret = -EOVERFLOW;
			if (f.l_len > COMPAT_OFF_T_MAX)
				f.l_len = COMPAT_OFF_T_MAX;
			if (ret == 0)
				ret = put_compat_flock(&f, compat_ptr(arg));
		}
		break;

	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
		ret = get_compat_flock64(&f, compat_ptr(arg));
		if (ret != 0)
			break;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		/* Map the *64 lock commands onto their native counterparts. */
		ret = sys_fcntl(fd, (cmd == F_GETLK64) ? F_GETLK :
				((cmd == F_SETLK64) ? F_SETLK : F_SETLKW),
				(unsigned long)&f);
		set_fs(old_fs);
		if (cmd == F_GETLK64 && ret == 0) {
			/* need to return lock information - see above for commentary */
			if (f.l_start > COMPAT_LOFF_T_MAX)
				ret = -EOVERFLOW;
			if (f.l_len > COMPAT_LOFF_T_MAX)
				f.l_len = COMPAT_LOFF_T_MAX;
			if (ret == 0)
				ret = put_compat_flock64(&f, compat_ptr(arg));
		}
		break;

	default:
		/* Scalar-argument commands are forwarded unchanged. */
		ret = sys_fcntl(fd, cmd, arg);
		break;
	}
	return ret;
}
/* Runs in a cloned helper thread: walks /proc to find every thread of the
 * parent process, ptrace-attaches (and thereby suspends) each one, then
 * invokes the caller-supplied callback with the list of suspended thread
 * ids.  Results are communicated back through *args; the helper always
 * terminates via sys__exit(). */
static void ListerThread(struct ListerParams *args) {
  int                found_parent = 0;
  pid_t              clone_pid  = sys_gettid(), ppid = sys_getppid();
  char               proc_self_task[80], marker_name[48], *marker_path;
  const char         *proc_paths[3];
  const char *const  *proc_path = proc_paths;
  int                proc = -1, marker = -1, num_threads = 0;
  int                max_threads = 0, sig;
  struct kernel_stat marker_sb, proc_sb;
  stack_t            altstack;

  /* Create "marker" that we can use to detect threads sharing the same
   * address space and the same file handles. By setting the FD_CLOEXEC flag
   * we minimize the risk of misidentifying child processes as threads;
   * and since there is still a race condition,  we will filter those out
   * later, anyway.
   */
  if ((marker = sys_socket(PF_LOCAL, SOCK_DGRAM, 0)) < 0 ||
      sys_fcntl(marker, F_SETFD, FD_CLOEXEC) < 0) {
  failure:
    /* Common error exit: report errno to the caller, release any open
     * descriptors, and terminate the helper thread. */
    args->result = -1;
    args->err    = errno;
    if (marker >= 0)
      NO_INTR(sys_close(marker));
    sig_marker = marker = -1;
    if (proc >= 0)
      NO_INTR(sys_close(proc));
    sig_proc = proc = -1;
    sys__exit(1);
  }

  /* Compute search paths for finding thread directories in /proc            */
  local_itoa(strrchr(strcpy(proc_self_task, "/proc/"), '\000'), ppid);
  strcpy(marker_name, proc_self_task);
  marker_path = marker_name + strlen(marker_name);
  strcat(proc_self_task, "/task/");
  proc_paths[0] = proc_self_task; /* /proc/$$/task/                          */
  proc_paths[1] = "/proc/";       /* /proc/                                  */
  proc_paths[2] = NULL;

  /* Compute path for marker socket in /proc                                 */
  local_itoa(strcpy(marker_path, "/fd/") + 4, marker);
  if (sys_stat(marker_name, &marker_sb) < 0) {
    goto failure;
  }

  /* Catch signals on an alternate pre-allocated stack. This way, we can
   * safely execute the signal handler even if we ran out of memory.
   */
  memset(&altstack, 0, sizeof(altstack));
  altstack.ss_sp    = args->altstack_mem;
  altstack.ss_flags = 0;
  altstack.ss_size  = ALT_STACKSIZE;
  sys_sigaltstack(&altstack, (const stack_t *)NULL);

  /* Some kernels forget to wake up traced processes, when the
   * tracer dies.  So, intercept synchronous signals and make sure
   * that we wake up our tracees before dying. It is the caller's
   * responsibility to ensure that asynchronous signals do not
   * interfere with this function.
   */
  sig_marker = marker;
  sig_proc   = -1;
  for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
    struct kernel_sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction_ = SignalHandler;
    sys_sigfillset(&sa.sa_mask);
    sa.sa_flags      = SA_ONSTACK|SA_SIGINFO|SA_RESETHAND;
    sys_sigaction(sync_signals[sig], &sa, (struct kernel_sigaction *)NULL);
  }

  /* Read process directories in /proc/...                                   */
  for (;;) {
    /* Some kernels know about threads, and hide them in "/proc"
     * (although they are still there, if you know the process
     * id). Threads are moved into a separate "task" directory. We
     * check there first, and then fall back on the older naming
     * convention if necessary.
     */
    if ((sig_proc = proc = c_open(*proc_path, O_RDONLY|O_DIRECTORY, 0)) < 0) {
      if (*++proc_path != NULL)
        continue;
      goto failure;
    }
    if (sys_fstat(proc, &proc_sb) < 0)
      goto failure;

    /* Since we are suspending threads, we cannot call any libc
     * functions that might acquire locks. Most notably, we cannot
     * call malloc(). So, we have to allocate memory on the stack,
     * instead. Since we do not know how much memory we need, we
     * make a best guess. And if we guessed incorrectly we retry on
     * a second iteration (by jumping to "detach_threads").
     *
     * Unless the number of threads is increasing very rapidly, we
     * should never need to do so, though, as our guestimate is very
     * conservative.
     */
    if (max_threads < proc_sb.st_nlink + 100)
      max_threads = proc_sb.st_nlink + 100;

    /* scope */ {
      /* VLA on the stack -- deliberately no malloc(), see above. */
      pid_t pids[max_threads];
      int   added_entries = 0;
      sig_num_threads     = num_threads;
      sig_pids            = pids;
      for (;;) {
        struct kernel_dirent *entry;
        char buf[4096];
        ssize_t nbytes = sys_getdents(proc, (struct kernel_dirent *)buf,
                                      sizeof(buf));
        if (nbytes < 0)
          goto failure;
        else if (nbytes == 0) {
          if (added_entries) {
            /* Need to keep iterating over "/proc" in multiple
             * passes until we no longer find any more threads. This
             * algorithm eventually completes, when all threads have
             * been suspended.
             */
            added_entries = 0;
            sys_lseek(proc, 0, SEEK_SET);
            continue;
          }
          break;
        }
        for (entry = (struct kernel_dirent *)buf;
             entry < (struct kernel_dirent *)&buf[nbytes];
             entry = (struct kernel_dirent *)((char *)entry+entry->d_reclen)) {
          if (entry->d_ino != 0) {
            const char *ptr = entry->d_name;
            pid_t pid;

            /* Some kernels hide threads by preceding the pid with a '.'     */
            if (*ptr == '.')
              ptr++;

            /* If the directory is not numeric, it cannot be a
             * process/thread
             */
            if (*ptr < '0' || *ptr > '9')
              continue;
            pid = local_atoi(ptr);

            /* Attach (and suspend) all threads                              */
            if (pid && pid != clone_pid) {
              struct kernel_stat tmp_sb;
              char fname[entry->d_reclen + 48];
              strcat(strcat(strcpy(fname, "/proc/"),
                            entry->d_name), marker_path);

              /* Check if the marker is identical to the one we created      */
              if (sys_stat(fname, &tmp_sb) >= 0 &&
                  marker_sb.st_ino == tmp_sb.st_ino) {
                long i, j;

                /* Found one of our threads, make sure it is no duplicate    */
                for (i = 0; i < num_threads; i++) {
                  /* Linear search is slow, but should not matter much for
                   * the typically small number of threads.
                   */
                  if (pids[i] == pid) {
                    /* Found a duplicate; most likely on second pass         */
                    goto next_entry;
                  }
                }

                /* Check whether data structure needs growing                */
                if (num_threads >= max_threads) {
                  /* Back to square one, this time with more memory          */
                  NO_INTR(sys_close(proc));
                  goto detach_threads;
                }

                /* Attaching to thread suspends it                           */
                pids[num_threads++] = pid;
                sig_num_threads     = num_threads;
                if (sys_ptrace(PTRACE_ATTACH, pid, (void *)0,
                               (void *)0) < 0) {
                  /* If operation failed, ignore thread. Maybe it
                   * just died?  There might also be a race
                   * condition with a concurrent core dumper or
                   * with a debugger. In that case, we will just
                   * make a best effort, rather than failing
                   * entirely.
                   */
                  num_threads--;
                  sig_num_threads = num_threads;
                  goto next_entry;
                }
                while (sys_waitpid(pid, (int *)0, __WALL) < 0) {
                  if (errno != EINTR) {
                    sys_ptrace_detach(pid);
                    num_threads--;
                    sig_num_threads = num_threads;
                    goto next_entry;
                  }
                }

                if (sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i++ != j ||
                    sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i   != j) {
                  /* Address spaces are distinct, even though both
                   * processes show the "marker". This is probably
                   * a forked child process rather than a thread.
                   */
                  sys_ptrace_detach(pid);
                  num_threads--;
                  sig_num_threads = num_threads;
                } else {
                  found_parent |= pid == ppid;
                  added_entries++;
                }
              }
            }
          }
        next_entry:;
        }
      }
      NO_INTR(sys_close(proc));
      sig_proc = proc = -1;

      /* If we failed to find any threads, try looking somewhere else in
       * /proc. Maybe, threads are reported differently on this system.
       */
      if (num_threads > 1 || !*++proc_path) {
        NO_INTR(sys_close(marker));
        sig_marker = marker = -1;

        /* If we never found the parent process, something is very wrong.
         * Most likely, we are running in debugger. Any attempt to operate
         * on the threads would be very incomplete. Let's just report an
         * error to the caller.
         */
        if (!found_parent) {
          ResumeAllProcessThreads(num_threads, pids);
          sys__exit(3);
        }

        /* Now we are ready to call the callback,
         * which takes care of resuming the threads for us.
         */
        args->result = args->callback(args->parameter, num_threads,
                                      pids, args->ap);
        args->err = errno;

        /* Callback should have resumed threads, but better safe than sorry  */
        if (ResumeAllProcessThreads(num_threads, pids)) {
          /* Callback forgot to resume at least one thread, report error     */
          args->err    = EINVAL;
          args->result = -1;
        }

        sys__exit(0);
      }
    detach_threads:
      /* Resume all threads prior to retrying the operation                  */
      ResumeAllProcessThreads(num_threads, pids);
      sig_pids = NULL;
      num_threads = 0;
      sig_num_threads = num_threads;
      max_threads += 100;
    }
  }
}
Beispiel #19
0
/*
 * Set the file-status flags of @fd to O_ASYNC via F_SETFL.
 * The syscall result is deliberately ignored (best effort).
 */
void sys_fcntl_f_setfl_o_async(int fd)
{
	(void) sys_fcntl(fd, F_SETFL, O_ASYNC);
}
Beispiel #20
0
/*
 * SVR4 fcntl(2) emulation: translate the SVR4 command (and, where
 * needed, the SVR4 flock/flags argument layouts) into their native
 * BSD equivalents, invoke the native implementation, and convert
 * any result back into SVR4 form.  Returns 0 or an errno value.
 */
int
svr4_sys_fcntl(struct lwp *l, const struct svr4_sys_fcntl_args *uap, register_t *retval)
{
	struct sys_fcntl_args	na;		/* native call arguments */
	register_t		fl_bits;	/* current F_GETFL bits */
	struct svr4_flock64	l64;
	struct svr4_flock	l32;
	struct flock		bl;		/* native lock description */
	int error;
	int bcmd;				/* native (BSD) command */

	SCARG(&na, fd) = SCARG(uap, fd);
	SCARG(&na, arg) = SCARG(uap, arg);

	switch (SCARG(uap, cmd)) {
	case SVR4_F_DUPFD:
		bcmd = F_DUPFD;
		break;

	case SVR4_F_GETFD:
		bcmd = F_GETFD;
		break;

	case SVR4_F_SETFD:
		bcmd = F_SETFD;
		break;

	case SVR4_F_GETFL:
		bcmd = F_GETFL;
		break;

	case SVR4_F_SETFL:
		/*
		 * O_ASYNC must be preserved across the emulated call: that
		 * bit is maintained by the ioctl(_, I_SETSIG, _) emulation,
		 * not by the SVR4 caller's argument.
		 */
		SCARG(&na, cmd) = F_GETFL;
		error = sys_fcntl(l, &na, &fl_bits);
		if (error != 0)
			return error;
		fl_bits &= O_ASYNC;
		fl_bits |= svr4_to_bsd_flags((u_long) SCARG(uap, arg));
		bcmd = F_SETFL;
		SCARG(&na, arg) = (void *) fl_bits;
		break;

	case SVR4_F_GETLK:
	case SVR4_F_SETLK:
	case SVR4_F_SETLKW:
		/* 32-bit lock commands: copy in an svr4_flock, convert. */
		bcmd = (SCARG(uap, cmd) == SVR4_F_GETLK) ? F_GETLK :
		    (SCARG(uap, cmd) == SVR4_F_SETLK) ? F_SETLK : F_SETLKW;

		error = copyin(SCARG(uap, arg), &l32, sizeof l32);
		if (error)
			return error;
		svr4_to_bsd_flock(&l32, &bl);

		error = do_fcntl_lock(SCARG(uap, fd), bcmd, &bl);
		if (bcmd != F_GETLK || error != 0)
			return error;

		/* F_GETLK hands the queried lock back in SVR4 layout. */
		bsd_to_svr4_flock(&bl, &l32);
		return copyout(&l32, SCARG(uap, arg), sizeof l32);

	case SVR4_F_DUP2FD:
		{
			struct sys_dup2_args da;

			SCARG(&da, from) = SCARG(uap, fd);
			SCARG(&da, to) = (int)(u_long)SCARG(uap, arg);
			error = sys_dup2(l, &da, retval);
			if (error)
				return error;
			*retval = SCARG(&da, to);
			return 0;
		}

	case SVR4_F_FREESP:
		error = copyin(SCARG(uap, arg), &l32, sizeof l32);
		if (error)
			return error;
		svr4_to_bsd_flock(&l32, &bl);
		return fd_truncate(l, SCARG(uap, fd), &bl, retval);

	case SVR4_F_GETLK64:
	case SVR4_F_SETLK64:
	case SVR4_F_SETLKW64:
		/* 64-bit lock commands: same dance with svr4_flock64. */
		bcmd = (SCARG(uap, cmd) == SVR4_F_GETLK64) ? F_GETLK :
		    (SCARG(uap, cmd) == SVR4_F_SETLK64) ? F_SETLK : F_SETLKW;

		error = copyin(SCARG(uap, arg), &l64, sizeof l64);
		if (error)
			return error;
		svr4_to_bsd_flock64(&l64, &bl);

		error = do_fcntl_lock(SCARG(uap, fd), bcmd, &bl);
		if (bcmd != F_GETLK || error != 0)
			return error;

		bsd_to_svr4_flock64(&bl, &l64);
		return copyout(&l64, SCARG(uap, arg), sizeof l64);

	case SVR4_F_FREESP64:
		error = copyin(SCARG(uap, arg), &l64, sizeof l64);
		if (error)
			return error;
		svr4_to_bsd_flock64(&l64, &bl);
		return fd_truncate(l, SCARG(uap, fd), &bl, retval);

	case SVR4_F_REVOKE:
		return fd_revoke(l, SCARG(uap, fd), retval);

	default:
		return ENOSYS;
	}

	SCARG(&na, cmd) = bcmd;

	error = sys_fcntl(l, &na, retval);
	if (error != 0)
		return error;

	/* F_GETFL delivers native flag bits; convert them for the caller. */
	if (SCARG(uap, cmd) == SVR4_F_GETFL)
		*retval = bsd_to_svr4_flags(*retval);

	return 0;
}
Beispiel #21
0
/*
 * SunOS fcntl(2) emulation.  Translates SunOS flag bits and flock
 * layouts to their BSD equivalents, calls the native sys_fcntl(),
 * and converts the F_GETFL result back to SunOS bits.
 * Returns 0 or an errno value; the call's value goes through *retval.
 */
int
sunos_sys_fcntl(struct lwp *l, const struct sunos_sys_fcntl_args *uap, register_t *retval)
{
	long flg;
	int n, ret;
	struct sys_fcntl_args bsd_ua;

	SCARG(&bsd_ua, fd) = SCARG(uap, fd);
	SCARG(&bsd_ua, cmd) = SCARG(uap, cmd);
	SCARG(&bsd_ua, arg) = SCARG(uap, arg);

	switch (SCARG(uap, cmd)) {
	case F_SETFL:
		/* Rewrite SunOS status-flag bits into BSD ones. */
		flg = (long)SCARG(uap, arg);
		n = sizeof(sunfcntl_flgtab) / sizeof(sunfcntl_flgtab[0]);
		while (--n >= 0) {
			if (flg & sunfcntl_flgtab[n].sun_flg) {
				flg &= ~sunfcntl_flgtab[n].sun_flg;
				flg |= sunfcntl_flgtab[n].bsd_flg;
			}
		}
		SCARG(&bsd_ua, arg) = (void *)flg;
		break;

	case F_GETLK:
	case F_SETLK:
	case F_SETLKW:
		{
			int error;
			struct sunos_flock	 ifl;
			struct flock		 fl;

			error = copyin(SCARG(uap, arg), &ifl, sizeof ifl);
			if (error)
				return error;
			sunos_to_bsd_flock(&ifl, &fl);

			error = do_fcntl_lock(SCARG(uap, fd), SCARG(uap, cmd), &fl);
			/* Only a successful F_GETLK returns data to userland. */
			if (error || SCARG(uap, cmd) != F_GETLK)
				return error;

			bsd_to_sunos_flock(&fl, &ifl);
			return copyout(&ifl, SCARG(uap, arg), sizeof ifl);
		}

	case SUN_F_RGETLK:
	case SUN_F_RSETLK:
	case SUN_F_CNVT:
	case SUN_F_RSETLKW:
		/* Remote (NFS) locking commands are not supported. */
		return (EOPNOTSUPP);

	default:
		break;
	}

	ret = sys_fcntl(l, &bsd_ua, retval);

	if (ret == 0 && SCARG(&bsd_ua, cmd) == F_GETFL) {
		/*
		 * sys_fcntl() delivers the F_GETFL flags through *retval,
		 * not through its errno-style return value, so the SunOS
		 * back-translation must be applied to *retval.  (The old
		 * code rewrote `ret`, leaving the flags untranslated on
		 * success and corrupting the error code on failure.)
		 */
		n = sizeof(sunfcntl_flgtab) / sizeof(sunfcntl_flgtab[0]);
		while (--n >= 0) {
			if (*retval & sunfcntl_flgtab[n].bsd_flg) {
				*retval &= ~sunfcntl_flgtab[n].bsd_flg;
				*retval |= sunfcntl_flgtab[n].sun_flg;
			}
		}
	}

	return (ret);
}