Code example #1
static int key_wait_bit_intr(void *flags)
{
	schedule();
	return signal_pending(current) ? -ERESTARTSYS : 0;
}
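
Code example #1 is the smallest form of the pattern that runs through every example on this page: sleep, then return -ERESTARTSYS if a signal arrived so the syscall can be restarted. Below is a minimal sketch of the same idea using the wait_event_interruptible() helper; the wait-queue head and flag are illustrative assumptions, not taken from the example.

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);	/* hypothetical queue */
static int demo_flag;				/* hypothetical condition */

static int wait_for_flag(void)
{
	/* Sleeps in TASK_INTERRUPTIBLE; returns -ERESTARTSYS if a signal
	 * is delivered before demo_flag becomes non-zero. */
	return wait_event_interruptible(demo_waitq, demo_flag != 0);
}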
Code example #2
File: exit.c Project: dkhapun/hafala2
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru)
{
	int flag, retval;
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	
	HW2_DBG("*** %s:%d:%s(pid=%d)\n", __FILE__, __LINE__, __FUNCTION__, pid);

	if (options & ~(WNOHANG|WUNTRACED|__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	HW2_DBG("a");
	add_wait_queue(&current->wait_chldexit,&wait);
	HW2_DBG("a");
repeat:
	HW2_DBG("a");
	flag = 0;
	HW2_DBG("a");
	current->state = TASK_INTERRUPTIBLE;
	HW2_DBG("a");
	read_lock(&tasklist_lock);
	HW2_DBG("a");
	tsk = current;
	HW2_DBG("a");
	do {
		struct task_struct *p;
	HW2_DBG("b");
	 	for (p = tsk->p_cptr ; p ; p = p->p_osptr) {
			if (pid>0) {
				if (p->pid != pid)
					continue;
			} else if (!pid) {
				if (p->pgrp != current->pgrp)
					continue;
			} else if (pid != -1) {
				if (p->pgrp != -pid)
					continue;
			}
			/* Wait for all children (clone and not) if __WALL is set;
			 * otherwise, wait for clone children *only* if __WCLONE is
			 * set; otherwise, wait for non-clone children *only*.  (Note:
			 * A "clone" child here is one that reports to its parent
			 * using a signal other than SIGCHLD.) */
			if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
			    && !(options & __WALL))
				continue;
			flag = 1;
			switch (p->state) {
			case TASK_STOPPED:
				if (!p->exit_code)
					continue;
				if (!(options & WUNTRACED) && !(p->ptrace & PT_PTRACED))
					continue;
				read_unlock(&tasklist_lock);
				retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; 
				if (!retval && stat_addr) 
					retval = put_user((p->exit_code << 8) | 0x7f, stat_addr);
				if (!retval) {
					p->exit_code = 0;
					retval = p->pid;
				}
				goto end_wait4;
			case TASK_ZOMBIE:
				current->times.tms_cutime += p->times.tms_utime + p->times.tms_cutime;
				current->times.tms_cstime += p->times.tms_stime + p->times.tms_cstime;
				read_unlock(&tasklist_lock);
				retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
				if (!retval && stat_addr)
					retval = put_user(p->exit_code, stat_addr);
				if (retval)
					goto end_wait4; 
				retval = p->pid;
				if (p->p_opptr != p->p_pptr) {
					write_lock_irq(&tasklist_lock);
					REMOVE_LINKS(p);
					p->p_pptr = p->p_opptr;
					SET_LINKS(p);
					do_notify_parent(p, SIGCHLD);
					write_unlock_irq(&tasklist_lock);
				} else
					release_task(p);
				goto end_wait4;
			default:
				continue;
			}
		}
	HW2_DBG("c");
		if (options & __WNOTHREAD)
			break;
	HW2_DBG("c");
		tsk = next_thread(tsk);
	HW2_DBG("c");
	} while (tsk != current);
	HW2_DBG("d");
	read_unlock(&tasklist_lock);
	HW2_DBG("d flag=%d\n", flag);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end_wait4;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end_wait4;
	HW2_DBG("e");
		schedule();
	HW2_DBG("f");
		goto repeat;
	}
	HW2_DBG("g");
	retval = -ECHILD;
end_wait4:
	HW2_DBG("h");
	current->state = TASK_RUNNING;
	HW2_DBG("h");
	remove_wait_queue(&current->wait_chldexit,&wait);
	HW2_DBG("h");
	return retval;
}
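
The sys_wait4() above (a debug-instrumented homework copy of the 2.4-era function) is built on the classic manual wait loop: add yourself to a wait queue, mark yourself TASK_INTERRUPTIBLE, test the condition, and either bail out with -ERESTARTSYS on a pending signal or schedule() and repeat. A generic sketch of that skeleton, with a hypothetical condition callback standing in for the child scan:

#include <linux/sched.h>
#include <linux/wait.h>

static int wait_on_queue(wait_queue_head_t *wq, int (*cond)(void *), void *arg)
{
	DECLARE_WAITQUEUE(wait, current);
	int retval = 0;

	add_wait_queue(wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (cond(arg))				/* condition met */
			break;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;		/* restartable */
			break;
		}
		schedule();				/* really sleep */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
	return retval;
}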
Code example #3
/*H:030
 * Let's jump straight to the main loop which runs the Guest.
 * Remember, this is called by the Launcher reading /dev/lguest, and we keep
 * going around and around until something interesting happens.
 */
int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
{
	/* We stop running once the Guest is dead. */
	while (!cpu->lg->dead) {
		unsigned int irq;
		bool more;

		/* First we run any hypercalls the Guest wants done. */
		if (cpu->hcall)
			do_hypercalls(cpu);

		/*
		 * It's possible the Guest did a NOTIFY hypercall to the
		 * Launcher.
		 */
		if (cpu->pending_notify) {
			/*
			 * Does it just need to write to a registered
			 * eventfd (i.e. the appropriate virtqueue thread)?
			 */
			if (!send_notify_to_eventfd(cpu)) {
				/* OK, we tell the main Launcher. */
				if (put_user(cpu->pending_notify, user))
					return -EFAULT;
				return sizeof(cpu->pending_notify);
			}
		}

		/*
		 * All long-lived kernel loops need to check with this horrible
		 * thing called the freezer.  If the Host is trying to suspend,
		 * it stops us.
		 */
		try_to_freeze();

		/* Check for signals */
		if (signal_pending(current))
			return -ERESTARTSYS;

		/*
		 * Check if there are any interrupts which can be delivered now:
		 * if so, this sets up the handler to be executed when we next
		 * run the Guest.
		 */
		irq = interrupt_pending(cpu, &more);
		if (irq < LGUEST_IRQS)
			try_deliver_interrupt(cpu, irq, more);

		/*
		 * Just make absolutely sure the Guest is still alive.  One of
		 * those hypercalls could have been fatal, for example.
		 */
		if (cpu->lg->dead)
			break;

		/*
		 * If the Guest asked to be stopped, we sleep.  The Guest's
		 * clock timer will wake us.
		 */
		if (cpu->halted) {
			set_current_state(TASK_INTERRUPTIBLE);
			/*
			 * Just before we sleep, make sure no interrupt snuck in
			 * which we should be doing.
			 */
			if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
				set_current_state(TASK_RUNNING);
			else
				schedule();
			continue;
		}

		/*
		 * OK, now we're ready to jump into the Guest.  First we put up
		 * the "Do Not Disturb" sign:
		 */
		local_irq_disable();

		/* Actually run the Guest until something happens. */
		lguest_arch_run_guest(cpu);

		/* Now we're ready to be interrupted or moved to other CPUs */
		local_irq_enable();

		/* Now we deal with whatever happened to the Guest. */
		lguest_arch_handle_trap(cpu);
	}

	/* Special case: Guest is 'dead' but wants a reboot. */
	if (cpu->lg->dead == ERR_PTR(-ERESTART))
		return -ERESTART;

	/* The Guest is dead => "No such file or directory" */
	return -ENOENT;
}
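
run_guest() is a long-lived kernel loop, so alongside signal_pending() it must also cooperate with the freezer during suspend. A reduced sketch of that bookkeeping; struct loop_ctx and its dead flag are invented for illustration:

#include <linux/freezer.h>
#include <linux/sched.h>

struct loop_ctx {			/* hypothetical per-loop state */
	int dead;
};

static int service_loop(struct loop_ctx *ctx)
{
	while (!ctx->dead) {
		try_to_freeze();		/* park here during suspend */
		if (signal_pending(current))
			return -ERESTARTSYS;	/* let the caller restart */
		/* ... one unit of real work would go here ... */
	}
	return 0;
}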
Code example #4
File: adutux.c Project: CSCLOG/beaglebone
static ssize_t adu_write(struct file *file, const __user char *buffer,
			 size_t count, loff_t *ppos)
{
	DECLARE_WAITQUEUE(waita, current);
	struct adu_device *dev;
	size_t bytes_written = 0;
	size_t bytes_to_write;
	size_t buffer_size;
	unsigned long flags;
	int retval;

	dbg(2," %s : enter, count = %Zd", __func__, count);

	dev = file->private_data;

	retval = mutex_lock_interruptible(&dev->mtx);
	if (retval)
		goto exit_nolock;

	/* verify that the device wasn't unplugged */
	if (dev->udev == NULL) {
		retval = -ENODEV;
		printk(KERN_ERR "adutux: No device or device unplugged %d\n",
		       retval);
		goto exit;
	}

	/* verify that we actually have some data to write */
	if (count == 0) {
		dbg(1," %s : write request of 0 bytes", __func__);
		goto exit;
	}

	while (count > 0) {
		add_wait_queue(&dev->write_wait, &waita);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&dev->buflock, flags);
		if (!dev->out_urb_finished) {
			spin_unlock_irqrestore(&dev->buflock, flags);

			mutex_unlock(&dev->mtx);
			if (signal_pending(current)) {
				dbg(1," %s : interrupted", __func__);
				set_current_state(TASK_RUNNING);
				retval = -EINTR;
				goto exit_onqueue;
			}
			if (schedule_timeout(COMMAND_TIMEOUT) == 0) {
				dbg(1, "%s - command timed out.", __func__);
				retval = -ETIMEDOUT;
				goto exit_onqueue;
			}
			remove_wait_queue(&dev->write_wait, &waita);
			retval = mutex_lock_interruptible(&dev->mtx);
			if (retval) {
				retval = bytes_written ? bytes_written : retval;
				goto exit_nolock;
			}

			dbg(4," %s : in progress, count = %Zd", __func__, count);
		} else {
			spin_unlock_irqrestore(&dev->buflock, flags);
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&dev->write_wait, &waita);
			dbg(4," %s : sending, count = %Zd", __func__, count);

			/* write the data into interrupt_out_buffer from userspace */
			buffer_size = le16_to_cpu(dev->interrupt_out_endpoint->wMaxPacketSize);
			bytes_to_write = count > buffer_size ? buffer_size : count;
			dbg(4," %s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd",
			    __func__, buffer_size, count, bytes_to_write);

			if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write) != 0) {
				retval = -EFAULT;
				goto exit;
			}

			/* send off the urb */
			usb_fill_int_urb(
				dev->interrupt_out_urb,
				dev->udev,
				usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress),
				dev->interrupt_out_buffer,
				bytes_to_write,
				adu_interrupt_out_callback,
				dev,
				dev->interrupt_out_endpoint->bInterval);
			dev->interrupt_out_urb->actual_length = bytes_to_write;
			dev->out_urb_finished = 0;
			retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
			if (retval < 0) {
				dev->out_urb_finished = 1;
				dev_err(&dev->udev->dev, "Couldn't submit "
					"interrupt_out_urb %d\n", retval);
				goto exit;
			}

			buffer += bytes_to_write;
			count -= bytes_to_write;

			bytes_written += bytes_to_write;
		}
	}
	mutex_unlock(&dev->mtx);
	return bytes_written;

exit:
	mutex_unlock(&dev->mtx);
exit_nolock:
	dbg(2," %s : leave, return value %d", __func__, retval);
	return retval;

exit_onqueue:
	remove_wait_queue(&dev->write_wait, &waita);
	return retval;
}
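
adu_write() bounds its sleep with schedule_timeout() and checks for signals before sleeping, returning -EINTR rather than -ERESTARTSYS because a partial write may already have happened. A sketch of that timed, interruptible wait under the same assumptions; the wait queue and timeout are parameters here rather than the driver's COMMAND_TIMEOUT:

#include <linux/sched.h>
#include <linux/wait.h>

static int timed_wait(wait_queue_head_t *wq, long timeout_jiffies)
{
	DECLARE_WAITQUEUE(wait, current);
	int retval = 0;

	add_wait_queue(wq, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		retval = -EINTR;		/* checked before sleeping */
	else if (schedule_timeout(timeout_jiffies) == 0)
		retval = -ETIMEDOUT;		/* timer expired, no wakeup */
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
	return retval;
}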
Code example #5
File: nbd.c Project: JonnyH/pandora-kernel
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = lo->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(lo->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	do {
		sock->sk->sk_allocation = GFP_NOIO;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send) {
			struct timer_list ti;

			if (lo->xmit_timeout) {
				init_timer(&ti);
				ti.function = nbd_xmit_timeout;
				ti.data = (unsigned long)current;
				ti.expires = jiffies + lo->xmit_timeout;
				add_timer(&ti);
			}
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
			if (lo->xmit_timeout)
				del_timer_sync(&ti);
		} else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (signal_pending(current)) {
			siginfo_t info;
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				task_pid_nr(current), current->comm,
				dequeue_signal_lock(current, &current->blocked, &info));
			result = -EINTR;
			sock_shutdown(lo, !send);
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);

	return result;
}
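
sock_xmit() must not be interrupted by ordinary signals mid-transfer, so it blocks everything except SIGKILL for the duration and restores the caller's mask afterwards. The masking idiom in isolation, with the transfer reduced to a hypothetical callback:

#include <linux/sched.h>
#include <linux/signal.h>

static void run_with_sigkill_only(void (*xfer)(void *), void *arg)
{
	sigset_t blocked, oldset;

	/* Block every signal except SIGKILL... */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	xfer(arg);	/* only SIGKILL can make signal_pending() true here */

	/* ...then restore the caller's mask. */
	sigprocmask(SIG_SETMASK, &oldset, NULL);
}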
Code example #6
File: msg.c Project: 274914765/C
long do_msgsnd(int msqid, long mtype, void __user *mtext,
        size_t msgsz, int msgflg)
{
    struct msg_queue *msq;
    struct msg_msg *msg;
    int err;
    struct ipc_namespace *ns;

    ns = current->nsproxy->ipc_ns;

    if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
        return -EINVAL;
    if (mtype < 1)
        return -EINVAL;

    msg = load_msg(mtext, msgsz);
    if (IS_ERR(msg))
        return PTR_ERR(msg);

    msg->m_type = mtype;
    msg->m_ts = msgsz;

    msq = msg_lock_check(ns, msqid);
    if (IS_ERR(msq)) {
        err = PTR_ERR(msq);
        goto out_free;
    }

    for (;;) {
        struct msg_sender s;

        err = -EACCES;
        if (ipcperms(&msq->q_perm, S_IWUGO))
            goto out_unlock_free;

        err = security_msg_queue_msgsnd(msq, msg, msgflg);
        if (err)
            goto out_unlock_free;

        if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
                1 + msq->q_qnum <= msq->q_qbytes) {
            break;
        }

        /* queue full, wait: */
        if (msgflg & IPC_NOWAIT) {
            err = -EAGAIN;
            goto out_unlock_free;
        }
        ss_add(msq, &s);
        ipc_rcu_getref(msq);
        msg_unlock(msq);
        schedule();

        ipc_lock_by_ptr(&msq->q_perm);
        ipc_rcu_putref(msq);
        if (msq->q_perm.deleted) {
            err = -EIDRM;
            goto out_unlock_free;
        }
        ss_del(&s);

        if (signal_pending(current)) {
            err = -ERESTARTNOHAND;
            goto out_unlock_free;
        }
    }

    msq->q_lspid = task_tgid_vnr(current);
    msq->q_stime = get_seconds();

    if (!pipelined_send(msq, msg)) {
        /* no one is waiting for this message, enqueue it */
        list_add_tail(&msg->m_list, &msq->q_messages);
        msq->q_cbytes += msgsz;
        msq->q_qnum++;
        atomic_add(msgsz, &ns->msg_bytes);
        atomic_inc(&ns->msg_hdrs);
    }

    err = 0;
    msg = NULL;

out_unlock_free:
    msg_unlock(msq);
out_free:
    if (msg != NULL)
        free_msg(msg);
    return err;
}
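
do_msgsnd() shows the unlock-sleep-relock dance: the queue lock cannot be held across schedule(), so the code takes a reference, drops the lock, sleeps, then relocks and revalidates, checking for deletion before checking for signals. A compressed sketch of that sequence with a hypothetical refcounted object; in the real function, ss_add() has already queued the task and set it TASK_INTERRUPTIBLE before this point:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct waited_obj {			/* hypothetical refcounted object */
	spinlock_t lock;
	atomic_t refs;
	int deleted;
};

static int sleep_then_revalidate(struct waited_obj *obj)
{
	atomic_inc(&obj->refs);		/* keep it alive while unlocked */
	spin_unlock(&obj->lock);

	schedule();			/* task state was set by the caller */

	spin_lock(&obj->lock);
	atomic_dec(&obj->refs);
	if (obj->deleted)
		return -EIDRM;		/* object vanished while we slept */
	if (signal_pending(current))
		return -ERESTARTNOHAND;	/* restart unless a handler runs */
	return 0;
}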
Code example #7
File: osprd.c Project: smanikar/ramdisk
/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int r = 0;

	// is file open for writing?
	int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;

	osprd_info_t *d = file2osprd(filp);	// device info
	DEFINE_WAIT(wait);		// wait queue entry in case we block
	wait.func = &default_wake_function;

	// This line avoids compiler warnings; you may remove it.
	(void) filp_writable, (void) d;
	
	// Set 'r' to the ioctl's return value: 0 on success, negative on error

	if (cmd == OSPRDIOCACQUIRE) {
		
		// EXERCISE: Lock the ramdisk.
		//
		// If *filp is a writable file, then attempt to write-lock
		// the ramdisk; otherwise attempt to read-lock the ramdisk.
		//
        // This lock request must block using 'd->blockq' until:
		// 1) no other process holds a write lock;
		// 2) either the request is for a read lock, or no other process
		//    holds a read lock; and
		// 3) lock requests should be serviced in order, so no process
		//    that blocked earlier is still blocked waiting for the
		//    lock.
		//
		// If a process acquires a lock, mark this fact by setting
		// 'filp->f_flags |= F_OSPRD_LOCKED'.  You may also need to
		// keep track of how many read and write locks are held:
		// change the 'osprd_info_t' structure to do this.
		//
		// Also wake up processes waiting on 'd->blockq' as needed.
		//
		// If the lock request would cause a deadlock, return -EDEADLK.
		// If the lock request blocks and is awoken by a signal, then
		// return -ERESTARTSYS. 
		// Otherwise, if we can grant the lock request, return 0.

		// Your code here (instead of the next two lines).
		if(filp_writable){
			// Attempt to take write lock
			if(d->num_ramdisks_open){
				d->num_ramdisks_open = 0;
				r = -EDEADLK;
				return r;
			}
			
			if(waitqueue_active(&d->blockq) || d->write_lock_count || 
					d->read_lock_count || (filp->f_flags & F_OSPRD_LOCKED)) {
				/* Enqueue writer process and call scheduler if
				 *   i. Wait queue is not empty
				 *  ii. No. of readers > 0
				 * iii. No. of writers > 0
				 *  iv. Ramdisk has been locked
				 */
				osp_spin_lock(&d->mutex);
				prepare_to_wait_exclusive(&d->blockq,&wait,TASK_INTERRUPTIBLE);
				osp_spin_unlock(&d->mutex);
				do{
					schedule();
					/* if a signal has occurred, return ERESTARTSYS to caller */
					if(signal_pending(current)){
						r = -ERESTARTSYS;
						return r;
					}
				}while(d->write_lock_count || d->read_lock_count || 
						(filp->f_flags & F_OSPRD_LOCKED));
				/* All conditions for locking satisfied; unblock (dequeue) */
				finish_wait(&d->blockq, &wait);
			}

			/* Acquire write lock */
			osp_spin_lock(&d->mutex);
			filp->f_flags |= F_OSPRD_LOCKED;
			d->write_lock_count++;
			osp_spin_unlock(&d->mutex);

		} else {
			// Attempt to take read lock
			/* Enqueue reader process and call scheduler if
			 *   i. Wait queue is not empty
			 *  ii. No. of writers > 0
			 * iii. Ramdisk has been locked
			 */
			if(waitqueue_active(&d->blockq) || d->write_lock_count || 
					(filp->f_flags & F_OSPRD_LOCKED)) {
				osp_spin_lock(&d->mutex);
				prepare_to_wait_exclusive(&d->blockq,&wait,TASK_INTERRUPTIBLE);
				osp_spin_unlock(&d->mutex);
				do{
					schedule();
					/* if a signal has occurred, return ERESTARTSYS to caller */
					if(signal_pending(current)){
						r = -ERESTARTSYS;
						return r;
					}
				}
				while(d->write_lock_count || (filp->f_flags & F_OSPRD_LOCKED));
				/* All conditions for locking satisfied; unblock (dequeue) */
				finish_wait(&d->blockq, &wait);
			}

			/* Acquire read lock */
			osp_spin_lock(&d->mutex);
			filp->f_flags |= F_OSPRD_LOCKED;
			d->read_lock_count++;
#if 0
			/* Wake up next reader in the queue to ensure that
			 * - when a writer dequeues, all subsequent readers in the queue
			 *   till the first writer, are woken up.
			 * - the writer reaches the head of the queue to be called next
			 *
			 * This causes TEST CASE 15 to fail, so I have commented it out.
			 */
			if(waitqueue_active(&d->blockq))
				wake_up(&d->blockq);
#endif
			osp_spin_unlock(&d->mutex);
		}
#if 0
		eprintk("Attempting to acquire\n");
		r = -ENOTTY;
#endif
		
	} else if (cmd == OSPRDIOCTRYACQUIRE) {
		
		// EXERCISE: ATTEMPT Lock the ramdisk.
		//
		// This is just like OSPRDIOCACQUIRE, except it should never
		// block.  If OSPRDIOCACQUIRE would block or return deadlock,
		// OSPRDIOCTRYACQUIRE should return -EBUSY.
		// Otherwise, if we can grant the lock request, return 0.

		// Your code here (instead of the next two lines).
		if(filp_writable){
			// Attempt to take write lock
			/* The write lock cannot be granted immediately if
			 *   i. Wait queue is not empty
			 *  ii. No. of readers > 0
			 * iii. No. of writers > 0
			 *  iv. Ramdisk has been locked
			 */
			if(waitqueue_active(&d->blockq) || d->write_lock_count ||
					d->read_lock_count || (filp->f_flags & F_OSPRD_LOCKED)) {
				/* Not able to acquire write lock; return EBUSY */
				r = -EBUSY;
				return r;
			}
			/* Acquire write lock */
			osp_spin_lock(&d->mutex);
			filp->f_flags |= F_OSPRD_LOCKED;
			d->write_lock_count++;
			osp_spin_unlock(&d->mutex);
		} else {
			/* The read lock cannot be granted immediately if
			 *   i. Wait queue is not empty
			 *  ii. No. of writers > 0
			 * iii. Ramdisk has been locked
			 */
			if(waitqueue_active(&d->blockq) || d->write_lock_count ||
					(filp->f_flags & F_OSPRD_LOCKED)) {
				/* Not able to acquire read lock; return EBUSY */
				r = -EBUSY;
				return r;
			}
			/* Acquire read lock */
			osp_spin_lock(&d->mutex);
			filp->f_flags |= F_OSPRD_LOCKED;
			d->read_lock_count++;
			osp_spin_unlock(&d->mutex);
		}
#if 0
		eprintk("Attempting to try acquire\n");
		r = -ENOTTY;
#endif 
		
	} else if (cmd == OSPRDIOCRELEASE) {
		
		// EXERCISE: Unlock the ramdisk.
		//
		// If the file hasn't locked the ramdisk, return -EINVAL.
		// Otherwise, clear the lock from filp->f_flags, wake up
		// the wait queue, perform any additional accounting steps
		// you need, and return 0.
		
		// Your code here (instead of the next line).
		if(!(filp->f_flags & F_OSPRD_LOCKED))
			/* you should not be here */
			r = -EINVAL;
		else {
			/* Release read or write lock as appropriate */
			osp_spin_lock(&d->mutex);
			filp->f_flags &= (~F_OSPRD_LOCKED);
			if(filp_writable)
				d->write_lock_count = 0; 
			else
				d->read_lock_count--;
			if(waitqueue_active(&d->blockq)) {
				wake_up(&d->blockq);
			}
			//d->num_ramdisks_open--;
			osp_spin_unlock(&d->mutex);
		}

		// r = -ENOTTY;

	} else
		r = -ENOTTY; /* unknown command */
	return r;
}
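
The osprd exercise hand-rolls its blocking with prepare_to_wait_exclusive(), but note that its signal path returns -ERESTARTSYS without calling finish_wait(), leaving a stale entry on the wait queue, and the wait entry is only armed once before the schedule() loop. The canonical exclusive interruptible wait re-arms on every iteration and always unwinds through finish_wait(); a generic sketch with a hypothetical readiness callback:

#include <linux/sched.h>
#include <linux/wait.h>

static int acquire_when(wait_queue_head_t *wq, int (*ready)(void *), void *arg)
{
	DEFINE_WAIT(wait);
	int err = 0;

	for (;;) {
		/* Re-arm the wait entry, then re-test the condition. */
		prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
		if (ready(arg))
			break;
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(wq, &wait);	/* always unlink, even on the signal path */
	return err;
}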
Code example #8
File: ppdev.c Project: Camedpuffer/linux
static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
		       loff_t *ppos)
{
	unsigned int minor = iminor(file_inode(file));
	struct pp_struct *pp = file->private_data;
	char *kbuffer;
	ssize_t bytes_read = 0;
	struct parport *pport;
	int mode;

	if (!(pp->flags & PP_CLAIMED)) {
		/* Don't have the port claimed */
		pr_debug(CHRDEV "%x: claim the port first\n", minor);
		return -EINVAL;
	}

	/* Trivial case. */
	if (count == 0)
		return 0;

	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
	if (!kbuffer)
		return -ENOMEM;
	pport = pp->pdev->port;
	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);

	parport_set_timeout(pp->pdev,
			    (file->f_flags & O_NONBLOCK) ?
			    PARPORT_INACTIVITY_O_NONBLOCK :
			    pp->default_inactivity);

	while (bytes_read == 0) {
		ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);

		if (mode == IEEE1284_MODE_EPP) {
			/* various specials for EPP mode */
			int flags = 0;
			size_t (*fn)(struct parport *, void *, size_t, int);

			if (pp->flags & PP_W91284PIC)
				flags |= PARPORT_W91284PIC;
			if (pp->flags & PP_FASTREAD)
				flags |= PARPORT_EPP_FAST;
			if (pport->ieee1284.mode & IEEE1284_ADDR)
				fn = pport->ops->epp_read_addr;
			else
				fn = pport->ops->epp_read_data;
			bytes_read = (*fn)(pport, kbuffer, need, flags);
		} else {
			bytes_read = parport_read(pport, kbuffer, need);
		}

		if (bytes_read != 0)
			break;

		if (file->f_flags & O_NONBLOCK) {
			bytes_read = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			bytes_read = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	parport_set_timeout(pp->pdev, pp->default_inactivity);

	if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
		bytes_read = -EFAULT;

	kfree(kbuffer);
	pp_enable_irq(pp);
	return bytes_read;
}
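
pp_read() does not sleep on a wait queue at all: it retries the port read, bails out for O_NONBLOCK or a pending signal, and otherwise yields with cond_resched(). The polling skeleton in isolation, with a hypothetical try_read() that returns 0 for "nothing yet":

#include <linux/sched.h>

static ssize_t polling_read(int nonblock,
			    ssize_t (*try_read)(void *), void *arg)
{
	ssize_t n = 0;

	while (n == 0) {
		n = try_read(arg);		/* 0 means no data yet */
		if (n != 0)
			break;
		if (nonblock)
			return -EAGAIN;
		if (signal_pending(current))
			return -ERESTARTSYS;
		cond_resched();			/* polite busy-wait */
	}
	return n;
}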
Code example #9
File: ppdev.c Project: Camedpuffer/linux
static ssize_t pp_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int minor = iminor(file_inode(file));
	struct pp_struct *pp = file->private_data;
	char *kbuffer;
	ssize_t bytes_written = 0;
	ssize_t wrote;
	int mode;
	struct parport *pport;

	if (!(pp->flags & PP_CLAIMED)) {
		/* Don't have the port claimed */
		pr_debug(CHRDEV "%x: claim the port first\n", minor);
		return -EINVAL;
	}

	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
	if (!kbuffer)
		return -ENOMEM;

	pport = pp->pdev->port;
	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);

	parport_set_timeout(pp->pdev,
			    (file->f_flags & O_NONBLOCK) ?
			    PARPORT_INACTIVITY_O_NONBLOCK :
			    pp->default_inactivity);

	while (bytes_written < count) {
		ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);

		if (copy_from_user(kbuffer, buf + bytes_written, n)) {
			bytes_written = -EFAULT;
			break;
		}

		if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
			/* do a fast EPP write */
			if (pport->ieee1284.mode & IEEE1284_ADDR) {
				wrote = pport->ops->epp_write_addr(pport,
					kbuffer, n, PARPORT_EPP_FAST);
			} else {
				wrote = pport->ops->epp_write_data(pport,
					kbuffer, n, PARPORT_EPP_FAST);
			}
		} else {
			wrote = parport_write(pp->pdev->port, kbuffer, n);
		}

		if (wrote <= 0) {
			if (!bytes_written)
				bytes_written = wrote;
			break;
		}

		bytes_written += wrote;

		if (file->f_flags & O_NONBLOCK) {
			if (!bytes_written)
				bytes_written = -EAGAIN;
			break;
		}

		if (signal_pending(current))
			break;

		cond_resched();
	}

	parport_set_timeout(pp->pdev, pp->default_inactivity);

	kfree(kbuffer);
	pp_enable_irq(pp);
	return bytes_written;
}
Code example #10
static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block, int *err)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	struct ivtv_buffer *buf;
	DEFINE_WAIT(wait);

	*err = 0;
	while (1) {
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG) {
			/* Process pending program info updates and pending VBI data */
			ivtv_update_pgm_info(itv);

			if (time_after(jiffies,
				       itv->dualwatch_jiffies +
				       msecs_to_jiffies(1000))) {
				itv->dualwatch_jiffies = jiffies;
				ivtv_dualwatch(itv);
			}

			if (test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
			    !test_bit(IVTV_F_S_APPL_IO, &s_vbi->s_flags)) {
				while ((buf = ivtv_dequeue(s_vbi, &s_vbi->q_full))) {
					/* byteswap and process VBI data */
					ivtv_process_vbi_data(itv, buf, s_vbi->dma_pts, s_vbi->type);
					ivtv_enqueue(s_vbi, buf, &s_vbi->q_free);
				}
			}
			buf = &itv->vbi.sliced_mpeg_buf;
			if (buf->readpos != buf->bytesused) {
				return buf;
			}
		}

		/* do we have leftover data? */
		buf = ivtv_dequeue(s, &s->q_io);
		if (buf)
			return buf;

		/* do we have new data? */
		buf = ivtv_dequeue(s, &s->q_full);
		if (buf) {
			if ((buf->b_flags & IVTV_F_B_NEED_BUF_SWAP) == 0)
				return buf;
			buf->b_flags &= ~IVTV_F_B_NEED_BUF_SWAP;
			if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
				/* byteswap MPG data */
				ivtv_buf_swap(buf);
			else if (s->type != IVTV_DEC_STREAM_TYPE_VBI) {
				/* byteswap and process VBI data */
				ivtv_process_vbi_data(itv, buf, s->dma_pts, s->type);
			}
			return buf;
		}

		/* return if end of stream */
		if (s->type != IVTV_DEC_STREAM_TYPE_VBI && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
			IVTV_DEBUG_INFO("EOS %s\n", s->name);
			return NULL;
		}

		/* return if file was opened with O_NONBLOCK */
		if (non_block) {
			*err = -EAGAIN;
			return NULL;
		}

		/* wait for more data to arrive */
		mutex_unlock(&itv->serialize_lock);
		prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
		/* New buffers might have become available before we were added to the waitqueue */
		if (!s->q_full.buffers)
			schedule();
		finish_wait(&s->waitq, &wait);
		mutex_lock(&itv->serialize_lock);
		if (signal_pending(current)) {
			/* return if a signal was received */
			IVTV_DEBUG_INFO("User stopped %s\n", s->name);
			*err = -EINTR;
			return NULL;
		}
	}
}
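
ivtv_get_buffer() re-checks s->q_full.buffers between prepare_to_wait() and schedule(): once the task is queued in TASK_INTERRUPTIBLE, a wakeup that raced with the earlier dequeue attempts simply makes schedule() return immediately, so no event is lost. The race-free ordering in isolation, with a hypothetical re-test callback:

#include <linux/sched.h>
#include <linux/wait.h>

static void wait_unless_ready(wait_queue_head_t *wq,
			      int (*ready)(void *), void *arg)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
	if (!ready(arg))	/* re-check AFTER queueing ourselves */
		schedule();
	finish_wait(wq, &wait);
}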
Code example #11
static ssize_t ivtv_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
	struct ivtv_open_id *id = fh2id(filp->private_data);
	struct ivtv *itv = id->itv;
	struct ivtv_stream *s = &itv->streams[id->type];
	struct yuv_playback_info *yi = &itv->yuv_info;
	struct ivtv_buffer *buf;
	struct ivtv_queue q;
	int bytes_written = 0;
	int mode;
	int rc;
	DEFINE_WAIT(wait);

	IVTV_DEBUG_HI_FILE("write %zd bytes to %s\n", count, s->name);

	if (s->type != IVTV_DEC_STREAM_TYPE_MPG &&
	    s->type != IVTV_DEC_STREAM_TYPE_YUV &&
	    s->type != IVTV_DEC_STREAM_TYPE_VOUT)
		/* not decoder streams */
		return -EINVAL;

	/* Try to claim this stream */
	if (ivtv_claim_stream(id, s->type))
		return -EBUSY;

	/* This stream does not need to start any decoding */
	if (s->type == IVTV_DEC_STREAM_TYPE_VOUT) {
		int elems = count / sizeof(struct v4l2_sliced_vbi_data);

		set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
		return ivtv_write_vbi_from_user(itv,
		   (const struct v4l2_sliced_vbi_data __user *)user_buf, elems);
	}

	mode = s->type == IVTV_DEC_STREAM_TYPE_MPG ? OUT_MPG : OUT_YUV;

	if (ivtv_set_output_mode(itv, mode) != mode) {
		ivtv_release_stream(s);
		return -EBUSY;
	}
	ivtv_queue_init(&q);
	set_bit(IVTV_F_S_APPL_IO, &s->s_flags);

	/* Start decoder (returns 0 if already started) */
	rc = ivtv_start_decoding(id, itv->speed);
	if (rc) {
		IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);

		/* failure, clean up */
		clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
		clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
		return rc;
	}

retry:
	/* If possible, just DMA the entire frame - Check the data transfer size
	since we may get here before the stream has been fully set-up */
	if (mode == OUT_YUV && s->q_full.length == 0 && itv->dma_data_req_size) {
		while (count >= itv->dma_data_req_size) {
			rc = ivtv_yuv_udma_stream_frame(itv, (void __user *)user_buf);

			if (rc < 0)
				return rc;

			bytes_written += itv->dma_data_req_size;
			user_buf += itv->dma_data_req_size;
			count -= itv->dma_data_req_size;
		}
		if (count == 0) {
			IVTV_DEBUG_HI_FILE("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
			return bytes_written;
		}
	}

	for (;;) {
		/* Gather buffers */
		while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_io)))
			ivtv_enqueue(s, buf, &q);
		while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_free))) {
			ivtv_enqueue(s, buf, &q);
		}
		if (q.buffers)
			break;
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		mutex_unlock(&itv->serialize_lock);
		prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
		/* New buffers might have become free before we were added to the waitqueue */
		if (!s->q_free.buffers)
			schedule();
		finish_wait(&s->waitq, &wait);
		mutex_lock(&itv->serialize_lock);
		if (signal_pending(current)) {
			IVTV_DEBUG_INFO("User stopped %s\n", s->name);
			return -EINTR;
		}
	}

	/* copy user data into buffers */
	while ((buf = ivtv_dequeue(s, &q))) {
		/* yuv is a pain. Don't copy more data than needed for a single
		   frame, otherwise we lose sync with the incoming stream */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV &&
		    yi->stream_size + count > itv->dma_data_req_size)
			rc  = ivtv_buf_copy_from_user(s, buf, user_buf,
				itv->dma_data_req_size - yi->stream_size);
		else
			rc = ivtv_buf_copy_from_user(s, buf, user_buf, count);

		/* Make sure we really got all the user data */
		if (rc < 0) {
			ivtv_queue_move(s, &q, NULL, &s->q_free, 0);
			return rc;
		}
		user_buf += rc;
		count -= rc;
		bytes_written += rc;

		if (s->type == IVTV_DEC_STREAM_TYPE_YUV) {
			yi->stream_size += rc;
			/* If we have a complete yuv frame, break loop now */
			if (yi->stream_size == itv->dma_data_req_size) {
				ivtv_enqueue(s, buf, &s->q_full);
				yi->stream_size = 0;
				break;
			}
		}

		if (buf->bytesused != s->buf_size) {
			/* incomplete, leave in q_io for next time */
			ivtv_enqueue(s, buf, &s->q_io);
			break;
		}
		/* Byteswap MPEG buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_buf_swap(buf);
		ivtv_enqueue(s, buf, &s->q_full);
	}

	if (test_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags)) {
		if (s->q_full.length >= itv->dma_data_req_size) {
			int got_sig;

			if (mode == OUT_YUV)
				ivtv_yuv_setup_stream_frame(itv);

			mutex_unlock(&itv->serialize_lock);
			prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
			while (!(got_sig = signal_pending(current)) &&
					test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
				schedule();
			}
			finish_wait(&itv->dma_waitq, &wait);
			mutex_lock(&itv->serialize_lock);
			if (got_sig) {
				IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
				return -EINTR;
			}

			clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
			ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
			ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 1);
		}
	}
	/* more user data is available, wait until buffers become free
	   to transfer the rest. */
	if (count && !(filp->f_flags & O_NONBLOCK))
		goto retry;
	IVTV_DEBUG_HI_FILE("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
	return bytes_written;
}
Code example #12
static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t len)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sk_buff *skb;
	long timeo;
	int flags = msg->msg_flags;
	int err, done;

	if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
					flags & MSG_DONTWAIT, &err);
	if (!skb)
		return -ENOBUFS;

	skb_reserve(skb, MAX_PHONET_HEADER + 3);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err < 0)
		goto outfree;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
		err = -ENOTCONN;
		goto out;
	}
	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Wait until the pipe gets to enabled state */
disabled:
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out;

		if (sk->sk_state == TCP_CLOSE_WAIT) {
			err = -ECONNRESET;
			goto out;
		}
	}
	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	/* Wait until flow control allows TX */
	done = atomic_read(&pn->tx_credits);
	while (!done) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EAGAIN;
			goto out;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait(&sk->sk_socket->wait, &wait,
				TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
		finish_wait(&sk->sk_socket->wait, &wait);

		if (sk->sk_state != TCP_ESTABLISHED)
			goto disabled;
	}

	err = pipe_skb_send(sk, skb);
	if (err >= 0)
		err = len; /* success! */
	skb = NULL;
out:
	release_sock(sk);
outfree:
	kfree_skb(skb);
	return err;
}
Code example #13
static int jffs2_garbage_collect_thread(void *_c)
{
    struct jffs2_sb_info *c = _c;

    daemonize("jffs2_gcd_mtd%d", c->mtd->index);
    allow_signal(SIGKILL);
    allow_signal(SIGSTOP);
    allow_signal(SIGCONT);

    c->gc_task = current;
    up(&c->gc_thread_start);

    set_user_nice(current, 10);

    for (;;) {
        allow_signal(SIGHUP);

        if (!jffs2_thread_should_wake(c)) {
            set_current_state (TASK_INTERRUPTIBLE);
            D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
            /* Yes, there's a race here; we checked jffs2_thread_should_wake()
               before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
               matter - We don't care if we miss a wakeup, because the GC thread
               is only an optimisation anyway. */
            schedule();
        }

        if (current->flags & PF_FREEZE) {
            refrigerator(0);
            /* refrigerator() should recalc sigpending for us
               but doesn't. No matter - allow_signal() will. */
            continue;
        }

        cond_resched();

        /* Put_super will send a SIGKILL and then wait on the sem.
         */
        while (signal_pending(current)) {
            siginfo_t info;
            unsigned long signr;

            signr = dequeue_signal_lock(current, &current->blocked, &info);

            switch(signr) {
            case SIGSTOP:
                D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
                set_current_state(TASK_STOPPED);
                schedule();
                break;

            case SIGKILL:
                D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
die:
                spin_lock(&c->erase_completion_lock);
                c->gc_task = NULL;
                spin_unlock(&c->erase_completion_lock);
                complete_and_exit(&c->gc_thread_exit, 0);

            case SIGHUP:
                D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
                break;
            default:
                D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
            }
        }
        /* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
        disallow_signal(SIGHUP);

        D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
        if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
            printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
            goto die;
        }
    }
}
Code example #14
File: adapter.c Project: leonsh/eldk30ppc
/*
 * This task waits until at least one touchscreen is touched.  It then loops
 * digitizing and generating events until no touchscreens are being touched.
 */
static int
xts_thread(void *arg)
{
	int any_pens_down;
	struct xts_dev *dev;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	xts_task = tsk;

	daemonize();
	reparent_to_init();
	strcpy(xts_task->comm, XTS_NAME);
	xts_task->tty = NULL;

	/* only want to receive SIGKILL */
	spin_lock_irq(&xts_task->sigmask_lock);
	siginitsetinv(&xts_task->blocked, sigmask(SIGKILL));
	recalc_sigpending(xts_task);
	spin_unlock_irq(&xts_task->sigmask_lock);

	complete(&task_sync);

	add_wait_queue(&irq_wait, &wait);
	any_pens_down = 0;
	for (;;) {
		/*
		 * Block waiting for interrupt or if any pens are down, either
		 * an interrupt or timeout to sample again.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (any_pens_down)
			schedule_timeout(HZ / 100);
		while (signal_pending(tsk)) {
			siginfo_t info;

			/* Only honor the signal if we're cleaning up */
			if (task_shutdown)
				goto exit;
			/*
			 * Someone else sent us a kill (probably the
			 * shutdown scripts "Sending all processes the
			 * KILL signal").  Just dequeue it and ignore
			 * it.
			 */
			spin_lock_irq(&current->sigmask_lock);
			(void)dequeue_signal(&current->blocked, &info);
			spin_unlock_irq(&current->sigmask_lock);
		}
		schedule();

		any_pens_down = 0;
		for (dev = dev_list; dev; dev = dev->next_dev) {
			if (dev->pen_is_down) {
				u32 x, y;
				XTouchscreen_GetPosition_2D(&dev->Touchscreen,
							    &x, &y);
				event_add(dev, 255, (u16) x, (u16) y);
				dev->pen_was_down = 1;
				any_pens_down = 1;
			} else if (dev->pen_was_down) {
				event_add(dev, 0, 0, 0);
				dev->pen_was_down = 0;
			}
		}
	}

exit:
	remove_wait_queue(&irq_wait, &wait);

	xts_task = NULL;
	complete_and_exit(&task_sync, 0);
}
Code example #15
File: svc.c Project: GodFox/magx_kernel_xpixl
static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct atmsvc_msg *msg;
	struct atm_vcc *old_vcc = ATM_SD(sock);
	struct atm_vcc *new_vcc;
	int error;

	lock_sock(sk);

	error = svc_create(newsock,0);
	if (error)
		goto out;

	new_vcc = ATM_SD(newsock);

	DPRINTK("svc_accept %p -> %p\n",old_vcc,new_vcc);
	while (1) {
		DEFINE_WAIT(wait);

		prepare_to_wait(old_vcc->sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		while (!(skb = skb_dequeue(&old_vcc->sk->sk_receive_queue)) &&
		       sigd) {
			if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break;
			if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) {
				error = -sk->sk_err;
				break;
			}
			if (flags & O_NONBLOCK) {
				error = -EAGAIN;
				break;
			}
			release_sock(sk);
			schedule();
			lock_sock(sk);
			if (signal_pending(current)) {
				error = -ERESTARTSYS;
				break;
			}
			prepare_to_wait(old_vcc->sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		}
		finish_wait(old_vcc->sk->sk_sleep, &wait);
		if (error)
			goto out;
		if (!skb) {
			error = -EUNATCH;
			goto out;
		}
		msg = (struct atmsvc_msg *) skb->data;
		new_vcc->qos = msg->qos;
		set_bit(ATM_VF_HASQOS,&new_vcc->flags);
		new_vcc->remote = msg->svc;
		new_vcc->local = msg->local;
		new_vcc->sap = msg->sap;
		error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
				    msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci);
		dev_kfree_skb(skb);
		old_vcc->sk->sk_ack_backlog--;
		if (error) {
			sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL,
			    &old_vcc->qos,error);
			error = error == -EAGAIN ? -EBUSY : error;
			goto out;
		}
		/* wait should be short, so we ignore the non-blocking flag */
		set_bit(ATM_VF_WAITING, &new_vcc->flags);
		prepare_to_wait(new_vcc->sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
		sigd_enq(new_vcc,as_accept,old_vcc,NULL,NULL);
		while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
			prepare_to_wait(new_vcc->sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
		}
		finish_wait(new_vcc->sk->sk_sleep, &wait);
		if (!sigd) {
			error = -EUNATCH;
			goto out;
		}
		if (!new_vcc->sk->sk_err)
			break;
		if (new_vcc->sk->sk_err != ERESTARTSYS) {
			error = -new_vcc->sk->sk_err;
			goto out;
		}
	}
	newsock->state = SS_CONNECTED;
out:
	release_sock(sk);
	return error;
}
Code example #16
File: mutex.c Project: FatSunHYS/OSCourseDesign
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	debug_mutex_add_waiter(lock, &waiter, task->thread_info);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE &&
						signal_pending(task))) {
			mutex_remove_waiter(lock, &waiter, task->thread_info);
			mutex_release(&lock->dep_map, 1, _RET_IP_);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task->thread_info);
	debug_mutex_set_owner(lock, task->thread_info);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}
Code example #17
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	daemonize("jffs2_gcd_mtd%d", c->mtd->index);
	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	c->gc_task = current;
	complete(&c->gc_thread_start);

	set_user_nice(current, 10);

	set_freezable();
	for (;;) {
		allow_signal(SIGHUP);
	again:
		spin_lock(&c->erase_completion_lock);
		if (!jffs2_thread_should_wake(c)) {
			set_current_state (TASK_INTERRUPTIBLE);
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
			schedule();
		} else
			spin_unlock(&c->erase_completion_lock);

		/* Problem - immediately after bootup, the GCD spends a lot
		 * of time in places like jffs2_kill_fragtree(); so much so
		 * that userspace processes (like gdm and X) are starved
		 * despite plenty of cond_resched()s and renicing.  Yield()
		 * doesn't help, either (presumably because userspace and GCD
		 * are generally competing for a higher latency resource -
		 * disk).
		 * This forces the GCD to slow the hell down.   Pulling an
		 * inode in with read_inode() is much preferable to having
		 * the GC thread get there first. */
		schedule_timeout_interruptible(msecs_to_jiffies(50));

		/* Put_super will send a SIGKILL and then wait on the sem.
		 */
		while (signal_pending(current) || freezing(current)) {
			siginfo_t info;
			unsigned long signr;

			if (try_to_freeze())
				goto again;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
				goto die;

			case SIGHUP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
				break;
			default:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}
 die:
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}
Code example #18
static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
          unsigned long nr_segs, loff_t pos)
{
    struct file *filp = iocb->ki_filp;
    struct inode *inode = filp->f_path.dentry->d_inode;
    struct pipe_inode_info *pipe;
    int do_wakeup;
    ssize_t ret;
    struct iovec *iov = (struct iovec *)_iov;
    size_t total_len;

    total_len = iov_length(iov, nr_segs);
    /* Null read succeeds. */
    if (unlikely(total_len == 0))
        return 0;

    do_wakeup = 0;
    ret = 0;
    mutex_lock(&inode->i_mutex);
    pipe = inode->i_pipe;
    for (;;) {
        int bufs = pipe->nrbufs;
        if (bufs) {
            int curbuf = pipe->curbuf;
            struct pipe_buffer *buf = pipe->bufs + curbuf;
            const struct pipe_buf_operations *ops = buf->ops;
            void *addr;
            size_t chars = buf->len;
            int error, atomic;

            if (chars > total_len)
                chars = total_len;

            error = ops->confirm(pipe, buf);
            if (error) {
                if (!ret)
                    ret = error;
                break;
            }

            atomic = !iov_fault_in_pages_write(iov, chars);
redo:
            addr = ops->map(pipe, buf, atomic);
            error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
            ops->unmap(pipe, buf, addr);
            if (unlikely(error)) {
                /*
                 * Just retry with the slow path if we failed.
                 */
                if (atomic) {
                    atomic = 0;
                    goto redo;
                }
                if (!ret)
                    ret = error;
                break;
            }
            ret += chars;
            buf->offset += chars;
            buf->len -= chars;
            if (!buf->len) {
                buf->ops = NULL;
                ops->release(pipe, buf);
                curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
                pipe->curbuf = curbuf;
                pipe->nrbufs = --bufs;
                do_wakeup = 1;
            }
            total_len -= chars;
            if (!total_len)
                break;	/* common path: read succeeded */
        }
        if (bufs)	/* More to do? */
            continue;
        if (!pipe->writers)
            break;
        if (!pipe->waiting_writers) {
            /* syscall merging: Usually we must not sleep
             * if O_NONBLOCK is set, or if we got some data.
             * But if a writer sleeps in kernel space, then
             * we can wait for that data without violating POSIX.
             */
            if (ret)
                break;
            if (filp->f_flags & O_NONBLOCK) {
                ret = -EAGAIN;
                break;
            }
        }
        if (signal_pending(current)) {
            if (!ret)
                ret = -ERESTARTSYS;
            break;
        }
        if (do_wakeup) {
            wake_up_interruptible_sync(&pipe->wait);
            kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        pipe_wait(pipe);
    }
    mutex_unlock(&inode->i_mutex);

    /* Signal writers asynchronously that there is more room. */
    if (do_wakeup) {
        wake_up_interruptible_sync(&pipe->wait);
        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
    }
    if (ret > 0)
        file_accessed(filp);
    return ret;
}
Code example #19
File: msg.c Project: 274914765/C
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
        size_t msgsz, long msgtyp, int msgflg)
{
    struct msg_queue *msq;
    struct msg_msg *msg;
    int mode;
    struct ipc_namespace *ns;

    if (msqid < 0 || (long) msgsz < 0)
        return -EINVAL;
    mode = convert_mode(&msgtyp, msgflg);
    ns = current->nsproxy->ipc_ns;

    msq = msg_lock_check(ns, msqid);
    if (IS_ERR(msq))
        return PTR_ERR(msq);

    for (;;) {
        struct msg_receiver msr_d;
        struct list_head *tmp;

        msg = ERR_PTR(-EACCES);
        if (ipcperms(&msq->q_perm, S_IRUGO))
            goto out_unlock;

        msg = ERR_PTR(-EAGAIN);
        tmp = msq->q_messages.next;
        while (tmp != &msq->q_messages) {
            struct msg_msg *walk_msg;

            walk_msg = list_entry(tmp, struct msg_msg, m_list);
            if (testmsg(walk_msg, msgtyp, mode) &&
                !security_msg_queue_msgrcv(msq, walk_msg, current,
                               msgtyp, mode)) {

                msg = walk_msg;
                if (mode == SEARCH_LESSEQUAL &&
                        walk_msg->m_type != 1) {
                    msg = walk_msg;
                    msgtyp = walk_msg->m_type - 1;
                } else {
                    msg = walk_msg;
                    break;
                }
            }
            tmp = tmp->next;
        }
        if (!IS_ERR(msg)) {
            /*
             * Found a suitable message.
             * Unlink it from the queue.
             */
            if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
                msg = ERR_PTR(-E2BIG);
                goto out_unlock;
            }
            list_del(&msg->m_list);
            msq->q_qnum--;
            msq->q_rtime = get_seconds();
            msq->q_lrpid = task_tgid_vnr(current);
            msq->q_cbytes -= msg->m_ts;
            atomic_sub(msg->m_ts, &ns->msg_bytes);
            atomic_dec(&ns->msg_hdrs);
            ss_wakeup(&msq->q_senders, 0);
            msg_unlock(msq);
            break;
        }
        /* No message waiting. Wait for a message */
        if (msgflg & IPC_NOWAIT) {
            msg = ERR_PTR(-ENOMSG);
            goto out_unlock;
        }
        list_add_tail(&msr_d.r_list, &msq->q_receivers);
        msr_d.r_tsk = current;
        msr_d.r_msgtype = msgtyp;
        msr_d.r_mode = mode;
        if (msgflg & MSG_NOERROR)
            msr_d.r_maxsize = INT_MAX;
        else
            msr_d.r_maxsize = msgsz;
        msr_d.r_msg = ERR_PTR(-EAGAIN);
        current->state = TASK_INTERRUPTIBLE;
        msg_unlock(msq);

        schedule();

        /* Lockless receive, part 1:
         * Disable preemption.  We don't hold a reference to the queue
         * and getting a reference would defeat the idea of a lockless
         * operation, thus the code relies on rcu to guarantee the
         * existence of msq:
         * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
         * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
         * rcu_read_lock() prevents preemption between reading r_msg
         * and the spin_lock() inside ipc_lock_by_ptr().
         */
        rcu_read_lock();

        /* Lockless receive, part 2:
         * Wait until pipelined_send or expunge_all are outside of
         * wake_up_process(). There is a race with exit(), see
         * ipc/mqueue.c for the details.
         */
        msg = (struct msg_msg*)msr_d.r_msg;
        while (msg == NULL) {
            cpu_relax();
            msg = (struct msg_msg *)msr_d.r_msg;
        }

        /* Lockless receive, part 3:
         * If there is a message or an error then accept it without
         * locking.
         */
        if (msg != ERR_PTR(-EAGAIN)) {
            rcu_read_unlock();
            break;
        }

        /* Lockless receive, part 3:
         * Acquire the queue spinlock.
         */
        ipc_lock_by_ptr(&msq->q_perm);
        rcu_read_unlock();

        /* Lockless receive, part 4:
         * Repeat test after acquiring the spinlock.
         */
        msg = (struct msg_msg*)msr_d.r_msg;
        if (msg != ERR_PTR(-EAGAIN))
            goto out_unlock;

        list_del(&msr_d.r_list);
        if (signal_pending(current)) {
            msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
            msg_unlock(msq);
            break;
        }
    }
    if (IS_ERR(msg))
        return PTR_ERR(msg);

    msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
    *pmtype = msg->m_type;
    if (store_msg(mtext, msg, msgsz))
        msgsz = -EFAULT;

    free_msg(msg);

    return msgsz;
}
Code example #20
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup_cgroup;
 	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->bts = NULL;

	p->stack_start = stack_start;

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * first copied - so re-copy it here, then check the child's CPU to
	 * ensure it is on a valid CPU (and if not, just force it back to the
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		list_add_tail(&p->sibling, &p->real_parent->children);
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			tty_kref_put(p->signal->tty);
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		__cleanup_signal(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
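The cleanup tail above is the kernel's standard goto-ladder unwind: each resource acquired on the way down gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal sketch of the same shape (not from fork.c; struct foo and foo_register() are hypothetical):

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/errno.h>

struct foo {
	void *buf;
};

static int foo_register(struct foo *f);	/* hypothetical step that can fail */

static struct foo *foo_create(size_t bufsize)
{
	struct foo *f;
	int retval = -ENOMEM;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto out;
	f->buf = kmalloc(bufsize, GFP_KERNEL);
	if (!f->buf)
		goto free_foo;
	retval = foo_register(f);
	if (retval)
		goto free_buf;
	return f;

free_buf:
	kfree(f->buf);
free_foo:
	kfree(f);
out:
	return ERR_PTR(retval);
}

Note how the bad_fork_cleanup_* labels in copy_process() are entered from progressively later failure points, so each label only frees what was already set up.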
コード例 #21
0
ファイル: adutux.c プロジェクト: CSCLOG/beaglebone
static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
			loff_t *ppos)
{
	struct adu_device *dev;
	size_t bytes_read = 0;
	size_t bytes_to_read = count;
	int i;
	int retval = 0;
	int timeout = 0;
	int should_submit = 0;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	dbg(2," %s : enter, count = %Zd, file=%p", __func__, count, file);

	dev = file->private_data;
	dbg(2," %s : dev=%p", __func__, dev);

	if (mutex_lock_interruptible(&dev->mtx))
		return -ERESTARTSYS;

	/* verify that the device wasn't unplugged */
	if (dev->udev == NULL) {
		retval = -ENODEV;
		printk(KERN_ERR "adutux: No device or device unplugged %d\n",
		       retval);
		goto exit;
	}

	/* verify that some data was requested */
	if (count == 0) {
		dbg(1," %s : read request of 0 bytes", __func__);
		goto exit;
	}

	timeout = COMMAND_TIMEOUT;
	dbg(2," %s : about to start looping", __func__);
	while (bytes_to_read) {
		int data_in_secondary = dev->secondary_tail - dev->secondary_head;
		dbg(2," %s : while, data_in_secondary=%d, status=%d",
		    __func__, data_in_secondary,
		    dev->interrupt_in_urb->status);

		if (data_in_secondary) {
			/* drain secondary buffer */
			int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary;
			i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount);
			if (i) {
				/* partial copy: report what was read so far, if anything */
				retval = bytes_read ? bytes_read : -EFAULT;
				goto exit;
			}
			dev->secondary_head += amount;
			bytes_read += amount;
			bytes_to_read -= amount;
		} else {
			/* we check the primary buffer */
			spin_lock_irqsave (&dev->buflock, flags);
			if (dev->read_buffer_length) {
				/* take the filled primary buffer by swapping it with the secondary */
				char *tmp;
				dbg(2," %s : swap, read_buffer_length = %d",
				    __func__, dev->read_buffer_length);
				tmp = dev->read_buffer_secondary;
				dev->read_buffer_secondary = dev->read_buffer_primary;
				dev->read_buffer_primary = tmp;
				dev->secondary_head = 0;
				dev->secondary_tail = dev->read_buffer_length;
				dev->read_buffer_length = 0;
				spin_unlock_irqrestore(&dev->buflock, flags);
				/* we have a free buffer so use it */
				should_submit = 1;
			} else {
				/* even the primary was empty - we may need to do IO */
				if (!dev->read_urb_finished) {
					/* somebody is doing IO */
					spin_unlock_irqrestore(&dev->buflock, flags);
					dbg(2," %s : submitted already", __func__);
				} else {
					/* we must initiate input */
					dbg(2," %s : initiate input", __func__);
					dev->read_urb_finished = 0;
					spin_unlock_irqrestore(&dev->buflock, flags);

					usb_fill_int_urb(dev->interrupt_in_urb,dev->udev,
							 usb_rcvintpipe(dev->udev,
							 		dev->interrupt_in_endpoint->bEndpointAddress),
							 dev->interrupt_in_buffer,
							 le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
							 adu_interrupt_in_callback,
							 dev,
							 dev->interrupt_in_endpoint->bInterval);
					retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
					if (retval) {
						dev->read_urb_finished = 1;
						if (retval == -ENOMEM) {
							retval = bytes_read ? bytes_read : -ENOMEM;
						}
						dbg(2," %s : submit failed", __func__);
						goto exit;
					}
				}

				/* we wait for I/O to complete */
				set_current_state(TASK_INTERRUPTIBLE);
				add_wait_queue(&dev->read_wait, &wait);
				spin_lock_irqsave(&dev->buflock, flags);
				if (!dev->read_urb_finished) {
					spin_unlock_irqrestore(&dev->buflock, flags);
					timeout = schedule_timeout(COMMAND_TIMEOUT);
				} else {
					spin_unlock_irqrestore(&dev->buflock, flags);
					set_current_state(TASK_RUNNING);
				}
				remove_wait_queue(&dev->read_wait, &wait);

				if (timeout <= 0) {
					dbg(2," %s : timeout", __func__);
					retval = bytes_read ? bytes_read : -ETIMEDOUT;
					goto exit;
				}

				if (signal_pending(current)) {
					dbg(2," %s : signal pending", __func__);
					retval = bytes_read ? bytes_read : -EINTR;
					goto exit;
				}
			}
		}
	}

	retval = bytes_read;
	/* if the primary buffer is empty then use it */
	spin_lock_irqsave(&dev->buflock, flags);
	if (should_submit && dev->read_urb_finished) {
		dev->read_urb_finished = 0;
		spin_unlock_irqrestore(&dev->buflock, flags);
		usb_fill_int_urb(dev->interrupt_in_urb,dev->udev,
				 usb_rcvintpipe(dev->udev,
				 		dev->interrupt_in_endpoint->bEndpointAddress),
				dev->interrupt_in_buffer,
				le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
				adu_interrupt_in_callback,
				dev,
				dev->interrupt_in_endpoint->bInterval);
		if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL) != 0)
			dev->read_urb_finished = 1;
		/* we ignore failure */
	} else {
		spin_unlock_irqrestore(&dev->buflock, flags);
	}

exit:
	/* unlock the device */
	mutex_unlock(&dev->mtx);

	dbg(2," %s : leave, return value %d", __func__, retval);
	return retval;
}
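adu_read() above open-codes a timed, interruptible sleep: queue the task, mark it TASK_INTERRUPTIBLE, re-check the condition under the lock, then schedule_timeout(). A condensed sketch of that pattern with hypothetical io_wait/io_done names (the waker sets io_done and calls wake_up()):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(io_wait);
static int io_done;

static int wait_for_io(long timeout)
{
	DECLARE_WAITQUEUE(wait, current);

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&io_wait, &wait);
	if (!io_done)
		timeout = schedule_timeout(timeout);	/* returns jiffies left */
	else
		set_current_state(TASK_RUNNING);
	remove_wait_queue(&io_wait, &wait);

	if (io_done)
		return 0;
	if (timeout <= 0)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -EINTR;
	return -EAGAIN;	/* spurious wakeup; the caller loops, as adu_read does */
}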
コード例 #22
0
ファイル: applicom.c プロジェクト: rcplay/snake-os
static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
{
    unsigned int NumCard;	/* Board number 1 -> 8           */
    unsigned int IndexCard;	/* Index board number 0 -> 7     */
    unsigned char TicCard;	/* Board TIC to send             */
    unsigned long flags;	/* Current priority              */
    struct st_ram_io st_loc;
    struct mailbox tmpmailbox;
#ifdef DEBUG
    int c;
#endif
    DECLARE_WAITQUEUE(wait, current);

    if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
        static int warncount = 5;
        if (warncount) {
            printk(KERN_INFO "Hmmm. write() of Applicom card, length %zd != expected %zd\n",
                   count, sizeof(struct st_ram_io) + sizeof(struct mailbox));
            warncount--;
        }
        return -EINVAL;
    }

    if(copy_from_user(&st_loc, buf, sizeof(struct st_ram_io)))
        return -EFAULT;

    if(copy_from_user(&tmpmailbox, &buf[sizeof(struct st_ram_io)],
                      sizeof(struct mailbox)))
        return -EFAULT;

    NumCard = st_loc.num_card;	/* board number to send          */
    TicCard = st_loc.tic_des_from_pc;	/* tic number to send            */
    IndexCard = NumCard - 1;

    if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
        return -EINVAL;

#ifdef DEBUG
    printk("Write to applicom card #%d. struct st_ram_io follows:",
           IndexCard+1);

    for (c = 0; c < sizeof(struct st_ram_io);) {

        printk("\n%5.5X: %2.2X", c, ((unsigned char *) &st_loc)[c]);

        for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
            printk(" %2.2X", ((unsigned char *) &st_loc)[c]);
        }
    }

    printk("\nstruct mailbox follows:");

    for (c = 0; c < sizeof(struct mailbox);) {
        printk("\n%5.5X: %2.2X", c, ((unsigned char *) &tmpmailbox)[c]);

        for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
            printk(" %2.2X", ((unsigned char *) &tmpmailbox)[c]);
        }
    }

    printk("\n");
#endif

    spin_lock_irqsave(&apbs[IndexCard].mutex, flags);

    /* Sanity-check the data-ready byte */
    if(readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) > 2) {
        Dummy = readb(apbs[IndexCard].RamIO + VERS);
        spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
        printk(KERN_WARNING "APPLICOM driver write error board %d, DataFromPcReady = %d\n",
               IndexCard,(int)readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY));
        DeviceErrorCount++;
        return -EIO;
    }

    /* Place ourselves on the wait queue */
    set_current_state(TASK_INTERRUPTIBLE);
    add_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);

    /* Check whether the card is ready for us */
    while (readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) != 0) {
        Dummy = readb(apbs[IndexCard].RamIO + VERS);
        /* It's busy. Sleep. */

        spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
        schedule();
        if (signal_pending(current)) {
            remove_wait_queue(&apbs[IndexCard].FlagSleepSend,
                              &wait);
            return -EINTR;
        }
        spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
        set_current_state(TASK_INTERRUPTIBLE);
    }

    /* We may not have actually slept */
    set_current_state(TASK_RUNNING);
    remove_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);

    writeb(1, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);

    /* Which is best - lock down the pages with rawio and then
       copy directly, or use bounce buffers? For now we do the latter
       because it works with 2.2 still */
    {
        unsigned char *from = (unsigned char *) &tmpmailbox;
        void __iomem *to = apbs[IndexCard].RamIO + RAM_FROM_PC;
        int c;

        for (c = 0; c < sizeof(struct mailbox); c++)
            writeb(*(from++), to++);
    }

    writeb(0x20, apbs[IndexCard].RamIO + TIC_OWNER_FROM_PC);
    writeb(0xff, apbs[IndexCard].RamIO + NUMCARD_OWNER_FROM_PC);
    writeb(TicCard, apbs[IndexCard].RamIO + TIC_DES_FROM_PC);
    writeb(NumCard, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
    writeb(2, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
    writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
    Dummy = readb(apbs[IndexCard].RamIO + VERS);
    spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
    return 0;
}
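ac_write() reaches the board through ioremapped RAM: single-byte writeb() stores, then a readb() of VERS so the posted writes are flushed before the spinlock is dropped. A minimal sketch of that MMIO mailbox handshake (DATA_READY_OFF is a hypothetical register offset, not from the driver):

#include <linux/io.h>
#include <linux/types.h>

#define DATA_READY_OFF	0x100	/* hypothetical doorbell offset */

static void push_mailbox(void __iomem *ram, const u8 *msg, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		writeb(msg[i], ram + i);	/* byte-wise copy into device RAM */
	writeb(2, ram + DATA_READY_OFF);	/* tell the card the block is ready */
	(void)readb(ram + DATA_READY_OFF);	/* read back to flush posted writes */
}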
コード例 #23
0
ファイル: sunmouse.c プロジェクト: dmgerman/original
static ssize_t
sun_mouse_read(struct file *file, char *buffer,
	       size_t count, loff_t *ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	if (queue_empty ()) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;
		add_wait_queue (&sunmouse.proc_list, &wait);
repeat:
		set_current_state(TASK_INTERRUPTIBLE);
		if (queue_empty() && !signal_pending(current)) {
			schedule();
			goto repeat;
		}
		current->state = TASK_RUNNING;
		remove_wait_queue (&sunmouse.proc_list, &wait);
	}
	if (gen_events) {
		char *p = buffer, *end = buffer+count;
		
		spin_lock_irqsave(&sunmouse.lock, flags);
		while (p < end && !queue_empty ()){
			Firm_event this_event;

			get_from_queue(&this_event);
			spin_unlock_irqrestore(&sunmouse.lock, flags);

#ifdef CONFIG_SPARC32_COMPAT
			if (current->thread.flags & SPARC_FLAG_32BIT) {
				if ((end - p) <
				    ((sizeof(Firm_event) - sizeof(struct timeval) +
				      (sizeof(u32) * 2))))
					break;
				if (copy_to_user((Firm_event *)p, &this_event,
						 sizeof(Firm_event)-sizeof(struct timeval)))
					return -EFAULT;
				p += sizeof(Firm_event)-sizeof(struct timeval);
				if (__put_user(this_event.time.tv_sec, (u32 *)p))
					return -EFAULT;
				p += sizeof(u32);
				if (__put_user(this_event.time.tv_usec, (u32 *)p))
					return -EFAULT;
				p += sizeof(u32);
			} else
#endif	
			{	
				if ((end - p) < sizeof(Firm_event))
					break;
				if (copy_to_user((Firm_event *)p, &this_event,
				     		 sizeof(Firm_event)))
					return -EFAULT;
				p += sizeof (Firm_event);
			}
			spin_lock_irqsave(&sunmouse.lock, flags);
		}
		spin_unlock_irqrestore(&sunmouse.lock, flags);
		file->f_dentry->d_inode->i_atime = CURRENT_TIME;
		return p-buffer;
	} else {
		int c, limit = 3;

		if (count < limit)
			limit = count;
		for (c = 0; c < limit; c++) {
			unsigned char val;
			int empty = 0;

			spin_lock_irqsave(&sunmouse.lock, flags);
			if (queue_empty()) {
				empty = 1;
				val = 0;
			} else {
				val = sunmouse.queue.stream[sunmouse.tail];
				sunmouse.tail = (sunmouse.tail + 1) % STREAM_SIZE;
			}
			spin_unlock_irqrestore(&sunmouse.lock, flags);

			if (empty)
				break;

			put_user(val, buffer);
			buffer++;
		}
		while (c < count) {
			if (c >= 5)
				break;
			put_user(0, buffer);
			buffer++;
			c++;
		}
		file->f_dentry->d_inode->i_atime = CURRENT_TIME;
		return c;
	}
	/* Only called if nothing was sent */
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
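The repeat: loop at the top of sun_mouse_read() (add to the queue, set TASK_INTERRUPTIBLE, re-check, schedule) is essentially what the wait_event_interruptible() macro wraps. A self-contained sketch of the modern spelling, with hypothetical ev_wait/ev_count names:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(ev_wait);
static int ev_count;	/* a producer increments this and calls wake_up_interruptible() */

static int wait_for_event(void)
{
	/* sleeps until the condition holds or a signal arrives */
	if (wait_event_interruptible(ev_wait, ev_count > 0))
		return -ERESTARTSYS;
	return 0;
}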
コード例 #24
0
ファイル: applicom.c プロジェクト: rcplay/snake-os
static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_t *ptr)
{
    unsigned long flags;
    unsigned int i;
    unsigned char tmp;
    int ret = 0;
    DECLARE_WAITQUEUE(wait, current);
#ifdef DEBUG
    int loopcount=0;
#endif
    /* No need to ratelimit this. Only root can trigger it anyway */
    if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
        printk( KERN_WARNING "Hmmm. read() of Applicom card, length %zd != expected %zd\n",
                count,sizeof(struct st_ram_io) + sizeof(struct mailbox));
        return -EINVAL;
    }

    while(1) {
        /* Stick ourselves on the wait queue */
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&FlagSleepRec, &wait);

        /* Scan each board, looking for one which has a packet for us */
        for (i=0; i < MAX_BOARD; i++) {
            if (!apbs[i].RamIO)
                continue;
            spin_lock_irqsave(&apbs[i].mutex, flags);

            tmp = readb(apbs[i].RamIO + DATA_TO_PC_READY);

            if (tmp == 2) {
                struct st_ram_io st_loc;
                struct mailbox mailbox;

                /* Got a packet for us */
                ret = do_ac_read(i, buf, &st_loc, &mailbox);
                spin_unlock_irqrestore(&apbs[i].mutex, flags);
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&FlagSleepRec, &wait);

                if (copy_to_user(buf, &st_loc, sizeof(st_loc)))
                    return -EFAULT;
                if (copy_to_user(buf + sizeof(st_loc), &mailbox, sizeof(mailbox)))
                    return -EFAULT;
                return tmp;
            }

            if (tmp > 2) {
                /* Got an error */
                Dummy = readb(apbs[i].RamIO + VERS);

                spin_unlock_irqrestore(&apbs[i].mutex, flags);
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&FlagSleepRec, &wait);

                printk(KERN_WARNING "APPLICOM driver read error board %d, DataToPcReady = %d\n",
                       i,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY));
                DeviceErrorCount++;
                return -EIO;
            }

            /* Nothing for us. Try the next board */
            Dummy = readb(apbs[i].RamIO + VERS);
            spin_unlock_irqrestore(&apbs[i].mutex, flags);

        } /* per board */

        /* OK - No boards had data for us. Sleep now */

        schedule();
        remove_wait_queue(&FlagSleepRec, &wait);

        if (signal_pending(current))
            return -EINTR;

#ifdef DEBUG
        if (loopcount++ > 2) {
            printk(KERN_DEBUG "Looping in ac_read. loopcount %d\n", loopcount);
        }
#endif
    }
}
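Note the ordering in ac_read(): it puts itself on FlagSleepRec and sets TASK_INTERRUPTIBLE before scanning the boards, so an interrupt that lands a packet mid-scan still wakes the task instead of being lost. A condensed sketch of that queue-then-check discipline (packet_ready() is a hypothetical predicate):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(pkt_wait);

static int packet_ready(void);	/* hypothetical: true when data is buffered */

static int wait_for_packet(void)
{
	DECLARE_WAITQUEUE(wait, current);

	for (;;) {
		/* queue first, then check: a wake_up() between the check and
		 * schedule() just sets us back to TASK_RUNNING, so the event
		 * cannot be lost */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pkt_wait, &wait);
		if (packet_ready()) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&pkt_wait, &wait);
			return 0;
		}
		schedule();
		remove_wait_queue(&pkt_wait, &wait);
		if (signal_pending(current))
			return -EINTR;
	}
}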
コード例 #25
0
ファイル: sock.c プロジェクト: xricson/knoppix
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock, int *errcode)
{
	struct sk_buff *skb;
	unsigned int gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, sk->sk_allocation);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
コード例 #26
0
/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int rets;
	int err;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb) {
		cb = cl->read_cb;
		/* read what is left */
		if (cb->buf_idx > *offset)
			goto copy_buffer;
		/* offset is beyond buf_idx; we have no more data, return 0 */
		if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
			rets = 0;
			goto free;
		}
		/* Offset needs to be cleared for contiguous reads */
		if (cb->buf_idx == 0 && *offset > 0) {
			dev_dbg(&dev->pdev->dev, "idx = 0 offset = %lld\n", (unsigned long long)*offset);
			*offset = 0;
		}
	} else if (*offset > 0) {
		dev_dbg(&dev->pdev->dev, "offset = %lld\n", (unsigned long long)*offset);
		*offset = 0;
	}

	err = mei_cl_read_start(cl, length);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
			!waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
				MEI_READ_COMPLETE == cl->reading_state ||
				mei_cl_is_transitioning(cl))) {

			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (mei_cl_is_transitioning(cl)) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "buf.size = %d buf.idx= %ld\n",
	    cb->response_buffer.size, cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		dev_err(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = mei_cl_find_read_cb(cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
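The copy_buffer tail of mei_read() implements resumable partial reads: the copy length is clamped to what lies past *offset, and *offset advances so the next read() continues from the same completed buffer. A sketch of just that bookkeeping, assuming the caller has already checked *offset <= buf_idx as mei_read() does:

#include <linux/uaccess.h>
#include <linux/kernel.h>

static ssize_t copy_tail(char __user *ubuf, size_t length,
			 const char *buf, size_t buf_idx, loff_t *offset)
{
	size_t len = min_t(size_t, length, buf_idx - *offset);

	if (copy_to_user(ubuf, buf + *offset, len))
		return -EFAULT;
	*offset += len;	/* a later read() resumes here */
	return len;
}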
コード例 #27
0
ファイル: transport.c プロジェクト: cilynx/dd-wrt
int
SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int long_op)
{
	int rc = 0;
	unsigned int receive_len;
	long timeout;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1,("Null smb session"));
		return -EIO;
	}
	if(ses->server == NULL) {
		cERROR(1,("Null tcp session"));
		return -EIO;
	}

	/* Ensure that we do not send more than 50 overlapping requests 
	   to the same server. We may make this configurable later or
	   use ses->maxReq */
	if(long_op == -1) {
		/* oplock breaks must not be held up */
		atomic_inc(&ses->server->inFlight);
	} else {
		spin_lock(&GlobalMid_Lock); 
		while(1) {        
			if(atomic_read(&ses->server->inFlight) >= CIFS_MAX_REQ){
				spin_unlock(&GlobalMid_Lock);
				wait_event(ses->server->request_q,
					atomic_read(&ses->server->inFlight)
					 < CIFS_MAX_REQ);
				spin_lock(&GlobalMid_Lock);
			} else {
				if(ses->server->tcpStatus == CifsExiting) {
					spin_unlock(&GlobalMid_Lock);
					return -ENOENT;
				}

			/* cannot count locking commands against the total since
			   they are allowed to block on the server */
					
				if(long_op < 3) {
				/* update # of requests on the wire to server */
					atomic_inc(&ses->server->inFlight);
				}
				spin_unlock(&GlobalMid_Lock);
				break;
			}
		}
	}
	/* make sure that we sign in the same order that we send on this socket 
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	down(&ses->server->tcpSem); 

	if (ses->server->tcpStatus == CifsExiting) {
		rc = -ENOENT;
		goto out_unlock;
	} else if (ses->server->tcpStatus == CifsNeedReconnect) {
		cFYI(1,("tcp session dead - return to caller to retry"));
		rc = -EAGAIN;
		goto out_unlock;
	} else if (ses->status != CifsGood) {
		/* check if SMB session is bad because we are setting it up */
		if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && 
			(in_buf->Command != SMB_COM_NEGOTIATE)) {
			rc = -EAGAIN;
			goto out_unlock;
		} /* else ok - we are setting up session */
	}
	midQ = AllocMidQEntry(in_buf, ses);
	if (midQ == NULL) {
		up(&ses->server->tcpSem);
		/* If not lock req, update # of requests on wire to server */
		if(long_op < 3) {
			atomic_dec(&ses->server->inFlight); 
			wake_up(&ses->server->request_q);
		}
		return -ENOMEM;
	}

	if (in_buf->smb_buf_length > CIFS_MAX_MSGSIZE + MAX_CIFS_HDR_SIZE - 4) {
		up(&ses->server->tcpSem);
		cERROR(1,
		       ("Illegal length, greater than maximum frame, %d ",
			in_buf->smb_buf_length));
		DeleteMidQEntry(midQ);
		/* If not lock req, update # of requests on wire to server */
		if(long_op < 3) {
			atomic_dec(&ses->server->inFlight); 
			wake_up(&ses->server->request_q);
		}
		return -EIO;
	}

	if (in_buf->smb_buf_length > 12)
		in_buf->Flags2 = cpu_to_le16(in_buf->Flags2);
	
	rc = cifs_sign_smb(in_buf, ses, &midQ->sequence_number);

	midQ->midState = MID_REQUEST_SUBMITTED;
	rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
		      (struct sockaddr *) &(ses->server->addr.sockAddr));
	if(rc < 0) {
		DeleteMidQEntry(midQ);
		up(&ses->server->tcpSem);
		/* If not lock req, update # of requests on wire to server */
		if(long_op < 3) {
			atomic_dec(&ses->server->inFlight); 
			wake_up(&ses->server->request_q);
		}
		return rc;
	} else
		up(&ses->server->tcpSem);
	if (long_op == -1)
		goto cifs_no_response_exit;
	else if (long_op == 2) /* writes past end of file can take looooong time */
		timeout = 300 * HZ;
	else if (long_op == 1)
		timeout = 45 * HZ; /* should be greater than the
			server's oplock break timeout (about 43 seconds) */
	else if (long_op > 2) {
		timeout = MAX_SCHEDULE_TIMEOUT;
	} else
		timeout = 15 * HZ;
	/* wait for 15 seconds or until woken up due to response arriving or 
	   due to last connection to this server being unmounted */
	if (signal_pending(current)) {
		/* if a signal is pending, do not hold up the user for the full smb
		   timeout, but still give the response a chance to complete */
		if(midQ->midState & MID_REQUEST_SUBMITTED) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			timeout = sleep_on_timeout(&ses->server->response_q,2 * HZ);
		}
	} else { /* using normal timeout */
		/* timeout = wait_event_interruptible_timeout(ses->server->response_q,
			(midQ->midState & MID_RESPONSE_RECEIVED) || 
			((ses->server->tcpStatus != CifsGood) &&
			 (ses->server->tcpStatus != CifsNew)),
			timeout); */ 
		/* Cannot allow user interrupts - wreaks havoc with performance */
		if(midQ->midState & MID_REQUEST_SUBMITTED) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			timeout = sleep_on_timeout(&ses->server->response_q,timeout);
		}
	}
    
	spin_lock(&GlobalMid_Lock);
	if (midQ->resp_buf) {
		spin_unlock(&GlobalMid_Lock);
		receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	} else {
		cERROR(1,("No response buffer"));
		if(midQ->midState == MID_REQUEST_SUBMITTED) {
			if(ses->server->tcpStatus == CifsExiting)
				rc = -EHOSTDOWN;
			else {
				ses->server->tcpStatus = CifsNeedReconnect;
				midQ->midState = MID_RETRY_NEEDED;
			}
		}

		if (rc != -EHOSTDOWN) {
			if(midQ->midState == MID_RETRY_NEEDED) {
				rc = -EAGAIN;
				cFYI(1,("marking request for retry"));
			} else {
				rc = -EIO;
			}
		}
		spin_unlock(&GlobalMid_Lock);
		DeleteMidQEntry(midQ);
		/* If not lock req, update # of requests on wire to server */
		if(long_op < 3) {
			atomic_dec(&ses->server->inFlight); 
			wake_up(&ses->server->request_q);
		}
		return rc;
	}
  
	if (receive_len > CIFS_MAX_MSGSIZE + MAX_CIFS_HDR_SIZE) {
		cERROR(1,
		       ("Frame too large received.  Length: %d  Xid: %d",
			receive_len, xid));
		rc = -EIO;
	} else {		/* rcvd frame is ok */

		if (midQ->resp_buf && out_buf
		    && (midQ->midState == MID_RESPONSE_RECEIVED)) {
			memcpy(out_buf, midQ->resp_buf,
			       receive_len +
			       4 /* include 4 byte RFC1001 header */ );

			dump_smb(out_buf, 92);
			/* convert the length into a more usable form */
			out_buf->smb_buf_length =
			    be32_to_cpu(out_buf->smb_buf_length);
			if((out_buf->smb_buf_length > 24) &&
			   (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))) {
				rc = cifs_verify_signature(out_buf, ses->mac_signing_key,midQ->sequence_number); /* BB fix BB */
				if(rc)
					cFYI(1,("Unexpected signature received from server"));
			}

			if (out_buf->smb_buf_length > 12)
				out_buf->Flags2 = le16_to_cpu(out_buf->Flags2);
			if (out_buf->smb_buf_length > 28)
				out_buf->Pid = le16_to_cpu(out_buf->Pid);
			if (out_buf->smb_buf_length > 28)
				out_buf->PidHigh =
				    le16_to_cpu(out_buf->PidHigh);

			*pbytes_returned = out_buf->smb_buf_length;

			/* BB special case reconnect tid and reconnect uid here? */
			rc = map_smb_to_linux_error(out_buf);

			/* convert ByteCount if necessary */
			if (receive_len >=
			    sizeof (struct smb_hdr) -
			    4 /* do not count RFC1001 header */  +
			    (2 * out_buf->WordCount) + 2 /* bcc */ )
				BCC(out_buf) = le16_to_cpu(BCC(out_buf));
		} else {
			rc = -EIO;
			cFYI(1,("Bad MID state? "));
		}
	}
cifs_no_response_exit:
	DeleteMidQEntry(midQ);

	if(long_op < 3) {
		atomic_dec(&ses->server->inFlight); 
		wake_up(&ses->server->request_q);
	}

	return rc;

out_unlock:
	up(&ses->server->tcpSem);
	/* If not lock req, update # of requests on wire to server */
	if(long_op < 3) {
		atomic_dec(&ses->server->inFlight); 
		wake_up(&ses->server->request_q);
	}

	return rc;
}
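SendReceive() rations the wire by hand: an atomic in-flight counter checked under GlobalMid_Lock, with wait_event() parking callers once 50 requests are outstanding. A counting semaphore expresses the same cap more compactly; a sketch (not how cifs actually does it):

#include <linux/semaphore.h>

#define MAX_REQ 50	/* the 50-request cap described in the comment above */

static struct semaphore request_sem;

static void throttle_init(void)
{
	sema_init(&request_sem, MAX_REQ);	/* MAX_REQ slots available */
}

static void request_slot_get(void)
{
	down(&request_sem);	/* sleeps while all slots are in use */
}

static void request_slot_put(void)
{
	up(&request_sem);	/* frees a slot and wakes one waiter */
}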
コード例 #28
0
ファイル: svc.c プロジェクト: GodFox/magx_kernel_xpixl
static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
    int sockaddr_len,int flags)
{
	DEFINE_WAIT(wait);
	struct sock *sk = sock->sk;
	struct sockaddr_atmsvc *addr;
	struct atm_vcc *vcc = ATM_SD(sock);
	int error;

	DPRINTK("svc_connect %p\n",vcc);
	lock_sock(sk);
	if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) {
		error = -EINVAL;
		goto out;
	}

	switch (sock->state) {
	default:
		error = -EINVAL;
		goto out;
	case SS_CONNECTED:
		error = -EISCONN;
		goto out;
	case SS_CONNECTING:
		if (test_bit(ATM_VF_WAITING, &vcc->flags)) {
			error = -EALREADY;
			goto out;
		}
		sock->state = SS_UNCONNECTED;
		if (sk->sk_err) {
			error = -sk->sk_err;
			goto out;
		}
		break;
	case SS_UNCONNECTED:
		addr = (struct sockaddr_atmsvc *) sockaddr;
		if (addr->sas_family != AF_ATMSVC) {
			error = -EAFNOSUPPORT;
			goto out;
		}
		if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
			error = -EBADFD;
			goto out;
		}
		if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
		    vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) {
			error = -EINVAL;
			goto out;
		}
		if (!vcc->qos.txtp.traffic_class &&
		    !vcc->qos.rxtp.traffic_class) {
			error = -EINVAL;
			goto out;
		}
		vcc->remote = *addr;
		set_bit(ATM_VF_WAITING, &vcc->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		sigd_enq(vcc,as_connect,NULL,NULL,&vcc->remote);
		if (flags & O_NONBLOCK) {
			finish_wait(sk->sk_sleep, &wait);
			sock->state = SS_CONNECTING;
			error = -EINPROGRESS;
			goto out;
		}
		error = 0;
		while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
			schedule();
			if (!signal_pending(current)) {
				prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
				continue;
			}
			DPRINTK("*ABORT*\n");
			/*
			 * This is tricky:
			 *   Kernel ---close--> Demon
			 *   Kernel <--close--- Demon
		         * or
			 *   Kernel ---close--> Demon
			 *   Kernel <--error--- Demon
			 * or
			 *   Kernel ---close--> Demon
			 *   Kernel <--okay---- Demon
			 *   Kernel <--close--- Demon
			 */
			sigd_enq(vcc,as_close,NULL,NULL,NULL);
			while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
				prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
				schedule();
			}
			if (!sk->sk_err)
				while (!test_bit(ATM_VF_RELEASED,&vcc->flags)
				    && sigd) {
					prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
					schedule();
				}
			clear_bit(ATM_VF_REGIS,&vcc->flags);
			clear_bit(ATM_VF_RELEASED,&vcc->flags);
			clear_bit(ATM_VF_CLOSE,&vcc->flags);
			    /* we're gone now but may connect later */
			error = -EINTR;
			break;
		}
		finish_wait(sk->sk_sleep, &wait);
		if (error)
			goto out;
		if (!sigd) {
			error = -EUNATCH;
			goto out;
		}
		if (sk->sk_err) {
			error = -sk->sk_err;
			goto out;
		}
	}
/*
 * Not supported yet
 *
 * #ifndef CONFIG_SINGLE_SIGITF
 */
	vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp);
	vcc->qos.txtp.pcr = 0;
	vcc->qos.txtp.min_pcr = 0;
/*
 * #endif
 */
	if (!(error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci)))
		sock->state = SS_CONNECTED;
	else (void) svc_disconnect(vcc);
out:
	release_sock(sk);
	return error;
}
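svc_connect() uses the prepare_to_wait()/finish_wait() helpers rather than the open-coded add_wait_queue() seen in the older examples: prepare_to_wait() queues the task and sets its state in one call, and finish_wait() undoes both. A generic sketch of the idiom (flag_wait and wait_on_flag() are hypothetical):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(flag_wait);

/* Sleep until @bit is cleared in @flags, or a signal arrives. */
static int wait_on_flag(unsigned long *flags, int bit)
{
	DEFINE_WAIT(wait);
	int err = 0;

	while (test_bit(bit, flags)) {
		prepare_to_wait(&flag_wait, &wait, TASK_INTERRUPTIBLE);
		if (test_bit(bit, flags))	/* re-check after queueing */
			schedule();
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}
	finish_wait(&flag_wait, &wait);
	return err;
}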
コード例 #29
0
int tty_port_block_til_ready(struct tty_port *port,
				struct tty_struct *tty, struct file *filp)
{
	int do_clocal = 0, retval;
	unsigned long flags;
	DEFINE_WAIT(wait);
	int cd;

	/* block if port is in the process of being closed */
	if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
		wait_event_interruptible(port->close_wait,
				!(port->flags & ASYNC_CLOSING));
		if (port->flags & ASYNC_HUP_NOTIFY)
			return -EAGAIN;
		else
			return -ERESTARTSYS;
	}

	/* if non-blocking mode is set we can pass directly to open unless
	   the port has just hung up or is in another error state */
	if (tty->flags & (1 << TTY_IO_ERROR)) {
		port->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}
	if (filp->f_flags & O_NONBLOCK) {
		/* Indicate we are open */
		if (tty->termios->c_cflag & CBAUD)
			tty_port_raise_dtr_rts(port);
		port->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	if (C_CLOCAL(tty))
		do_clocal = 1;

	/* Block waiting until we can proceed. We may need to wait for the
	   carrier, but we must also wait for any close that is in progress
	   before the next open may complete */

	retval = 0;

	/* The port lock protects the port counts */
	spin_lock_irqsave(&port->lock, flags);
	if (!tty_hung_up_p(filp))
		port->count--;
	port->blocked_open++;
	spin_unlock_irqrestore(&port->lock, flags);

	while (1) {
		/* Indicate we are open */
		if (tty->termios->c_cflag & CBAUD)
			tty_port_raise_dtr_rts(port);

		prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
		/* Check for a hangup or uninitialised port.
		   Return accordingly */
		if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
			if (port->flags & ASYNC_HUP_NOTIFY)
				retval = -EAGAIN;
			else
				retval = -ERESTARTSYS;
			break;
		}
		/* Probe the carrier. For devices with no carrier detect this
		   will always return true */
		cd = tty_port_carrier_raised(port);
		if (!(port->flags & ASYNC_CLOSING) &&
				(do_clocal || cd))
			break;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&port->open_wait, &wait);

	/* Update counts. A parallel hangup will have set count to zero and
	   we must not mess that up further */
	spin_lock_irqsave(&port->lock, flags);
	if (!tty_hung_up_p(filp))
		port->count++;
	port->blocked_open--;
	if (retval == 0)
		port->flags |= ASYNC_NORMAL_ACTIVE;
	spin_unlock_irqrestore(&port->lock, flags);
	return retval;
}
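tty_port_block_til_ready() exists so individual drivers stop reimplementing this carrier/close dance in every open() method. A sketch of typical usage (struct my_serial and my_serial_startup() are hypothetical driver pieces):

#include <linux/tty.h>

struct my_serial {
	struct tty_port port;
	/* hardware state ... */
};

static int my_serial_startup(struct my_serial *s);	/* hypothetical bring-up */

static int my_serial_open(struct tty_struct *tty, struct file *filp)
{
	struct my_serial *s = tty->driver_data;
	int ret = my_serial_startup(s);

	if (ret)
		return ret;
	/* blocks for carrier, honours O_NONBLOCK and pending closes */
	return tty_port_block_til_ready(&s->port, tty, filp);
}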
コード例 #30
0
static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
		int len, int flags)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	struct task_struct *tsk = current;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	int err;

	if (pn_socket_autobind(sock))
		return -ENOBUFS;
	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	lock_sock(sk);

	switch (sock->state) {
	case SS_UNCONNECTED:
		if (sk->sk_state != TCP_CLOSE) {
			err = -EISCONN;
			goto out;
		}
		break;
	case SS_CONNECTING:
		err = -EALREADY;
		goto out;
	default:
		err = -EISCONN;
		goto out;
	}

	pn->dobject = pn_sockaddr_get_object(spn);
	pn->resource = pn_sockaddr_get_resource(spn);
	sock->state = SS_CONNECTING;

	err = sk->sk_prot->connect(sk, addr, len);
	if (err) {
		sock->state = SS_UNCONNECTED;
		pn->dobject = 0;
		goto out;
	}

	while (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EINPROGRESS;
			goto out;
		}
		if (signal_pending(tsk)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
						TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}

	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
		err = 0;
	else if (sk->sk_state == TCP_CLOSE_WAIT)
		err = -ECONNRESET;
	else
		err = -ECONNREFUSED;
	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
	release_sock(sk);
	return err;
}
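The final state test in pn_socket_connect() uses a common kernel idiom: shifting 1 by the state number turns "state is SYN_RECV or ESTABLISHED" into a single mask test against the TCPF_* flag constants. A tiny standalone sketch of the trick (the S_* states are hypothetical):

#define S_IDLE	0
#define S_WAIT	1
#define S_READY	2

static int is_settled(int state)
{
	/* true when state is S_IDLE or S_READY, with one branch */
	return (1 << state) & ((1 << S_IDLE) | (1 << S_READY));
}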