Example no. 1
0
int read(int fd, void *buf, size_t nbytes)
{
    if (fd < 0 || fd >= NOFILE) {
	errno = EBADF;
	return -1;
    }
    switch (files[fd].type) {
        case FTYPE_SAVEFILE:
	case FTYPE_CONSOLE: {
	    int ret;
            DEFINE_WAIT(w);
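            /* Sleep-and-poll loop: register on console_queue before each
             * poll so a wakeup between xencons_ring_recv() and schedule()
             * cannot be lost; schedule() yields until we are woken. */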
            while(1) {
                add_waiter(w, console_queue);
                ret = xencons_ring_recv(files[fd].cons.dev, buf, nbytes);
                if (ret)
                    break;
                schedule();
            }
            remove_waiter(w);
            return ret;
        }
#ifdef HAVE_LWIP
	case FTYPE_SOCKET:
	    return lwip_read(files[fd].socket.fd, buf, nbytes);
#endif
	case FTYPE_TAP: {
	    ssize_t ret;
	    ret = netfront_receive(files[fd].tap.dev, buf, nbytes);
	    if (ret <= 0) {
		errno = EAGAIN;
		return -1;
	    }
	    return ret;
	}
        case FTYPE_KBD: {
            int ret, n;
            n = nbytes / sizeof(union xenkbd_in_event);
            ret = kbdfront_receive(files[fd].kbd.dev, buf, n);
	    if (ret <= 0) {
		errno = EAGAIN;
		return -1;
	    }
	    return ret * sizeof(union xenkbd_in_event);
        }
        case FTYPE_FB: {
            int ret, n;
            n = nbytes / sizeof(union xenfb_in_event);
            ret = fbfront_receive(files[fd].fb.dev, buf, n);
	    if (ret <= 0) {
		errno = EAGAIN;
		return -1;
	    }
	    return ret * sizeof(union xenfb_in_event);
        }
	case FTYPE_COMPILED_FILE: {
	    int n;
	    if (files[fd].compiled_file.offset >= files[fd].compiled_file.size)
		    n = 0;
	    else
		    n = files[fd].compiled_file.size - files[fd].compiled_file.offset;
	    if (n >= nbytes)
		    n = nbytes;
	    printf("Request %d on %d, get %d\n", nbytes, fd, n);
	    memcpy(buf, files[fd].compiled_file.content + files[fd].compiled_file.offset, n);
	    files[fd].compiled_file.offset += n;
	    return n;
	}
	default:
	    break;
    }
    printk("read(%d): Bad descriptor\n", fd);
    errno = EBADF;
    return -1;
}
Example no. 2
0
static int yurex_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
	struct usb_yurex *dev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	int retval = -ENOMEM;
	int i;
	DEFINE_WAIT(wait);

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		err("Out of memory");
		goto error;
	}
	kref_init(&dev->kref);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->lock);
	init_waitqueue_head(&dev->waitq);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* set up the endpoint information */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
		endpoint = &iface_desc->endpoint[i].desc;

		if (usb_endpoint_is_int_in(endpoint)) {
			dev->int_in_endpointAddr = endpoint->bEndpointAddress;
			break;
		}
	}
	if (!dev->int_in_endpointAddr) {
		retval = -ENODEV;
		err("Could not find endpoints");
		goto error;
	}


	/* allocate control URB */
	dev->cntl_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->cntl_urb) {
		err("Could not allocate control URB");
		goto error;
	}

	/* allocate buffer for control req */
	dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
					   GFP_KERNEL,
					   &dev->cntl_urb->setup_dma);
	if (!dev->cntl_req) {
		err("Could not allocate cntl_req");
		goto error;
	}

	/* allocate buffer for control msg */
	dev->cntl_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
					      GFP_KERNEL,
					      &dev->cntl_urb->transfer_dma);
	if (!dev->cntl_buffer) {
		err("Could not allocate cntl_buffer");
		goto error;
	}

	/* configure control URB */
	dev->cntl_req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS |
				      USB_RECIP_INTERFACE;
	dev->cntl_req->bRequest	= HID_REQ_SET_REPORT;
	dev->cntl_req->wValue	= cpu_to_le16((HID_OUTPUT_REPORT + 1) << 8);
	dev->cntl_req->wIndex	= cpu_to_le16(iface_desc->desc.bInterfaceNumber);
	dev->cntl_req->wLength	= cpu_to_le16(YUREX_BUF_SIZE);

	usb_fill_control_urb(dev->cntl_urb, dev->udev,
			     usb_sndctrlpipe(dev->udev, 0),
			     (void *)dev->cntl_req, dev->cntl_buffer,
			     YUREX_BUF_SIZE, yurex_control_callback, dev);
	dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;


	/* allocate interrupt URB */
	dev->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb) {
		err("Could not allocate URB");
		goto error;
	}

	/* allocate buffer for interrupt in */
	dev->int_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
					GFP_KERNEL, &dev->urb->transfer_dma);
	if (!dev->int_buffer) {
		err("Could not allocate int_buffer");
		goto error;
	}

	/* configure interrupt URB */
	usb_fill_int_urb(dev->urb, dev->udev,
			 usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr),
			 dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
			 dev, 1);
	dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
		retval = -EIO;
		err("Could not submitting URB");
		goto error;
	}

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &yurex_class);
	if (retval) {
		err("Not able to get a minor for this device.");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	dev->bbu = -1;

	dev_info(&interface->dev,
		 "USB YUREX device now attached to Yurex #%d\n",
		 interface->minor);

	return 0;

error:
	if (dev)
		/* this frees allocated memory */
		kref_put(&dev->kref, yurex_delete);
	return retval;
}
Example no. 3
0
/*
 * Perform one request-response transaction to the device.
 */
int
islpci_mgt_transaction(struct net_device *ndev,
		       int operation, unsigned long oid,
		       void *senddata, int sendlen,
		       struct islpci_mgmtframe **recvframe)
{
	islpci_private *priv = netdev_priv(ndev);
	const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
	long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
	int err;
	DEFINE_WAIT(wait);

	*recvframe = NULL;

	if (mutex_lock_interruptible(&priv->mgmt_lock))
		return -ERESTARTSYS;

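	/* Queue ourselves on the management wait queue before transmitting,
	 * so a response arriving right away cannot produce a lost wakeup. */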
	prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
	err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
	if (err)
		goto out;

	err = -ETIMEDOUT;
	while (timeout_left > 0) {
		int timeleft;
		struct islpci_mgmtframe *frame;

		timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
		frame = xchg(&priv->mgmt_received, NULL);
		if (frame) {
			if (frame->header->oid == oid) {
				*recvframe = frame;
				err = 0;
				goto out;
			} else {
				printk(KERN_DEBUG
				       "%s: expecting oid 0x%x, received 0x%x.\n",
				       ndev->name, (unsigned int) oid,
				       frame->header->oid);
				kfree(frame);
				frame = NULL;
			}
		}
		if (timeleft == 0) {
			printk(KERN_DEBUG
				"%s: timeout waiting for mgmt response %lu, "
				"triggering device\n",
				ndev->name, timeout_left);
			islpci_trigger(priv);
		}
		timeout_left += timeleft - wait_cycle_jiffies;
	}
	printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
	       ndev->name);

	/* TODO: we should reset the device here */
 out:
	finish_wait(&priv->mgmt_wqueue, &wait);
	mutex_unlock(&priv->mgmt_lock);
	return err;
}
Example no. 4
0
static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block, int *err)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	struct ivtv_buffer *buf;
	DEFINE_WAIT(wait);

	*err = 0;
	while (1) {
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG) {
			/* Process pending program info updates and pending VBI data */
			ivtv_update_pgm_info(itv);

			if (time_after(jiffies,
				       itv->dualwatch_jiffies +
				       msecs_to_jiffies(1000))) {
				itv->dualwatch_jiffies = jiffies;
				ivtv_dualwatch(itv);
			}

			if (test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
			    !test_bit(IVTV_F_S_APPL_IO, &s_vbi->s_flags)) {
				while ((buf = ivtv_dequeue(s_vbi, &s_vbi->q_full))) {
					/* byteswap and process VBI data */
					ivtv_process_vbi_data(itv, buf, s_vbi->dma_pts, s_vbi->type);
					ivtv_enqueue(s_vbi, buf, &s_vbi->q_free);
				}
			}
			buf = &itv->vbi.sliced_mpeg_buf;
			if (buf->readpos != buf->bytesused) {
				return buf;
			}
		}

		/* do we have leftover data? */
		buf = ivtv_dequeue(s, &s->q_io);
		if (buf)
			return buf;

		/* do we have new data? */
		buf = ivtv_dequeue(s, &s->q_full);
		if (buf) {
			if ((buf->b_flags & IVTV_F_B_NEED_BUF_SWAP) == 0)
				return buf;
			buf->b_flags &= ~IVTV_F_B_NEED_BUF_SWAP;
			if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
				/* byteswap MPG data */
				ivtv_buf_swap(buf);
			else if (s->type != IVTV_DEC_STREAM_TYPE_VBI) {
				/* byteswap and process VBI data */
				ivtv_process_vbi_data(itv, buf, s->dma_pts, s->type);
			}
			return buf;
		}

		/* return if end of stream */
		if (s->type != IVTV_DEC_STREAM_TYPE_VBI && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
			IVTV_DEBUG_INFO("EOS %s\n", s->name);
			return NULL;
		}

		/* return if file was opened with O_NONBLOCK */
		if (non_block) {
			*err = -EAGAIN;
			return NULL;
		}

		/* wait for more data to arrive */
		prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
		/* New buffers might have become available before we were added to the waitqueue */
		if (!s->q_full.buffers)
			schedule();
		finish_wait(&s->waitq, &wait);
		if (signal_pending(current)) {
			/* return if a signal was received */
			IVTV_DEBUG_INFO("User stopped %s\n", s->name);
			*err = -EINTR;
			return NULL;
		}
	}
}
Example no. 5
0
static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t len)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sk_buff *skb = NULL;
	long timeo;
	int flags = msg->msg_flags;
	int err, done;
	
	if (len > 65535)
		return -EMSGSIZE;

	if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
		err = -ENOTCONN;
		goto out;
	}
	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Wait until the pipe gets to enabled state */
disabled:
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out;

		if (sk->sk_state == TCP_CLOSE_WAIT) {
			err = -ECONNRESET;
			goto out;
		}
	}
	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	/* Wait until flow control allows TX */
	done = atomic_read(&pn->tx_credits);
	while (!done) {
		DEFINE_WAIT(wait);

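		/* Give up on timeout or signal; otherwise sleep until
		 * sk_wait_event() sees tx_credits become non-zero. */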
		if (!timeo) {
			err = -EAGAIN;
			goto out;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait(&sk->sk_socket->wait, &wait,
				TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
		finish_wait(&sk->sk_socket->wait, &wait);

		if (sk->sk_state != TCP_ESTABLISHED)
			goto disabled;
	}

	if (!skb) {
		skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
						flags & MSG_DONTWAIT, &err);
		if (skb == NULL)
			goto out;
		skb_reserve(skb, MAX_PHONET_HEADER + 3);

		if (sk->sk_state != TCP_ESTABLISHED ||
		    !atomic_read(&pn->tx_credits))
			goto disabled; /* sock_alloc_send_skb might sleep */
	}

	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err < 0)
		goto out;

	err = pipe_skb_send(sk, skb);
	if (err >= 0)
		err = len; /* success! */
	skb = NULL;
out:
	release_sock(sk);
	kfree_skb(skb);
	return err;
}
Example no. 6
0
static ssize_t
write_rio(struct file *file, const char __user *buffer,
	  size_t count, loff_t * ppos)
{
	DEFINE_WAIT(wait);
	struct rio_usb_data *rio = &rio_instance;

	unsigned long copy_size;
	unsigned long bytes_written = 0;
	unsigned int partial;

	int result = 0;
	int maxretry;
	int errn = 0;
	int intr;

	intr = mutex_lock_interruptible(&(rio->lock));
	if (intr)
		return -EINTR;
	/* Sanity check to make sure rio is connected, powered, etc */
	if (rio == NULL ||
	    rio->present == 0 ||
	    rio->rio_dev == NULL) {
		mutex_unlock(&(rio->lock));
		return -ENODEV;
	}



	do {
		unsigned long thistime;
		char *obuf = rio->obuf;

		thistime = copy_size =
		    (count >= OBUF_SIZE) ? OBUF_SIZE : count;
		if (copy_from_user(rio->obuf, buffer, copy_size)) {
			errn = -EFAULT;
			goto error;
		}
		maxretry = 5;
		while (thistime) {
			if (!rio->rio_dev) {
				errn = -ENODEV;
				goto error;
			}
			if (signal_pending(current)) {
				mutex_unlock(&(rio->lock));
				return bytes_written ? bytes_written : -EINTR;
			}

			result = usb_bulk_msg(rio->rio_dev,
					 usb_sndbulkpipe(rio->rio_dev, 2),
					 obuf, thistime, &partial, 5000);

			dbg("write stats: result:%d thistime:%lu partial:%u",
			     result, thistime, partial);

			if (result == -ETIMEDOUT) {	/* NAK - so hold for a while */
				if (!maxretry--) {
					errn = -ETIME;
					goto error;
				}
				prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
				schedule_timeout(NAK_TIMEOUT);
				finish_wait(&rio->wait_q, &wait);
				continue;
			} else if (!result && partial) {
				obuf += partial;
				thistime -= partial;
			} else
				break;
		}
		if (result) {
			err("Write Whoops - %x", result);
			errn = -EIO;
			goto error;
		}
		bytes_written += copy_size;
		count -= copy_size;
		buffer += copy_size;
	} while (count > 0);

	mutex_unlock(&(rio->lock));

	return bytes_written ? bytes_written : -EIO;

error:
	mutex_unlock(&(rio->lock));
	return errn;
}
Example no. 7
0
/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void journal_commit_transaction(journal_t *journal)
{
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long blocknr;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior journal_flush? */
	if (journal->j_flags & JFS_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

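		/* Queue on j_wait_updates before re-checking t_updates: only
		 * if updates are still outstanding do we drop both locks and
		 * sleep, reacquiring them after the wakeup. */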
		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A journal_get_undo_access()+journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd_slab_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	journal_switch_revoke_table(journal);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * First, drop modified flag: all accesses to the buffers
	 * will be tracked for a new transaction only -bzzz
	 */
	spin_lock(&journal->j_list_lock);
	if (commit_transaction->t_buffers) {
		new_jh = jh = commit_transaction->t_buffers->b_tnext;
		do {
			J_ASSERT_JH(new_jh, new_jh->b_modified == 1 ||
					new_jh->b_modified == 0);
			new_jh->b_modified = 0;
			new_jh = new_jh->b_tnext;
		} while (new_jh != jh);
	}
	spin_unlock(&journal->j_list_lock);

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = 0;
	journal_submit_data_buffers(journal, commit_transaction);

	/*
	 * Wait for all previously submitted IO to complete.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_locked_list) {
		struct buffer_head *bh;

		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			if (unlikely(!buffer_uptodate(bh)))
				err = -EIO;
			spin_lock(&journal->j_list_lock);
		}
		if (!inverted_lock(journal, bh)) {
			put_bh(bh);
			spin_lock(&journal->j_list_lock);
			continue;
		}
		if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
			__journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			journal_remove_journal_head(bh);
			put_bh(bh);
		} else {
			jbd_unlock_bh_state(bh);
		}
		put_bh(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

	if (err)
		__journal_abort_hard(journal);

	journal_write_revoke_records(journal, commit_transaction);

	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * If we found any dirty or locked buffers, then we should have
	 * looped back up to the write_out_data label.  If there weren't
	 * any then journal_clean_data_list should have wiped the list
	 * clean by now, so check that it is in fact empty.
	 */
	J_ASSERT (commit_transaction->t_sync_datalist == NULL);

	jbd_debug (3, "JBD: commit phase 3\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	commit_transaction->t_state = T_COMMIT;

	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it for background writing. */

		if (is_journal_aborted(journal)) {
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				__journal_abort_hard(journal);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			__journal_abort_hard(journal);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO*/

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JFS_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JFS_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += sizeof(journal_block_tag_t);
		space_left -= sizeof(journal_block_tag_t);

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < sizeof(journal_block_tag_t) + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD: commit phase 4\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
                   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 5\n");

	/* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		journal_unfile_buffer(journal, jh);
		journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	jbd_debug(3, "JBD: commit phase 6\n");

	if (journal_write_commit_record(journal, commit_transaction))
		err = -EIO;

	if (err)
		__journal_abort_hard(journal);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd_slab_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd_slab_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			__journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future now that the last use has
		 * been committed.  That's not only a performance gain,
		 * it also stops aliasing problems if the buffer is left
		 * behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh)) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__journal_insert_checkpoint(jh, commit_transaction);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				 /* needs a brelse */
				journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We borrow j_list_lock to protect
	 * journal->j_committing_transaction in __journal_remove_checkpoint.
	 * Really, __journal_remove_checkpoint should be using j_state_lock but
	 * it's a bit hassle to hold that across __journal_remove_checkpoint
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 8\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	spin_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__journal_drop_transaction(journal, commit_transaction);
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	wake_up(&journal->j_wait_done_commit);
}
Example no. 8
0
static int
islpci_reset_if(islpci_private *priv)
{
	long remaining;
	int result = -ETIME;
	int count;

	DEFINE_WAIT(wait);
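	/* Queue on reset_done before triggering the reset so the "reset
	 * complete" interrupt cannot fire before we are waiting for it. */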
	prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE);

	/* now the last step is to reset the interface */
	isl38xx_interface_reset(priv->device_base, priv->device_host_address);
	islpci_set_state(priv, PRV_STATE_PREINIT);

	for (count = 0; count < 2 && result; count++) {
		/* The software reset acknowledge needs about 220 msec here.
		 * Be conservative and wait for up to one second. */

		remaining = schedule_timeout_uninterruptible(HZ);

		if(remaining > 0) {
			result = 0;
			break;
		}

		/* If we're here it's because our IRQ hasn't yet gone through.
		 * Retry a bit more...
		 */
		printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n",
			priv->ndev->name);
	}

	finish_wait(&priv->reset_done, &wait);

	if (result) {
		printk(KERN_ERR "%s: interface reset failure\n", priv->ndev->name);
		return result;
	}

	islpci_set_state(priv, PRV_STATE_INIT);

	/* Now that the device is 100% up, let's allow
	 * for the other interrupts --
	 * NOTE: this is not *yet* true since we've only allowed the
	 * INIT interrupt on the IRQ line. We can perhaps poll
	 * the IRQ line until we know for sure the reset went through */
	isl38xx_enable_common_interrupts(priv->device_base);

	down_write(&priv->mib_sem);
	result = mgt_commit(priv);
	if (result) {
		printk(KERN_ERR "%s: interface reset failure\n", priv->ndev->name);
		up_write(&priv->mib_sem);
		return result;
	}
	up_write(&priv->mib_sem);

	islpci_set_state(priv, PRV_STATE_READY);

	printk(KERN_DEBUG "%s: interface reset complete\n", priv->ndev->name);
	return 0;
}
Example no. 9
0
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t sequence,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_fence_signaled;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = atomic_read(&dev_priv->fence_seq);
	ret = 0;

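	/* Poll the wait condition in a loop: lazy mode sleeps a jiffy per
	 * iteration, non-lazy mode busy-polls and yields every 16th pass. */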
	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, sequence))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}
Example no. 10
0
static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block, int *err)
{
	struct cx18 *cx = s->cx;
	struct cx18_stream *s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
	struct cx18_buffer *buf;
	DEFINE_WAIT(wait);

	*err = 0;
	while (1) {
		if (s->type == CX18_ENC_STREAM_TYPE_MPG) {
			/* Process pending program info updates and pending
			   VBI data */

			if (time_after(jiffies, cx->dualwatch_jiffies + msecs_to_jiffies(1000))) {
				cx->dualwatch_jiffies = jiffies;
				cx18_dualwatch(cx);
			}
			if (test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
			    !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
				while ((buf = cx18_dequeue(s_vbi, &s_vbi->q_full))) {
					/* byteswap and process VBI data */
					cx18_process_vbi_data(cx, buf,
							      s_vbi->type);
					cx18_stream_put_buf_fw(s_vbi, buf);
				}
			}
			buf = &cx->vbi.sliced_mpeg_buf;
			if (buf->readpos != buf->bytesused)
				return buf;
		}

		/* do we have new data? */
		buf = cx18_dequeue(s, &s->q_full);
		if (buf) {
			if (!test_and_clear_bit(CX18_F_B_NEED_BUF_SWAP,
						&buf->b_flags))
				return buf;
			if (s->type == CX18_ENC_STREAM_TYPE_MPG)
				/* byteswap MPG data */
				cx18_buf_swap(buf);
			else {
				/* byteswap and process VBI data */
				cx18_process_vbi_data(cx, buf, s->type);
			}
			return buf;
		}

		/* return if end of stream */
		if (!test_bit(CX18_F_S_STREAMING, &s->s_flags)) {
			CX18_DEBUG_INFO("EOS %s\n", s->name);
			return NULL;
		}

		/* return if file was opened with O_NONBLOCK */
		if (non_block) {
			*err = -EAGAIN;
			return NULL;
		}

		/* wait for more data to arrive */
		prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
		/* New buffers might have become available before we were added
		   to the waitqueue */
		if (!atomic_read(&s->q_full.buffers))
			schedule();
		finish_wait(&s->waitq, &wait);
		if (signal_pending(current)) {
			/* return if a signal was received */
			CX18_DEBUG_INFO("User stopped %s\n", s->name);
			*err = -EINTR;
			return NULL;
		}
	}
}
Example no. 11
0
static void bluecard_write_wakeup(bluecard_info_t *info)
{
	if (!info) {
		BT_ERR("Unknown device");
		return;
	}

	if (!test_bit(XMIT_SENDING_READY, &(info->tx_state)))
		return;

	if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) {
		set_bit(XMIT_WAKEUP, &(info->tx_state));
		return;
	}

	do {
		register unsigned int iobase = info->link.io.BasePort1;
		register unsigned int offset;
		register unsigned char command;
		register unsigned long ready_bit;
		register struct sk_buff *skb;
		register int len;

		clear_bit(XMIT_WAKEUP, &(info->tx_state));

		if (!(info->link.state & DEV_PRESENT))
			return;

		if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) {
			if (!test_bit(XMIT_BUF_TWO_READY, &(info->tx_state)))
				break;
			offset = 0x10;
			command = REG_COMMAND_TX_BUF_TWO;
			ready_bit = XMIT_BUF_TWO_READY;
		} else {
			if (!test_bit(XMIT_BUF_ONE_READY, &(info->tx_state)))
				break;
			offset = 0x00;
			command = REG_COMMAND_TX_BUF_ONE;
			ready_bit = XMIT_BUF_ONE_READY;
		}

		if (!(skb = skb_dequeue(&(info->txq))))
			break;

		if (skb->pkt_type & 0x80) {
			/* Disable RTS */
			info->ctrl_reg |= REG_CONTROL_RTS;
			outb(info->ctrl_reg, iobase + REG_CONTROL);
		}

		/* Activate LED */
		bluecard_enable_activity_led(info);

		/* Send frame */
		len = bluecard_write(iobase, offset, skb->data, skb->len);

		/* Tell the FPGA to send the data */
		outb_p(command, iobase + REG_COMMAND);

		/* Mark the buffer as dirty */
		clear_bit(ready_bit, &(info->tx_state));

		if (skb->pkt_type & 0x80) {
			DECLARE_WAIT_QUEUE_HEAD(wq);
			DEFINE_WAIT(wait);

			unsigned char baud_reg;

			switch (skb->pkt_type) {
			case PKT_BAUD_RATE_460800:
				baud_reg = REG_CONTROL_BAUD_RATE_460800;
				break;
			case PKT_BAUD_RATE_230400:
				baud_reg = REG_CONTROL_BAUD_RATE_230400;
				break;
			case PKT_BAUD_RATE_115200:
				baud_reg = REG_CONTROL_BAUD_RATE_115200;
				break;
			case PKT_BAUD_RATE_57600:
				/* Fall through... */
			default:
				baud_reg = REG_CONTROL_BAUD_RATE_57600;
				break;
			}

			/* Wait until the command reaches the baseband */
			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
			schedule_timeout(HZ/10);
			finish_wait(&wq, &wait);

			/* Set baud on baseband */
			info->ctrl_reg &= ~0x03;
			info->ctrl_reg |= baud_reg;
			outb(info->ctrl_reg, iobase + REG_CONTROL);

			/* Enable RTS */
			info->ctrl_reg &= ~REG_CONTROL_RTS;
			outb(info->ctrl_reg, iobase + REG_CONTROL);

		/* Wait before the next HCI packet can be sent */
			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
			finish_wait(&wq, &wait);
		}

		if (len == skb->len) {
			kfree_skb(skb);
		} else {
			skb_pull(skb, len);
			skb_queue_head(&(info->txq), skb);
		}

		info->hdev->stat.byte_tx += len;

		/* Change buffer */
		change_bit(XMIT_BUFFER_NUMBER, &(info->tx_state));

	} while (test_bit(XMIT_WAKEUP, &(info->tx_state)));

	clear_bit(XMIT_SENDING, &(info->tx_state));
}
Example no. 12
0
#if (DMA_DEBUG_TRACK_RESERVATION)
DMA_Handle_t dma_request_channel_dbg
    (DMA_Device_t dev, const char *fileName, int lineNum)
#else
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
#endif
{
	DMA_Handle_t handle;
	DMA_DeviceAttribute_t *devAttr;
	DMA_Channel_t *channel;
	int controllerIdx;
	int controllerIdx2;
	int channelIdx;

	if (down_interruptible(&gDMA.lock) < 0) {
		return -ERESTARTSYS;
	}

	if ((dev < 0) || (dev >= DMA_NUM_DEVICE_ENTRIES)) {
		handle = -ENODEV;
		goto out;
	}
	devAttr = &DMA_gDeviceAttribute[dev];

#if (DMA_DEBUG_TRACK_RESERVATION)
	{
		char *s;

		s = strrchr(fileName, '/');
		if (s != NULL) {
			fileName = s + 1;
		}
	}
#endif
	if ((devAttr->flags & DMA_DEVICE_FLAG_IN_USE) != 0) {
		/* This device has already been requested and not been freed */

		printk(KERN_ERR "%s: device %s is already requested\n",
		       __func__, devAttr->name);
		handle = -EBUSY;
		goto out;
	}

	if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
		/* This device has a dedicated channel. */

		channel =
		    &gDMA.controller[devAttr->dedicatedController].
		    channel[devAttr->dedicatedChannel];
		if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
			handle = -EBUSY;
			goto out;
		}

		channel->flags |= DMA_CHANNEL_FLAG_IN_USE;
		devAttr->flags |= DMA_DEVICE_FLAG_IN_USE;

#if (DMA_DEBUG_TRACK_RESERVATION)
		channel->fileName = fileName;
		channel->lineNum = lineNum;
#endif
		handle =
		    MAKE_HANDLE(devAttr->dedicatedController,
				devAttr->dedicatedChannel);
		goto out;
	}

	/* This device needs to use one of the shared channels. */

	handle = DMA_INVALID_HANDLE;
	while (handle == DMA_INVALID_HANDLE) {
		/* Scan through the shared channels and see if one is available */

		for (controllerIdx2 = 0; controllerIdx2 < DMA_NUM_CONTROLLERS;
		     controllerIdx2++) {
			/* Check to see if we should try on controller 1 first. */

			controllerIdx = controllerIdx2;
			if ((devAttr->
			     flags & DMA_DEVICE_FLAG_ALLOC_DMA1_FIRST) != 0) {
				controllerIdx = 1 - controllerIdx;
			}

			/* See if the device is available on the controller being tested */

			if ((devAttr->
			     flags & (DMA_DEVICE_FLAG_ON_DMA0 << controllerIdx))
			    != 0) {
				for (channelIdx = 0;
				     channelIdx < DMA_NUM_CHANNELS;
				     channelIdx++) {
					channel =
					    &gDMA.controller[controllerIdx].
					    channel[channelIdx];

					if (((channel->
					      flags &
					      DMA_CHANNEL_FLAG_IS_DEDICATED) ==
					     0)
					    &&
					    ((channel->
					      flags & DMA_CHANNEL_FLAG_IN_USE)
					     == 0)) {
						if (((channel->
						      flags &
						      DMA_CHANNEL_FLAG_LARGE_FIFO)
						     != 0)
						    &&
						    ((devAttr->
						      flags &
						      DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO)
						     == 0)) {
							/* This channel is a large fifo - don't tie it up */
							/* with devices that we don't want using it. */

							continue;
						}

						channel->flags |=
						    DMA_CHANNEL_FLAG_IN_USE;
						channel->devType = dev;
						devAttr->flags |=
						    DMA_DEVICE_FLAG_IN_USE;

#if (DMA_DEBUG_TRACK_RESERVATION)
						channel->fileName = fileName;
						channel->lineNum = lineNum;
#endif
						handle =
						    MAKE_HANDLE(controllerIdx,
								channelIdx);

						/* Now that we've reserved the channel - we can go ahead and configure it */

						if (ConfigChannel(handle) != 0) {
							handle = -EIO;
							printk(KERN_ERR
							       "dma_request_channel: ConfigChannel failed\n");
						}
						goto out;
					}
				}
			}
		}

		/* No channels are currently available. Let's wait for one to free up. */

		{
			DEFINE_WAIT(wait);

			prepare_to_wait(&gDMA.freeChannelQ, &wait,
					TASK_INTERRUPTIBLE);
			up(&gDMA.lock);
			schedule();
			finish_wait(&gDMA.freeChannelQ, &wait);

			if (signal_pending(current)) {
				/* We don't currently hold gDMA.lock, so we return directly */

				return -ERESTARTSYS;
			}
		}

		if (down_interruptible(&gDMA.lock)) {
			return -ERESTARTSYS;
		}
	}

out:
	up(&gDMA.lock);

	return handle;
}
Example no. 13
0
size_t write_citty_buffer(struct buf_struct * cittyBuf,
			  const char *buf, size_t count, short action )
{
	unsigned char *pbuf;
	struct semaphore * lSem;
	int curBufIndex;

	DEFINE_WAIT(wait);

	F_ENTER();

	/* make it a non-blocking write*/
	if (spacefree( cittyBuf ) == 0 )
	{
		printk("\"%s\" warning: Write Buffer overflow.\n", current->comm);
		return -EIO;
	}

	lSem = &(cittyBuf->gSem);

	if (down_interruptible( lSem ))
	{
		printk("\"%s\" Error: Unable to down SEM.\n", current->comm);
		return -ERESTARTSYS;
	}

	/* Make sure there's space to write */
	while (spacefree( cittyBuf ) == 0)   /* full */

	{
		PDEBUG("\"%s\" Going to define wait:", current->comm );


		up( lSem );     /* release the lock */

		//if (filp->f_flags & O_NONBLOCK)
		//	return -EAGAIN;

		PDEBUG("\"%s\" writing: going to sleep", current->comm);
		prepare_to_wait(&cittyBuf->gOutq, &wait, TASK_INTERRUPTIBLE);

		if (spacefree( cittyBuf ) == 0)
		{
			schedule(); /* seems like it is bad: scheduling while atomic */
		}
		finish_wait(&cittyBuf->gOutq, &wait);
		if (signal_pending(current))
		{
			printk("\"%s\" Error: Unable to signal_pending.\n", current->comm);
			return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
		}
		if (down_interruptible( lSem ))
		{
			printk("\"%s\" Error: Unable to down SEM.\n", current->comm);
			return -ERESTARTSYS;
		}
	}

	curBufIndex = cittyBuf->iBufIn++;
	pbuf = cittyBuf->pBuf[ curBufIndex ];

	PDEBUG("\"%s\" Going to check flip", current->comm );
	/*
	 *  Check if it is flipped
	 */
	if ( cittyBuf->iBufIn >= NUM_CITTY_BUF)
	{
		cittyBuf->iBufIn = cittyBuf->iBufIn % NUM_CITTY_BUF;
	}

	/* Check space */
	if (pbuf == NULL)
	{
		printk("warning: Buffer overflowed.\n");
		up( lSem );
		return -EIO;
	}

	/* ok, space is there, accept something */
	/* write only up to the size of the buffer */
	if (count > CITTY_BUF_SIZE)
	{
		count = CITTY_BUF_SIZE;
		printk("warning: Buffer too size to write.\n");
	}

	if ( action == COPY_FROM_USER )
	{
		PDEBUG("write_citty_buffer: going to copy_from_user at buf index %d and count %d", curBufIndex, count);
		if (copy_from_user((pbuf), buf, count ))
		{
			up( lSem );
			return -EFAULT;
		}
	}
	else if ( action == COPY_FROM_CITTY)
	{
		/* it is from the cinet_hard_start_xmit */
		PDEBUG("write_citty_buffer: going to COPY_FROM_CITTY at buf index %d and count %d", curBufIndex, count);
		memcpy(pbuf, buf, count);

	}
	else
	{
		printk("undefined action.\n");
	}

	/* saving datalen */
	cittyBuf->iDatalen[ curBufIndex ] = count;

	up( lSem );

	/* finally, awake any reader */
	wake_up_interruptible(&cittyBuf->gInq);  /* blocked in read() and select() */

	F_LEAVE();

	return count;
}
Example no. 14
0
static int play_thread( void *arg)
{
	struct snd_pcm_substream *substream = (struct snd_pcm_substream *)arg;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct pcm_runtime_data *prtd = substream->runtime->private_data;
	struct snd_pcm *pcm = substream->pcm;
	struct tegra_audio_data *ptscx = tegra_snd_cx[pcm->device];
	int offset = 0;
	int rtbuffersize = 0;
	int buffer_to_prime = 0, buffer_in_queue = 0;
	NvAudioFxState state = NVALSA_INVALID_STATE;

	wait_for_completion(&prtd->thread_comp);
	rtbuffersize = frames_to_bytes(runtime, runtime->buffer_size);
	buffer_to_prime  = (rtbuffersize / TEGRA_DEFAULT_BUFFER_SIZE);

	if (runtime->control->appl_ptr)
	for (;;) {
		state = play_state(ptscx, prtd, state);
		if (state == SNDRV_PCM_TRIGGER_STOP)
			buffer_in_queue = 0;
		if (kthread_should_stop())
			return 0;
		if ((prtd->audiofx_frames < runtime->control->appl_ptr) &&
				(state != NvAudioFxState_Stop)) {
			offset = queue_next_buffer(substream, offset);
			buffer_in_queue++;
		}
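		/* Nothing in flight: sleep on buf_wait until woken, then
		 * re-evaluate the play state. */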
		if (buffer_in_queue == 0) {
			DEFINE_WAIT(wq);
			prepare_to_wait(&prtd->buf_wait, &wq, TASK_INTERRUPTIBLE);
			schedule();
			finish_wait(&prtd->buf_wait, &wq);
			continue;
		}
		if ((buffer_to_prime == buffer_in_queue) ||
			(prtd->audiofx_frames >= runtime->control->appl_ptr)) {
			play_buffer_done(substream);
			buffer_in_queue--;
		}
	}

	for (;;) {
		state = play_state(ptscx, prtd, state);
		if (state == SNDRV_PCM_TRIGGER_STOP)
			buffer_in_queue = 0;
		if (kthread_should_stop())
			break;
		if ((prtd->state != SNDRV_PCM_TRIGGER_STOP) &&
			 (runtime->status->state != SNDRV_PCM_STATE_DRAINING)) {
			offset = queue_next_buffer(substream, offset);
			buffer_in_queue++;
		}
		if (buffer_in_queue == 0) {
			DEFINE_WAIT(wq);
			prepare_to_wait(&prtd->buf_wait, &wq, TASK_INTERRUPTIBLE);
			schedule();
			finish_wait(&prtd->buf_wait, &wq);
			continue;
		}
		if ((buffer_to_prime == buffer_in_queue) &&
			(prtd->state != SNDRV_PCM_TRIGGER_STOP)) {
			play_buffer_done(substream);
			buffer_in_queue--;
		}
	}
	return 0;
}
Example no. 15
0
/* The strategy is to
 * - announce that we will maybe sleep
 * - poll a bit ; if successful, return
 * - if timeout, return
 * - really sleep (except if somebody woke us in the meanwhile) */
int select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
	struct timeval *timeout)
{
    int n, ret;
    fd_set myread, mywrite, myexcept;
    struct thread *thread = get_current();
    s_time_t start = NOW(), stop;
    DEFINE_WAIT(w1);
    DEFINE_WAIT(w2);
    DEFINE_WAIT(w3);
    DEFINE_WAIT(w4);
    DEFINE_WAIT(w5);
    DEFINE_WAIT(w6);
    int n_sockets;
    int n_non_sockets;

    assert(thread == main_thread);

    n_sockets = 0;
    n_non_sockets = 0;
    for (n = 0; n < nfds; n++) {
	if ((readfds && FD_ISSET(n, readfds)) ||
	    (writefds && FD_ISSET(n, writefds)) ||
	    (exceptfds && FD_ISSET(n, exceptfds))) {
	    if (files[n].type == FTYPE_SOCKET)
		n_sockets++;
	    else
		n_non_sockets++;
	}
    }
    if (n_sockets != 0 && n_non_sockets != 0) {
	static int cntr;
	if (cntr < 1000) {
	    printk("WARNING: select combining socket and non-socket FDs (n = %d vs %d); warning %d/1000\n",
		   n_sockets, n_non_sockets, cntr);
	    cntr++;
	}
    }
    if (n_non_sockets == 0)
	return do_lwip_select(nfds, readfds, writefds, exceptfds, timeout);

    if (timeout)
	stop = start + SECONDS(timeout->tv_sec) + timeout->tv_usec * 1000;
    else
	/* just make gcc happy */
	stop = start;

    /* Tell people we're going to sleep before looking at what they are
     * saying, hence letting them wake us if events happen between here and
     * schedule() */
    add_waiter(w1, netfront_queue);
    add_waiter(w2, event_queue);
    add_waiter(w3, blkfront_queue);
    add_waiter(w4, xenbus_watch_queue);
    add_waiter(w5, kbdfront_queue);
    add_waiter(w6, console_queue);

    if (readfds)
        myread = *readfds;
    else
        FD_ZERO(&myread);
    if (writefds)
        mywrite = *writefds;
    else
        FD_ZERO(&mywrite);
    if (exceptfds)
        myexcept = *exceptfds;
    else
        FD_ZERO(&myexcept);

    DEBUG("polling ");
    dump_set(nfds, &myread, &mywrite, &myexcept, timeout);
    DEBUG("\n");
    n = select_poll(nfds, &myread, &mywrite, &myexcept);

    if (n) {
	dump_set(nfds, readfds, writefds, exceptfds, timeout);
	if (readfds)
	    *readfds = myread;
	if (writefds)
	    *writefds = mywrite;
	if (exceptfds)
	    *exceptfds = myexcept;
	DEBUG(" -> ");
	dump_set(nfds, readfds, writefds, exceptfds, timeout);
	DEBUG("\n");
	wake(thread);
	ret = n;
	goto out;
    }
    if (timeout && NOW() >= stop) {
	if (readfds)
	    FD_ZERO(readfds);
	if (writefds)
	    FD_ZERO(writefds);
	if (exceptfds)
	    FD_ZERO(exceptfds);
	timeout->tv_sec = 0;
	timeout->tv_usec = 0;
	wake(thread);
	ret = 0;
	goto out;
    }

    if (timeout)
	thread->wakeup_time = stop;
    schedule();
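    /* Woken up or timed out: re-sample the caller's sets and poll once
     * more before deciding between success and EINTR. */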

    if (readfds)
        myread = *readfds;
    else
        FD_ZERO(&myread);
    if (writefds)
        mywrite = *writefds;
    else
        FD_ZERO(&mywrite);
    if (exceptfds)
        myexcept = *exceptfds;
    else
        FD_ZERO(&myexcept);

    n = select_poll(nfds, &myread, &mywrite, &myexcept);

    if (n) {
	if (readfds)
	    *readfds = myread;
	if (writefds)
	    *writefds = mywrite;
	if (exceptfds)
	    *exceptfds = myexcept;
	ret = n;
	goto out;
    }
    errno = EINTR;
    ret = -1;

out:
    remove_waiter(w1);
    remove_waiter(w2);
    remove_waiter(w3);
    remove_waiter(w4);
    remove_waiter(w5);
    remove_waiter(w6);
    return ret;
}
Example no. 16
0
static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
		int len, int flags)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	struct task_struct *tsk = current;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	int err;

	if (pn_socket_autobind(sock))
		return -ENOBUFS;
	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	lock_sock(sk);

	switch (sock->state) {
	case SS_UNCONNECTED:
		if (sk->sk_state != TCP_CLOSE) {
			err = -EISCONN;
			goto out;
		}
		break;
	case SS_CONNECTING:
		err = -EALREADY;
		goto out;
	default:
		err = -EISCONN;
		goto out;
	}

	pn->dobject = pn_sockaddr_get_object(spn);
	pn->resource = pn_sockaddr_get_resource(spn);
	sock->state = SS_CONNECTING;

	err = sk->sk_prot->connect(sk, addr, len);
	if (err) {
		sock->state = SS_UNCONNECTED;
		pn->dobject = 0;
		goto out;
	}

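	/* Handshake in progress: sleep until the socket leaves SYN_SENT,
	 * bounded by the receive timeout and interruptible by signals. */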
	while (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EINPROGRESS;
			goto out;
		}
		if (signal_pending(tsk)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
						TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}

	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
		err = 0;
	else if (sk->sk_state == TCP_CLOSE_WAIT)
		err = -ECONNRESET;
	else
		err = -ECONNREFUSED;
	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
	release_sock(sk);
	return err;
}
Example no. 17
0
/* Fill the buffer. Called with dev->lock held
 */
static int _chaoskey_fill(struct chaoskey *dev)
{
	DEFINE_WAIT(wait);
	int result;
	bool started;

	usb_dbg(dev->interface, "fill");

	/* Return immediately if someone called before the buffer was
	 * empty */
	if (dev->valid != dev->used) {
		usb_dbg(dev->interface, "not empty yet (valid %d used %d)",
			dev->valid, dev->used);
		return 0;
	}

	/* Bail if the device has been removed */
	if (!dev->present) {
		usb_dbg(dev->interface, "device not present");
		return -ENODEV;
	}

	/* Make sure the device is awake */
	result = usb_autopm_get_interface(dev->interface);
	if (result) {
		usb_dbg(dev->interface, "wakeup failed (result %d)", result);
		return result;
	}

	dev->reading = true;
	result = usb_submit_urb(dev->urb, GFP_KERNEL);
	if (result < 0) {
		result = usb_translate_errors(result);
		dev->reading = false;
		goto out;
	}

	/* The first read on the Alea takes a little under 2 seconds.
	 * Reads after the first read take only a few microseconds
	 * though.  Presumably the entropy-generating circuit needs
	 * time to ramp up.  So, we wait longer on the first read.
	 */
	started = dev->reads_started;
	dev->reads_started = true;
	result = wait_event_interruptible_timeout(
		dev->wait_q,
		!dev->reading,
		(started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );

	if (result < 0)
		goto out;

	if (result == 0)
		result = -ETIMEDOUT;
	else
		result = dev->valid;
out:
	/* Let the device go back to sleep eventually */
	usb_autopm_put_interface(dev->interface);

	usb_dbg(dev->interface, "read %d bytes", dev->valid);

	return result;
}
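
Note that the DEFINE_WAIT(wait) at the top of _chaoskey_fill() is never used: the sleep is delegated to wait_event_interruptible_timeout(), which builds and tears down its own wait-queue entry internally. As a rough sketch only (the real macro in <linux/wait.h> handles more corner cases), the call above behaves like the following open-coded loop:

	/* Sketch: 'timeout' ends up < 0 on a signal, 0 on timeout, and
	 * > 0 (time left) when !dev->reading became true. */
	long timeout = started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&dev->wait_q, &wait, TASK_INTERRUPTIBLE);
		if (!dev->reading)
			break;				/* condition became true */
		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;		/* interrupted */
			break;
		}
		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;				/* timed out */
	}
	finish_wait(&dev->wait_q, &wait);
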
Example No. 18
static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
    int sockaddr_len,int flags)
{
	DEFINE_WAIT(wait);
	struct sock *sk = sock->sk;
	struct sockaddr_atmsvc *addr;
	struct atm_vcc *vcc = ATM_SD(sock);
	int error;

	DPRINTK("svc_connect %p\n",vcc);
	lock_sock(sk);
	if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) {
		error = -EINVAL;
		goto out;
	}

	switch (sock->state) {
	default:
		error = -EINVAL;
		goto out;
	case SS_CONNECTED:
		error = -EISCONN;
		goto out;
	case SS_CONNECTING:
		if (test_bit(ATM_VF_WAITING, &vcc->flags)) {
			error = -EALREADY;
			goto out;
		}
		sock->state = SS_UNCONNECTED;
		if (sk->sk_err) {
			error = -sk->sk_err;
			goto out;
		}
		break;
	case SS_UNCONNECTED:
		addr = (struct sockaddr_atmsvc *) sockaddr;
		if (addr->sas_family != AF_ATMSVC) {
			error = -EAFNOSUPPORT;
			goto out;
		}
		if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
			error = -EBADFD;
			goto out;
		}
		if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
		    vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) {
			error = -EINVAL;
			goto out;
		}
		if (!vcc->qos.txtp.traffic_class &&
		    !vcc->qos.rxtp.traffic_class) {
			error = -EINVAL;
			goto out;
		}
		vcc->remote = *addr;
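		/* Ordering matters here: mark WAITING and queue the task
		 * before sigd_enq() hands the request to the signalling
		 * daemon, so its reply cannot race past a not-yet-queued
		 * sleeper. */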
		set_bit(ATM_VF_WAITING, &vcc->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		sigd_enq(vcc,as_connect,NULL,NULL,&vcc->remote);
		if (flags & O_NONBLOCK) {
			finish_wait(sk->sk_sleep, &wait);
			sock->state = SS_CONNECTING;
			error = -EINPROGRESS;
			goto out;
		}
		error = 0;
		while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
			schedule();
			if (!signal_pending(current)) {
				prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
				continue;
			}
			DPRINTK("*ABORT*\n");
			/*
			 * This is tricky:
			 *   Kernel ---close--> Demon
			 *   Kernel <--close--- Demon
			 * or
			 *   Kernel ---close--> Demon
			 *   Kernel <--error--- Demon
			 * or
			 *   Kernel ---close--> Demon
			 *   Kernel <--okay---- Demon
			 *   Kernel <--close--- Demon
			 */
			sigd_enq(vcc,as_close,NULL,NULL,NULL);
			while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
				prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
				schedule();
			}
			if (!sk->sk_err)
				while (!test_bit(ATM_VF_RELEASED,&vcc->flags)
				    && sigd) {
					prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
					schedule();
				}
			clear_bit(ATM_VF_REGIS,&vcc->flags);
			clear_bit(ATM_VF_RELEASED,&vcc->flags);
			clear_bit(ATM_VF_CLOSE,&vcc->flags);
			    /* we're gone now but may connect later */
			error = -EINTR;
			break;
		}
		finish_wait(sk->sk_sleep, &wait);
		if (error)
			goto out;
		if (!sigd) {
			error = -EUNATCH;
			goto out;
		}
		if (sk->sk_err) {
			error = -sk->sk_err;
			goto out;
		}
	}
/*
 * Not supported yet
 *
 * #ifndef CONFIG_SINGLE_SIGITF
 */
	vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp);
	vcc->qos.txtp.pcr = 0;
	vcc->qos.txtp.min_pcr = 0;
/*
 * #endif
 */
	if (!(error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci)))
		sock->state = SS_CONNECTED;
	else (void) svc_disconnect(vcc);
out:
	release_sock(sk);
	return error;
}
Example No. 19
static ssize_t
read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
{
	DEFINE_WAIT(wait);
	struct rio_usb_data *rio = &rio_instance;
	ssize_t read_count;
	unsigned int partial;
	int this_read;
	int result;
	int maxretry = 10;
	char *ibuf;
	int intr;

	intr = mutex_lock_interruptible(&(rio->lock));
	if (intr)
		return -EINTR;
	/* Sanity check to make sure rio is connected, powered, etc */
	if (rio == NULL ||
	    rio->present == 0 ||
	    rio->rio_dev == NULL) {
		mutex_unlock(&(rio->lock));
		return -ENODEV;
	}

	ibuf = rio->ibuf;

	read_count = 0;


	while (count > 0) {
		if (signal_pending(current)) {
			mutex_unlock(&(rio->lock));
			return read_count ? read_count : -EINTR;
		}
		if (!rio->rio_dev) {
			mutex_unlock(&(rio->lock));
			return -ENODEV;
		}
		this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;

		result = usb_bulk_msg(rio->rio_dev,
				      usb_rcvbulkpipe(rio->rio_dev, 1),
				      ibuf, this_read, &partial,
				      8000);

		dbg("read stats: result:%d this_read:%u partial:%u",
		       result, this_read, partial);

		if (partial) {
			count = this_read = partial;
		} else if (result == -ETIMEDOUT || result == 15) {	/* FIXME: 15 ??? */
			if (!maxretry--) {
				mutex_unlock(&(rio->lock));
				err("read_rio: maxretry timeout");
				return -ETIME;
			}
			prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
			schedule_timeout(NAK_TIMEOUT);
			finish_wait(&rio->wait_q, &wait);
			continue;
		} else if (result != -EREMOTEIO) {
			mutex_unlock(&(rio->lock));
			err("Read Whoops - result:%u partial:%u this_read:%u",
			     result, partial, this_read);
			return -EIO;
		} else {
			mutex_unlock(&(rio->lock));
			return (0);
		}

		if (this_read) {
			if (copy_to_user(buffer, ibuf, this_read)) {
				mutex_unlock(&(rio->lock));
				return -EFAULT;
			}
			count -= this_read;
			read_count += this_read;
			buffer += this_read;
		}
	}
	mutex_unlock(&(rio->lock));
	return read_count;
}
Example No. 20
static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct atmsvc_msg *msg;
	struct atm_vcc *old_vcc = ATM_SD(sock);
	struct atm_vcc *new_vcc;
	int error;

	lock_sock(sk);

	error = svc_create(newsock,0);
	if (error)
		goto out;

	new_vcc = ATM_SD(newsock);

	DPRINTK("svc_accept %p -> %p\n",old_vcc,new_vcc);
	while (1) {
		DEFINE_WAIT(wait);

		prepare_to_wait(old_vcc->sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		while (!(skb = skb_dequeue(&old_vcc->sk->sk_receive_queue)) &&
		       sigd) {
			if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break;
			if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) {
				error = -sk->sk_err;
				break;
			}
			if (flags & O_NONBLOCK) {
				error = -EAGAIN;
				break;
			}
			release_sock(sk);
			schedule();
			lock_sock(sk);
			if (signal_pending(current)) {
				error = -ERESTARTSYS;
				break;
			}
			prepare_to_wait(old_vcc->sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		}
		finish_wait(old_vcc->sk->sk_sleep, &wait);
		if (error)
			goto out;
		if (!skb) {
			error = -EUNATCH;
			goto out;
		}
		msg = (struct atmsvc_msg *) skb->data;
		new_vcc->qos = msg->qos;
		set_bit(ATM_VF_HASQOS,&new_vcc->flags);
		new_vcc->remote = msg->svc;
		new_vcc->local = msg->local;
		new_vcc->sap = msg->sap;
		error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
				    msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci);
		dev_kfree_skb(skb);
		old_vcc->sk->sk_ack_backlog--;
		if (error) {
			sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL,
			    &old_vcc->qos,error);
			error = error == -EAGAIN ? -EBUSY : error;
			goto out;
		}
		/* wait should be short, so we ignore the non-blocking flag */
		set_bit(ATM_VF_WAITING, &new_vcc->flags);
		prepare_to_wait(new_vcc->sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
		sigd_enq(new_vcc,as_accept,old_vcc,NULL,NULL);
		while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
			prepare_to_wait(new_vcc->sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
		}
		finish_wait(new_vcc->sk->sk_sleep, &wait);
		if (!sigd) {
			error = -EUNATCH;
			goto out;
		}
		if (!new_vcc->sk->sk_err)
			break;
		if (new_vcc->sk->sk_err != ERESTARTSYS) {
			error = -new_vcc->sk->sk_err;
			goto out;
		}
	}
	newsock->state = SS_CONNECTED;
out:
	release_sock(sk);
	return error;
}
Example No. 21
/*
 * receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		  struct msghdr *msg, size_t len, int flags)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_call *call = NULL, *continue_call = NULL;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	long timeo;
	int copy, ret, ullen, offset, copied = 0;
	u32 abort_code;

	DEFINE_WAIT(wait);

	_enter(",,,%zu,%d", len, flags);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
	msg->msg_flags |= MSG_MORE;

	lock_sock(&rx->sk);

	for (;;) {
		/* return immediately if a client socket has no outstanding
		 * calls */
		if (RB_EMPTY_ROOT(&rx->calls)) {
			if (copied)
				goto out;
			if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
				release_sock(&rx->sk);
				if (continue_call)
					rxrpc_put_call(continue_call);
				return -ENODATA;
			}
		}

		/* get the next message on the Rx queue */
		skb = skb_peek(&rx->sk.sk_receive_queue);
		if (!skb) {
			/* nothing remains on the queue */
			if (copied &&
			    (flags & MSG_PEEK || timeo == 0))
				goto out;

			/* wait for a message to turn up */
			release_sock(&rx->sk);
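			/* Exclusive wait: with several threads in recvmsg()
			 * (see the function's header comment) each packet
			 * should wake only one sleeper.  The queue is tested
			 * again below, after queuing, to close the race with
			 * a packet that arrived in the meantime. */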
			prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
						  TASK_INTERRUPTIBLE);
			ret = sock_error(&rx->sk);
			if (ret)
				goto wait_error;

			if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
				if (signal_pending(current))
					goto wait_interrupted;
				timeo = schedule_timeout(timeo);
			}
			finish_wait(sk_sleep(&rx->sk), &wait);
			lock_sock(&rx->sk);
			continue;
		}

	peek_next_packet:
		sp = rxrpc_skb(skb);
		call = sp->call;
		ASSERT(call != NULL);

		_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

		/* make sure we wait for the state to be updated in this call */
		spin_lock_bh(&call->lock);
		spin_unlock_bh(&call->lock);

		if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
			_debug("packet from released call");
			if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
				BUG();
			rxrpc_free_skb(skb);
			continue;
		}

		/* determine whether to continue last data receive */
		if (continue_call) {
			_debug("maybe cont");
			if (call != continue_call ||
			    skb->mark != RXRPC_SKB_MARK_DATA) {
				release_sock(&rx->sk);
				rxrpc_put_call(continue_call);
				_leave(" = %d [noncont]", copied);
				return copied;
			}
		}

		rxrpc_get_call(call);

		/* copy the peer address and timestamp */
		if (!continue_call) {
			if (msg->msg_name) {
				size_t len =
					sizeof(call->conn->trans->peer->srx);
				memcpy(msg->msg_name,
				       &call->conn->trans->peer->srx, len);
				msg->msg_namelen = len;
			}
			sock_recv_ts_and_drops(msg, &rx->sk, skb);
		}

		/* receive the message */
		if (skb->mark != RXRPC_SKB_MARK_DATA)
			goto receive_non_data_message;

		_debug("recvmsg DATA #%u { %d, %d }",
		       ntohl(sp->hdr.seq), skb->len, sp->offset);

		if (!continue_call) {
			/* only set the control data once per recvmsg() */
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       ullen, &call->user_call_ID);
			if (ret < 0)
				goto copy_error;
			ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		}

		ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
		ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
		call->rx_data_recv = ntohl(sp->hdr.seq);

		ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

		offset = sp->offset;
		copy = skb->len - offset;
		if (copy > len - copied)
			copy = len - copied;

		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			ret = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, copy);
		} else {
			ret = skb_copy_and_csum_datagram_iovec(skb, offset,
							       msg->msg_iov);
			if (ret == -EINVAL)
				goto csum_copy_error;
		}

		if (ret < 0)
			goto copy_error;

		/* handle piecemeal consumption of data packets */
		_debug("copied %d+%d", copy, copied);

		offset += copy;
		copied += copy;

		if (!(flags & MSG_PEEK))
			sp->offset = offset;

		if (sp->offset < skb->len) {
			_debug("buffer full");
			ASSERTCMP(copied, ==, len);
			break;
		}

		/* we transferred the whole data packet */
		if (sp->hdr.flags & RXRPC_LAST_PACKET) {
			_debug("last");
			if (call->conn->out_clientflag) {
				 /* last byte of reply received */
				ret = copied;
				goto terminal_message;
			}

			/* last bit of request received */
			if (!(flags & MSG_PEEK)) {
				_debug("eat packet");
				if (skb_dequeue(&rx->sk.sk_receive_queue) !=
				    skb)
					BUG();
				rxrpc_free_skb(skb);
			}
			msg->msg_flags &= ~MSG_MORE;
			break;
		}

		/* move on to the next data message */
		_debug("next");
		if (!continue_call)
			continue_call = sp->call;
		else
			rxrpc_put_call(call);
		call = NULL;

		if (flags & MSG_PEEK) {
			_debug("peek next");
			skb = skb->next;
			if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
				break;
			goto peek_next_packet;
		}

		_debug("eat packet");
		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
			BUG();
		rxrpc_free_skb(skb);
	}
Example No. 22
/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int r = 0;

	// is file open for writing?
	int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;

	osprd_info_t *d = file2osprd(filp);	// device info
	DEFINE_WAIT(wait);		// wait queue entry in case we block
	wait.func = &default_wake_function;

	// This line avoids compiler warnings; you may remove it.
	(void) filp_writable, (void) d;
	
	// Set 'r' to the ioctl's return value: 0 on success, negative on error

	if (cmd == OSPRDIOCACQUIRE) {
		
		// EXERCISE: Lock the ramdisk.
		//
		// If *filp is a writable file, then attempt to write-lock
		// the ramdisk; otherwise attempt to read-lock the ramdisk.
		//
		// This lock request must block using 'd->blockq' until:
		// 1) no other process holds a write lock;
		// 2) either the request is for a read lock, or no other process
		//    holds a read lock; and
		// 3) lock requests should be serviced in order, so no process
		//    that blocked earlier is still blocked waiting for the
		//    lock.
		//
		// If a process acquires a lock, mark this fact by setting
		// 'filp->f_flags |= F_OSPRD_LOCKED'.  You may also need to
		// keep track of how many read and write locks are held:
		// change the 'osprd_info_t' structure to do this.
		//
		// Also wake up processes waiting on 'd->blockq' as needed.
		//
		// If the lock request would cause a deadlock, return -EDEADLK.
		// If the lock request blocks and is awoken by a signal, then
		// return -ERESTARTSYS. 
		// Otherwise, if we can grant the lock request, return 0.

		// Your code here (instead of the next two lines).
		if(filp_writable){
			// Attempt to take write lock
			if(d->num_ramdisks_open){
				d->num_ramdisks_open = 0;
				r = -EDEADLK;
				return r;
			}
			
			if(waitqueue_active(&d->blockq) || d->write_lock_count || 
					d->read_lock_count || (filp->f_flags & F_OSPRD_LOCKED)) {
				/* Enqueue writer process and call scheduler if
				 *   i. Wait queue is not empty
				 *  ii. No. of readers > 0
				 * iii. No. of writers > 0
				 *  iv. Ramdisk has been locked
				 */
				osp_spin_lock(&d->mutex);
				prepare_to_wait_exclusive(&d->blockq,&wait,TASK_INTERRUPTIBLE);
				osp_spin_unlock(&d->mutex);
				do{
					schedule();
					/* if a signal has occurred, return -ERESTARTSYS to the caller */
					if(signal_pending(current)){
						r = -ERESTARTSYS;
						return r;
					}
				}while(d->write_lock_count || d->read_lock_count || 
						(filp->f_flags & F_OSPRD_LOCKED));
				/* All conditions for locking satisfied; unblock (dequeue) */
				finish_wait(&d->blockq, &wait);
			}

			/* Acquire write lock */
			osp_spin_lock(&d->mutex);
			filp->f_flags |= F_OSPRD_LOCKED;
			d->write_lock_count++;
			osp_spin_unlock(&d->mutex);

		} else {
			// Attempt to take read lock
			/* Enqueue reader process and call scheduler if
			 *   i. Wait queue is not empty
			 *  ii. No. of writers > 0
			 * iii. Ramdisk has been locked
			 */
			if(waitqueue_active(&d->blockq) || d->write_lock_count || 
					(filp->f_flags & F_OSPRD_LOCKED)) {
				osp_spin_lock(&d->mutex);
				prepare_to_wait_exclusive(&d->blockq,&wait,TASK_INTERRUPTIBLE);
				osp_spin_unlock(&d->mutex);
				do{
					schedule();
					/* if a signal has occurred, return -ERESTARTSYS to the caller */
					if(signal_pending(current)){
						r = -ERESTARTSYS;
						return r;
					}
				}
				while(d->write_lock_count || (filp->f_flags & F_OSPRD_LOCKED));
				/* All conditions for locking satisfied; unblock (dequeue) */
				finish_wait(&d->blockq, &wait);
			}

			/* Acquire read lock */
			osp_spin_lock(&d->mutex);
			filp->f_flags |= F_OSPRD_LOCKED;
			d->read_lock_count++;
#if 0
			/* Wake up the next reader in the queue to ensure that
			 * - when a writer dequeues, all subsequent readers in the
			 *   queue up to the first writer are woken up, and
			 * - the writer reaches the head of the queue to be called next.
			 *
			 * This causes TEST CASE 15 to fail, so it is commented out.
			 */
			if(waitqueue_active(&d->blockq))
				wake_up(&d->blockq);
#endif
			osp_spin_unlock(&d->mutex);
		}
#if 0
		eprintk("Attempting to acquire\n");
		r = -ENOTTY;
#endif
		
	} else if (cmd == OSPRDIOCTRYACQUIRE) {
		
		// EXERCISE: ATTEMPT Lock the ramdisk.
		//
		// This is just like OSPRDIOCACQUIRE, except it should never
		// block.  If OSPRDIOCACQUIRE would block or return deadlock,
		// OSPRDIOCTRYACQUIRE should return -EBUSY.
		// Otherwise, if we can grant the lock request, return 0.

		// Your code here (instead of the next two lines).
		if(filp_writable){
			// Attempt to take write lock
			/* OSPRDIOCACQUIRE would block, so fail with -EBUSY, if
			 *   i. Wait queue is not empty
			 *  ii. No. of readers > 0
			 * iii. No. of writers > 0
			 *  iv. Ramdisk has been locked
			 */
			if(waitqueue_active(&d->blockq) || d->write_lock_count ||
					d->read_lock_count || (filp->f_flags & F_OSPRD_LOCKED)) {
				/* Not able to acquire write lock; return -EBUSY */
				r = -EBUSY;
				return r;
			}
			/* Acquire write lock */
			osp_spin_lock(&d->mutex);
			filp->f_flags |= F_OSPRD_LOCKED;
			d->write_lock_count++;
			osp_spin_unlock(&d->mutex);
		} else {
			/* OSPRDIOCACQUIRE would block, so fail with -EBUSY, if
			 *   i. Wait queue is not empty
			 *  ii. No. of writers > 0
			 * iii. Ramdisk has been locked
			 */
			if(waitqueue_active(&d->blockq) || d->write_lock_count ||
					(filp->f_flags & F_OSPRD_LOCKED)) {
				/* Not able to acquire read lock; return -EBUSY */
				r = -EBUSY;
				return r;
			}
			/* Acquire read lock */
			osp_spin_lock(&d->mutex);
			filp->f_flags |= F_OSPRD_LOCKED;
			d->read_lock_count++;
			osp_spin_unlock(&d->mutex);
		}
#if 0
		eprintk("Attempting to try acquire\n");
		r = -ENOTTY;
#endif 
		
	} else if (cmd == OSPRDIOCRELEASE) {
		
		// EXERCISE: Unlock the ramdisk.
		//
		// If the file hasn't locked the ramdisk, return -EINVAL.
		// Otherwise, clear the lock from filp->f_flags, wake up
		// the wait queue, perform any additional accounting steps
		// you need, and return 0.
		
		// Your code here (instead of the next line).
		if(!(filp->f_flags & F_OSPRD_LOCKED))
			/* you should not be here */
			r = -EINVAL;
		else {
			/* Release read or write lock as appropriate */
			osp_spin_lock(&d->mutex);
			filp->f_flags &= (~F_OSPRD_LOCKED);
			if(filp_writable)
				d->write_lock_count = 0; 
			else
				d->read_lock_count--;
			if(waitqueue_active(&d->blockq)) {
				wake_up(&d->blockq);
			}
			//d->num_ramdisks_open--;
			osp_spin_unlock(&d->mutex);
		}

		// r = -ENOTTY;

	} else
		r = -ENOTTY; /* unknown command */
	return r;
}
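
One bug in the blocking branches above deserves a note: prepare_to_wait_exclusive() runs once, before the do/while, but schedule() is then called repeatedly without resetting the task state, so after the first wakeup the loop degenerates into a busy yield (overriding wait.func with default_wake_function also defeats the autoremove behaviour DEFINE_WAIT normally provides). A hedged sketch of the conventional shape for the write-lock case, re-arming the entry on every pass and re-testing the condition under d->mutex; field names follow the exercise code, and wait is left as DEFINE_WAIT initializes it:

	for (;;) {
		prepare_to_wait_exclusive(&d->blockq, &wait, TASK_INTERRUPTIBLE);
		osp_spin_lock(&d->mutex);
		if (d->write_lock_count == 0 && d->read_lock_count == 0 &&
		    !(filp->f_flags & F_OSPRD_LOCKED)) {
			/* conditions hold: grant the lock before dropping d->mutex */
			filp->f_flags |= F_OSPRD_LOCKED;
			d->write_lock_count++;
			osp_spin_unlock(&d->mutex);
			break;
		}
		osp_spin_unlock(&d->mutex);
		schedule();		/* really sleeps: state was just reset */
		if (signal_pending(current)) {
			finish_wait(&d->blockq, &wait);
			return -ERESTARTSYS;
		}
	}
	finish_wait(&d->blockq, &wait);
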
Example No. 23
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
	struct ivtv_open_id *id = filp->private_data;
	struct ivtv *itv = id->itv;
	struct ivtv_stream *s = &itv->streams[id->type];
	struct yuv_playback_info *yi = &itv->yuv_info;
	struct ivtv_buffer *buf;
	struct ivtv_queue q;
	int bytes_written = 0;
	int mode;
	int rc;
	DEFINE_WAIT(wait);

	IVTV_DEBUG_HI_FILE("write %zd bytes to %s\n", count, s->name);

	if (s->type != IVTV_DEC_STREAM_TYPE_MPG &&
	    s->type != IVTV_DEC_STREAM_TYPE_YUV &&
	    s->type != IVTV_DEC_STREAM_TYPE_VOUT)
		/* not decoder streams */
		return -EPERM;

	/* Try to claim this stream */
	if (ivtv_claim_stream(id, s->type))
		return -EBUSY;

	/* This stream does not need to start any decoding */
	if (s->type == IVTV_DEC_STREAM_TYPE_VOUT) {
		int elems = count / sizeof(struct v4l2_sliced_vbi_data);

		set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
		ivtv_write_vbi(itv, (const struct v4l2_sliced_vbi_data *)user_buf, elems);
		return elems * sizeof(struct v4l2_sliced_vbi_data);
	}

	mode = s->type == IVTV_DEC_STREAM_TYPE_MPG ? OUT_MPG : OUT_YUV;

	if (ivtv_set_output_mode(itv, mode) != mode) {
		ivtv_release_stream(s);
		return -EBUSY;
	}
	ivtv_queue_init(&q);
	set_bit(IVTV_F_S_APPL_IO, &s->s_flags);

	/* Start decoder (returns 0 if already started) */
	mutex_lock(&itv->serialize_lock);
	rc = ivtv_start_decoding(id, itv->speed);
	mutex_unlock(&itv->serialize_lock);
	if (rc) {
		IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);

		/* failure, clean up */
		clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
		clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
		return rc;
	}

retry:
	/* If possible, just DMA the entire frame - Check the data transfer size
	since we may get here before the stream has been fully set-up */
	if (mode == OUT_YUV && s->q_full.length == 0 && itv->dma_data_req_size) {
		while (count >= itv->dma_data_req_size) {
			rc = ivtv_yuv_udma_stream_frame(itv, (void __user *)user_buf);

			if (rc < 0)
				return rc;

			bytes_written += itv->dma_data_req_size;
			user_buf += itv->dma_data_req_size;
			count -= itv->dma_data_req_size;
		}
		if (count == 0) {
			IVTV_DEBUG_HI_FILE("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
			return bytes_written;
		}
	}

	for (;;) {
		/* Gather buffers */
		while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_io)))
			ivtv_enqueue(s, buf, &q);
		while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_free)))
			ivtv_enqueue(s, buf, &q);
		if (q.buffers)
			break;
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
		/* New buffers might have become free before we were added to the waitqueue */
		if (!s->q_free.buffers)
			schedule();
		finish_wait(&s->waitq, &wait);
		if (signal_pending(current)) {
			IVTV_DEBUG_INFO("User stopped %s\n", s->name);
			return -EINTR;
		}
	}

	/* copy user data into buffers */
	while ((buf = ivtv_dequeue(s, &q))) {
		/* yuv is a pain. Don't copy more data than needed for a single
		   frame, otherwise we lose sync with the incoming stream */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV &&
		    yi->stream_size + count > itv->dma_data_req_size)
			rc  = ivtv_buf_copy_from_user(s, buf, user_buf,
				itv->dma_data_req_size - yi->stream_size);
		else
			rc = ivtv_buf_copy_from_user(s, buf, user_buf, count);

		/* Make sure we really got all the user data */
		if (rc < 0) {
			ivtv_queue_move(s, &q, NULL, &s->q_free, 0);
			return rc;
		}
		user_buf += rc;
		count -= rc;
		bytes_written += rc;

		if (s->type == IVTV_DEC_STREAM_TYPE_YUV) {
			yi->stream_size += rc;
			/* If we have a complete yuv frame, break loop now */
			if (yi->stream_size == itv->dma_data_req_size) {
				ivtv_enqueue(s, buf, &s->q_full);
				yi->stream_size = 0;
				break;
			}
		}

		if (buf->bytesused != s->buf_size) {
			/* incomplete, leave in q_io for next time */
			ivtv_enqueue(s, buf, &s->q_io);
			break;
		}
		/* Byteswap MPEG buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_buf_swap(buf);
		ivtv_enqueue(s, buf, &s->q_full);
	}

	if (test_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags)) {
		if (s->q_full.length >= itv->dma_data_req_size) {
			int got_sig;

			if (mode == OUT_YUV)
				ivtv_yuv_setup_stream_frame(itv);

			prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
			while (!(got_sig = signal_pending(current)) &&
					test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
				schedule();
			}
			finish_wait(&itv->dma_waitq, &wait);
			if (got_sig) {
				IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
				return -EINTR;
			}

			clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
			ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
			ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 1);
		}
	}
	/* more user data is available, wait until buffers become free
	   to transfer the rest. */
	if (count && !(filp->f_flags & O_NONBLOCK))
		goto retry;
	IVTV_DEBUG_HI_FILE("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
	return bytes_written;
}
Example No. 24
int tty_port_block_til_ready(struct tty_port *port,
				struct tty_struct *tty, struct file *filp)
{
	int do_clocal = 0, retval;
	unsigned long flags;
	DEFINE_WAIT(wait);
	int cd;

	/* block if port is in the process of being closed */
	if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
		wait_event_interruptible(port->close_wait,
				!(port->flags & ASYNC_CLOSING));
		if (port->flags & ASYNC_HUP_NOTIFY)
			return -EAGAIN;
		else
			return -ERESTARTSYS;
	}

	/* if non-blocking mode is set we can pass directly to open unless
	   the port has just hung up or is in another error state */
	if (tty->flags & (1 << TTY_IO_ERROR)) {
		port->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}
	if (filp->f_flags & O_NONBLOCK) {
		/* Indicate we are open */
		if (tty->termios->c_cflag & CBAUD)
			tty_port_raise_dtr_rts(port);
		port->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	if (C_CLOCAL(tty))
		do_clocal = 1;

	/* Block waiting until we can proceed. We may need to wait for the
	   carrier, but we must also wait for any close that is in progress
	   before the next open may complete */

	retval = 0;

	/* The port lock protects the port counts */
	spin_lock_irqsave(&port->lock, flags);
	if (!tty_hung_up_p(filp))
		port->count--;
	port->blocked_open++;
	spin_unlock_irqrestore(&port->lock, flags);

	while (1) {
		/* Indicate we are open */
		if (tty->termios->c_cflag & CBAUD)
			tty_port_raise_dtr_rts(port);

		prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
		/* Check for a hangup or uninitialised port.
							Return accordingly */
		if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
			if (port->flags & ASYNC_HUP_NOTIFY)
				retval = -EAGAIN;
			else
				retval = -ERESTARTSYS;
			break;
		}
		/* Probe the carrier. For devices with no carrier detect this
		   will always return true */
		cd = tty_port_carrier_raised(port);
		if (!(port->flags & ASYNC_CLOSING) &&
				(do_clocal || cd))
			break;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&port->open_wait, &wait);

	/* Update counts. A parallel hangup will have set count to zero and
	   we must not mess that up further */
	spin_lock_irqsave(&port->lock, flags);
	if (!tty_hung_up_p(filp))
		port->count++;
	port->blocked_open--;
	if (retval == 0)
		port->flags |= ASYNC_NORMAL_ACTIVE;
	spin_unlock_irqrestore(&port->lock, flags);
	return retval;
}
Example No. 25
static int pohmelfs_data_recv(struct netfs_state *st, void *data, unsigned int size)
{
	unsigned int revents = 0;
	unsigned int err_mask = POLLERR | POLLHUP | POLLRDHUP;
	unsigned int mask = err_mask | POLLIN;
	int err = 0;

	while (size && !err) {
		revents = netfs_state_poll(st);

		if (!(revents & mask)) {
			DEFINE_WAIT(wait);

			for (;;) {
				prepare_to_wait(&st->thread_wait, &wait, TASK_INTERRUPTIBLE);
				if (kthread_should_stop())
					break;

				revents = netfs_state_poll(st);

				if (revents & mask)
					break;

				if (signal_pending(current))
					break;

				schedule();
				continue;
			}
			finish_wait(&st->thread_wait, &wait);
		}

		err = 0;
		netfs_state_lock(st);
		if (st->socket && (st->read_socket == st->socket) && (revents & POLLIN)) {
			err = netfs_data_recv(st, data, size);
			if (err > 0) {
				data += err;
				size -= err;
				err = 0;
			} else if (err == 0)
				err = -ECONNRESET;
		}

		if (revents & err_mask) {
			printk("%s: revents: %x, socket: %p, size: %u, err: %d.\n",
					__func__, revents, st->socket, size, err);
			err = -ECONNRESET;
		}
		netfs_state_unlock(st);

		if (err < 0) {
			if (netfs_state_trylock_send(st)) {
				netfs_state_exit(st);
				err = netfs_state_init(st);
				if (!err)
					err = -EAGAIN;
				netfs_state_unlock_send(st);
			} else {
				st->need_reset = 1;
			}
		}

		if (kthread_should_stop())
			err = -ENODEV;

		if (err)
			printk("%s: socket: %p, read_socket: %p, revents: %x, rev_error: %d, "
					"should_stop: %d, size: %u, err: %d.\n",
				__func__, st->socket, st->read_socket,
				revents, revents & err_mask, kthread_should_stop(), size, err);
	}

	return err;
}
Example No. 26
File: rtlx.c Project: 274914765/C
int rtlx_open(int index, int can_sleep)
{
    struct rtlx_info **p;
    struct rtlx_channel *chan;
    enum rtlx_state state;
    int ret = 0;

    if (index >= RTLX_CHANNELS) {
        printk(KERN_DEBUG "rtlx_open index out of range\n");
        return -ENOSYS;
    }

    if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
        printk(KERN_DEBUG "rtlx_open channel %d already opened\n",
               index);
        ret = -EBUSY;
        goto out_fail;
    }

    if (rtlx == NULL) {
        if ((p = vpe_get_shared(tclimit)) == NULL) {
            if (can_sleep) {
                __wait_event_interruptible(channel_wqs[index].lx_queue,
                    (p = vpe_get_shared(tclimit)), ret);
                if (ret)
                    goto out_fail;
            } else {
                printk(KERN_DEBUG "No SP program loaded, and device "
                       "opened with O_NONBLOCK\n");
                ret = -ENOSYS;
                goto out_fail;
            }
        }

        smp_rmb();
        if (*p == NULL) {
            if (can_sleep) {
                DEFINE_WAIT(wait);

                for (;;) {
                    prepare_to_wait(
                        &channel_wqs[index].lx_queue,
                        &wait, TASK_INTERRUPTIBLE);
                    smp_rmb();
                    if (*p != NULL)
                        break;
                    if (!signal_pending(current)) {
                        schedule();
                        continue;
                    }
                    ret = -ERESTARTSYS;
                    goto out_fail;
                }
                finish_wait(&channel_wqs[index].lx_queue, &wait);
            } else {
                pr_err(" *vpe_get_shared is NULL. "
                       "Has an SP program been loaded?\n");
                ret = -ENOSYS;
                goto out_fail;
            }
        }

        if ((unsigned int)*p < KSEG0) {
            printk(KERN_WARNING "vpe_get_shared returned an "
                   "invalid pointer maybe an error code %d\n",
                   (int)*p);
            ret = -ENOSYS;
            goto out_fail;
        }

        if ((ret = rtlx_init(*p)) < 0)
            goto out_ret;
    }

    chan = &rtlx->channel[index];

    state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
    if (state == RTLX_STATE_OPENED) {
        ret = -EBUSY;
        goto out_fail;
    }

out_fail:
    smp_mb();
    atomic_dec(&channel_wqs[index].in_open);
    smp_mb();

out_ret:
    return ret;
}
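
The for(;;) above is wait_event_interruptible() spelled out by hand, presumably so that an smp_rmb() can be issued before each test of *p. As an illustrative equivalence only, not a proposed change, the same wait can be written with a GNU statement expression:

    /* Illustrative equivalent of the open-coded loop in rtlx_open(). */
    ret = wait_event_interruptible(channel_wqs[index].lx_queue,
                   ({ smp_rmb(); *p != NULL; }));
    if (ret)
        goto out_fail;    /* -ERESTARTSYS, as in the open-coded loop */
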
Example No. 27
static ssize_t yurex_write(struct file *file, const char *user_buffer, size_t count, loff_t *ppos)
{
	struct usb_yurex *dev;
	int i, set = 0, retval = 0;
	char buffer[16];
	char *data = buffer;
	unsigned long long c, c2 = 0;
	signed long timeout = 0;
	DEFINE_WAIT(wait);

	count = min(sizeof(buffer), count);
	dev = (struct usb_yurex *)file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto error;

	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* already disconnected */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	if (copy_from_user(buffer, user_buffer, count)) {
		mutex_unlock(&dev->io_mutex);
		retval = -EFAULT;
		goto error;
	}
	memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);

	switch (buffer[0]) {
	case CMD_ANIMATE:
	case CMD_LED:
		dev->cntl_buffer[0] = buffer[0];
		dev->cntl_buffer[1] = buffer[1];
		dev->cntl_buffer[2] = CMD_EOF;
		break;
	case CMD_READ:
	case CMD_VERSION:
		dev->cntl_buffer[0] = buffer[0];
		dev->cntl_buffer[1] = 0x00;
		dev->cntl_buffer[2] = CMD_EOF;
		break;
	case CMD_SET:
		data++;
		/* FALL THROUGH */
	case '0' ... '9':
		set = 1;
		c = c2 = simple_strtoull(data, NULL, 0);
		dev->cntl_buffer[0] = CMD_SET;
		for (i = 1; i < 6; i++) {
			dev->cntl_buffer[i] = (c>>32) & 0xff;
			c <<= 8;
		}
		dev->cntl_buffer[6] = CMD_EOF;	/* EOF belongs in the outgoing control buffer */
		break;
	default:
		mutex_unlock(&dev->io_mutex);
		return -EINVAL;
	}

	/* send the data as the control msg */
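	/* Queue the task before submitting the URB, so a completion that
	 * wakes dev->waitq before schedule_timeout() runs is not lost. */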
	prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
	dbg("%s - submit %c", __func__, dev->cntl_buffer[0]);
	retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL);
	if (retval >= 0)
		timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
	finish_wait(&dev->waitq, &wait);

	mutex_unlock(&dev->io_mutex);

	if (retval < 0) {
		err("%s - failed to send bulk msg, error %d", __func__, retval);
		goto error;
	}
	if (set && timeout)
		dev->bbu = c2;
	return timeout ? count : -EIO;

error:
	return retval;
}
Example No. 28
static int rs_ioctl(struct tty_struct *tty,
		    unsigned int cmd, unsigned long arg)
{
	struct serial_state *info = tty->driver_data;
	struct async_icount cprev, cnow;	/* kernel counter temps */
	void __user *argp = (void __user *)arg;
	unsigned long flags;
	DEFINE_WAIT(wait);
	int ret;

	if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
		return -ENODEV;

	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
		if (tty_io_error(tty))
			return -EIO;
	}

	switch (cmd) {
		case TIOCGSERIAL:
			return get_serial_info(tty, info, argp);
		case TIOCSSERIAL:
			return set_serial_info(tty, info, argp);
		case TIOCSERCONFIG:
			return 0;

		case TIOCSERGETLSR: /* Get line status register */
			return get_lsr_info(info, argp);

		case TIOCSERGSTRUCT:
			if (copy_to_user(argp,
					 info, sizeof(struct serial_state)))
				return -EFAULT;
			return 0;

		/*
		 * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
		 * - mask passed in arg for lines of interest
 		 *   (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
		 * Caller should use TIOCGICOUNT to see which one it was
		 */
		case TIOCMIWAIT:
			local_irq_save(flags);
			/* note the counters on entry */
			cprev = info->icount;
			local_irq_restore(flags);
			while (1) {
				prepare_to_wait(&info->tport.delta_msr_wait,
						&wait, TASK_INTERRUPTIBLE);
				local_irq_save(flags);
				cnow = info->icount; /* atomic copy */
				local_irq_restore(flags);
				if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && 
				    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
					ret = -EIO; /* no change => error */
					break;
				}
				if ( ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
				     ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
				     ((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
				     ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) {
					ret = 0;
					break;
				}
				schedule();
				/* see if a signal did it */
				if (signal_pending(current)) {
					ret = -ERESTARTSYS;
					break;
				}
				cprev = cnow;
			}
			finish_wait(&info->tport.delta_msr_wait, &wait);
			return ret;

		case TIOCSERGWILD:
		case TIOCSERSWILD:
			/* "setserial -W" is called in Debian boot */
			printk ("TIOCSER?WILD ioctl obsolete, ignored.\n");
			return 0;

		default:
			return -ENOIOCTLCMD;
	}
	return 0;
}
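
A closing note on the TIOCMIWAIT case above: it compares successive snapshots of the interrupt counters rather than sampling the modem lines directly, so a pulse shorter than one trip through the scheduler is still observed, because the interrupt handler already bumped the counter. From user space the ioctl is typically paired with TIOCGICOUNT; a hypothetical sketch, error handling trimmed, using identifiers from <sys/ioctl.h> and <linux/serial.h>:

#include <sys/ioctl.h>
#include <linux/serial.h>

static int wait_for_carrier_change(int fd)
{
	struct serial_icounter_struct ic;

	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) < 0)	/* block until a DCD edge */
		return -1;
	if (ioctl(fd, TIOCGICOUNT, &ic) < 0)		/* see which counter moved */
		return -1;
	return ic.dcd;					/* DCD edge count so far */
}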