Example #1
static int audio_sync(struct file *file)
{
	audio_state_t *state = file->private_data;
	audio_stream_t *s = state->output_stream;
	audio_buf_t *b;
	u_int shiftval = 0;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	DPRINTK("audio_sync\n");

	if (!(file->f_mode & FMODE_WRITE) || !s->buffers || s->mapped)
		return 0;

	/*
	 * Send current buffer if it contains data.  Be sure to send
	 * a full sample count.
	 */
	b = &s->buffers[s->usr_head];
	if (b->offset &= ~3) {
		down(&s->sem);
		/*
		 * HACK ALERT !
		 * To avoid increased complexity in the rest of the code
		 * where full fragment sizes are assumed, we cheat a little
		 * with the start pointer here and don't forget to restore
		 * it later.
		 */
		shiftval = s->fragsize - b->offset;
		b->offset = shiftval;
		b->dma_addr -= shiftval;
		s->bytecount -= shiftval;
		if (++s->usr_head >= s->nbfrags)
			s->usr_head = 0;
		local_irq_save(flags);
		s->pending_frags++;
		audio_process_dma(s);
		local_irq_restore(flags);
		up(&s->sem);	/* balance the down(&s->sem) taken above */
	}

	/* Let's wait for all buffers to complete */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&s->wq, &wait);
	while (s->pending_frags &&
	       s->dma_tail != s->usr_head &&
	       !signal_pending(current)) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&s->wq, &wait);

	/* undo the pointer hack above */
	if (shiftval) {
		local_irq_save(flags);
		b->dma_addr += shiftval;
		/* ensure sane DMA code behavior if not yet processed */
		if (b->offset != 0)
			b->offset = s->fragsize;
		local_irq_restore(flags);
	}

	return 0;
}
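
All of these examples revolve around the same manual wait-queue idiom: declare a wait-queue entry for the current task, enqueue it, mark the task sleeping *before* re-checking the wakeup condition (so a wakeup that fires in between merely makes the task runnable again instead of being lost), call schedule(), and tear everything down afterwards. A minimal, self-contained sketch of that pattern — my_wq and my_condition are illustrative placeholders, not names from any driver above:

static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical wait-queue head */
static int my_condition;		/* hypothetical wakeup condition */

static int wait_for_condition(void)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(&my_wq, &wait);
	for (;;) {
		/* mark ourselves sleeping BEFORE testing the condition */
		set_current_state(TASK_INTERRUPTIBLE);
		if (my_condition)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();	/* really sleep until woken */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_wq, &wait);
	return ret;
}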
Example #2
int gs_block_til_ready(void *port_, struct file * filp)
{
	struct gs_port *port = port_;
	DECLARE_WAITQUEUE(wait, current);
	int    retval;
	int    do_clocal = 0;
	int    CD;
	struct tty_struct *tty;
	unsigned long flags;

	func_enter ();

	if (!port) return 0;

	tty = port->tty;

	if (!tty) return 0;

	gs_dprintk (GS_DEBUG_BTR, "Entering gs_block_till_ready.\n"); 
	/*
	 * If the device is in the middle of being closed, then block
	 * until it's done, and then try again.
	 */
	if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
		interruptible_sleep_on(&port->close_wait);
		if (port->flags & ASYNC_HUP_NOTIFY)
			return -EAGAIN;
		else
			return -ERESTARTSYS;
	}

	gs_dprintk (GS_DEBUG_BTR, "after hung up\n"); 

	/*
	 * If non-blocking mode is set, or the port is not enabled,
	 * then make the check up front and then exit.
	 */
	if ((filp->f_flags & O_NONBLOCK) ||
	    (tty->flags & (1 << TTY_IO_ERROR))) {
		port->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	gs_dprintk (GS_DEBUG_BTR, "after nonblock\n"); 
 
	if (C_CLOCAL(tty))
		do_clocal = 1;

	/*
	 * Block waiting for the carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, port->count is dropped by one, so that
	 * rs_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */
	retval = 0;

	add_wait_queue(&port->open_wait, &wait);

	gs_dprintk (GS_DEBUG_BTR, "after add waitq.\n"); 
	spin_lock_irqsave(&port->driver_lock, flags);
	if (!tty_hung_up_p(filp)) {
		port->count--;
	}
	spin_unlock_irqrestore(&port->driver_lock, flags);
	port->blocked_open++;
	while (1) {
		CD = port->rd->get_CD (port);
		gs_dprintk (GS_DEBUG_BTR, "CD is now %d.\n", CD);
		set_current_state (TASK_INTERRUPTIBLE);
		if (tty_hung_up_p(filp) ||
		    !(port->flags & ASYNC_INITIALIZED)) {
			if (port->flags & ASYNC_HUP_NOTIFY)
				retval = -EAGAIN;
			else
				retval = -ERESTARTSYS;
			break;
		}
		if (!(port->flags & ASYNC_CLOSING) &&
		    (do_clocal || CD))
			break;
		gs_dprintk (GS_DEBUG_BTR, "signal_pending is now: %d (%lx)\n", 
		(int)signal_pending (current), *(long*)(&current->blocked)); 
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	gs_dprintk (GS_DEBUG_BTR, "Got out of the loop. (%d)\n",
		    port->blocked_open);
	set_current_state (TASK_RUNNING);
	remove_wait_queue(&port->open_wait, &wait);
	if (!tty_hung_up_p(filp)) {
		port->count++;
	}
	port->blocked_open--;
	if (retval)
		return retval;

	port->flags |= ASYNC_NORMAL_ACTIVE;
	func_exit ();
	return 0;
}			 
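
The interruptible_sleep_on() call near the top of this function is inherently racy: the ASYNC_CLOSING flag can be cleared and the wakeup delivered between the flag test and the sleep, leaving the task blocked for good. The race-free replacement re-tests the condition under the wait-queue discipline; a sketch of the equivalent check, reusing port->close_wait and port->flags from above:

	/* sleep until the port is no longer closing; returns nonzero
	 * (-ERESTARTSYS) if a signal interrupts the wait */
	if (wait_event_interruptible(port->close_wait,
				     !(port->flags & ASYNC_CLOSING)))
		return -ERESTARTSYS;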
Example #3
int
nvram_commit(void)
{
	char *buf;
	size_t erasesize, len, magic_len;
	unsigned int i;
	int ret;
	struct nvram_header *header;
	unsigned long flags;
	u_int32_t offset;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	struct erase_info erase;
	u_int32_t magic_offset = 0; /* Offset for writing MAGIC # */

	if (!nvram_mtd) {
		printk("nvram_commit: NVRAM not found\n");
		return -ENODEV;
	}

	if (in_interrupt()) {
		printk("nvram_commit: not committing in interrupt\n");
		return -EINVAL;
	}

	/* Backup sector blocks to be erased */
	erasesize = ROUNDUP(NVRAM_SPACE, nvram_mtd->erasesize);
	if (!(buf = kmalloc(erasesize, GFP_KERNEL))) {
		printk("nvram_commit: out of memory\n");
		return -ENOMEM;
	}

	down(&nvram_sem);

	if ((i = erasesize - NVRAM_SPACE) > 0) {
		offset = nvram_mtd->size - erasesize;
		len = 0;
		ret = MTD_READ(nvram_mtd, offset, i, &len, buf);
		if (ret || len != i) {
			printk("nvram_commit: read error ret = %d, len = %d/%d\n", ret, len, i);
			ret = -EIO;
			goto done;
		}
		header = (struct nvram_header *)(buf + i);
		magic_offset = i + ((void *)&header->magic - (void *)header);
	} else {
		offset = nvram_mtd->size - NVRAM_SPACE;
		header = (struct nvram_header *)buf;
		/* assign header before using it in the offset computation */
		magic_offset = ((void *)&header->magic - (void *)header);
	}

	/* clear the existing magic # to mark the NVRAM as unusable 
		 we can pull MAGIC bits low without erase	*/
	header->magic = NVRAM_CLEAR_MAGIC; /* All zeros magic */

	/* Unlock sector blocks (for Intel 28F320C3B flash) , 20060309 */
	if(nvram_mtd->unlock)
		nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic), 
									&magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: clear MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	header->magic = NVRAM_MAGIC; /* reset MAGIC before we regenerate the NVRAM,
																otherwise we'll have an incorrect CRC */
	/* Regenerate NVRAM */
	spin_lock_irqsave(&nvram_lock, flags);
	ret = _nvram_commit(header);
	spin_unlock_irqrestore(&nvram_lock, flags);
	if (ret)
		goto done;

	/* Erase sector blocks */
	init_waitqueue_head(&wait_q);
	for (; offset < nvram_mtd->size - NVRAM_SPACE + header->len; offset += nvram_mtd->erasesize) {
		erase.mtd = nvram_mtd;
		erase.addr = offset;
		erase.len = nvram_mtd->erasesize;
		erase.callback = erase_callback;
		erase.priv = (u_long) &wait_q;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&wait_q, &wait);

		/* Unlock sector blocks */
		if (nvram_mtd->unlock)
			nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

		if ((ret = MTD_ERASE(nvram_mtd, &erase))) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&wait_q, &wait);
			printk("nvram_commit: erase error\n");
			goto done;
		}

		/* Wait for erase to finish */
		schedule();
		remove_wait_queue(&wait_q, &wait);
	}

	/* Write partition up to end of data area */
	header->magic = NVRAM_INVALID_MAGIC; /* All ones magic */
	offset = nvram_mtd->size - erasesize;
	i = erasesize - NVRAM_SPACE + header->len;
	ret = MTD_WRITE(nvram_mtd, offset, i, &len, buf);
	if (ret || len != i) {
		printk("nvram_commit: write error\n");
		ret = -EIO;
		goto done;
	}

	/* Now mark the NVRAM in flash as "valid" by setting the correct
		 MAGIC # */
	header->magic = NVRAM_MAGIC;
	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic), 
									&magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: write MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	/*
	 * Reading a few bytes back here will put the device
	 * back to the correct mode on certain flashes */
	offset = nvram_mtd->size - erasesize;
	ret = MTD_READ(nvram_mtd, offset, 4, &len, buf);

 done:
	up(&nvram_sem);
	kfree(buf);

	return ret;
}
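
The expression ((void *)&header->magic - (void *)header) computes the byte offset of the magic field by hand; the standard offsetof() macro expresses the same thing without needing a valid header pointer at all. A sketch of the two assignments above rewritten that way (assuming only that struct nvram_header has a magic member, as the code implies):

#include <linux/stddef.h>	/* offsetof() */

	if ((i = erasesize - NVRAM_SPACE) > 0) {
		/* ... */
		magic_offset = i + offsetof(struct nvram_header, magic);
	} else {
		/* ... */
		magic_offset = offsetof(struct nvram_header, magic);
	}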
Example #4
static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user *ur = (struct region_info_user *) argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= mtd->size;
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		/* The below fields are obsolete */
		info.ecctype	= -1;
		info.eccsize	= 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if(!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			struct erase_info_user einfo;

			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (copy_from_user(&einfo, argp,
				    sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->addr = einfo.start;
			erase->len = einfo.length;
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;
		struct mtd_oob_buf __user *user_buf = argp;
	        uint32_t retlen;

		if(!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (ops.oobretlen > 0xFFFFFFFFU)
			ret = -EOVERFLOW;
		retlen = ops.oobretlen;
		if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;

	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.oobretlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
						    ops.oobretlen))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_HAVE_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}
#endif

	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* deliberate fall-through to MTD_MODE_NORMAL */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */
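
The FIXME inside the MEMERASE handler names a real hazard: with the wait_queue_head_t on the ioctl stack, an interruptible sleep could return early and let the erase callback write through a dangling pointer into a dead stack frame. A struct completion expresses the same one-shot handshake more directly; a hedged sketch (my_erase_callback is an illustrative name, and the wait stays uninterruptible for exactly the reason the FIXME gives):

#include <linux/completion.h>

static void my_erase_callback(struct erase_info *instr)
{
	complete((struct completion *)instr->priv);	/* signal the waiter */
}

	/* in the MEMERASE handler, replacing the wait-queue dance: */
	struct completion erase_done;

	init_completion(&erase_done);
	erase->callback = my_erase_callback;
	erase->priv = (unsigned long)&erase_done;

	ret = mtd->erase(mtd, erase);
	if (!ret) {
		wait_for_completion(&erase_done);	/* uninterruptible */
		ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}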
Example #5
/*
 * ------------------------------------------------------------
 * rs_open() and friends
 * ------------------------------------------------------------
 */
static int block_til_ready(struct tty_struct *tty, struct file * filp,
			   struct NIOS_serial *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int		retval;
	int		do_clocal = 0;

	/*
	 * If the device is in the middle of being closed, then block
	 * until it's done, and then try again.
	 */
	if (info->flags & S_CLOSING) {
		interruptible_sleep_on(&info->close_wait);
#ifdef SERIAL_DO_RESTART
		if (info->flags & S_HUP_NOTIFY)
			return -EAGAIN;
		else
			return -ERESTARTSYS;
#else
		return -EAGAIN;
#endif
	}

	/*
	 * If this is a callout device, then just make sure the normal
	 * device isn't being used.
	 */
	if (tty->driver.subtype == SERIAL_TYPE_CALLOUT) {
		if (info->flags & S_NORMAL_ACTIVE)
			return -EBUSY;
		if ((info->flags & S_CALLOUT_ACTIVE) &&
		    (info->flags & S_SESSION_LOCKOUT) &&
		    (info->session != current->session))
		    return -EBUSY;
		if ((info->flags & S_CALLOUT_ACTIVE) &&
		    (info->flags & S_PGRP_LOCKOUT) &&
		    (info->pgrp != current->pgrp))
		    return -EBUSY;
		info->flags |= S_CALLOUT_ACTIVE;
		return 0;
	}

	/*
	 * If non-blocking mode is set, or the port is not enabled,
	 * then make the check up front and then exit.
	 */
	if ((filp->f_flags & O_NONBLOCK) ||
	    (tty->flags & (1 << TTY_IO_ERROR))) {
		if (info->flags & S_CALLOUT_ACTIVE)
			return -EBUSY;
		info->flags |= S_NORMAL_ACTIVE;
		return 0;
	}

	if (info->flags & S_CALLOUT_ACTIVE) {
		if (info->normal_termios.c_cflag & CLOCAL)
			do_clocal = 1;
	} else {
		if (tty->termios->c_cflag & CLOCAL)
			do_clocal = 1;
	}

	/*
	 * Block waiting for the carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, info->count is dropped by one, so that
	 * rs_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */
	retval = 0;
	add_wait_queue(&info->open_wait, &wait);

	info->count--;
	info->blocked_open++;
	while (1) {
		cli();
		if (!(info->flags & S_CALLOUT_ACTIVE))
			NIOS_rtsdtr(info, 1);
		sti();
		current->state = TASK_INTERRUPTIBLE;
		if (tty_hung_up_p(filp) ||
		    !(info->flags & S_INITIALIZED)) {
#ifdef SERIAL_DO_RESTART
			if (info->flags & S_HUP_NOTIFY)
				retval = -EAGAIN;
			else
				retval = -ERESTARTSYS;
#else
			retval = -EAGAIN;
#endif
			break;
		}
		if (!(info->flags & S_CALLOUT_ACTIVE) &&
		    !(info->flags & S_CLOSING) && do_clocal)
			break;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&info->open_wait, &wait);
	if (!tty_hung_up_p(filp))
		info->count++;
	info->blocked_open--;

	if (retval)
		return retval;
	info->flags |= S_NORMAL_ACTIVE;
	return 0;
}
Example #6
static ssize_t cdata_write(struct file *filp, const char *buf, size_t size, loff_t *off)
{
  	struct cdata_t *cdata = (struct cdata_t *)filp->private_data;
	unsigned char *pixel;
	unsigned int i;
	unsigned int index;
	struct timer_list *timer;
	struct timer_list *sched;
	wait_queue_head_t *wq;
	wait_queue_t wait;
	unsigned long flags;	/* for spin_lock_irqsave() */
	
	mutex_lock(&cdata->mutex);

	spin_lock_irqsave(&cdata->lock, flags);
	pixel = cdata->buf;
	index = cdata->index;
	spin_unlock_irqrestore(&cdata->lock, flags);

	timer = &cdata->flush_timer;
	sched = &cdata->sched_timer;
	wq = &cdata->wq;

	mutex_unlock(&cdata->mutex);

	//printk(KERN_INFO "CDATA: In cdata_write()\n");
        	
	for (i = 0; i < size; i++){
	  if (index >= BUF_SIZE){

	     down_interruptible(&cdata->sem);
	     cdata->index = index;
	     up(&cdata->sem);
	     // Kernel scheduling
	     timer->expires = jiffies + 5*HZ;  // 1*HZ = 1  second
	     timer->function = flush_lcd;
	     timer->data = (unsigned long)cdata;
	     add_timer(timer);

	     // You can set up a single timer (timer) without sched:
	     // when it expires it calls flush_lcd(), and when flush_lcd()
	     // finishes the write I/O it wakes up the process itself.
	     // Then you do not need a 2nd timer for the state change.
	     sched->expires = jiffies + 10;  // 10 == 0.1 second
	     sched->function = cdata_wake_up;
	     sched->data = (unsigned long)cdata;
	     add_timer(sched);

	     wait.flags = 0;
	     wait.task = current;
	     add_wait_queue(wq,&wait); 
repeat:

	     // Process scheduling
	     current->state = TASK_INTERRUPTIBLE;
	     schedule();

	     // Each time the sched timer expires, re-read the index; if
	     // index != 0, flush_lcd() has not finished yet, so go back
	     // to sleep and wait for the next wakeup.
	     down_interruptible(&cdata->sem);
	     index = cdata->index;   // IMPORTANT: re-read the shared state; do not blindly assign index = 0 here
	     up(&cdata->sem);

	     if (index != 0)
		goto repeat;

	     remove_wait_queue(wq, &wait);
	     del_timer(sched);
          }

	  //fb[index] = buf[i];  // wrong!! cannot access user-space data directly
	  if (copy_from_user(&pixel[index], &buf[i], 1))
		return -EFAULT;	/* must check the copy result */
	  index++;
	}
	
	down_interruptible(&cdata->sem);
	cdata->index = index;
	up(&cdata->sem);
        //while(1) {
	  //current->state=TASK_UNINTERRUPTIBLE;
	//  current->state=TASK_INTERRUPTIBLE;
	//  schedule();
	//}
	return 0;
}
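
The repeat: label above hand-codes what wait_event_interruptible() provides as a one-liner: sleep on a wait-queue head until a condition holds, re-checking after every wakeup. A sketch of the equivalent wait, reusing cdata->wq and cdata->index from the example (the semaphore-protected re-read is folded into the condition for brevity):

	/* sleep until flush_lcd() has drained the buffer; returns
	 * -ERESTARTSYS if a signal arrives first */
	if (wait_event_interruptible(cdata->wq, cdata->index == 0))
		return -ERESTARTSYS;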
Example #7
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC | PF_NOFREEZE;


	daemonize("%sd", tr->name);


	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call 
	   exit_sighand() since that'll cause an oops when we finally
	   do exit. */
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

#ifdef CONFIG_MOT_WFN473
        set_user_nice(current, -20);
#endif

	spin_lock_irq(rq->queue_lock);
		
	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		req = elv_next_request(rq);

		if (!req) {
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_unlock_irq(rq->queue_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			spin_lock_irq(rq->queue_lock);

			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		down(&dev->sem);
		res = do_blktrans_request(tr, dev, req);
		up(&dev->sem);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}
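
daemonize() plus the manual sigfillset() block is the pre-kthread way of detaching a kernel thread from user space; the kthread API (which Example #15 below already uses via kthread_should_stop()) does both for you. A minimal sketch, with tr_thread_fn standing in for the request loop above:

#include <linux/kthread.h>

static int tr_thread_fn(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;

	/* kthreads start detached with all signals ignored; no
	 * daemonize()/sigfillset() boilerplate needed */
	while (!kthread_should_stop()) {
		/* ... fetch and process one request, or sleep ... */
	}
	return 0;
}

	/* at setup time: */
	struct task_struct *task;

	task = kthread_run(tr_thread_fn, tr, "%sd", tr->name);
	if (IS_ERR(task))
		return PTR_ERR(task);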
Example #8
int i810_release(struct inode *inode, struct file *filp)
{
	drm_file_t    *priv   = filp->private_data;
	drm_device_t  *dev;
	int	      retcode = 0;

	lock_kernel();
	dev    = priv->dev;
	DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n",
		  current->pid, dev->device, dev->open_count);

	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.pid == current->pid) {
	      	i810_reclaim_buffers(dev, priv->pid);
		DRM_ERROR("Process %d dead, freeing lock for context %d\n",
			  current->pid,
			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		drm_lock_free(dev,
			      &dev->lock.hw_lock->lock,
			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

				/* FIXME: may require heavy-handed reset of
                                   hardware at this point, possibly
                                   processed via a callback to the X
                                   server. */
	} else if (dev->lock.hw_lock) {
	   	/* The lock is required to reclaim buffers */
	   	DECLARE_WAITQUEUE(entry, current);
	   	add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  DRM_KERNEL_CONTEXT)) {
				dev->lock.pid	    = priv->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->total_locks);
				break;	/* Got lock */
			}
				/* Contention */
			atomic_inc(&dev->total_sleeps);
			schedule();
			if (signal_pending(current)) {
				retcode = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->lock.lock_queue, &entry);
	   	if(!retcode) {
		   	i810_reclaim_buffers(dev, priv->pid);
		   	drm_lock_free(dev, &dev->lock.hw_lock->lock,
				      DRM_KERNEL_CONTEXT);
		}
	}
	drm_fasync(-1, filp, 0);

	down(&dev->struct_sem);
	if (priv->prev) priv->prev->next = priv->next;
	else		dev->file_first	 = priv->next;
	if (priv->next) priv->next->prev = priv->prev;
	else		dev->file_last	 = priv->prev;
	up(&dev->struct_sem);

	drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
#if LINUX_VERSION_CODE < 0x020333
	MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
   	atomic_inc(&dev->total_close);
   	spin_lock(&dev->count_lock);
   	if (!--dev->open_count) {
	   	if (atomic_read(&dev->ioctl_count) || dev->blocked) {
		   	DRM_ERROR("Device busy: %d %d\n",
				  atomic_read(&dev->ioctl_count),
				  dev->blocked);
		   	spin_unlock(&dev->count_lock);
			unlock_kernel();
		   	return -EBUSY;
		}
	   	spin_unlock(&dev->count_lock);
		unlock_kernel();
		return i810_takedown(dev);
	}
	spin_unlock(&dev->count_lock);
	unlock_kernel();
	return retcode;
}
Example #9
static int vbi_workaround(struct saa7146_dev *dev)
{
	struct saa7146_vv *vv = dev->vv_data;

        u32          *cpu;
        dma_addr_t   dma_addr;
	
	int count = 0;
	int i;

	DECLARE_WAITQUEUE(wait, current);
	
	DEB_VBI(("dev:%p\n",dev));
	
	/* once again, a bug in the saa7146: the brs acquisition
	   is buggy and especially the BXO-counter does not work
	   as specified. there is this workaround, but please
	   don't let me explain it. ;-) */

	cpu = pci_alloc_consistent(dev->pci, 4096, &dma_addr);
	if (NULL == cpu)
		return -ENOMEM;

	/* setup some basic programming, just for the workaround */
	saa7146_write(dev, BASE_EVEN3,	dma_addr);
	saa7146_write(dev, BASE_ODD3,	dma_addr+vbi_pixel_to_capture);
	saa7146_write(dev, PROT_ADDR3,	dma_addr+4096);
	saa7146_write(dev, PITCH3, 	vbi_pixel_to_capture);
	saa7146_write(dev, BASE_PAGE3,	0x0);
	saa7146_write(dev, NUM_LINE_BYTE3, (2<<16)|((vbi_pixel_to_capture)<<0));
	saa7146_write(dev, MC2, MASK_04|MASK_20);

		/* load brs-control register */
		WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4));
		/* BXO = 1h, BRS to outbound */
		WRITE_RPS1(0xc000008c);   
	/* wait for vbi_a or vbi_b*/
	if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) {
		DEB_D(("...using port b\n"));
		WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_E_FID_B);
		WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_O_FID_B);
/*
		WRITE_RPS1(CMD_PAUSE | MASK_09);
*/
	} else {
		DEB_D(("...using port a\n"));
		WRITE_RPS1(CMD_PAUSE | MASK_10);
	}
		/* upload brs */
		WRITE_RPS1(CMD_UPLOAD | MASK_08);
		/* load brs-control register */
		WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4));
		/* BYO = 1, BXO = NQBIL (=1728 for PAL, for NTSC this is 858*2) - NumByte3 (=1440) = 288 */
		WRITE_RPS1(((1728-(vbi_pixel_to_capture)) << 7) | MASK_19);
		/* wait for brs_done */
		WRITE_RPS1(CMD_PAUSE | MASK_08);
		/* upload brs */
		WRITE_RPS1(CMD_UPLOAD | MASK_08);
		/* load video-dma3 NumLines3 and NumBytes3 */
		WRITE_RPS1(CMD_WR_REG | (1 << 8) | (NUM_LINE_BYTE3/4));
		/* dev->vbi_count*2 lines, 720 pixel (= 1440 Bytes) */
		WRITE_RPS1((2 << 16) | (vbi_pixel_to_capture));
		/* load brs-control register */
		WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4));
		/* Set BRS right: note: this is an experimental value for BXO (=> PAL!) */
		WRITE_RPS1((540 << 7) | (5 << 19));  // 5 == vbi_start  
		/* wait for brs_done */
		WRITE_RPS1(CMD_PAUSE | MASK_08);
		/* upload brs and video-dma3*/
		WRITE_RPS1(CMD_UPLOAD | MASK_08 | MASK_04);
		/* load mc2 register: enable dma3 */
		WRITE_RPS1(CMD_WR_REG | (1 << 8) | (MC1/4));
		WRITE_RPS1(MASK_20 | MASK_04);
		/* generate interrupt */
		WRITE_RPS1(CMD_INTERRUPT);
		/* stop rps1 */
		WRITE_RPS1(CMD_STOP);
	
	/* we have to do the workaround twice to be sure that
	   everything is ok */
	for(i = 0; i < 2; i++) {

		/* indicate to the irq handler that we do the workaround */
		saa7146_write(dev, MC2, MASK_31|MASK_15);

		saa7146_write(dev, NUM_LINE_BYTE3, (1<<16)|(2<<0));
		saa7146_write(dev, MC2, MASK_04|MASK_20);
	
		/* enable rps1 irqs */
		IER_ENABLE(dev,MASK_28);

		/* prepare to wait to be woken up by the irq-handler */
		add_wait_queue(&vv->vbi_wq, &wait);
		current->state = TASK_INTERRUPTIBLE;

		/* start rps1 to enable workaround */
		saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
		saa7146_write(dev, MC1, (MASK_13 | MASK_29));	
		
		schedule();

		DEB_VBI(("brs bug workaround %d/1.\n",i));
	
		remove_wait_queue(&vv->vbi_wq, &wait);
		current->state = TASK_RUNNING;

		/* disable rps1 irqs */
		IER_DISABLE(dev,MASK_28);

		/* stop video-dma3 */
		saa7146_write(dev, MC1, MASK_20);

		if(signal_pending(current)) {
		
			DEB_VBI(("aborted (rps:0x%08x).\n",saa7146_read(dev,RPS_ADDR1)));

			/* stop rps1 for sure */
			saa7146_write(dev, MC1, MASK_29);

			pci_free_consistent(dev->pci, 4096, cpu, dma_addr);
			return -EINTR;
		}
	}

	pci_free_consistent(dev->pci, 4096, cpu, dma_addr);
	return 0;
}
Example #10
/*
========================================================================
Routine Description:
    Close raxx interface.

Arguments:
	*net_dev			the raxx interface pointer

Return Value:
    0					Open OK
	otherwise			Open Fail

Note:
	1. if open fail, kernel will not call the close function.
	2. Free memory for
		(1) Mlme Memory Handler:		MlmeHalt()
		(2) TX & RX:					RTMPFreeTxRxRingMemory()
		(3) BA Reordering: 				ba_reordering_resource_release()
========================================================================
*/
int rt28xx_close(struct net_device *dev)
{
	struct net_device *net_dev = (struct net_device *)dev;
	struct rt_rtmp_adapter *pAd = NULL;
	BOOLEAN Cancelled;
	u32 i = 0;

#ifdef RTMP_MAC_USB
	DECLARE_WAIT_QUEUE_HEAD(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
#endif /* RTMP_MAC_USB // */

	GET_PAD_FROM_NET_DEV(pAd, net_dev);

	DBGPRINT(RT_DEBUG_TRACE, ("===> rt28xx_close\n"));

	Cancelled = FALSE;
	/* Sanity check for pAd */
	if (pAd == NULL)
		return 0;	/* close ok */

	{
#ifdef RTMP_MAC_PCI
		RTMPPCIeLinkCtrlValueRestore(pAd, RESTORE_CLOSE);
#endif /* RTMP_MAC_PCI // */

		/* If the driver doesn't wake up the firmware here, */
		/* NICLoadFirmware will hang forever when interface is up again. */
		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE)) {
			AsicForceWakeup(pAd, TRUE);
		}
#ifdef RTMP_MAC_USB
		RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_REMOVE_IN_PROGRESS);
#endif /* RTMP_MAC_USB // */

		MlmeRadioOff(pAd);
#ifdef RTMP_MAC_PCI
		pAd->bPCIclkOff = FALSE;
#endif /* RTMP_MAC_PCI // */
	}

	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	for (i = 0; i < NUM_OF_TX_RING; i++) {
		while (pAd->DeQueueRunning[i] == TRUE) {
			DBGPRINT(RT_DEBUG_TRACE,
				 ("Waiting for TxQueue[%d] done..........\n",
				  i));
			RTMPusecDelay(1000);
		}
	}

#ifdef RTMP_MAC_USB
	/* ensure there are no more active urbs. */
	add_wait_queue(&unlink_wakeup, &wait);
	pAd->wait = &unlink_wakeup;

	/* maybe wait for deletions to finish. */
	i = 0;
	/*while((i < 25) && atomic_read(&pAd->PendingRx) > 0) */
	while (i < 25) {
		unsigned long IrqFlags;

		RTMP_IRQ_LOCK(&pAd->BulkInLock, IrqFlags);
		if (pAd->PendingRx == 0) {
			RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);
			break;
		}
		RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);

		msleep(UNLINK_TIMEOUT_MS);	/*Time in millisecond */
		i++;
	}
	pAd->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
#endif /* RTMP_MAC_USB // */

	/* Stop Mlme state machine */
	MlmeHalt(pAd);

	/* Close net tasklets */
	RtmpNetTaskExit(pAd);

	{
		MacTableReset(pAd);
	}

	MeasureReqTabExit(pAd);
	TpcReqTabExit(pAd);

	/* Close kernel threads */
	RtmpMgmtTaskExit(pAd);

#ifdef RTMP_MAC_PCI
	{
		BOOLEAN brc;
		/*      unsigned long                   Value; */

		if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_ACTIVE)) {
			RTMP_ASIC_INTERRUPT_DISABLE(pAd);
		}
		/* Receive packets to clear DMA index after disable interrupt. */
		/*RTMPHandleRxDoneInterrupt(pAd); */
		/* put to radio off to save power when driver unload.  After radiooff, can't write /read register.  So need to finish all */
		/* register access before Radio off. */

		brc = RT28xxPciAsicRadioOff(pAd, RTMP_HALT, 0);

/*In  solution 3 of 3090F, the bPCIclkOff will be set to TRUE after calling RT28xxPciAsicRadioOff */
		pAd->bPCIclkOff = FALSE;

		if (brc == FALSE) {
			DBGPRINT(RT_DEBUG_ERROR,
				 ("%s call RT28xxPciAsicRadioOff fail!\n",
				  __func__));
		}
	}

/*
	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_ACTIVE))
	{
		RTMP_ASIC_INTERRUPT_DISABLE(pAd);
	}

	// Disable Rx, register value supposed will remain after reset
	NICIssueReset(pAd);
*/
#endif /* RTMP_MAC_PCI // */

	/* Free IRQ */
	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE)) {
#ifdef RTMP_MAC_PCI
		/* Deregister interrupt function */
		RtmpOSIRQRelease(net_dev);
#endif /* RTMP_MAC_PCI // */
		RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE);
	}
	/* Free Ring or USB buffers */
	RTMPFreeTxRxRingMemory(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	/* Free BA reorder resource */
	ba_reordering_resource_release(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_START_UP);

/*+++Modify by woody to solve the bulk fail+++*/
	{
	}

	DBGPRINT(RT_DEBUG_TRACE, ("<=== rt28xx_close\n"));
	return 0;		/* close ok */
}				/* End of rt28xx_close */
Example #11
static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
	unsigned long	  address;
	unsigned long	  length;
	int		  must_free = 0;
	int		  retcode   = 0;
	int		  i;
	int		  idx;
	drm_buf_t	  *buf;
	drm_buf_t	  *last_buf = NULL;
	drm_device_dma_t  *dma	    = dev->dma;
	DECLARE_WAITQUEUE(entry, current);

				/* Turn off interrupt handling */
	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current)) return -EINTR;
	}
	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
		while (!gamma_lock_take(&dev->lock.hw_lock->lock,
				      DRM_KERNEL_CONTEXT)) {
			schedule();
			if (signal_pending(current)) {
				clear_bit(0, &dev->interrupt_flag);
				return -EINTR;
			}
		}
		++must_free;
	}

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			continue;
		}
		buf = dma->buflist[ idx ];
		if (buf->pid != current->pid) {
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  current->pid, buf->pid);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->list != DRM_LIST_NONE) {
			DRM_ERROR("Process %d using %d's buffer on list %d\n",
				  current->pid, buf->pid, buf->list);
			retcode = -EINVAL;
			goto cleanup;
		}
				/* This isn't a race condition on
				   buf->list, since our concern is the
				   buffer reclaim during the time the
				   process closes the /dev/drm? handle, so
				   it can't also be doing DMA. */
		buf->list	  = DRM_LIST_PRIO;
		buf->used	  = d->send_sizes[i];
		buf->context	  = d->context;
		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
		address		  = (unsigned long)buf->address;
		length		  = buf->used;
		if (!length) {
			DRM_ERROR("0 length buffer\n");
		}
		if (buf->pending) {
			DRM_ERROR("Sending pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->waiting) {
			DRM_ERROR("Sending waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		buf->pending = 1;

		if (dev->last_context != buf->context
		    && !(dev->queuelist[buf->context]->flags
			 & _DRM_CONTEXT_PRESERVED)) {
			add_wait_queue(&dev->context_wait, &entry);
			current->state = TASK_INTERRUPTIBLE;
				/* PRE: dev->last_context != buf->context */
			DRM(context_switch)(dev, dev->last_context,
					    buf->context);
				/* POST: we will wait for the context
				   switch and will dispatch on a later call
				   when dev->last_context == buf->context.
				   NOTE WE HOLD THE LOCK THROUGHOUT THIS
				   TIME! */
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&dev->context_wait, &entry);
			if (signal_pending(current)) {
				retcode = -EINTR;
				goto cleanup;
			}
			if (dev->last_context != buf->context) {
				DRM_ERROR("Context mismatch: %d %d\n",
					  dev->last_context,
					  buf->context);
			}
		}

#if DRM_DMA_HISTOGRAM
		buf->time_queued     = get_cycles();
		buf->time_dispatched = buf->time_queued;
#endif
		gamma_dma_dispatch(dev, address, length);
		atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
		atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

		if (last_buf) {
			gamma_free_buffer(dev, last_buf);
		}
		last_buf = buf;
	}


cleanup:
	if (last_buf) {
		gamma_dma_ready(dev);
		gamma_free_buffer(dev, last_buf);
	}

	if (must_free && !dev->context_flag) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
	clear_bit(0, &dev->interrupt_flag);
	return retcode;
}
Example #12
#if 0	/* restored to balance the stray #endif below; this unfinished helper is compiled out */
static void ad1889_update_ptr(ad1889_dev_t *dev, int wake)
{
	ad1889_state_t *state;
	struct dmabuf *dmabuf;
	unsigned long hwptr;
	int diff;

	/* check ADC first */
	state = &dev->adc_state;
	dmabuf = &state->dmabuf;
	if (dmabuf->enable & ADC_RUNNING) {
		hwptr = ad1889_get_dma_addr(state);
		diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;

		dmabuf->hwptr = hwptr;
		dmabuf->total_bytes += diff;
		dmabuf->count += diff;
		if (dmabuf->count > dmabuf->dmasize)
			dmabuf->count = dmabuf->dmasize;

		if (dmabuf->mapped) {
			/* logical '&&', not bitwise '&': with '&' the
			 * comparison binds first and the test is wrong */
			if (wake && dmabuf->count >= dmabuf->fragsize)
				wake_up(&dmabuf->wait);
		} else {
			if (wake && dmabuf->count > 0)
				wake_up(&dmabuf->wait);
		}
	}

	/* check DAC */
	state = &dev->wav_state;
	dmabuf = &state->dmabuf;
	if (dmabuf->enable & DAC_RUNNING) {
XXX
	}
}
#endif

/************************* /dev/dsp interfaces ************************* */

static ssize_t ad1889_read(struct file *file, char __user *buffer, size_t count,
	loff_t *ppos)
{
	return 0;
}

static ssize_t ad1889_write(struct file *file, const char __user *buffer, size_t count,
	loff_t *ppos)
{
	ad1889_dev_t *dev = (ad1889_dev_t *)file->private_data;
	ad1889_state_t *state = &dev->state[AD_WAV_STATE];
	volatile struct dmabuf *dmabuf = &state->dmabuf;
	ssize_t ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	down(&state->sem);
#if 0
	if (dmabuf->mapped) {
		ret = -ENXIO;
		goto err1;
	}
#endif
	if (!access_ok(VERIFY_READ, buffer, count)) {
		ret = -EFAULT;
		goto err1;
	}

	add_wait_queue(&state->dmabuf.wait, &wait);

	/* start filling dma buffer.... */
	while (count > 0) {
		long rem;
		long cnt = count;
		unsigned long flags;

		for (;;) {
			long used_bytes;
			long timeout;	/* max time for DMA in jiffies */

			/* buffer is full if wr catches up to rd */
			spin_lock_irqsave(&state->card->lock, flags);
			used_bytes = dmabuf->wr_ptr - dmabuf->rd_ptr;
			timeout = (dmabuf->dma_len * HZ) / dmabuf->rate;
			spin_unlock_irqrestore(&state->card->lock, flags);

			/* adjust for buffer wrap around */
			used_bytes = (used_bytes + DMA_SIZE) & (DMA_SIZE - 1);

			/* If at least one page unused */
			if (used_bytes < (DMA_SIZE - 0x1000))
				break;

			/* dma buffer full */

			if (file->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				goto err2;
			}

			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(timeout + 1);
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				goto err2;
			}
		}

		/* watch out for wrapping around static buffer */
		spin_lock_irqsave(&state->card->lock, flags);
		rem = DMA_SIZE - dmabuf->wr_ptr;
		if (cnt > rem)
			cnt = rem;

		rem = dmabuf->wr_ptr;

		/* update dma pointers */
		dmabuf->wr_ptr += cnt;
		dmabuf->wr_ptr &= DMA_SIZE - 1;	/* wrap ptr if necessary */
		spin_unlock_irqrestore(&state->card->lock, flags);

		/* transfer unwrapped chunk */
		if (copy_from_user(dmabuf->rawbuf + rem, buffer, cnt)) {
			ret = -EFAULT;
			goto err2;
		}

		DBG("Writing 0x%lx bytes to +0x%lx\n", cnt, rem);

		/* update counters */
		count -= cnt;
		buffer += cnt;
		ret += cnt;

		/* we have something to play - go play it! */
		ad1889_trigger_playback(dev);
	}

err2:
	remove_wait_queue(&state->dmabuf.wait, &wait);
err1:
	up(&state->sem);
	return ret;
}
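
The inner for (;;) loop combines a timeout sleep with a signal check by hand; the timeout variant of the wait-event helpers rolls the set_current_state()/schedule_timeout()/signal_pending() triple into one call. A sketch under the assumption that the buffer-space test is factored into a helper (space_available() is an illustrative name, not part of the driver):

	/* sleep until at least one page of the DMA buffer is free, a
	 * signal arrives, or the timeout expires */
	long t = wait_event_interruptible_timeout(state->dmabuf.wait,
						  space_available(dmabuf),
						  timeout + 1);
	if (t == -ERESTARTSYS)
		return -ERESTARTSYS;	/* interrupted by a signal */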
Example #13
/*
 * For simplicity, we read one record in one system call and throw out
 * what does not fit. This means that the following does not work:
 *   dd if=/dbg/usbmon/0t bs=10
 * Also, we do not allow seeks and do not bother advancing the offset.
 */
static ssize_t mon_text_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct mon_reader_text *rp = file->private_data;
	struct mon_bus *mbus = rp->r.m_bus;
	DECLARE_WAITQUEUE(waita, current);
	struct mon_event_text *ep;
	int cnt, limit;
	char *pbuf;
	char udir, utype;
	int data_len, i;

	add_wait_queue(&rp->wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);
	while ((ep = mon_text_fetch(rp, mbus)) == NULL) {
		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->wait, &waita);
			return -EWOULDBLOCK;	/* Same as EAGAIN in Linux */
		}
		/*
		 * We do not count nwaiters, because ->release is supposed
		 * to be called when all openers are gone only.
		 */
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->wait, &waita);

	down(&rp->printf_lock);
	cnt = 0;
	pbuf = rp->printf_buf;
	limit = rp->printf_size;

	udir = usb_pipein(ep->pipe) ? 'i' : 'o';
	switch (usb_pipetype(ep->pipe)) {
	case PIPE_ISOCHRONOUS:	utype = 'Z'; break;
	case PIPE_INTERRUPT:	utype = 'I'; break;
	case PIPE_CONTROL:	utype = 'C'; break;
	default: /* PIPE_BULK */  utype = 'B';
	}
	cnt += snprintf(pbuf + cnt, limit - cnt,
	    "%lx %u %c %c%c:%03u:%02u",
	    ep->id, ep->tstamp, ep->type,
	    utype, udir, usb_pipedevice(ep->pipe), usb_pipeendpoint(ep->pipe));

	if (ep->setup_flag == 0) {   /* Setup packet is present and captured */
		cnt += snprintf(pbuf + cnt, limit - cnt,
		    " s %02x %02x %04x %04x %04x",
		    ep->setup[0],
		    ep->setup[1],
		    (ep->setup[3] << 8) | ep->setup[2],
		    (ep->setup[5] << 8) | ep->setup[4],
		    (ep->setup[7] << 8) | ep->setup[6]);
	} else if (ep->setup_flag != '-') { /* Unable to capture setup packet */
		cnt += snprintf(pbuf + cnt, limit - cnt,
		    " %c __ __ ____ ____ ____", ep->setup_flag);
	} else {                     /* No setup for this kind of URB */
		cnt += snprintf(pbuf + cnt, limit - cnt, " %d", ep->status);
	}
	cnt += snprintf(pbuf + cnt, limit - cnt, " %d", ep->length);

	if ((data_len = ep->length) > 0) {
		if (ep->data_flag == 0) {
			cnt += snprintf(pbuf + cnt, limit - cnt, " =");
			if (data_len >= DATA_MAX)
				data_len = DATA_MAX;
			for (i = 0; i < data_len; i++) {
				if (i % 4 == 0) {
					cnt += snprintf(pbuf + cnt, limit - cnt,
					    " ");
				}
				cnt += snprintf(pbuf + cnt, limit - cnt,
				    "%02x", ep->data[i]);
			}
			cnt += snprintf(pbuf + cnt, limit - cnt, "\n");
		} else {
			cnt += snprintf(pbuf + cnt, limit - cnt,
			    " %c\n", ep->data_flag);
		}
	} else {
		cnt += snprintf(pbuf + cnt, limit - cnt, "\n");
	}

	if (copy_to_user(buf, rp->printf_buf, cnt))
		cnt = -EFAULT;
	up(&rp->printf_lock);
	kmem_cache_free(rp->e_slab, ep);
	return cnt;
}
Example #14
int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
{
	struct ivtv *itv = s->itv;
	DECLARE_WAITQUEUE(wait, current);
	int cap_type;
	int stopmode;

	if (s->vdev == NULL)
		return -EINVAL;

	/* This function assumes that you are allowed to stop the
	   capture and that we are actually capturing */

	IVTV_DEBUG_INFO("Stop Capture\n");

	if (s->type == IVTV_DEC_STREAM_TYPE_VOUT)
		return 0;
	if (atomic_read(&itv->capturing) == 0)
		return 0;

	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_YUV:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_PCM:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_VBI:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_MPG:
	default:
		cap_type = 0;
		break;
	}

	/* choose the stop mode: at the end of the GOP for MPEG, immediately otherwise */
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
		stopmode = 0;
	} else {
		stopmode = 1;
	}

	/* Stop Capture */
	/* args: stop mode (0 = end of GOP, 1 = immediately), capture type, stream subtype */
	ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype);

	if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
			/* only wait for the EOS interrupt when stopping at a GOP boundary */
			unsigned long duration;
			unsigned long then = jiffies;

			add_wait_queue(&itv->eos_waitq, &wait);

			set_current_state(TASK_INTERRUPTIBLE);

			/* wait up to 2 seconds for the EOS interrupt */
			while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
				time_before(jiffies,
					    then + msecs_to_jiffies(2000))) {
				schedule_timeout(msecs_to_jiffies(10));
			}

			/* convert jiffies to milliseconds: multiplying by
			   ((1000 + HZ / 2) / HZ) rounds the constant so the
			   conversion stays reasonably accurate without a
			   runtime division */
			duration = ((1000 + HZ / 2) / HZ) * (jiffies - then);

			if (!test_bit(IVTV_F_I_EOS, &itv->i_flags)) {
				IVTV_DEBUG_WARN("%s: EOS interrupt not received! stopping anyway.\n", s->name);
				IVTV_DEBUG_WARN("%s: waited %lu ms.\n", s->name, duration);
			} else {
				IVTV_DEBUG_INFO("%s: EOS took %lu ms to occur.\n", s->name, duration);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&itv->eos_waitq, &wait);
			set_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
		}

		/* give the encoder 100 ms to settle */
		ivtv_msleep_timeout(100, 0);
	}

	atomic_dec(&itv->capturing);

	/* the stream is no longer capturing */
	clear_bit(IVTV_F_S_STREAMING, &s->s_flags);

	if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
		ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);

	if (atomic_read(&itv->capturing) > 0) {
		return 0;
	}

	cx2341x_handler_set_busy(&itv->cxhdl, 0);

	/* mask all capture interrupts and stop the DMA timer */
	ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
	del_timer(&itv->dma_timer);

	/* if a digitizer reset was pending, disable its event notification */
	if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
		/* disable the VIM reset event notification */
		/* (event 0, off, IRQ bit IVTV_IRQ_ENC_VIM_RST, no mailbox) */
		ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
		ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
	}

	/* unconditionally stop the raw capture as well, so the
	   encoder is left fully idle */
	ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 7);

	wake_up(&s->waitq);

	return 0;
}
Example #15
/*
 * This is a RT kernel thread that handles the ADC accesses
 * (mainly so we can use semaphores in the UCB1200 core code
 * to serialise accesses to the ADC).
 */
static int ucb1x00_thread(void *_ts)
{
	struct ucb1x00_ts *ts = _ts;
	DECLARE_WAITQUEUE(wait, current);
	int valid = 0;

	set_freezable();
	add_wait_queue(&ts->irq_wait, &wait);
	while (!kthread_should_stop()) {
		unsigned int x, y, p;
		signed long timeout;

		ts->restart = 0;

		ucb1x00_adc_enable(ts->ucb);

		x = ucb1x00_ts_read_xpos(ts);
		y = ucb1x00_ts_read_ypos(ts);
		p = ucb1x00_ts_read_pressure(ts);

		/*
		 * Switch back to interrupt mode.
		 */
		ucb1x00_ts_mode_int(ts);
		ucb1x00_adc_disable(ts->ucb);

		msleep(10);

		ucb1x00_enable(ts->ucb);


		if (ucb1x00_ts_pen_down(ts)) {
			set_current_state(TASK_INTERRUPTIBLE);

			ucb1x00_enable_irq(ts->ucb, UCB_IRQ_TSPX, machine_is_collie() ? UCB_RISING : UCB_FALLING);
			ucb1x00_disable(ts->ucb);

			/*
			 * If we spat out a valid sample set last time,
			 * spit out a "pen off" sample here.
			 */
			if (valid) {
				ucb1x00_ts_event_release(ts);
				valid = 0;
			}

			timeout = MAX_SCHEDULE_TIMEOUT;
		} else {
			ucb1x00_disable(ts->ucb);

			/*
			 * Filtering is policy.  Policy belongs in user
			 * space.  We therefore leave it to user space
			 * to do any filtering they please.
			 */
			if (!ts->restart) {
				ucb1x00_ts_evt_add(ts, p, x, y);
				valid = 1;
			}

			set_current_state(TASK_INTERRUPTIBLE);
			timeout = HZ / 100;
		}

		try_to_freeze();

		schedule_timeout(timeout);
	}

	remove_wait_queue(&ts->irq_wait, &wait);

	ts->rtask = NULL;
	return 0;
}
Example #16
static long mwave_ioctl(struct file *file, unsigned int iocmd,
							unsigned long ioarg)
{
	unsigned int retval = 0;
	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
	void __user *arg = (void __user *)ioarg;

	PRINTK_4(TRACE_MWAVE,
		"mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
		file, iocmd, (int) ioarg);

	switch (iocmd) {

		case IOCTL_MW_RESET:
			PRINTK_1(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_RESET"
				" calling tp3780I_ResetDSP\n");
			lock_kernel();
			retval = tp3780I_ResetDSP(&pDrvData->rBDData);
			unlock_kernel();
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_RESET"
				" retval %x from tp3780I_ResetDSP\n",
				retval);
			break;
	
		case IOCTL_MW_RUN:
			PRINTK_1(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_RUN"
				" calling tp3780I_StartDSP\n");
			lock_kernel();
			retval = tp3780I_StartDSP(&pDrvData->rBDData);
			unlock_kernel();
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_RUN"
				" retval %x from tp3780I_StartDSP\n",
				retval);
			break;
	
		case IOCTL_MW_DSP_ABILITIES: {
			MW_ABILITIES rAbilities;
	
			PRINTK_1(TRACE_MWAVE,
				"mwavedd::mwave_ioctl,"
				" IOCTL_MW_DSP_ABILITIES calling"
				" tp3780I_QueryAbilities\n");
			lock_kernel();
			retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
					&rAbilities);
			unlock_kernel();
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
				" retval %x from tp3780I_QueryAbilities\n",
				retval);
			if (retval == 0) {
				if( copy_to_user(arg, &rAbilities,
							sizeof(MW_ABILITIES)) )
					return -EFAULT;
			}
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
				" exit retval %x\n",
				retval);
		}
			break;
	
		case IOCTL_MW_READ_DATA:
		case IOCTL_MW_READCLEAR_DATA: {
			MW_READWRITE rReadData;
			unsigned short __user *pusBuffer = NULL;
	
			if( copy_from_user(&rReadData, arg,
						sizeof(MW_READWRITE)) )
				return -EFAULT;
			pusBuffer = (unsigned short __user *) (rReadData.pBuf);
	
			PRINTK_4(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
				" size %lx, ioarg %lx pusBuffer %p\n",
				rReadData.ulDataLength, ioarg, pusBuffer);
			lock_kernel();
			retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
					iocmd,
					pusBuffer,
					rReadData.ulDataLength,
					rReadData.usDspAddress);
			unlock_kernel();
		}
			break;
	
		case IOCTL_MW_READ_INST: {
			MW_READWRITE rReadData;
			unsigned short __user *pusBuffer = NULL;
	
			if( copy_from_user(&rReadData, arg,
						sizeof(MW_READWRITE)) )
				return -EFAULT;
			pusBuffer = (unsigned short __user *) (rReadData.pBuf);
	
			PRINTK_4(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
				" size %lx, ioarg %lx pusBuffer %p\n",
				rReadData.ulDataLength / 2, ioarg,
				pusBuffer);
			lock_kernel();
			retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
				iocmd, pusBuffer,
				rReadData.ulDataLength / 2,
				rReadData.usDspAddress);
			unlock_kernel();
		}
			break;
	
		case IOCTL_MW_WRITE_DATA: {
			MW_READWRITE rWriteData;
			unsigned short __user *pusBuffer = NULL;
	
			if( copy_from_user(&rWriteData, arg,
						sizeof(MW_READWRITE)) )
				return -EFAULT;
			pusBuffer = (unsigned short __user *) (rWriteData.pBuf);
	
			PRINTK_4(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
				" size %lx, ioarg %lx pusBuffer %p\n",
				rWriteData.ulDataLength, ioarg,
				pusBuffer);
			lock_kernel();
			retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
					iocmd, pusBuffer,
					rWriteData.ulDataLength,
					rWriteData.usDspAddress);
			unlock_kernel();
		}
			break;
	
		case IOCTL_MW_WRITE_INST: {
			MW_READWRITE rWriteData;
			unsigned short __user *pusBuffer = NULL;
	
			if( copy_from_user(&rWriteData, arg,
						sizeof(MW_READWRITE)) )
				return -EFAULT;
			pusBuffer = (unsigned short __user *)(rWriteData.pBuf);
	
			PRINTK_4(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
				" size %lx, ioarg %lx pusBuffer %p\n",
				rWriteData.ulDataLength, ioarg,
				pusBuffer);
			lock_kernel();
			retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
					iocmd, pusBuffer,
					rWriteData.ulDataLength,
					rWriteData.usDspAddress);
			unlock_kernel();
		}
			break;
	
		case IOCTL_MW_REGISTER_IPC: {
			unsigned int ipcnum = (unsigned int) ioarg;
	
			if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
				PRINTK_ERROR(KERN_ERR_MWAVE
						"mwavedd::mwave_ioctl:"
						" IOCTL_MW_REGISTER_IPC:"
						" Error: Invalid ipcnum %x\n",
						ipcnum);
				return -EINVAL;
			}
			PRINTK_3(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
				" ipcnum %x entry usIntCount %x\n",
				ipcnum,
				pDrvData->IPCs[ipcnum].usIntCount);

			lock_kernel();
			pDrvData->IPCs[ipcnum].bIsHere = FALSE;
			pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
			unlock_kernel();
	
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
				" ipcnum %x exit\n",
				ipcnum);
		}
			break;
	
		case IOCTL_MW_GET_IPC: {
			unsigned int ipcnum = (unsigned int) ioarg;
	
			if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
				PRINTK_ERROR(KERN_ERR_MWAVE
						"mwavedd::mwave_ioctl:"
						" IOCTL_MW_GET_IPC: Error:"
						" Invalid ipcnum %x\n", ipcnum);
				return -EINVAL;
			}
			PRINTK_3(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
				" ipcnum %x, usIntCount %x\n",
				ipcnum,
				pDrvData->IPCs[ipcnum].usIntCount);
	
			lock_kernel();
			if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
				DECLARE_WAITQUEUE(wait, current);

				PRINTK_2(TRACE_MWAVE,
					"mwavedd::mwave_ioctl, thread for"
					" ipc %x going to sleep\n",
					ipcnum);
				add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
				pDrvData->IPCs[ipcnum].bIsHere = TRUE;
				set_current_state(TASK_INTERRUPTIBLE);
				/* check whether an event was signalled by */
				/* the interrupt handler while we were gone */
				if (pDrvData->IPCs[ipcnum].usIntCount == 1) {	/* first int has occurred (race condition) */
					pDrvData->IPCs[ipcnum].usIntCount = 2;	/* first int has been handled */
					PRINTK_2(TRACE_MWAVE,
						"mwavedd::mwave_ioctl"
						" IOCTL_MW_GET_IPC ipcnum %x"
						" handling first int\n",
						ipcnum);
				} else {	/* either 1st int has not yet occurred, or we have already handled the first int */
					schedule();
					if (pDrvData->IPCs[ipcnum].usIntCount == 1) {
						pDrvData->IPCs[ipcnum].usIntCount = 2;
					}
					PRINTK_2(TRACE_MWAVE,
						"mwavedd::mwave_ioctl"
						" IOCTL_MW_GET_IPC ipcnum %x"
						" woke up and returning to"
						" application\n",
						ipcnum);
				}
				pDrvData->IPCs[ipcnum].bIsHere = FALSE;
				remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
				set_current_state(TASK_RUNNING);
				PRINTK_2(TRACE_MWAVE,
					"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC,"
					" returning thread for ipc %x"
					" processing\n",
					ipcnum);
			}
			unlock_kernel();
		}
			break;
	
		case IOCTL_MW_UNREGISTER_IPC: {
			unsigned int ipcnum = (unsigned int) ioarg;
	
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC"
				" ipcnum %x\n",
				ipcnum);
			if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
				PRINTK_ERROR(KERN_ERR_MWAVE
						"mwavedd::mwave_ioctl:"
						" IOCTL_MW_UNREGISTER_IPC:"
						" Error: Invalid ipcnum %x\n",
						ipcnum);
				return -EINVAL;
			}
			lock_kernel();
			if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
				pDrvData->IPCs[ipcnum].bIsEnabled = FALSE;
				if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) {
					wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue);
				}
			}
			unlock_kernel();
		}
			break;
	
		default:
			return -ENOTTY;
	} /* switch */

	PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);

	return retval;
}
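Both mwave IPC paths above follow the classic open-coded sleep sequence: register on the wait queue, mark the task TASK_INTERRUPTIBLE, re-test the condition, and only then call schedule(). The sketch below distills that ordering; struct demo, its ready flag, and demo_wait() are invented names for illustration, not part of the mwave driver.

#include <linux/wait.h>
#include <linux/sched.h>

/* Hypothetical device state, for illustration only. */
struct demo {
	wait_queue_head_t wq;
	int ready;
};

static int demo_wait(struct demo *d)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(&d->wq, &wait);
	for (;;) {
		/* Mark ourselves asleep *before* testing the condition.
		 * A wake_up() racing with the test merely resets us to
		 * TASK_RUNNING, so schedule() returns immediately and
		 * the wakeup is not lost. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (d->ready)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&d->wq, &wait);
	return ret;
}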
Beispiel #17
0
/**
 * ioctl() entry point from the I/O manager
 *
 */
int SEC2xIoctl(struct inode  *nd,
               struct file   *fil,
               unsigned int   code,
               unsigned long  param)
{
    int               status;
    RQ_TYPE           rqType;
    RMstatus          rmstat;
    RMexecMessage    *localMsg;
    wait_queue_head_t wq;
    wait_queue_t      wqent;
    volatile int      blockst;
    int               i;
    MALLOC_REQ       *mem;
    KBUF_MULTI       *kbm;
    PTRTYPE           memtype;


    status = SEC2_SUCCESS;

    switch (code)
    {

        /* standard request type */
        case IOCTL_PROC_REQ:
        case IOCTL_PROC_REQ_VIRTUAL:

            /* Check the param block */
            if (param == 0)
            {
                status = SEC2_INVALID_ADDRESS;
                break;
            }

            /* Figure out the memory type we have to deal with */
            /* If virtual specified, we have to figure out which type */
            if (code == IOCTL_PROC_REQ_VIRTUAL)
                if (fil == NULL)
                    memtype = PTR_KERNEL_VIRTUAL;
                else
                    memtype = PTR_USER_VIRTUAL;
            else
                memtype = PTR_LOGICAL;

            /* Allocate a request message from the "pool" */
            localMsg = getExecMsg();
            if (localMsg == NULL)
                return SEC2_INSUFFICIENT_REQS;

            /* Construct a list of descriptors from the input */
            status = constructDPDlist((GENERIC_REQ *)param, localMsg, memtype);
            if (status != SEC2_SUCCESS)
                return status;

            /* Set up completion handlers here. For a non-blocking */
            /* request like this, we have to do all releasing from */
            /* inside the handler itself, because this function    */
            /* may exit before the request completes               */
            localMsg->messageReleaseHandler = sec2xAsyncReleaseHandler;
            localMsg->releaseArgument       = localMsg;
            localMsg->buftype               = (uint8_t)memtype;

            /* Save off parameter block, process ID, signal values */
            localMsg->initialRQ             = (void *)param;
            if (fil != NULL)
            {
                localMsg->rqID                  = current->pid;
                localMsg->sigval[0]             = (int)(((GENERIC_REQ *)param)->notify);
                localMsg->sigval[1]             = (int)(((GENERIC_REQ *)param)->notify_on_error);
            }
            else
                localMsg->rqID = 0;

            /* Set the RM to processing our request */
            rmstat = xwcRMqueueRequest(ifctx, localMsg, &msgID);

            /* report error if the RM no can do... */
            if (rmstat)
            {
                printk("t23xsec2:ioctl() - error 0x%08x from RM, request not initiated\n", rmstat);
                status = SEC2_UNKNOWN_ERROR;
                releaseDPDlist(localMsg, memtype);
                freeExecMsg(localMsg);
            }

            /* Return status to the user and go home. If queueRequest()   */
            /* worked OK, now it's processing, and it's up to the release */
            /* handler to free resources and translate/report errors      */
            return status;
            break;


        /* blocking request types, should ONLY come from usermode */
        case IOCTL_PROC_REQ_BLOCK:
        case IOCTL_PROC_REQ_BLOCK_VIRTUAL:
            /* check the presence of a param block */
            if (param == 0)
            {
                status = SEC2_INVALID_ADDRESS;
                break;
            }

            if (fil == NULL) /* only valid from usermode */
            {
                status = SEC2_INVALID_REQUEST_MODE;
                break;
            }
            else
                rqType = RQ_USER_BLOCK;

            if (code == IOCTL_PROC_REQ_BLOCK_VIRTUAL)
                memtype = PTR_USER_VIRTUAL;
            else
                memtype = PTR_LOGICAL;

            /* Allocate a request message from the "pool" */
            localMsg = getExecMsg();
            if (localMsg == NULL)
                return SEC2_INSUFFICIENT_REQS;

            /* Construct a list of descriptors from the input */
            status = constructDPDlist((GENERIC_REQ *)param, localMsg, memtype);
            if (status != SEC2_SUCCESS)
                return status;

            /* Set up completion action & waitqueue entry for this request */
            blockst                         = BLOCK_RQ_PENDING;
            localMsg->messageReleaseHandler = sec2xBlockReleaseHandler;
            localMsg->releaseArgument       = (void *)&blockst;

            init_waitqueue_head(&wq);
            init_waitqueue_entry(&wqent, current);
            add_wait_queue(&wq, &wqent);
            localMsg->waitingTask = &wq;
            set_current_state(TASK_INTERRUPTIBLE);

            /* Pass constructed request off to the RM for processing */
            rmstat = xwcRMqueueRequest(ifctx, localMsg, &msgID);

            /* report error, else spin on the waitqueue */
            if (rmstat)
            {
                status = SEC2_UNKNOWN_ERROR; /* worst case error */
                if (rmstat == RM_NO_CAPABILITY)     /* maybe no such CHA? */
                    status = SEC2_INVALID_CHA_TYPE;
                set_current_state(TASK_RUNNING);
            }
            else
            {
                while(1)
                {
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (blockst == BLOCK_RQ_PENDING)
                        schedule();
                    else
                        break;
                }
                set_current_state(TASK_RUNNING);
            }

            /* Release the DPD list. */
            releaseDPDlist(localMsg, memtype);

            /* If error from the waitqueue hold, return it */
            if (status)
                return status;

            /* If no error from the waitqueue hold, then check our exec */
            /* message for error registers, and translate */
            if (!status)
                status = sec2xTranslateError(localMsg);

            /* all done with this exec msg */
            freeExecMsg(localMsg);
            return status;

            break;

        case IOCTL_GET_STATUS:
            status = SEC2_UNIMPLEMENTED;
            break;

        case IOCTL_RESERVE_CHANNEL_STATIC:
#ifdef UNIMPLEMENTED
            status = ReserveChannelStatic((unsigned char *)param,
                    (int)taskIdSelf());
#endif
            status = SEC2_UNIMPLEMENTED;
            break;

        case IOCTL_RELEASE_CHANNEL:
#ifdef UNIMPLEMENTED
            status = ReleaseChannel(*(unsigned char *)param, (int)taskIdSelf(), FALSE);
#endif
            status = SEC2_UNIMPLEMENTED;
            break;


        case IOCTL_MALLOC:
            if ((((MALLOC_REQ *)param)->ptr =
                        kmalloc(((MALLOC_REQ *)param)->sz, GFP_KERNEL | GFP_DMA)) == 0)
            {
                status = SEC2_MALLOC_FAILED;
                break;
            }
            memset(((MALLOC_REQ *)param)->ptr, 0, ((MALLOC_REQ *)param)->sz);
            status = SEC2_SUCCESS;
            break;

        case IOCTL_COPYFROM:
            mem = (MALLOC_REQ *)param;
            mem->pid = current->pid;
            /* check the copy result; a faulting user pointer maps to an
               address error rather than being silently ignored */
            if (copy_from_user(mem->to, mem->from, mem->sz))
                status = SEC2_INVALID_ADDRESS;
            else
                status = SEC2_SUCCESS;
            break;

        case IOCTL_COPYTO:
            mem = (MALLOC_REQ *)param;
            mem->pid = current->pid;
            if (copy_to_user(mem->to, mem->from, mem->sz))
                status = SEC2_INVALID_ADDRESS;
            else
                status = SEC2_SUCCESS;
            break;

        case IOCTL_FREE:
            kfree((void *)param);
            break;

        case IOCTL_KBUF_MULTI_PUSH:
            kbm = (KBUF_MULTI *)param;
            for (i = 0; i < MAX_PAIRS; i++)
            {
                if ((kbm->pair[i].local != NULL) &&
                        (kbm->pair[i].kbuf != NULL) &&
                        (kbm->pair[i].size > 0))
                    if (copy_from_user(kbm->pair[i].kbuf,   /* destination */
                            kbm->pair[i].local,             /* source      */
                            kbm->pair[i].size))
                        status = SEC2_INVALID_ADDRESS;
            }
            break;

        case IOCTL_KBUF_MULTI_PULL:
            kbm = (KBUF_MULTI *)param;
            for (i = 0; i < MAX_PAIRS; i++)
            {
                if ((kbm->pair[i].local != NULL) &&
                        (kbm->pair[i].kbuf != NULL) &&
                        (kbm->pair[i].size > 0))
                    if (copy_to_user(kbm->pair[i].local,    /* destination */
                            kbm->pair[i].kbuf,              /* source      */
                            kbm->pair[i].size))
                        status = SEC2_INVALID_ADDRESS;
            }
            break;

        case IOCTL_KBUF_MULTI_ALLOC:
            kbm = (KBUF_MULTI *)param;
            for (i = 0; i < MAX_PAIRS; i++)
            {
                /* If size spec'ed nonzero, allocate buffer */
                if (kbm->pair[i].size)
                {
                    kbm->pair[i].kbuf = kmalloc(kbm->pair[i].size, GFP_KERNEL | GFP_DMA);
                    /* If allocate error, unwind any other allocs and exit */
                    if (kbm->pair[i].kbuf == NULL)
                    {
                        while (i >= 0)
                        {
                            if (kbm->pair[i].kbuf != NULL)
                                kfree(kbm->pair[i].kbuf);
                            i--;
                        }
                        status = SEC2_MALLOC_FAILED;
                        break;
                    } /* end allocation error */
                } /* end if (nonzero size) */
            }
            /* status is still SEC2_SUCCESS here unless the unwind path
             * above set SEC2_MALLOC_FAILED; don't clobber the error by
             * reassigning it unconditionally. */
            break;


        case IOCTL_KBUF_MULTI_FREE:
            kbm = (KBUF_MULTI *)param;
            for (i = 0; i < MAX_PAIRS; i++)
                if (kbm->pair[i].kbuf != NULL)
                    kfree(kbm->pair[i].kbuf);
            break;



        case IOCTL_INSTALL_AUX_HANDLER:
#ifdef UNIMPLEMENTED
            chan = ((AUX_HANDLER_SPEC *)param)->channel;
            /* see if requested channel is valid */
            if ((chan <= 0) || (chan > TotalChannels))
            {
                status = SEC2_INVALID_CHANNEL;
                break;
            }

            /* channel is valid, is it reserved (and not busy)? */
            if (ChannelAssignments[chan - 1].assignment != CHANNEL_STATIC_ASSIGNED)
            {
                status = SEC2_CHANNEL_NOT_AVAILABLE;
                break;
            }


            /* Channel spec is in range, and is reserved for use. Notice that */
            /* we really don't have any good means to identify the requestor  */
            /* for validity (could be the kernel itself), so will assume that */
            /* channel ownership is not an issue. Now register/remove the     */
            /* handler                                                        */

            ChannelAssignments[chan - 1].auxHandler = ((AUX_HANDLER_SPEC *)param)->auxHandler;
#endif
            status = SEC2_UNIMPLEMENTED;
            break;

    } /* switch (code) */


    return status;
}
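The blocking branch of SEC2xIoctl hand-rolls a one-shot completion: an on-stack wait queue head plus a volatile flag that the release handler flips before waking the sleeper. The same handshake can be written with <linux/completion.h>; this is a sketch under the assumption that the release handler can be handed the completion pointer as its argument, and queue_request() is a hypothetical stand-in for xwcRMqueueRequest().

#include <linux/completion.h>

extern void queue_request(void (*handler)(void *), void *arg); /* hypothetical */

/* Hypothetical release handler: called when the request finishes;
 * arg is the completion armed below. */
static void demo_release_handler(void *arg)
{
	complete((struct completion *)arg);
}

static int demo_blocking_request(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	queue_request(demo_release_handler, &done);
	if (wait_for_completion_interruptible(&done))
		return -ERESTARTSYS;	/* interrupted by a signal */
	return 0;			/* request completed */
}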
Beispiel #18
0
void
cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link)
{
        add_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
}
Beispiel #19
0
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru)
{
	int flag, retval;
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;

	if (options & ~(WNOHANG|WUNTRACED|__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	add_wait_queue(&current->wait_chldexit,&wait);
repeat:
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
	 	for (p = tsk->p_cptr ; p ; p = p->p_osptr) {
			if (pid>0) {
				if (p->pid != pid)
					continue;
			} else if (!pid) {
				if (p->pgrp != current->pgrp)
					continue;
			} else if (pid != -1) {
				if (p->pgrp != -pid)
					continue;
			}
			/* Wait for all children (clone and not) if __WALL is set;
			 * otherwise, wait for clone children *only* if __WCLONE is
			 * set; otherwise, wait for non-clone children *only*.  (Note:
			 * A "clone" child here is one that reports to its parent
			 * using a signal other than SIGCHLD.) */
			if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
			    && !(options & __WALL))
				continue;
			flag = 1;
			switch (p->state) {
			case TASK_STOPPED:
				if (!p->exit_code)
					continue;
				if (!(options & WUNTRACED) && !(p->ptrace & PT_PTRACED))
					continue;
				read_unlock(&tasklist_lock);
				retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; 
				if (!retval && stat_addr) 
					retval = put_user((p->exit_code << 8) | 0x7f, stat_addr);
				if (!retval) {
					p->exit_code = 0;
					retval = p->pid;
				}
				goto end_wait4;
			case TASK_ZOMBIE:
				current->times.tms_cutime += p->times.tms_utime + p->times.tms_cutime;
				current->times.tms_cstime += p->times.tms_stime + p->times.tms_cstime;
				read_unlock(&tasklist_lock);
				retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
				if (!retval && stat_addr)
					retval = put_user(p->exit_code, stat_addr);
				if (retval)
					goto end_wait4; 
				retval = p->pid;
				if (p->p_opptr != p->p_pptr) {
					write_lock_irq(&tasklist_lock);
					REMOVE_LINKS(p);
					p->p_pptr = p->p_opptr;
					SET_LINKS(p);
					do_notify_parent(p, SIGCHLD);
					write_unlock_irq(&tasklist_lock);
				} else
					release_task(p);
				goto end_wait4;
			default:
				continue;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
	} while (tsk != current);
	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end_wait4;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end_wait4;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end_wait4:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->wait_chldexit,&wait);
	return retval;
}
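A side note on this 2.4-era code: it assigns current->state directly. Later kernels spell this set_current_state(), which wraps the store in a memory barrier so it cannot be reordered past the condition checks that follow on SMP. The top of the loop in the modern spelling:

	add_wait_queue(&current->wait_chldexit, &wait);
repeat:
	flag = 0;
	set_current_state(TASK_INTERRUPTIBLE);	/* store + SMP memory barrier */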
Beispiel #20
0
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int len = skb->len;
	int protocol = ssk->protocol;
	long timeo;
        DECLARE_WAITQUEUE(wait, current);

	timeo = sock_sndtimeo(ssk, nonblock);

retry:
	sk = netlink_lookup(protocol, pid);
	if (sk == NULL)
		goto no_dst;

#ifdef NL_EMULATE_DEV
	if (sk->protinfo.af_netlink->handler) {
		skb_orphan(skb);
		len = sk->protinfo.af_netlink->handler(protocol, skb);
		sock_put(sk);
		return len;
	}
#endif

	if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
	    test_bit(0, &sk->protinfo.af_netlink->state)) {
		if (!timeo) {
			if (ssk->protinfo.af_netlink->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&sk->protinfo.af_netlink->wait, &wait);

		if ((atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
		    test_bit(0, &sk->protinfo.af_netlink->state)) &&
		    !sk->dead)
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&sk->protinfo.af_netlink->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		goto retry;
	}

	skb_orphan(skb);
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->receive_queue, skb);
	sk->data_ready(sk, len);
	sock_put(sk);
	return len;

no_dst:
	kfree_skb(skb);
	return -ECONNREFUSED;
}
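netlink_unicast bounds its sleep with schedule_timeout(), which returns the number of jiffies left when the task wakes early; feeding that value back in on the retry keeps the total wait bounded across loop iterations. A minimal sketch of the idiom (the demo names are invented):

#include <linux/wait.h>
#include <linux/sched.h>

/* Sleep until *flag becomes nonzero, a signal arrives, or `timeo`
 * jiffies elapse.  Returns the jiffies remaining (0 on timeout). */
static long demo_wait_timeout(wait_queue_head_t *wq, int *flag, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(wq, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (*flag || !timeo || signal_pending(current))
			break;
		timeo = schedule_timeout(timeo);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
	return timeo;
}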
Beispiel #21
0
/* globalfifo write operation */
static ssize_t globalfifo_write(struct file *filp, const char __user *buf,
  size_t count, loff_t *ppos)
{
  struct globalfifo_dev *dev = filp->private_data; //obtain the device structure pointer
  int ret;
  DECLARE_WAITQUEUE(wait, current); //define a wait queue entry

  down(&dev->sem); //acquire the semaphore
  add_wait_queue(&dev->w_wait, &wait); //join the write wait queue

  /* wait for the FIFO to become non-full */
  if (dev->current_len == GLOBALFIFO_SIZE)
  {
    if (filp->f_flags & O_NONBLOCK)
    //non-blocking access
    {
      ret = -EAGAIN;
      goto out;
    }
    __set_current_state(TASK_INTERRUPTIBLE); //mark the process as sleeping
    up(&dev->sem);

    schedule(); //let other processes run
    if (signal_pending(current))
    //woken up by a signal
    {
      ret = -ERESTARTSYS;
      goto out2;
    }

    down(&dev->sem); //re-acquire the semaphore
  }

  /* copy from user space into kernel space */
  if (count > GLOBALFIFO_SIZE - dev->current_len)
    count = GLOBALFIFO_SIZE - dev->current_len;

  if (copy_from_user(dev->mem + dev->current_len, buf, count))
  {
    ret = -EFAULT;
    goto out;
  }
  else
  {
    dev->current_len += count;
    printk(KERN_INFO "written %zu byte(s), current_len: %d\n", count,
      dev->current_len);

    wake_up_interruptible(&dev->r_wait); //wake up the read wait queue
    /* emit the asynchronous read signal */
    if (dev->async_queue)
       kill_fasync(&dev->async_queue, SIGIO, POLL_IN);

    ret = count;
  }

  out: up(&dev->sem); //release the semaphore
  out2: remove_wait_queue(&dev->w_wait, &wait); //leave the write wait queue
  set_current_state(TASK_RUNNING);
  return ret;
}
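Note the ordering in the blocking path above: __set_current_state(TASK_INTERRUPTIBLE) comes before up(&dev->sem), so a reader that drains the FIFO and calls wake_up_interruptible() in the window between up() and schedule() simply resets the writer to TASK_RUNNING, and schedule() returns at once. Reversing steps 2 and 3 would open a lost-wakeup window. The skeleton, using the globalfifo names:

add_wait_queue(&dev->w_wait, &wait);     /* 1: visible to wakers        */
__set_current_state(TASK_INTERRUPTIBLE); /* 2: mark ourselves asleep    */
up(&dev->sem);                           /* 3: let consumers run        */
schedule();                              /* 4: a wake_up() between 3 and
                                               4 is not lost, it resets
                                               the task state           */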
Beispiel #22
0
static
int axusbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	struct driver_info	*info = dev->driver_info;
	int			temp;
	int			retval;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
#else
	DECLARE_WAIT_QUEUE_HEAD (unlink_wakeup);
#endif
	DECLARE_WAITQUEUE (wait, current);

	netif_stop_queue (net);

	if (netif_msg_ifdown (dev))
		devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
			dev->stats.rx_packets, dev->stats.tx_packets,
			dev->stats.rx_errors, dev->stats.tx_errors
			);

	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0 && netif_msg_ifdown(dev))
			devinfo(dev,
				"stop fail (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
	}

	if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) {
		/* ensure there are no more active urbs */
		add_wait_queue(&unlink_wakeup, &wait);
		dev->wait = &unlink_wakeup;
		temp = unlink_urbs(dev, &dev->txq) +
			unlink_urbs(dev, &dev->rxq);

		/* maybe wait for deletions to finish. */
		while (!skb_queue_empty(&dev->rxq)
				&& !skb_queue_empty(&dev->txq)
				&& !skb_queue_empty(&dev->done)) {
			msleep(UNLINK_TIMEOUT_MS);
			if (netif_msg_ifdown(dev))
				devdbg(dev, "waited for %d urb completions",
					temp);
		}
		dev->wait = NULL;
		remove_wait_queue(&unlink_wakeup, &wait);
	}

	usb_kill_urb(dev->interrupt);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync (&dev->delay);
	tasklet_kill (&dev->bh);

	return 0;
}
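The version guard around the wait queue head exists because DECLARE_WAIT_QUEUE_HEAD_ONSTACK only appeared later (the code assumes after 2.6.18): it initialises the head at run time, which is what lockdep needs for objects living on the stack. Usage is otherwise identical to a static head; a brief sketch mirroring the drain loop above, with demo_busy() and demo_wait_ptr as hypothetical stand-ins:

#include <linux/wait.h>
#include <linux/delay.h>

static wait_queue_head_t *demo_wait_ptr;	/* seen by the completion path */
extern int demo_busy(void);			/* hypothetical predicate */

static void demo_drain(void)
{
	/* Safe on the stack: initialised at run time for lockdep. */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&unlink_wakeup, &wait);
	demo_wait_ptr = &unlink_wakeup;	/* publish for wake_up() callers */
	while (demo_busy())
		msleep(10);		/* completion path does the wake_up() */
	demo_wait_ptr = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}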
Beispiel #23
0
/**
 * n_hdlc_tty_write - write a single frame of data to device
 * @tty	- pointer to associated tty device instance data
 * @file - pointer to file object data
 * @data - pointer to transmit data (one frame)
 * @count - size of transmit frame in bytes
 * 		
 * Returns the number of bytes written (or error code).
 */
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
			    const unsigned char *data, size_t count)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
	int error = 0;
	DECLARE_WAITQUEUE(wait, current);
	struct n_hdlc_buf *tbuf;

	if (debuglevel >= DEBUG_LEVEL_INFO)	
		printk("%s(%d)n_hdlc_tty_write() called count=%Zd\n",
			__FILE__,__LINE__,count);
		
	/* Verify pointers */
	if (!n_hdlc)
		return -EIO;

	if (n_hdlc->magic != HDLC_MAGIC)
		return -EIO;

	/* verify frame size */
	if (count > maxframe ) {
		if (debuglevel & DEBUG_LEVEL_INFO)
			printk (KERN_WARNING
				"n_hdlc_tty_write: truncating user packet "
				"from %lu to %d\n", (unsigned long) count,
				maxframe );
		count = maxframe;
	}
	
	tty_lock();

	add_wait_queue(&tty->write_wait, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	
	/* Allocate transmit buffer */
	/* sleep until transmit buffer available */		
	while (!(tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list))) {
		if (file->f_flags & O_NONBLOCK) {
			error = -EAGAIN;
			break;
		}
		schedule();
			
		n_hdlc = tty2n_hdlc (tty);
		if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC || 
		    tty != n_hdlc->tty) {
			printk("n_hdlc_tty_write: %p invalid after wait!\n", n_hdlc);
			error = -EIO;
			break;
		}
			
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);

	if (!error) {		
		/* Retrieve the user's buffer */
		memcpy(tbuf->buf, data, count);

		/* Send the data */
		tbuf->count = error = count;
		n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
		n_hdlc_send_frames(n_hdlc,tty);
	}
	tty_unlock();
	return error;
	
}	/* end of n_hdlc_tty_write() */
Beispiel #24
0
int rt28xx_close(IN PNET_DEV dev)
{
	struct net_device * net_dev = (struct net_device *)dev;
    RTMP_ADAPTER	*pAd = net_dev->ml_priv;
	BOOLEAN 		Cancelled = FALSE;
	UINT32			i = 0;
#ifdef RT2870
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
#endif

    DBGPRINT(RT_DEBUG_TRACE, ("===> rt28xx_close\n"));

	if (pAd == NULL)
		return 0;

	{
#ifdef RT2860
		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE) ||
			RTMP_SET_PSFLAG(pAd, fRTMP_PS_SET_PCI_CLK_OFF_COMMAND) ||
			RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_IDLE_RADIO_OFF))
#endif
#ifdef RT2870
		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE))
#endif
        {
#ifdef RT2860
		    AsicForceWakeup(pAd, RTMP_HALT);
#endif
#ifdef RT2870
		    AsicForceWakeup(pAd, TRUE);
#endif
        }

		if (INFRA_ON(pAd) &&
			(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST)))
		{
			MLME_DISASSOC_REQ_STRUCT	DisReq;
			MLME_QUEUE_ELEM *MsgElem = (MLME_QUEUE_ELEM *) kmalloc(sizeof(MLME_QUEUE_ELEM), MEM_ALLOC_FLAG);

			COPY_MAC_ADDR(DisReq.Addr, pAd->CommonCfg.Bssid);
			DisReq.Reason =  REASON_DEAUTH_STA_LEAVING;

			MsgElem->Machine = ASSOC_STATE_MACHINE;
			MsgElem->MsgType = MT2_MLME_DISASSOC_REQ;
			MsgElem->MsgLen = sizeof(MLME_DISASSOC_REQ_STRUCT);
			NdisMoveMemory(MsgElem->Msg, &DisReq, sizeof(MLME_DISASSOC_REQ_STRUCT));

			
			pAd->MlmeAux.AutoReconnectSsidLen= 32;
			NdisZeroMemory(pAd->MlmeAux.AutoReconnectSsid, pAd->MlmeAux.AutoReconnectSsidLen);

			pAd->Mlme.CntlMachine.CurrState = CNTL_WAIT_OID_DISASSOC;
			MlmeDisassocReqAction(pAd, MsgElem);
			kfree(MsgElem);

			RTMPusecDelay(1000);
		}

#ifdef RT2870
	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_REMOVE_IN_PROGRESS);
#endif 

#ifdef CCX_SUPPORT
		RTMPCancelTimer(&pAd->StaCfg.LeapAuthTimer, &Cancelled);
#endif

		RTMPCancelTimer(&pAd->StaCfg.StaQuickResponeForRateUpTimer, &Cancelled);
		RTMPCancelTimer(&pAd->StaCfg.WpaDisassocAndBlockAssocTimer, &Cancelled);

		MlmeRadioOff(pAd);
#ifdef RT2860
		pAd->bPCIclkOff = FALSE;
#endif
	}

	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	for (i = 0 ; i < NUM_OF_TX_RING; i++)
	{
		while (pAd->DeQueueRunning[i] == TRUE)
		{
			printk("Waiting for TxQueue[%d] done..........\n", i);
			RTMPusecDelay(1000);
		}
	}

#ifdef RT2870
	/* wait for any pending bulk-in URBs to drain before tearing down */
	add_wait_queue (&unlink_wakeup, &wait);
	pAd->wait = &unlink_wakeup;

	i = 0;
	while(i < 25)
	{
		unsigned long IrqFlags;

		RTMP_IRQ_LOCK(&pAd->BulkInLock, IrqFlags);
		if (pAd->PendingRx == 0)
		{
			RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);
			break;
		}
		RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);

		msleep(UNLINK_TIMEOUT_MS);	
		i++;
	}
	pAd->wait = NULL;
	remove_wait_queue (&unlink_wakeup, &wait);
#endif

#ifdef RT2870
	RT2870_TimerQ_Exit(pAd);
	RT28xxThreadTerminate(pAd);
#endif

	MlmeHalt(pAd);

	kill_thread_task(pAd);

	MacTableReset(pAd);

	MeasureReqTabExit(pAd);
	TpcReqTabExit(pAd);

#ifdef RT2860
	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_ACTIVE))
	{
		NICDisableInterrupt(pAd);
	}

	NICIssueReset(pAd);

	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE))
	{
		RT28XX_IRQ_RELEASE(net_dev)
		RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE);
	}
#endif

	RTMPFreeTxRxRingMemory(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	ba_reordering_resource_release(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_START_UP);

	return 0; 
} 
Beispiel #25
0
/**
 *  lbs_thread - handles the major jobs in the LBS driver.
 *  It handles all events generated by firmware, RX data received
 *  from firmware and TX data sent from kernel.
 *
 *  @data:	A pointer to &lbs_thread structure
 *  returns:	0
 */
static int lbs_thread(void *data)
{
	struct net_device *dev = data;
	struct lbs_private *priv = dev->ml_priv;
	wait_queue_t wait;

	lbs_deb_enter(LBS_DEB_THREAD);

	init_waitqueue_entry(&wait, current);

	for (;;) {
		int shouldsleep;
		u8 resp_idx;

		lbs_deb_thread("1: currenttxskb %p, dnld_sent %d\n",
				priv->currenttxskb, priv->dnld_sent);

		add_wait_queue(&priv->waitq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irq(&priv->driver_lock);

		if (kthread_should_stop())
			shouldsleep = 0;	/* Bye */
		else if (priv->surpriseremoved)
			shouldsleep = 1;	/* We need to wait until we're _told_ to die */
		else if (priv->psstate == PS_STATE_SLEEP)
			shouldsleep = 1;	/* Sleep mode. Nothing we can do till it wakes */
		else if (priv->cmd_timed_out)
			shouldsleep = 0;	/* Command timed out. Recover */
		else if (!priv->fw_ready)
			shouldsleep = 1;	/* Firmware not ready. We're waiting for it */
		else if (priv->dnld_sent)
			shouldsleep = 1;	/* Something is en route to the device already */
		else if (priv->tx_pending_len > 0)
			shouldsleep = 0;	/* We've a packet to send */
		else if (priv->resp_len[priv->resp_idx])
			shouldsleep = 0;	/* We have a command response */
		else if (priv->cur_cmd)
			shouldsleep = 1;	/* Can't send a command; one already running */
		else if (!list_empty(&priv->cmdpendingq) &&
					!(priv->wakeup_dev_required))
			shouldsleep = 0;	/* We have a command to send */
		else if (kfifo_len(&priv->event_fifo))
			shouldsleep = 0;	/* We have an event to process */
		else
			shouldsleep = 1;	/* No command */

		if (shouldsleep) {
			lbs_deb_thread("sleeping, connect_status %d, "
				"psmode %d, psstate %d\n",
				priv->connect_status,
				priv->psmode, priv->psstate);
			spin_unlock_irq(&priv->driver_lock);
			schedule();
		} else
			spin_unlock_irq(&priv->driver_lock);

		lbs_deb_thread("2: currenttxskb %p, dnld_send %d\n",
			       priv->currenttxskb, priv->dnld_sent);

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&priv->waitq, &wait);

		lbs_deb_thread("3: currenttxskb %p, dnld_sent %d\n",
			       priv->currenttxskb, priv->dnld_sent);

		if (kthread_should_stop()) {
			lbs_deb_thread("break from main thread\n");
			break;
		}

		if (priv->surpriseremoved) {
			lbs_deb_thread("adapter removed; waiting to die...\n");
			continue;
		}

		lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n",
		       priv->currenttxskb, priv->dnld_sent);

		/* Process any pending command response */
		spin_lock_irq(&priv->driver_lock);
		resp_idx = priv->resp_idx;
		if (priv->resp_len[resp_idx]) {
			spin_unlock_irq(&priv->driver_lock);
			lbs_process_command_response(priv,
				priv->resp_buf[resp_idx],
				priv->resp_len[resp_idx]);
			spin_lock_irq(&priv->driver_lock);
			priv->resp_len[resp_idx] = 0;
		}
		spin_unlock_irq(&priv->driver_lock);

		/* Process hardware events, e.g. card removed, link lost */
		spin_lock_irq(&priv->driver_lock);
		while (kfifo_len(&priv->event_fifo)) {
			u32 event;

			if (kfifo_out(&priv->event_fifo,
				(unsigned char *) &event, sizeof(event)) !=
				sizeof(event))
					break;
			spin_unlock_irq(&priv->driver_lock);
			lbs_process_event(priv, event);
			spin_lock_irq(&priv->driver_lock);
		}
		spin_unlock_irq(&priv->driver_lock);

		if (priv->wakeup_dev_required) {
			lbs_deb_thread("Waking up device...\n");
			/* Wake up device */
			if (priv->exit_deep_sleep(priv))
				lbs_deb_thread("Wakeup device failed\n");
			continue;
		}

		/* command timeout stuff */
		if (priv->cmd_timed_out && priv->cur_cmd) {
			struct cmd_ctrl_node *cmdnode = priv->cur_cmd;

			netdev_info(dev, "Timeout submitting command 0x%04x\n",
				    le16_to_cpu(cmdnode->cmdbuf->command));
			lbs_complete_command(priv, cmdnode, -ETIMEDOUT);

#if 0 /* Not in RHEL */
			/* Reset card, but only when it isn't in the process
			 * of being shutdown anyway. */
			if (!dev->dismantle && priv->reset_card)
#else
			if (priv->reset_card)
#endif
				priv->reset_card(priv);
		}
		priv->cmd_timed_out = 0;

		if (!priv->fw_ready)
			continue;

		/* Check if we need to confirm Sleep Request received previously */
		if (priv->psstate == PS_STATE_PRE_SLEEP &&
		    !priv->dnld_sent && !priv->cur_cmd) {
			if (priv->connect_status == LBS_CONNECTED) {
				lbs_deb_thread("pre-sleep, currenttxskb %p, "
					"dnld_sent %d, cur_cmd %p\n",
					priv->currenttxskb, priv->dnld_sent,
					priv->cur_cmd);

				lbs_ps_confirm_sleep(priv);
			} else {
				/* workaround for firmware sending
				 * deauth/linkloss event immediately
				 * after sleep request; remove this
				 * after firmware fixes it
				 */
				priv->psstate = PS_STATE_AWAKE;
				netdev_alert(dev,
					     "ignore PS_SleepConfirm in non-connected state\n");
			}
		}

		/* The PS state is changed during processing of Sleep Request
		 * event above
		 */
		if ((priv->psstate == PS_STATE_SLEEP) ||
		    (priv->psstate == PS_STATE_PRE_SLEEP))
			continue;

		if (priv->is_deep_sleep)
			continue;

		/* Execute the next command */
		if (!priv->dnld_sent && !priv->cur_cmd)
			lbs_execute_next_command(priv);

		spin_lock_irq(&priv->driver_lock);
		if (!priv->dnld_sent && priv->tx_pending_len > 0) {
			int ret = priv->hw_host_to_card(priv, MVMS_DAT,
							priv->tx_pending_buf,
							priv->tx_pending_len);
			if (ret) {
				lbs_deb_tx("host_to_card failed %d\n", ret);
				priv->dnld_sent = DNLD_RES_RECEIVED;
			} else {
				mod_timer(&priv->tx_lockup_timer,
					  jiffies + (HZ * 5));
			}
			priv->tx_pending_len = 0;
			if (!priv->currenttxskb) {
				/* We can wake the queues immediately if we aren't
				   waiting for TX feedback */
				if (priv->connect_status == LBS_CONNECTED)
					netif_wake_queue(priv->dev);
				if (priv->mesh_dev &&
				    netif_running(priv->mesh_dev))
					netif_wake_queue(priv->mesh_dev);
			}
		}
		spin_unlock_irq(&priv->driver_lock);
	}

	del_timer(&priv->command_timer);
	del_timer(&priv->tx_lockup_timer);
	del_timer(&priv->auto_deepsleep_timer);

	lbs_deb_leave(LBS_DEB_THREAD);
	return 0;
}
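lbs_thread queues itself on priv->waitq and sets TASK_INTERRUPTIBLE before it evaluates, under the driver lock, whether anything needs doing; only then does it decide between schedule() and running on. An event signalled during the evaluation therefore wakes an already-registered task rather than being missed. The skeleton of such a worker loop, with hypothetical demo_* names:

#include <linux/kthread.h>
#include <linux/wait.h>

struct demo {				/* hypothetical driver state */
	wait_queue_head_t wq;
};
static bool demo_has_work(struct demo *d);	/* hypothetical */
static void demo_handle_work(struct demo *d);	/* hypothetical */

static int demo_thread(void *arg)
{
	struct demo *d = arg;
	DECLARE_WAITQUEUE(wait, current);

	while (!kthread_should_stop()) {
		add_wait_queue(&d->wq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!demo_has_work(d))
			schedule();	/* nothing to do: sleep; kthread_stop()
					   also wakes us here */
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&d->wq, &wait);

		demo_handle_work(d);
	}
	return 0;
}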
Beispiel #26
0
static ssize_t cmmbmemo_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
    struct cmmb_memory *cmmbmemo = (struct cmmb_memory*)file->private_data;
    ssize_t avail_V, avail_A, avail_D;
    ssize_t ret = 0;	/* initialised: r_datatype may match no branch */
    
    DBG("[CMMB HW]:[memory]:enter cmmb memory read\n");
    
    if (cmmbmemo->r_datatype == CMMB_VIDEO_TYPE){
#if 0         
        DECLARE_WAITQUEUE(wait, current);
        for(;;){
            avail_V = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Video);
             
            if (avail_V < count){          
                add_wait_queue(&cmmbmemo->buffer_Video.queue, &wait);
                __set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                remove_wait_queue(&cmmbmemo->buffer_Video.queue, &wait);
                if (signal_pending(current)){
                   ret = -ERESTARTSYS;
                   goto out2;
                }
            }
        }
#else
#if 0
        avail_V = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Video);
        while (avail_V < count){
            DBG("[CMMB HW]:[memory]:cmmb memory read video data sleep!!\n");
            spin_lock(cmmbmemo->buffer_Video.lock);
            cmmbmemo->buffer_Video.condition = 0;
            spin_unlock(cmmbmemo->buffer_Video.lock);
            if (wait_event_interruptible(cmmbmemo->buffer_Video.queue, cmmbmemo->buffer_Video.condition))
                return -ERESTARTSYS;
            
            avail_V = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Video);
            DBG("[CMMB HW]:[memory]:cmmb memory read video data awake\n");
        }
#endif 
	    avail_V = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Video);
	    if (avail_V < count)  
	    	return 0;     
#endif          
        ret = cmmb_ringbuffer_read(&cmmbmemo->buffer_Video, buf, count, 1);   
     
        DBG("[CMMB HW]:[memory]:cmmb memory video read ret = 0x%x\n",ret);
    }else if (cmmbmemo->r_datatype == CMMB_AUDIO_TYPE){
#if 0
        DECLARE_WAITQUEUE(wait, current);
        for(;;){
            avail_A = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Audio);
            if (avail_A < count){
                add_wait_queue(&cmmbmemo->buffer_Audio.queue, &wait);
                __set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                remove_wait_queue(&cmmbmemo->buffer_Audio.queue, &wait);
                if (signal_pending(current)){
                    ret = -ERESTARTSYS;
                    goto out2;
                }
            }
        }
#else
#if 0
        avail_A = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Audio);
        while (avail_A < count){
            DBG("[CMMB HW]:[memory]:cmmb memory read audio data sleep!!\n");
            spin_lock(cmmbmemo->buffer_Audio.lock);
            cmmbmemo->buffer_Audio.condition = 0;
            spin_unlock(cmmbmemo->buffer_Audio.lock);
            if (wait_event_interruptible(cmmbmemo->buffer_Audio.queue, cmmbmemo->buffer_Audio.condition))
                return -ERESTARTSYS;
            
            avail_A = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Audio);
            DBG("[CMMB HW]:[memory]:cmmb memory read audio data awake\n");
        }
#endif
		avail_A = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Audio);  
		if (avail_A < count)  
			return 0;    
#endif
        ret = cmmb_ringbuffer_read(&cmmbmemo->buffer_Audio, buf, count, 1);
    }else if(cmmbmemo->r_datatype == CMMB_DATA_TYPE){
 #if 0   
        DECLARE_WAITQUEUE(wait, current);
        for(;;){
           avail_D = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Data);
           if (avail_D < count){
               add_wait_queue(&cmmbmemo->buffer_Data.queue, &wait);
               __set_current_state(TASK_INTERRUPTIBLE);
               schedule();
               remove_wait_queue(&cmmbmemo->buffer_Data.queue, &wait);
               if (signal_pending(current)){
                   ret = -ERESTARTSYS;
                   goto out2;
               }
           }
        }
#else
#if 0
        avail_D = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Data);
        while (avail_D < count){
        DBG("[CMMB HW]:[memory]:cmmb memory read data sleep!!\n");
        spin_lock(cmmbmemo->buffer_Data.lock);
        cmmbmemo->buffer_Data.condition = 0;
        spin_unlock(cmmbmemo->buffer_Data.lock);
        if (wait_event_interruptible(cmmbmemo->buffer_Data.queue, cmmbmemo->buffer_Data.condition))
            return -ERESTARTSYS;
        
        avail_D= cmmb_ringbuffer_avail(&cmmbmemo->buffer_Data);
        DBG("[CMMB HW]:[memory]:cmmb memory read data awake\n");
        }
#endif
		avail_D = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Data);  
		if (avail_D < count)  
			return 0;    	        
#endif
        ret = cmmb_ringbuffer_read(&cmmbmemo->buffer_Data, buf, count, 1);
    }
    
out2:
    cmmbmemo->r_datatype = CMMB_NULL_TYPE;
    return ret;
}
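The #if 0 branches in each arm preserve two earlier strategies: a hand-rolled add_wait_queue()/schedule() loop, and a wait_event_interruptible() version that waits on a condition flag set by the producer. The macro rolls the whole register/test/sleep/unregister sequence into one call and returns -ERESTARTSYS if a signal cuts the wait short. A sketch of the idiom, waiting directly on buffer availability (the rb parameter and this use of cmmb_ringbuffer_avail() are illustrative, not the driver's exact code):

/* sleep until at least `count` bytes are buffered, or a signal arrives */
if (wait_event_interruptible(rb->queue,
		cmmb_ringbuffer_avail(rb) >= count))
	return -ERESTARTSYS;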
Beispiel #27
0
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct inode *inode = filp->f_dentry->d_inode;
    struct ext4_inode_info *ei = EXT4_I(inode);
    unsigned int flags;

    ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);

    switch (cmd) {
    case EXT4_IOC_GETFLAGS:
        ext4_get_inode_flags(ei);
        flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
        return put_user(flags, (int __user *) arg);
    case EXT4_IOC_SETFLAGS: {
        handle_t *handle = NULL;
        int err, migrate = 0;
        struct ext4_iloc iloc;
        unsigned int oldflags;
        unsigned int jflag;

        if (!is_owner_or_cap(inode))
            return -EACCES;

        if (get_user(flags, (int __user *) arg))
            return -EFAULT;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;

        flags = ext4_mask_flags(inode->i_mode, flags);

        err = -EPERM;
        mutex_lock(&inode->i_mutex);
        /* Is it quota file? Do not allow user to mess with it */
        if (IS_NOQUOTA(inode))
            goto flags_out;

        oldflags = ei->i_flags;

        /* The JOURNAL_DATA flag is modifiable only by root */
        jflag = flags & EXT4_JOURNAL_DATA_FL;

        /*
         * The IMMUTABLE and APPEND_ONLY flags can only be changed by
         * the relevant capability.
         *
         * This test looks nicer. Thanks to Pauline Middelink
         */
        if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
            if (!capable(CAP_LINUX_IMMUTABLE))
                goto flags_out;
        }

        /*
         * The JOURNAL_DATA flag can only be changed by
         * the relevant capability.
         */
        if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
            if (!capable(CAP_SYS_RESOURCE))
                goto flags_out;
        }
        if (oldflags & EXT4_EXTENTS_FL) {
            /* We don't support clearing extent flags */
            if (!(flags & EXT4_EXTENTS_FL)) {
                err = -EOPNOTSUPP;
                goto flags_out;
            }
        } else if (flags & EXT4_EXTENTS_FL) {
            /* migrate the file */
            migrate = 1;
            flags &= ~EXT4_EXTENTS_FL;
        }

        if (flags & EXT4_EOFBLOCKS_FL) {
            /* we don't support adding EOFBLOCKS flag */
            if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
                err = -EOPNOTSUPP;
                goto flags_out;
            }
        } else if (oldflags & EXT4_EOFBLOCKS_FL)
            ext4_truncate(inode);

        handle = ext4_journal_start(inode, 1);
        if (IS_ERR(handle)) {
            err = PTR_ERR(handle);
            goto flags_out;
        }
        if (IS_SYNC(inode))
            ext4_handle_sync(handle);
        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (err)
            goto flags_err;

        flags = flags & EXT4_FL_USER_MODIFIABLE;
        flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
        ei->i_flags = flags;

        ext4_set_inode_flags(inode);
        inode->i_ctime = ext4_current_time(inode);

        err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
        ext4_journal_stop(handle);
        if (err)
            goto flags_out;

        if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL))
            err = ext4_change_inode_journal_flag(inode, jflag);
        if (err)
            goto flags_out;
        if (migrate)
            err = ext4_ext_migrate(inode);
flags_out:
        mutex_unlock(&inode->i_mutex);
        mnt_drop_write(filp->f_path.mnt);
        return err;
    }
    case EXT4_IOC_GETVERSION:
    case EXT4_IOC_GETVERSION_OLD:
        return put_user(inode->i_generation, (int __user *) arg);
    case EXT4_IOC_SETVERSION:
    case EXT4_IOC_SETVERSION_OLD: {
        handle_t *handle;
        struct ext4_iloc iloc;
        __u32 generation;
        int err;

        if (!is_owner_or_cap(inode))
            return -EPERM;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;
        if (get_user(generation, (int __user *) arg)) {
            err = -EFAULT;
            goto setversion_out;
        }

        handle = ext4_journal_start(inode, 1);
        if (IS_ERR(handle)) {
            err = PTR_ERR(handle);
            goto setversion_out;
        }
        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (err == 0) {
            inode->i_ctime = ext4_current_time(inode);
            inode->i_generation = generation;
            err = ext4_mark_iloc_dirty(handle, inode, &iloc);
        }
        ext4_journal_stop(handle);
setversion_out:
        mnt_drop_write(filp->f_path.mnt);
        return err;
    }
#ifdef CONFIG_JBD2_DEBUG
    case EXT4_IOC_WAIT_FOR_READONLY:
        /*
         * This is racy - by the time we're woken up and running,
         * the superblock could be released.  And the module could
         * have been unloaded.  So sue me.
         *
         * Returns 1 if it slept, else zero.
         */
    {
        struct super_block *sb = inode->i_sb;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&EXT4_SB(sb)->ro_wait_queue, &wait);
        if (timer_pending(&EXT4_SB(sb)->turn_ro_timer)) {
            schedule();
            ret = 1;
        }
        remove_wait_queue(&EXT4_SB(sb)->ro_wait_queue, &wait);
        return ret;
    }
#endif
    case EXT4_IOC_GROUP_EXTEND: {
        ext4_fsblk_t n_blocks_count;
        struct super_block *sb = inode->i_sb;
        int err, err2=0;

        if (!capable(CAP_SYS_RESOURCE))
            return -EPERM;

        if (get_user(n_blocks_count, (__u32 __user *)arg))
            return -EFAULT;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;

        err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
        if (EXT4_SB(sb)->s_journal) {
            jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
            err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
            jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
        }
        if (err == 0)
            err = err2;
        mnt_drop_write(filp->f_path.mnt);

        return err;
    }

    case EXT4_IOC_MOVE_EXT: {
        struct move_extent me;
        struct file *donor_filp;
        int err;

        if (!(filp->f_mode & FMODE_READ) ||
                !(filp->f_mode & FMODE_WRITE))
            return -EBADF;

        if (copy_from_user(&me,
                           (struct move_extent __user *)arg, sizeof(me)))
            return -EFAULT;
        me.moved_len = 0;

        donor_filp = fget(me.donor_fd);
        if (!donor_filp)
            return -EBADF;

        if (!(donor_filp->f_mode & FMODE_WRITE)) {
            err = -EBADF;
            goto mext_out;
        }

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            goto mext_out;

        err = ext4_move_extents(filp, donor_filp, me.orig_start,
                                me.donor_start, me.len, &me.moved_len);
        mnt_drop_write(filp->f_path.mnt);
        if (me.moved_len > 0)
            file_remove_suid(donor_filp);

        if (copy_to_user((struct move_extent *)arg, &me, sizeof(me)))
            err = -EFAULT;
mext_out:
        fput(donor_filp);
        return err;
    }

    case EXT4_IOC_GROUP_ADD: {
        struct ext4_new_group_data input;
        struct super_block *sb = inode->i_sb;
        int err, err2=0;

        if (!capable(CAP_SYS_RESOURCE))
            return -EPERM;

        if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
                           sizeof(input)))
            return -EFAULT;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;

        err = ext4_group_add(sb, &input);
        if (EXT4_SB(sb)->s_journal) {
            jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
            err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
            jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
        }
        if (err == 0)
            err = err2;
        mnt_drop_write(filp->f_path.mnt);

        return err;
    }

    case EXT4_IOC_MIGRATE:
    {
        int err;
        if (!is_owner_or_cap(inode))
            return -EACCES;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;
        /*
         * inode_mutex prevent write and truncate on the file.
         * Read still goes through. We take i_data_sem in
         * ext4_ext_swap_inode_data before we switch the
         * inode format to prevent read.
         */
        mutex_lock(&(inode->i_mutex));
        err = ext4_ext_migrate(inode);
        mutex_unlock(&(inode->i_mutex));
        mnt_drop_write(filp->f_path.mnt);
        return err;
    }

    case EXT4_IOC_ALLOC_DA_BLKS:
    {
        int err;
        if (!is_owner_or_cap(inode))
            return -EACCES;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;
        err = ext4_alloc_da_blocks(inode);
        mnt_drop_write(filp->f_path.mnt);
        return err;
    }

    default:
        return -ENOTTY;
    }
}
Beispiel #28
0
static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
				size_t len)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	ssize_t total_written;
	long timeout;
	int err;
	struct vsock_transport_send_notify_data send_data;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	total_written = 0;
	err = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	/* Callers should not provide a destination with stream sockets. */
	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out;
	}

	/* Send data only if both sides are not shutdown in the direction. */
	if (sk->sk_shutdown & SEND_SHUTDOWN ||
	    vsk->peer_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state != TCP_ESTABLISHED ||
	    !vsock_addr_bound(&vsk->local_addr)) {
		err = -ENOTCONN;
		goto out;
	}

	if (!vsock_addr_bound(&vsk->remote_addr)) {
		err = -EDESTADDRREQ;
		goto out;
	}

	/* Wait for room in the produce queue to enqueue our user's data. */
	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	err = transport->notify_send_init(vsk, &send_data);
	if (err < 0)
		goto out;

	while (total_written < len) {
		ssize_t written;

		add_wait_queue(sk_sleep(sk), &wait);
		while (vsock_stream_has_space(vsk) == 0 &&
		       sk->sk_err == 0 &&
		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {

			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			err = transport->notify_send_pre_block(vsk, &send_data);
			if (err < 0) {
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			release_sock(sk);
			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
			lock_sock(sk);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			} else if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}
		}
		remove_wait_queue(sk_sleep(sk), &wait);

		/* These checks occur both as part of and after the loop
		 * conditional since we need to check before and after
		 * sleeping.
		 */
		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
			err = -EPIPE;
			goto out_err;
		}

		err = transport->notify_send_pre_enqueue(vsk, &send_data);
		if (err < 0)
			goto out_err;

		/* Note that enqueue will only write as many bytes as are free
		 * in the produce queue, so we don't need to ensure len is
		 * smaller than the queue size.  It is the caller's
		 * responsibility to check how many bytes we were able to send.
		 */

		written = transport->stream_enqueue(
				vsk, msg,
				len - total_written);
		if (written < 0) {
			err = -ENOMEM;
			goto out_err;
		}

		total_written += written;

		err = transport->notify_send_post_enqueue(
				vsk, written, &send_data);
		if (err < 0)
			goto out_err;

	}

out_err:
	if (total_written > 0)
		err = total_written;
out:
	release_sock(sk);
	return err;
}
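vsock_stream_sendmsg uses the newer DEFINE_WAIT_FUNC/wait_woken() pairing instead of open-coded set_current_state() and schedule(). woken_wake_function() records the wakeup in the wait entry itself, so wait_woken() cannot lose a wakeup that lands between the condition test and the sleep, and it hands back the remaining timeout. Reduced to a sketch (cond is a hypothetical predicate):

#include <linux/wait.h>
#include <linux/sched.h>

static long demo_wait_woken(wait_queue_head_t *whead,
			    bool (*cond)(void *), void *arg, long timeout)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(whead, &wait);
	while (!cond(arg) && timeout && !signal_pending(current))
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	remove_wait_queue(whead, &wait);
	return timeout;		/* 0 means the wait timed out */
}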
Beispiel #29
0
static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
{
	unsigned int NumCard;	/* Board number 1 -> 8           */
	unsigned int IndexCard;	/* Index board number 0 -> 7     */
	unsigned char TicCard;	/* Board TIC to send             */
	unsigned long flags;	/* Current priority              */
	struct st_ram_io st_loc;
	struct mailbox tmpmailbox;
#ifdef DEBUG
	int c;
#endif
	DECLARE_WAITQUEUE(wait, current);

	if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
		static int warncount = 5;
		if (warncount) {
			printk(KERN_INFO "Hmmm. write() of Applicom card, length %zd != expected %zd\n",
			       count, sizeof(struct st_ram_io) + sizeof(struct mailbox));
			warncount--;
		}
		return -EINVAL;
	}

	if(copy_from_user(&st_loc, buf, sizeof(struct st_ram_io))) 
		return -EFAULT;
	
	if(copy_from_user(&tmpmailbox, &buf[sizeof(struct st_ram_io)],
			  sizeof(struct mailbox))) 
		return -EFAULT;

	NumCard = st_loc.num_card;	/* board number to send          */
	TicCard = st_loc.tic_des_from_pc;	/* tic number to send            */
	IndexCard = NumCard - 1;

	if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
		return -EINVAL;

#ifdef DEBUG
	printk("Write to applicom card #%d. struct st_ram_io follows:",
	       IndexCard+1);

		for (c = 0; c < sizeof(struct st_ram_io);) {
		
			printk("\n%5.5X: %2.2X", c, ((unsigned char *) &st_loc)[c]);

			for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
				printk(" %2.2X", ((unsigned char *) &st_loc)[c]);
			}
		}

		printk("\nstruct mailbox follows:");

		for (c = 0; c < sizeof(struct mailbox);) {
			printk("\n%5.5X: %2.2X", c, ((unsigned char *) &tmpmailbox)[c]);

			for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
				printk(" %2.2X", ((unsigned char *) &tmpmailbox)[c]);
			}
		}

		printk("\n");
#endif

	spin_lock_irqsave(&apbs[IndexCard].mutex, flags);

	/* Test octet ready correct */
	if(readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) > 2) { 
		Dummy = readb(apbs[IndexCard].RamIO + VERS);
		spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
		printk(KERN_WARNING "APPLICOM driver write error board %d, DataFromPcReady = %d\n",
		       IndexCard,(int)readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY));
		DeviceErrorCount++;
		return -EIO;
	}
	
	/* Place ourselves on the wait queue */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);

	/* Check whether the card is ready for us */
	while (readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) != 0) {
		Dummy = readb(apbs[IndexCard].RamIO + VERS);
		/* It's busy. Sleep. */

		spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&apbs[IndexCard].FlagSleepSend,
					  &wait);
			return -EINTR;
		}
		spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
		set_current_state(TASK_INTERRUPTIBLE);
	}

	/* We may not have actually slept */
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);

	writeb(1, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);

	/* Which is best - lock down the pages with rawio and then
	   copy directly, or use bounce buffers? For now we do the latter 
	   because it works with 2.2 still */
	{
		unsigned char *from = (unsigned char *) &tmpmailbox;
		void __iomem *to = apbs[IndexCard].RamIO + RAM_FROM_PC;
		int c;

		for (c = 0; c < sizeof(struct mailbox); c++)
			writeb(*(from++), to++);
	}

	writeb(0x20, apbs[IndexCard].RamIO + TIC_OWNER_FROM_PC);
	writeb(0xff, apbs[IndexCard].RamIO + NUMCARD_OWNER_FROM_PC);
	writeb(TicCard, apbs[IndexCard].RamIO + TIC_DES_FROM_PC);
	writeb(NumCard, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
	writeb(2, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
	writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
	Dummy = readb(apbs[IndexCard].RamIO + VERS);
	spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
	return 0;
}
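ac_write shows the spinlock flavour of the same sleep: queue registration and the hardware-ready test happen under apbs[IndexCard].mutex (a spinlock despite its name), and the lock is dropped for the duration of schedule() and retaken before re-testing. A reduced sketch of that lock choreography (lock, wq, and device_ready() are hypothetical names):

spin_lock_irqsave(&lock, flags);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wq, &wait);
while (!device_ready()) {			/* hypothetical predicate */
	spin_unlock_irqrestore(&lock, flags);	/* never schedule() with a
						   spinlock held */
	schedule();
	if (signal_pending(current)) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wq, &wait);
		return -EINTR;
	}
	spin_lock_irqsave(&lock, flags);
	set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&wq, &wait);
/* ... do the work, then ... */
spin_unlock_irqrestore(&lock, flags);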
Beispiel #30
0
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
    struct dm_table *map = NULL;
    unsigned long flags;
    DECLARE_WAITQUEUE(wait, current);
    struct bio *def;
    int r = -EINVAL;
    int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
    int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

    down(&md->suspend_lock);

    if (dm_suspended(md))
        goto out_unlock;

    map = dm_get_table(md);

    /*
     * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
     * This flag is cleared before dm_suspend returns.
     */
    if (noflush)
        set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

    /* This does not get reverted if there's an error later. */
    dm_table_presuspend_targets(map);

    /* bdget() can stall if the pending I/Os are not flushed */
    if (!noflush) {
        md->suspended_bdev = bdget_disk(md->disk, 0);
        if (!md->suspended_bdev) {
            DMWARN("bdget failed in dm_suspend");
            r = -ENOMEM;
            goto flush_and_out;
        }
    }

    /*
     * Flush I/O to the device.
     * noflush supersedes do_lockfs, because lock_fs() needs to flush I/Os.
     */
    if (do_lockfs && !noflush) {
        r = lock_fs(md);
        if (r)
            goto out;
    }

    /*
     * First we set the BLOCK_IO flag so no more ios will be mapped.
     */
    down_write(&md->io_lock);
    set_bit(DMF_BLOCK_IO, &md->flags);

    add_wait_queue(&md->wait, &wait);
    up_write(&md->io_lock);

    /* unplug */
    if (map)
        dm_table_unplug_all(map);

    /*
     * Then we wait for the already mapped ios to
     * complete.
     */
    while (1) {
        set_current_state(TASK_INTERRUPTIBLE);

        if (!atomic_read(&md->pending) || signal_pending(current))
            break;

        io_schedule();
    }
    set_current_state(TASK_RUNNING);

    down_write(&md->io_lock);
    remove_wait_queue(&md->wait, &wait);

    if (noflush) {
        spin_lock_irqsave(&md->pushback_lock, flags);
        clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
        bio_list_merge_head(&md->deferred, &md->pushback);
        bio_list_init(&md->pushback);
        spin_unlock_irqrestore(&md->pushback_lock, flags);
    }

    /* were we interrupted ? */
    r = -EINTR;
    if (atomic_read(&md->pending)) {
        clear_bit(DMF_BLOCK_IO, &md->flags);
        def = bio_list_get(&md->deferred);
        __flush_deferred_io(md, def);
        up_write(&md->io_lock);
        unlock_fs(md);
        goto out; /* pushback list is already flushed, so skip flush */
    }
    up_write(&md->io_lock);

    dm_table_postsuspend_targets(map);

    set_bit(DMF_SUSPENDED, &md->flags);

    r = 0;

flush_and_out:
    if (r && noflush) {
        /*
         * Because there may be already I/Os in the pushback list,
         * flush them before return.
         */
        down_write(&md->io_lock);

        spin_lock_irqsave(&md->pushback_lock, flags);
        clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
        bio_list_merge_head(&md->deferred, &md->pushback);
        bio_list_init(&md->pushback);
        spin_unlock_irqrestore(&md->pushback_lock, flags);

        def = bio_list_get(&md->deferred);
        __flush_deferred_io(md, def);
        up_write(&md->io_lock);
    }

out:
    if (r && md->suspended_bdev) {
        bdput(md->suspended_bdev);
        md->suspended_bdev = NULL;
    }

    dm_table_put(map);

out_unlock:
    up(&md->suspend_lock);
    return r;
}
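A final detail in dm_suspend: the sleep goes through io_schedule() rather than schedule(). Wakeup behaviour is the same, but io_schedule() accounts the blocked task as waiting on I/O, so the time shows up as iowait instead of idle. The wait loop above, reduced to its skeleton:

	set_current_state(TASK_INTERRUPTIBLE);
	if (atomic_read(&md->pending) && !signal_pending(current))
		io_schedule();	/* like schedule(), but counted as I/O wait */
	set_current_state(TASK_RUNNING);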