Code Example #1
File: irq.c  Project: TKr/Wive-ng-rt8186
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/* 
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious. 
	 */
	for (i = NR_IRQS-1; i > 0; i--)  {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action)
			desc->handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ synchronize_irq();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}
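
probe_irq_on() is one half of the legacy IRQ autoprobe API: a driver brackets a device-triggered interrupt between probe_irq_on() and probe_irq_off(). A hypothetical caller sketch, where my_hw_trigger_interrupt() stands in for the device-specific register write:

static int demo_probe_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* enable unassigned IRQs, arm detection */
	my_hw_trigger_interrupt();	/* hypothetical: make the device raise its IRQ */
	udelay(100);			/* give the line time to fire */
	irq = probe_irq_off(mask);	/* >0: detected IRQ; 0: none; <0: several fired */

	return irq;
}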
Code Example #2
File: printk.c  Project: Wonfee/huawei_u9508_kernel
int do_syslog(int type, char __user *buf, int len, bool from_file)
{
	unsigned i, j, limit, count;
	int do_clear = 0;
	char c;
	int error;

	error = check_syslog_permissions(type, from_file);
	if (error)
		goto out;

	error = security_syslog(type);
	if (error)
		return error;

	switch (type) {
	case SYSLOG_ACTION_CLOSE:	/* Close log */
		break;
	case SYSLOG_ACTION_OPEN:	/* Open log */
		break;
	case SYSLOG_ACTION_READ:	/* Read from log */
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
		error = wait_event_interruptible(log_wait,
							(log_start - log_end));
		if (error)
			goto out;
		i = 0;
		spin_lock_irq(&logbuf_lock);
		while (!error && (log_start != log_end) && i < len) {
			c = LOG_BUF(log_start);
			log_start++;
			spin_unlock_irq(&logbuf_lock);
			error = __put_user(c,buf);
			buf++;
			i++;
			cond_resched();
			spin_lock_irq(&logbuf_lock);
		}
		spin_unlock_irq(&logbuf_lock);
		if (!error)
			error = i;
		break;
	/* Read/clear last kernel messages */
	case SYSLOG_ACTION_READ_CLEAR:
		do_clear = 1;
		/* FALL THRU */
	/* Read last kernel messages */
	case SYSLOG_ACTION_READ_ALL:
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
		count = len;
		if (count > log_buf_len)
			count = log_buf_len;
		spin_lock_irq(&logbuf_lock);
		if (count > logged_chars)
			count = logged_chars;
		if (do_clear)
			logged_chars = 0;
		limit = log_end;
		/*
		 * __put_user() could sleep, and while we sleep
		 * printk() could overwrite the messages
		 * we try to copy to user space. Therefore
		 * the messages are copied in reverse. <manfreds>
		 */
		for (i = 0; i < count && !error; i++) {
			j = limit-1-i;
			if (j + log_buf_len < log_end)
				break;
			c = LOG_BUF(j);
			spin_unlock_irq(&logbuf_lock);
			error = __put_user(c,&buf[count-1-i]);
			cond_resched();
			spin_lock_irq(&logbuf_lock);
		}
		spin_unlock_irq(&logbuf_lock);
		if (error)
			break;
		error = i;
		if (i != count) {
			int offset = count-error;
			/* buffer overflow during copy, correct user buffer. */
			for (i = 0; i < error; i++) {
				if (__get_user(c,&buf[i+offset]) ||
				    __put_user(c,&buf[i])) {
					error = -EFAULT;
					break;
				}
				cond_resched();
			}
		}
		break;
	/* Clear ring buffer */
	case SYSLOG_ACTION_CLEAR:
		logged_chars = 0;
		break;
	/* Disable logging to console */
	case SYSLOG_ACTION_CONSOLE_OFF:
		if (saved_console_loglevel == -1)
			saved_console_loglevel = console_loglevel;
		console_loglevel = minimum_console_loglevel;
		break;
	/* Enable logging to console */
	case SYSLOG_ACTION_CONSOLE_ON:
		if (saved_console_loglevel != -1) {
			console_loglevel = saved_console_loglevel;
			saved_console_loglevel = -1;
		}
		break;
	/* Set level of messages printed to console */
	case SYSLOG_ACTION_CONSOLE_LEVEL:
		error = -EINVAL;
		if (len < 1 || len > 8)
			goto out;
		if (len < minimum_console_loglevel)
			len = minimum_console_loglevel;
		console_loglevel = len;
		/* Implicitly re-enable logging to console */
		saved_console_loglevel = -1;
		error = 0;
		break;
	/* Number of chars in the log buffer */
	case SYSLOG_ACTION_SIZE_UNREAD:
		error = log_end - log_start;
		break;
	/* Size of the log buffer */
	case SYSLOG_ACTION_SIZE_BUFFER:
		error = log_buf_len;
		break;
	default:
		error = -EINVAL;
		break;
	}
out:
	return error;
}
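
Both read paths above drop logbuf_lock around each __put_user(), because a copy to user space may fault and sleep, which is forbidden while holding a spinlock. The bare unlock-copy-relock idiom, with the lock and buffer helpers assumed for illustration:

static ssize_t demo_drain_to_user(char __user *ubuf, size_t len)
{
	ssize_t copied = 0;
	int error = 0;

	spin_lock_irq(&demo_buf_lock);
	while (copied < len && demo_have_data() && !error) {
		char c = demo_next_char();	/* consume under the lock */
		spin_unlock_irq(&demo_buf_lock); /* __put_user() may fault/sleep */
		error = __put_user(c, ubuf + copied);
		if (!error)
			copied++;
		cond_resched();
		spin_lock_irq(&demo_buf_lock);	/* retake before touching state */
	}
	spin_unlock_irq(&demo_buf_lock);

	return error ? error : copied;
}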
Code Example #3
File: rtc-mrst.c  Project: 33d/linux-2.6.21-hh20
static void rtc_mrst_do_shutdown(void)
{
	spin_lock_irq(&rtc_lock);
	mrst_irq_disable(&mrst_rtc, RTC_IRQMASK);
	spin_unlock_irq(&rtc_lock);
}
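
rtc_mrst_do_shutdown() can use the unconditional spin_lock_irq()/spin_unlock_irq() pair because it runs in process context with interrupts known to be enabled; spin_unlock_irq() always re-enables them. Where that is not guaranteed, the state-saving variants are the safe choice. A minimal sketch with a hypothetical lock:

static DEFINE_SPINLOCK(demo_lock);

static void demo_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* save IRQ state, then disable */
	/* ... touch data shared with an interrupt handler ... */
	spin_unlock_irqrestore(&demo_lock, flags); /* restore the saved state */
}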
Code Example #4
File: btnode.c  Project: Ale1ster/kerneldir
/**
 * nilfs_btnode_prepare_change_key
 *  Prepare to move the contents of the block for the old key to the new key.
 *  The old buffer will not be removed, but it might be reused for the new
 *  buffer. This may return -ENOMEM on memory allocation failure, or -EIO on
 *  disk read errors.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
		lock_page(obh->b_page);
		/*
		 * We cannot call radix_tree_preload for the kernels older
		 * than 2.6.23, because it is not exported for modules.
		 */
retry:
		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
		if (err)
			goto failed_unlock;
		/* BUG_ON(oldkey != obh->b_page->index); */
		if (unlikely(oldkey != obh->b_page->index))
			NILFS_PAGE_BUG(obh->b_page,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

		spin_lock_irq(&btnc->tree_lock);
		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
		spin_unlock_irq(&btnc->tree_lock);
		/*
		 * Note: page->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() will be called.
		 * To protect the page in intermediate state, the page lock
		 * is held.
		 */
		radix_tree_preload_end();
		if (!err)
			return 0;
		else if (err != -EEXIST)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		unlock_page(obh->b_page);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (!nbh)
		return -ENOMEM;

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

 failed_unlock:
	unlock_page(obh->b_page);
	return err;
}
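
The retry loop follows the standard radix-tree insertion protocol: preload outside the tree lock (it may sleep), insert under the lock, then end the preload. Condensed to just that protocol, with assumed names:

static int demo_radix_insert(struct radix_tree_root *root, unsigned long index,
			     void *item, spinlock_t *lock)
{
	int err;

	err = radix_tree_preload(GFP_NOFS);	/* may sleep; call unlocked */
	if (err)
		return err;

	spin_lock_irq(lock);
	err = radix_tree_insert(root, index, item); /* -EEXIST possible, not -ENOMEM */
	spin_unlock_irq(lock);

	radix_tree_preload_end();	/* drops the preemption hold from preload */
	return err;
}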
Code Example #5
File: cdc-acm.c  Project: LITMUS-RT/litmus-rt-odroidx
static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
{
	struct acm *acm = container_of(port, struct acm, port);
	int retval = -ENODEV;

	dev_dbg(&acm->control->dev, "%s\n", __func__);

	mutex_lock(&acm->mutex);
	if (acm->disconnected)
		goto disconnected;

	retval = usb_autopm_get_interface(acm->control);
	if (retval)
		goto error_get_interface;

	/*
	 * FIXME: Why do we need this? Allocating 64K of physically contiguous
	 * memory is really nasty...
	 */
	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
	acm->control->needs_remote_wakeup = 1;

	acm->ctrlurb->dev = acm->dev;
	if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
		dev_err(&acm->control->dev,
			"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
		goto error_submit_urb;
	}

	acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
	if (acm_set_control(acm, acm->ctrlout) < 0 &&
	    (acm->ctrl_caps & USB_CDC_CAP_LINE))
		goto error_set_control;

	usb_autopm_put_interface(acm->control);

	/*
	 * Unthrottle device in case the TTY was closed while throttled.
	 */
	spin_lock_irq(&acm->read_lock);
	acm->throttled = 0;
	acm->throttle_req = 0;
	spin_unlock_irq(&acm->read_lock);

	if (acm_submit_read_urbs(acm, GFP_KERNEL))
		goto error_submit_read_urbs;

	mutex_unlock(&acm->mutex);

	return 0;

error_submit_read_urbs:
	acm->ctrlout = 0;
	acm_set_control(acm, acm->ctrlout);
error_set_control:
	usb_kill_urb(acm->ctrlurb);
error_submit_urb:
	usb_autopm_put_interface(acm->control);
error_get_interface:
disconnected:
	mutex_unlock(&acm->mutex);
	return retval;
}
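
The error labels at the bottom form the usual kernel unwind ladder: each label undoes exactly the steps that had succeeded before the failure, in reverse order of acquisition. Reduced to a skeleton with placeholder steps:

static int demo_activate(void)
{
	int err;

	err = step_a();		/* placeholder, e.g. autopm get */
	if (err)
		return err;

	err = step_b();		/* placeholder, e.g. URB submit */
	if (err)
		goto undo_a;

	err = step_c();		/* placeholder, e.g. set control lines */
	if (err)
		goto undo_b;

	return 0;

undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
	return err;
}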
Code Example #6
/*
 * RX work queue takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (tty && do_push) {
		tty_flip_buffer_push(tty);
	}


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
Code Example #7
static int s5p_ehci_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ehci->hcd;
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);

	clk_enable(s5p_ehci->clk);

	s5p_ehci_phy_init(pdev);

	/* if EHCI was off, hcd was removed */
	if (!s5p_ehci->power_on) {
		dev_info(dev, "Nothing to do for the device (power off)\n");
		return 0;
	}

	if (time_before(jiffies, ehci->next_statechange))
		usleep_range(10000, 11000);

	/* Mark hardware accessible again as we are out of D3 state by now */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
		int	mask = INTR_MASK;

		if (!hcd->self.root_hub->do_remote_wakeup)
			mask &= ~STS_PCD;
		ehci_writel(ehci, mask, &ehci->regs->intr_enable);
		ehci_readl(ehci, &ehci->regs->intr_enable);
		return 0;
	}

	ehci_dbg(ehci, "lost power, restarting\n");
	usb_root_hub_lost_power(hcd->self.root_hub);

	(void) ehci_halt(ehci);
	(void) ehci_reset(ehci);

	/* emptying the schedule aborts any urbs */
	spin_lock_irq(&ehci->lock);
	if (ehci->reclaim)
		end_unlink_async(ehci);
	ehci_work(ehci);
	spin_unlock_irq(&ehci->lock);

	ehci_writel(ehci, ehci->command, &ehci->regs->command);
	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */

	/* here we "know" root ports should always stay powered */
	ehci_port_power(ehci, 1);

	hcd->state = HC_STATE_SUSPENDED;

	/* Update runtime PM status and clear runtime_error */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* Prevent device from runtime suspend during resume time */
	pm_runtime_get_sync(dev);

#ifdef CONFIG_MDM_HSIC_PM
	set_host_stat(hsic_pm_dev, POWER_ON);
	wait_dev_pwr_stat(hsic_pm_dev, POWER_ON);
#endif
#if defined(CONFIG_LINK_DEVICE_HSIC) || defined(CONFIG_LINK_DEVICE_USB) \
	|| defined(CONFIG_MDM_HSIC_PM)
	pm_runtime_mark_last_busy(&hcd->self.root_hub->dev);
#endif
	return 0;
}
Code Example #8
/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;
	int err;

	pagevec_init(&pvec, 0);
repeat:
	n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
	if (!n)
		return;
	index = pvec.pages[n - 1]->index + 1;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* override existing page on the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			page_cache_release(dpage);
		} else {
			struct page *page2;

			/* move the page to the destination cache */
			spin_lock_irq(&smap->tree_lock);
			page2 = radix_tree_delete(&smap->page_tree, offset);
			WARN_ON(page2 != page);

			smap->nrpages--;
			spin_unlock_irq(&smap->tree_lock);

			spin_lock_irq(&dmap->tree_lock);
			err = radix_tree_insert(&dmap->page_tree, offset, page);
			if (unlikely(err < 0)) {
				WARN_ON(err == -EEXIST);
				page->mapping = NULL;
				page_cache_release(page); /* for cache */
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					radix_tree_tag_set(&dmap->page_tree,
							   offset,
							   PAGECACHE_TAG_DIRTY);
			}
			spin_unlock_irq(&dmap->tree_lock);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}
Code Example #9
File: dma.c  Project: E-LLP/n900
static inline void pnx4008_dma_unlock(void)
{
	spin_unlock_irq(&dma_lock);
}
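
This unlock helper is presumably paired with a matching lock helper in the same file; its mirror image would look like this (a sketch, not quoted from the project):

static inline void pnx4008_dma_lock(void)
{
	spin_lock_irq(&dma_lock);
}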
Code Example #10
int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int max_resp_size, void *resp_data)
{
	struct mixart_msg resp;
	u32 msg_frame = 0; /* set to 0, so it's no notification to wait for, but the answer */
	int err;
	wait_queue_t wait;
	long timeout;

	mutex_lock(&mgr->msg_mutex);

	init_waitqueue_entry(&wait, current);

	spin_lock_irq(&mgr->msg_lock);
	/* send the message */
	err = send_msg(mgr, request, max_resp_size, 1, &msg_frame);  /* send and mark the answer pending */
	if (err) {
		spin_unlock_irq(&mgr->msg_lock);
		mutex_unlock(&mgr->msg_mutex);
		return err;
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	spin_unlock_irq(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (! timeout) {
		/* error - no ack */
		mutex_unlock(&mgr->msg_mutex);
		snd_printk(KERN_ERR "error: no response on msg %x\n", msg_frame);
		return -EIO;
	}

	/* retrieve the answer into the same struct mixart_msg */
	resp.message_id = 0;
	resp.uid = (struct mixart_uid){0,0};
	resp.data = resp_data;
	resp.size = max_resp_size;

	err = get_msg(mgr, &resp, msg_frame);

	if( request->message_id != resp.message_id )
		snd_printk(KERN_ERR "RESPONSE ERROR!\n");

	mutex_unlock(&mgr->msg_mutex);
	return err;
}


int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr,
				   struct mixart_msg *request, u32 notif_event)
{
	int err;
	wait_queue_t wait;
	long timeout;

	if (snd_BUG_ON(!notif_event))
		return -EINVAL;
	if (snd_BUG_ON((notif_event & MSG_TYPE_MASK) != MSG_TYPE_NOTIFY))
		return -EINVAL;
	if (snd_BUG_ON(notif_event & MSG_CANCEL_NOTIFY_MASK))
		return -EINVAL;

	mutex_lock(&mgr->msg_mutex);

	init_waitqueue_entry(&wait, current);

	spin_lock_irq(&mgr->msg_lock);
	/* send the message */
	err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 1, &notif_event);  /* send and mark the notification event pending */
	if(err) {
		spin_unlock_irq(&mgr->msg_lock);
		mutex_unlock(&mgr->msg_mutex);
		return err;
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	spin_unlock_irq(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (! timeout) {
		/* error - no ack */
		mutex_unlock(&mgr->msg_mutex);
		snd_printk(KERN_ERR "error: notification %x not received\n", notif_event);
		return -EIO;
	}

	mutex_unlock(&mgr->msg_mutex);
	return 0;
}
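
Both functions use the classic open-coded sleep: the task marks itself sleeping and joins the wait queue before dropping msg_lock, so a wakeup from the interrupt side between the unlock and schedule_timeout() cannot be lost. The bare pattern, with assumed lock and queue names:

static int demo_send_and_wait(void)
{
	DECLARE_WAITQUEUE(wait, current);
	long timeout;

	spin_lock_irq(&demo_lock);
	/* ... hand the request to the hardware; the ISR will wake us ... */
	set_current_state(TASK_UNINTERRUPTIBLE); /* mark sleeping before unlock */
	add_wait_queue(&demo_sleep, &wait);
	spin_unlock_irq(&demo_lock);	/* a wake_up() from here on is not lost */
	timeout = schedule_timeout(HZ);	/* 0 => the full timeout passed, no wakeup */
	remove_wait_queue(&demo_sleep, &wait);

	return timeout ? 0 : -EIO;
}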
Code Example #11
static void
netx_set_termios(struct uart_port *port, struct termios *termios,
		   struct termios *old)
{
	unsigned int baud, quot;
	unsigned char old_cr;
	unsigned char line_cr = LINE_CR_FEN;
	unsigned char rts_cr = 0;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		line_cr |= LINE_CR_5BIT;
		break;
	case CS6:
		line_cr |= LINE_CR_6BIT;
		break;
	case CS7:
		line_cr |= LINE_CR_7BIT;
		break;
	case CS8:
		line_cr |= LINE_CR_8BIT;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		line_cr |= LINE_CR_STP2;

	if (termios->c_cflag & PARENB) {
		line_cr |= LINE_CR_PEN;
		if (!(termios->c_cflag & PARODD))
			line_cr |= LINE_CR_EPS;
	}

	if (termios->c_cflag & CRTSCTS)
		rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL;

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = baud * 4096;
	quot /= 1000;
	quot *= 256;
	quot /= 100000;

	spin_lock_irq(&port->lock);

	uart_update_timeout(port, termios->c_cflag, baud);

	old_cr = readl(port->membase + UART_CR);

	/* disable interrupts */
	writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE),
		port->membase + UART_CR);

	/* drain transmitter */
	while (readl(port->membase + UART_FR) & FR_BUSY);

	/* disable UART */
	writel(old_cr & ~CR_UART_EN, port->membase + UART_CR);

	/* modem status interrupts */
	old_cr &= ~CR_MSIE;
	if (UART_ENABLE_MS(port, termios->c_cflag))
		old_cr |= CR_MSIE;

	writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB);
	writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB);
	writel(line_cr, port->membase + UART_LINE_CR);

	writel(rts_cr, port->membase + UART_RTS_CR);

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= SR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= SR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= SR_PE;
	}

	port->read_status_mask = 0;
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= SR_BE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= SR_PE | SR_FE;

	writel(old_cr, port->membase + UART_CR);

	spin_unlock_irq(&port->lock);
}
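
A note on the divisor math above: the staged expression computes quot = baud * 4096/1000 * 256/100000, i.e. approximately baud * 2^20 / 10^8, in steps that keep every intermediate value within 32 bits. For example, at 115200 baud: 115200 * 4096 = 471859200, / 1000 = 471859, * 256 = 120795904, / 100000 = 1207; quot = 1207 is then split across the UART_BAUDDIV_MSB/LSB registers.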
Code Example #12
static int msm_hsic_resume_thread(void *data)
{
	struct msm_hsic_hcd *mehci = data;
	struct usb_hcd *hcd = hsic_to_hcd(mehci);
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	u32			temp;
	unsigned long		resume_needed = 0;
	int			retry_cnt = 0;
	int			tight_resume = 0;
	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;

	dbg_log_event(NULL, "Resume RH", 0);

	/* keep delay between bus states */
	if (time_before(jiffies, ehci->next_statechange))
		usleep_range(5000, 5000);

	spin_lock_irq(&ehci->lock);
	if (!HCD_HW_ACCESSIBLE(hcd)) {
		spin_unlock_irq(&ehci->lock);
		mehci->resume_status = -ESHUTDOWN;
		complete(&mehci->rt_completion);
		return 0;
	}

	if (unlikely(ehci->debug)) {
		if (!dbgp_reset_prep())
			ehci->debug = NULL;
		else
			dbgp_external_startup();
	}

	/* at least some APM implementations will try to deliver
	 * IRQs right away, so delay them until we're ready.
	 */
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);

	/* re-init operational registers */
	ehci_writel(ehci, 0, &ehci->regs->segment);
	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
	ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);

	/* CMD_RUN will be set after PORT_RESUME gets cleared */
	if (ehci->resume_sof_bug)
		ehci->command &= ~CMD_RUN;

	/* restore CMD_RUN, framelist size, and irq threshold */
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* manually resume the ports we suspended during bus_suspend() */
resume_again:
	if (retry_cnt >= RESUME_RETRY_LIMIT) {
		pr_info("retry count(%d) reached max, resume in tight loop\n",
					retry_cnt);
		tight_resume = 1;
	}


	temp = ehci_readl(ehci, &ehci->regs->port_status[0]);
	temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
	if (test_bit(0, &ehci->bus_suspended) && (temp & PORT_SUSPEND)) {
		temp |= PORT_RESUME;
		set_bit(0, &resume_needed);
	}
	dbg_log_event(NULL, "FPR: Set", temp);
	ehci_writel(ehci, temp, &ehci->regs->port_status[0]);

	/* The HSIC controller has a h/w bug due to which it can try to send
	 * SOFs (start of frames) during port resume, resulting in phy lockup.
	 * The HSIC hw controller in MSM clears the FPR bit after driving the
	 * resume signal for 20ms. The workaround is to stop SOFs before
	 * driving resume and then start sending SOFs immediately. SOFs must
	 * be sent within 3ms of resume completion, otherwise the peripheral
	 * may enter an undefined state. As usleep_range does not guarantee
	 * exact sleep time, a GPTimer is used to time the resume sequence.
	 * If the driver exceeds the allowable time for sending SOFs, the
	 * resume sequence is repeated.
	 */
	if (ehci->resume_sof_bug && resume_needed) {
		if (!tight_resume) {
			mehci->resume_again = 0;
			ehci_writel(ehci, GPT_LD(RESUME_SIGNAL_TIME_MS),
					&mehci->timer->gptimer0_ld);
			ehci_writel(ehci, GPT_RESET | GPT_RUN,
					&mehci->timer->gptimer0_ctrl);
			ehci_writel(ehci, INTR_MASK | STS_GPTIMER0_INTERRUPT,
					&ehci->regs->intr_enable);

			ehci_writel(ehci, GPT_LD(RESUME_SIGNAL_TIME_SOF_MS),
					&mehci->timer->gptimer1_ld);
			ehci_writel(ehci, GPT_RESET | GPT_RUN,
				&mehci->timer->gptimer1_ctrl);

			spin_unlock_irq(&ehci->lock);
			if (pdata && pdata->swfi_latency)
				pm_qos_update_request(&mehci->pm_qos_req_dma,
					pdata->swfi_latency + 1);
			wait_for_completion(&mehci->gpt0_completion);
			if (pdata && pdata->swfi_latency)
				pm_qos_update_request(&mehci->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
			spin_lock_irq(&ehci->lock);
		} else {
			dbg_log_event(NULL, "FPR: Tightloop", 0);
			/* do the resume in a tight loop */
			handshake(ehci, &ehci->regs->port_status[0],
				PORT_RESUME, 0, 22 * 1000);
			ehci_writel(ehci, ehci_readl(ehci,
				&ehci->regs->command) | CMD_RUN,
				&ehci->regs->command);
		}

		if (mehci->resume_again) {
			int temp;

			dbg_log_event(NULL, "FPR: Re-Resume", retry_cnt);
			pr_info("FPR: retry count: %d\n", retry_cnt);
			spin_unlock_irq(&ehci->lock);
			temp = ehci_readl(ehci, &ehci->regs->port_status[0]);
			temp &= ~PORT_RWC_BITS;
			temp |= PORT_SUSPEND;
			ehci_writel(ehci, temp, &ehci->regs->port_status[0]);
			/* Keep the bus idle for 5ms so that peripheral
			 * can detect and initiate suspend
			 */
			usleep_range(5000, 5000);
			dbg_log_event(NULL,
				"FPR: RResume",
				ehci_readl(ehci, &ehci->regs->port_status[0]));
			spin_lock_irq(&ehci->lock);
			mehci->resume_again = 0;
			retry_cnt++;
			goto resume_again;
		}
	}

	dbg_log_event(NULL, "FPR: RT-Done", 0);
	mehci->resume_status = 1;
	spin_unlock_irq(&ehci->lock);

	complete(&mehci->rt_completion);

	return 0;
}
Code Example #13
File: irixsig.c  Project: kzlin129/tt-gpl
asmlinkage void
irix_sigreturn(struct pt_regs *regs)
{
	struct sigctx_irix5 *context, *magic;
	unsigned long umask, mask;
	u64 *fregs;
	int sig, i, base = 0;
	sigset_t blocked;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (regs->regs[2] == 1000)
		base = 1;

	context = (struct sigctx_irix5 *) regs->regs[base + 4];
	magic = (struct sigctx_irix5 *) regs->regs[base + 5];
	sig = (int) regs->regs[base + 6];
#ifdef DEBUG_SIG
	printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n",
	       current->comm, current->pid, context, magic, sig);
#endif
	if (!context)
		context = magic;
	if (!access_ok(VERIFY_READ, context, sizeof(struct sigctx_irix5)))
		goto badframe;

#ifdef DEBUG_SIG
	dump_irix5_sigctx(context);
#endif

	__get_user(regs->cp0_epc, &context->pc);
	umask = context->rmask; mask = 2;
	for (i = 1; i < 32; i++, mask <<= 1) {
		if(umask & mask)
			__get_user(regs->regs[i], &context->regs[i]);
	}
	__get_user(regs->hi, &context->hi);
	__get_user(regs->lo, &context->lo);

	if ((umask & 1) && context->usedfp) {
		fregs = (u64 *) &current->thread.fpu;
		for(i = 0; i < 32; i++)
			fregs[i] = (u64) context->fpregs[i];
		__get_user(current->thread.fpu.hard.fcr31, &context->fpcsr);
	}

	/* XXX do sigstack crapola here... XXX */

	if (__copy_from_user(&blocked, &context->sigset, sizeof(blocked)))
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Don't let your children do this ...
	 */
	if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
		do_syscall_trace(regs, 1);
	__asm__ __volatile__(
		"move\t$29,%0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
		/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
Code Example #14
File: virtio.c  Project: 383530895/linux
static void virtio_config_disable(struct virtio_device *dev)
{
	spin_lock_irq(&dev->config_lock);
	dev->config_enabled = false;
	spin_unlock_irq(&dev->config_lock);
}
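
For symmetry, the enable side in the same kernel file flips the flag back and replays any config-change notification that arrived while delivery was disabled. A sketch of that counterpart, paraphrased from drivers/virtio/virtio.c of the same era rather than quoted from the listed project:

static void virtio_config_enable(struct virtio_device *dev)
{
	spin_lock_irq(&dev->config_lock);
	dev->config_enabled = true;
	if (dev->config_change_pending)
		__virtio_config_changed(dev);	/* replay the deferred event */
	dev->config_change_pending = false;
	spin_unlock_irq(&dev->config_lock);
}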
Code Example #15
File: signal.c  Project: TitaniumBoy/lin
/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
			 unsigned long orig_i0, int restart_syscall)
{
	unsigned long signr;
	siginfo_t info;
	struct k_sigaction *ka;
	
	if (!oldset)
		oldset = &current->blocked;

#ifdef CONFIG_SPARC32_COMPAT
	if (current->thread.flags & SPARC_FLAG_32BIT) {
		extern asmlinkage int do_signal32(sigset_t *, struct pt_regs *,
						  unsigned long, int);
		return do_signal32(oldset, regs, orig_i0, restart_syscall);
	}
#endif	
	for (;;) {
		spin_lock_irq(&current->sigmask_lock);
		signr = dequeue_signal(&current->blocked, &info);
		spin_unlock_irq(&current->sigmask_lock);
		
		if (!signr) break;

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			current->exit_code = signr;
			current->state = TASK_STOPPED;
			notify_parent(current, SIGCHLD);
			schedule();
			if (!(signr = current->exit_code))
				continue;
			current->exit_code = 0;
			if (signr == SIGSTOP)
				continue;

			/* Update the siginfo structure.  Is this good?  */
			if (signr != info.si_signo) {
				info.si_signo = signr;
				info.si_errno = 0;
				info.si_code = SI_USER;
				info.si_pid = current->p_pptr->pid;
				info.si_uid = current->p_pptr->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				send_sig_info(signr, &info, current);
				continue;
			}
		}
		
		ka = &current->sig->action[signr-1];
		
		if(ka->sa.sa_handler == SIG_IGN) {
			if(signr != SIGCHLD)
				continue;

                        /* sys_wait4() grabs the master kernel lock, so
                         * we need not do so, that sucker should be
                         * threaded and would not be that difficult to
                         * do anyways.
                         */
                        while(sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
                                ;
			continue;
		}
		if(ka->sa.sa_handler == SIG_DFL) {
			unsigned long exit_code = signr;
			
			if(current->pid == 1)
				continue;
			switch(signr) {
			case SIGCONT: case SIGCHLD: case SIGWINCH:
				continue;

			case SIGTSTP: case SIGTTIN: case SIGTTOU:
				if (is_orphaned_pgrp(current->pgrp))
					continue;

			case SIGSTOP:
				if (current->ptrace & PT_PTRACED)
					continue;
				current->state = TASK_STOPPED;
				current->exit_code = signr;
				if(!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags &
				     SA_NOCLDSTOP))
					notify_parent(current, SIGCHLD);
				schedule();
				continue;

			case SIGQUIT: case SIGILL: case SIGTRAP:
			case SIGABRT: case SIGFPE: case SIGSEGV:
			case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
				if (do_coredump(signr, regs))
					exit_code |= 0x80;
#ifdef DEBUG_SIGNALS
				/* Very useful to debug the dynamic linker */
				printk ("Sig %d going...\n", (int)signr);
				show_regs (regs);
#ifdef DEBUG_SIGNALS_TRACE
				{
					struct reg_window *rw = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
					unsigned long ins[8];
                                                
					while(rw &&
					      !(((unsigned long) rw) & 0x3)) {
					        copy_from_user(ins, &rw->ins[0], sizeof(ins));
						printk("Caller[%016lx](%016lx,%016lx,%016lx,%016lx,%016lx,%016lx)\n", ins[7], ins[0], ins[1], ins[2], ins[3], ins[4], ins[5]);
						rw = (struct reg_window *)(unsigned long)(ins[6] + STACK_BIAS);
					}
				}
#endif			
#ifdef DEBUG_SIGNALS_MAPS	
				printk("Maps:\n");
				read_maps();
#endif
#endif
				/* fall through */
			default:
				sigaddset(&current->pending.signal, signr);
				recalc_sigpending(current);
				current->flags |= PF_SIGNALED;
				do_exit(exit_code);
				/* NOT REACHED */
			}
		}
		if(restart_syscall)
			syscall_restart(orig_i0, regs, &ka->sa);
		handle_signal(signr, ka, &info, oldset, regs);
		return 1;
	}
	if(restart_syscall &&
	   (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	    regs->u_regs[UREG_I0] == ERESTARTSYS ||
	    regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	return 0;
}
Code Example #16
File: signal.c  Project: 12019/hg556a_source
static void
handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
	struct pt_regs * regs)
{
	struct k_sigaction *ka = &current->sighand->action[sig-1];

	/* Are we from a system call? */
	if (regs->tra >= 0) {
		/* If so, check system call restarting.. */
		switch (regs->regs[0]) {
			case -ERESTARTNOHAND:
				regs->regs[0] = -EINTR;
				break;

			case -ERESTARTSYS:
				if (!(ka->sa.sa_flags & SA_RESTART)) {
					regs->regs[0] = -EINTR;
					break;
				}
			/* fallthrough */
			case -ERESTARTNOINTR:
				regs->pc -= 2;
		}
	} else {
		/* gUSA handling */
#ifdef CONFIG_PREEMPT
		unsigned long flags;

		local_irq_save(flags);
#endif
		if (regs->regs[15] >= 0xc0000000) {
			int offset = (int)regs->regs[15];

			/* Reset stack pointer: clear critical region mark */
			regs->regs[15] = regs->regs[1];
			if (regs->pc < regs->regs[0])
				/* Go to rewind point #1 */
				regs->pc = regs->regs[0] + offset - 2;
		}
#ifdef CONFIG_PREEMPT
		local_irq_restore(flags);
#endif
	}

	/* Set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		setup_rt_frame(sig, ka, info, oldset, regs);
	else
		setup_frame(sig, ka, oldset, regs);

	if (ka->sa.sa_flags & SA_ONESHOT)
		ka->sa.sa_handler = SIG_DFL;

	if (!(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}
Code Example #17
File: signal.c  Project: TitaniumBoy/lin
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext *ucp = (struct ucontext *) regs->u_regs[UREG_I0];
	struct thread_struct *tp = &current->thread;
	mc_gregset_t *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	if(tp->w_saved						||
	   (((unsigned long)ucp) & (sizeof(unsigned long)-1))	||
	   (!__access_ok((unsigned long)ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp  = &ucp->uc_mcontext.mc_gregs;
	err  = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	if(err || ((pc | npc) & 3))
		goto do_sigsegv;
	if(regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sigmask_lock);
		current->blocked = set;
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
	}
	if ((tp->flags & SPARC_FLAG_32BIT) != 0) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
	err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if(fenab) {
		unsigned long *fpregs = (unsigned long *)(((char *)current) + AOFF_task_fpregs);
		unsigned long fprs;
		
		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
			 ((unsigned long *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
			 (sizeof(unsigned int) * 32));
		err |= __get_user(current->thread.xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current->thread.gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	do_exit(SIGSEGV);
}
Code Example #18
File: cdc-acm.c  Project: mb3dot/community-b3-kernel
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
	struct acm *acm;
	int rv = -ENODEV;

	mutex_lock(&open_mutex);

	acm = acm_table[tty->index];
	if (!acm || !acm->dev)
		goto out;
	else
		rv = 0;

	dev_dbg(&acm->control->dev, "%s\n", __func__);

	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);

	tty->driver_data = acm;
	tty_port_tty_set(&acm->port, tty);

	if (usb_autopm_get_interface(acm->control) < 0)
		goto early_bail;
	else
		acm->control->needs_remote_wakeup = 1;

	mutex_lock(&acm->mutex);
	if (acm->port.count++) {
		mutex_unlock(&acm->mutex);
		usb_autopm_put_interface(acm->control);
		goto out;
	}

	acm->ctrlurb->dev = acm->dev;
	if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
		dev_err(&acm->control->dev,
			"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
		goto bail_out;
	}

	acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
	if (acm_set_control(acm, acm->ctrlout) < 0 &&
	    (acm->ctrl_caps & USB_CDC_CAP_LINE))
		goto bail_out;

	usb_autopm_put_interface(acm->control);

	/*
	 * Unthrottle device in case the TTY was closed while throttled.
	 */
	spin_lock_irq(&acm->read_lock);
	acm->throttled = 0;
	acm->throttle_req = 0;
	spin_unlock_irq(&acm->read_lock);

	if (acm_submit_read_urbs(acm, GFP_KERNEL))
		goto bail_out;

	set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
	rv = tty_port_block_til_ready(&acm->port, tty, filp);

	mutex_unlock(&acm->mutex);
out:
	mutex_unlock(&open_mutex);
	return rv;

bail_out:
	acm->port.count--;
	mutex_unlock(&acm->mutex);
	usb_autopm_put_interface(acm->control);
early_bail:
	mutex_unlock(&open_mutex);
	tty_port_tty_set(&acm->port, NULL);
	return -EIO;
}
Code Example #19
/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int		port_num = tty->index;
	struct gs_port	*port;
	int		status;

	if (port_num < 0 || port_num >= n_ports)
		return -ENXIO;

	do {
		mutex_lock(&ports[port_num].lock);
		port = ports[port_num].port;
		if (!port)
			status = -ENODEV;
		else {
			spin_lock_irq(&port->port_lock);

			/* already open?  Great. */
			if (port->open_count) {
				status = 0;
				port->open_count++;

			/* currently opening/closing? wait ... */
			} else if (port->openclose) {
				status = -EBUSY;

			/* ... else we do the work */
			} else {
				status = -EAGAIN;
				port->openclose = true;
			}
			spin_unlock_irq(&port->port_lock);
		}
		mutex_unlock(&ports[port_num].lock);

		switch (status) {
		default:
			/* fully handled */
			return status;
		case -EAGAIN:
			/* must do the work */
			break;
		case -EBUSY:
			/* wait for EAGAIN task to finish */
			msleep(1);
			/* REVISIT could have a waitchannel here, if
			 * concurrent open performance is important
			 */
			break;
		}
	} while (status != -EAGAIN);

	/* Do the "real open" */
	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open */
	if (port->port_write_buf.buf_buf == NULL) {

		spin_unlock_irq(&port->port_lock);
		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
		spin_lock_irq(&port->port_lock);

		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				port->port_num, tty, file);
			port->openclose = false;
			goto exit_unlock_port;
		}
	}

	/* REVISIT if REMOVED (ports[].port NULL), abort the open
	 * to let rmmod work faster (but this way isn't wrong).
	 */

	/* REVISIT maybe wait for "carrier detect" */

	tty->driver_data = port;
	port->port_tty = tty;

	port->open_count = 1;
	port->openclose = false;

	/* low_latency means the ldisc work is carried out in the same
	 * context as tty_flip_buffer_push(). With low_latency = 0 the push
	 * may be called from IRQ context, but it is better to use a
	 * dedicated worker thread to push the data.
	 */
	tty->low_latency = 1;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial	*gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

	status = 0;

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
	return status;
}
Code Example #20
static int s3c24xx_pcm_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s3c24xx_runtime_data *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct s3c24xx_pcm_dma_params *dma = rtd->dai->cpu_dai->dma_data;
	unsigned long totbytes;
	int ret = 0;

	s3cdbg("Entered %s, params = %p\n", __FUNCTION__, prtd->params);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		totbytes = params_buffer_bytes(params) * ANDROID_BUF_NUM;
	else
		totbytes = params_buffer_bytes(params);

//	printk("[%d]:ring_buf_num %d\n", substream->stream, ring_buf_num);

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!dma)
		return 0;

	/* this may get called several times by oss emulation
	 * with different params */
	if (prtd->params == NULL) {
		prtd->params = dma;
		s3cdbg("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);


		/* prepare DMA */
		ret = s3c2410_dma_request(prtd->params->channel,
					  prtd->params->client, NULL);

		if (ret) {
			printk(KERN_ERR "failed to get dma channel\n");
			return ret;
		}
	} else if (prtd->params != dma) {
		s3c2410_dma_free(prtd->params->channel, prtd->params->client);
		prtd->params = dma;
		s3cdbg("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);


		/* prepare DMA */
		ret = s3c2410_dma_request(prtd->params->channel,
					  prtd->params->client, NULL);

		if (ret) {
			printk(KERN_ERR "failed to get dma channel\n");
			return ret;
		}
	}

	/* channel needs configuring for mem=>device, increment memory addr,
	 * sync to pclk, half-word transfers to the IIS-FIFO. */
#if !defined (CONFIG_CPU_S3C6400) && !defined (CONFIG_CPU_S3C6410)  && !defined(CONFIG_CPU_S5PC100) && !defined (CONFIG_CPU_S5P6440)
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_MEM, S3C2410_DISRCC_INC |
				S3C2410_DISRCC_APB, prtd->params->dma_addr);

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size,
				S3C2410_DCON_SYNC_PCLK | 
				S3C2410_DCON_HANDSHAKE);
	} else {
		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size,
				S3C2410_DCON_HANDSHAKE | 
				S3C2410_DCON_SYNC_PCLK);

		s3c2410_dma_devconfig(prtd->params->channel,
					S3C2410_DMASRC_HW, 0x3,
					prtd->params->dma_addr);
	}

#else
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_MEM, 0,
				prtd->params->dma_addr);

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size, 0);
	} else {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_HW, 0,
				prtd->params->dma_addr);		

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size, 0);
	}
#endif

	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
				    s3c24xx_audio_buffdone);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	runtime->dma_bytes = totbytes;

	spin_lock_irq(&prtd->lock);
	prtd->dma_limit = runtime->hw.periods_min;
	prtd->dma_period = params_period_bytes(params);
	prtd->dma_start = runtime->dma_addr;
	prtd->dma_pos = prtd->dma_start;
	prtd->dma_end = prtd->dma_start + totbytes;
	spin_unlock_irq(&prtd->lock);

	s3cdbg("Entered %s, line %d \n", __FUNCTION__, __LINE__);
	return 0;
}
Code Example #21
File: smp.c  Project: mrtos/Logitech-Revue
void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}
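
The matching acquire helper presumably sits right beside it; the pair brackets regions that must exclude smp_call_function() IPIs. A sketch of the expected mirror, not quoted from the project:

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}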
Code Example #22
static int target_fabric_mappedlun_link(
	struct config_item *lun_acl_ci,
	struct config_item *lun_ci)
{
	struct se_dev_entry *deve;
	struct se_lun *lun = container_of(to_config_group(lun_ci),
			struct se_lun, lun_group);
	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
			struct se_lun_acl, se_lun_group);
	struct se_portal_group *se_tpg;
	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
	int ret = 0, lun_access;
	/*
	 * Ensure that the source port exists
	 */
	if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
		pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
				"_tpg does not exist\n");
		return -EINVAL;
	}
	se_tpg = lun->lun_sep->sep_tpg;

	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
	tpg_ci = &nacl_ci->ci_group->cg_item;
	wwn_ci = &tpg_ci->ci_group->cg_item;
	tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
	wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
	/*
	 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
	 */
	if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
		pr_err("Illegal Initiator ACL SymLink outside of %s\n",
			config_item_name(wwn_ci));
		return -EINVAL;
	}
	if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
		pr_err("Illegal Initiator ACL Symlink outside of %s"
			" TPGT: %s\n", config_item_name(wwn_ci),
			config_item_name(tpg_ci));
		return -EINVAL;
	}
	/*
	 * If this struct se_node_acl was dynamically generated with
	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
	 * which will be write protected (READ-ONLY) when
	 * tpg_1/attrib/demo_mode_write_protect=1
	 */
	spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
	deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
		lun_access = deve->lun_flags;
	else
		lun_access =
			(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
					   TRANSPORT_LUNFLAGS_READ_WRITE;
	spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
	/*
	 * Determine the actual mapped LUN value user wants..
	 *
	 * This value is what the SCSI Initiator actually sees the
	 * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
	 */
	ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
			lun->unpacked_lun, lun_access);

	return (ret < 0) ? -EINVAL : 0;
}
Code Example #23
File: ldusb.c  Project: Tigrouzen/k1099
/**
 *	ld_usb_read
 */
static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
			   loff_t *ppos)
{
	struct ld_usb *dev;
	size_t *actual_buffer;
	size_t bytes_to_read;
	int retval = 0;
	int rv;

	dev = file->private_data;

	/* verify that we actually have some data to read */
	if (count == 0)
		goto exit;

	/* lock this object */
	if (down_interruptible(&dev->sem)) {
		retval = -ERESTARTSYS;
		goto exit;
	}

	/* verify that the device wasn't unplugged */
	if (dev->intf == NULL) {
		retval = -ENODEV;
		err("No device or device unplugged %d\n", retval);
		goto unlock_exit;
	}

	/* wait for data */
	spin_lock_irq(&dev->rbsl);
	if (dev->ring_head == dev->ring_tail) {
		dev->interrupt_in_done = 0;
		spin_unlock_irq(&dev->rbsl);
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto unlock_exit;
		}
		retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
		if (retval < 0)
			goto unlock_exit;
	} else {
		spin_unlock_irq(&dev->rbsl);
	}

	/* actual_buffer contains actual_length + interrupt_in_buffer */
	actual_buffer = (size_t*)(dev->ring_buffer + dev->ring_tail*(sizeof(size_t)+dev->interrupt_in_endpoint_size));
	bytes_to_read = min(count, *actual_buffer);
	if (bytes_to_read < *actual_buffer)
		dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
			 *actual_buffer-bytes_to_read);

	/* copy one interrupt_in_buffer from ring_buffer into userspace */
	if (copy_to_user(buffer, actual_buffer+1, bytes_to_read)) {
		retval = -EFAULT;
		goto unlock_exit;
	}
	dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;

	retval = bytes_to_read;

	spin_lock_irq(&dev->rbsl);
	if (dev->buffer_overflow) {
		dev->buffer_overflow = 0;
		spin_unlock_irq(&dev->rbsl);
		rv = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
		if (rv < 0)
			dev->buffer_overflow = 1;
	} else {
		spin_unlock_irq(&dev->rbsl);
	}

unlock_exit:
	/* unlock the device */
	up(&dev->sem);

exit:
	return retval;
}
Code Example #24
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
		  int noblock, int flags, int *addr_len)
{
  	struct sk_buff *skb;
  	int copied, err;

  	if (addr_len)
  		*addr_len=sizeof(struct sockaddr_in6);
  
	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

 	copied = skb->len - sizeof(struct udphdr);
  	if (copied > len) {
  		copied = len;
  		msg->msg_flags |= MSG_TRUNC;
  	}

	if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else if (msg->msg_flags&MSG_TRUNC) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		struct sockaddr_in6 *sin6;
	  
		sin6 = (struct sockaddr_in6 *) msg->msg_name;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = skb->h.uh->source;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;

		if (skb->protocol == htons(ETH_P_IP)) {
			ipv6_addr_set(&sin6->sin6_addr, 0, 0,
				      htonl(0xffff), skb->nh.iph->saddr);
			if (sk->protinfo.af_inet.cmsg_flags)
				ip_cmsg_recv(msg, skb);
		} else {
			memcpy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr,
			       sizeof(struct in6_addr));

			if (sk->net_pinfo.af_inet6.rxopt.all)
				datagram_recv_ctl(sk, msg, skb);
			if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
				struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
				sin6->sin6_scope_id = opt->iif;
			}
		}
  	}
	err = copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	/* Clear queue. */
	if (flags&MSG_PEEK) {
		int clear = 0;
		spin_lock_irq(&sk->receive_queue.lock);
		if (skb == skb_peek(&sk->receive_queue)) {
			__skb_unlink(skb, &sk->receive_queue);
			clear = 1;
		}
		spin_unlock_irq(&sk->receive_queue.lock);
		if (clear)
			kfree_skb(skb);
	}

	/* Error for blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	UDP6_INC_STATS_USER(UdpInErrors);
	goto out_free;
}
Code Example #25
static int stheno_do_request(void* arg)
{
    while(1)
    {
        wait_event_interruptible(stheno_process_q, stheno_processing == 1);

        stheno_processing = 0;

        do
        {
            struct request *req;
            int ret;

            spin_lock_irq( stheno_queue->queue_lock );
            req = blk_fetch_request( stheno_queue );
            spin_unlock_irq( stheno_queue->queue_lock );

next_segment:
            if(req == NULL)break;

            /* ignore not fs cmd */
            if( ! blk_fs_request( req ) )
            {
                printk( KERN_ERR "skip no fs request\n" );
                spin_lock_irq( stheno_queue->queue_lock );
				ret=__blk_end_request_cur(req, -EIO);
				spin_unlock_irq( stheno_queue->queue_lock );
				if(ret==true) goto next_segment;
                continue;
            }

            /* ignore nr_sectors == 0 request */
            if( blk_rq_sectors(req) == 0 || blk_rq_cur_sectors(req) == 0)
            {
                printk( KERN_ERR "skip nr_sectors == (%d) current_nr_sectors == (%d) request\n", (int)blk_rq_sectors(req), (int)blk_rq_cur_sectors(req) );
                spin_lock_irq( stheno_queue->queue_lock );
				ret=__blk_end_request_cur(req, -EIO);
				spin_unlock_irq( stheno_queue->queue_lock );
				if(ret==true) goto next_segment;
                continue;
            }

            stheno_printk( KERN_NOTICE "stheno_request: REQUEST START! %s sect(%d) curr_count(%d) count(%d)\n", (rq_data_dir( req ) == WRITE) ? "write" : "read",  (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));

            if( seg_info.dirty )
            {
                stheno_printk( KERN_NOTICE "stheno_request:                %s dirty(%d) seg_start_sect(%d) seg_pos(%d) seg_nr_count(%d)\n", (rq_data_dir( req ) == WRITE) ? "write" : "read",  (int)seg_info.dirty,  (int)seg_info.start_sector, (int)seg_info.current_pos, (int)seg_info.nr_sectors );
            }

            if( seg_info.dirty && rq_data_dir( req ) != seg_info.cmd )
            {
                if( seg_info.cmd == WRITE )
                {
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) start\n", (int)seg_info.start_sector, (int)seg_info.current_pos);
                    ret = euryale_write_process(seg_info.start_sector, seg_info.current_pos, seg_info.buffer);
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) ret(%d) end\n", (int)seg_info.start_sector, (int)seg_info.current_pos, ret);
                    if( ret < 0 )
                    {
                        printk( KERN_ERR "stheno_request: segment write sect(%d) count(%d) ERROR\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        
                        spin_lock_irq( stheno_queue->queue_lock );
						ret=__blk_end_request_cur(req, -EIO);
						spin_unlock_irq( stheno_queue->queue_lock );
						if(ret==true) goto next_segment;
						continue;                      
                    }
                }
                seg_info.dirty = 0;
            }
    
            if( rq_data_dir( req ) == WRITE )
            {
                if( !seg_info.dirty
                    && blk_rq_cur_sectors(req) < blk_rq_sectors(req) )
                {
                    stheno_printk( KERN_NOTICE "stheno_request: new_segment(%d) cur_count(%d) count(%d)\n", (int)seg_info.dirty, (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));

                    seg_info.cmd = rq_data_dir( req ); 
                    seg_info.start_sector = blk_rq_pos(req);
                    seg_info.nr_sectors = blk_rq_sectors(req);
                    memcpy( seg_info.buffer, req->buffer, blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos = blk_rq_cur_sectors(req);
                    seg_info.dirty = 1;

					spin_lock_irq( stheno_queue->queue_lock );
					ret=__blk_end_request_cur(req, 0);
					spin_unlock_irq( stheno_queue->queue_lock );
					if(ret==true) goto next_segment;
                    continue;
                }
                else if( seg_info.dirty
                    && seg_info.start_sector + seg_info.current_pos == blk_rq_pos(req)
                    && seg_info.nr_sectors - seg_info.current_pos == blk_rq_sectors(req)
                    && seg_info.nr_sectors - seg_info.current_pos >= blk_rq_cur_sectors(req) )
                {
                    stheno_printk( KERN_NOTICE "stheno_request: add_segment(%d) cur_count(%d) count(%d)\n", (int)seg_info.dirty, (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));

                    memcpy( seg_info.buffer + (seg_info.current_pos * SECT_SIZE), req->buffer, blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos += blk_rq_cur_sectors(req);
                    if( seg_info.current_pos == seg_info.nr_sectors )
                    {
                        stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) start\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        ret = euryale_write_process(seg_info.start_sector, seg_info.current_pos, seg_info.buffer);
                        stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) end\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        seg_info.dirty = 0;
                        if( ret < 0 )
                        {
                            printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) ERROR\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                            spin_lock_irq( stheno_queue->queue_lock );
							ret=__blk_end_request_cur(req, -EIO);
							spin_unlock_irq( stheno_queue->queue_lock );
							if(ret==true) goto next_segment;
							continue;                                   
                        }
                    }
					
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret = __blk_end_request_cur( req, 0 );
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if( ret ) goto next_segment;
                    continue;
                }
                else if( seg_info.dirty )
                {
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) start\n", (int)seg_info.start_sector, (int)seg_info.current_pos);
                    ret = euryale_write_process(seg_info.start_sector, seg_info.current_pos, seg_info.buffer);
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) end\n", (int)seg_info.start_sector, (int)seg_info.current_pos);
                    seg_info.dirty = 0;
                    if( ret < 0 )
                    {
                        printk( KERN_ERR "stheno_request: segment write sect(%d) count(%d) ERROR\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        spin_lock_irq( stheno_queue->queue_lock );
						ret=__blk_end_request_cur(req, -EIO);
						spin_unlock_irq( stheno_queue->queue_lock );
						if(ret==true) goto next_segment;
						continue;                                
                    }
                }
            }
            else /* READ */
            {
                if( !seg_info.dirty
                    && blk_rq_cur_sectors(req) < blk_rq_sectors(req) )
                {
                    seg_info.cmd = rq_data_dir( req ); 
                    seg_info.start_sector = blk_rq_pos(req);
                    seg_info.nr_sectors = blk_rq_sectors(req);

                    stheno_printk( KERN_NOTICE "stheno_request: segment read sect(%d) count(%d) start\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                    ret = euryale_read_process(seg_info.start_sector, seg_info.nr_sectors, seg_info.buffer);
                    stheno_printk( KERN_NOTICE "stheno_request: segment read sect(%d) count(%d) end\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                    if( ret < 0 )
                    {
                        printk( KERN_ERR "stheno_request: segment read sect(%d) count(%d) ERROR\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        spin_lock_irq( stheno_queue->queue_lock );
                        ret = __blk_end_request_cur( req, -EIO );
                        spin_unlock_irq( stheno_queue->queue_lock );
                        if( ret ) goto next_segment;
                        continue;
                    }

                    memcpy(req->buffer, seg_info.buffer, blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos = blk_rq_cur_sectors(req);
                    seg_info.dirty = 1;

                    spin_lock_irq( stheno_queue->queue_lock );
                    ret = __blk_end_request_cur( req, 0 );
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if( ret ) goto next_segment;
                    continue;
                }
                else if( seg_info.dirty
                    && seg_info.start_sector + seg_info.current_pos == blk_rq_pos(req)
                    && seg_info.nr_sectors - seg_info.current_pos == blk_rq_sectors(req)
                    && seg_info.nr_sectors - seg_info.current_pos >= blk_rq_cur_sectors(req) )
                {
                    memcpy(req->buffer, seg_info.buffer + (seg_info.current_pos * SECT_SIZE), blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos += blk_rq_cur_sectors(req);
                    if( seg_info.current_pos == seg_info.nr_sectors )
                    {
                        seg_info.dirty = 0;
                    }

                    spin_lock_irq( stheno_queue->queue_lock );
                    ret = __blk_end_request_cur( req, 0 );
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if( ret ) goto next_segment;
                    continue;
                }
                else
                {
                    seg_info.dirty = 0;
                }
            }

            if( rq_data_dir( req ) == WRITE )
            {
                stheno_printk( KERN_NOTICE "stheno_request: write sect(%d) cur_count(%d) count(%d) start\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                ret = euryale_write_process(blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer);
                stheno_printk( KERN_NOTICE "stheno_request: write sect(%d) cur_count(%d) count(%d) end\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                if( ret < 0 )
                {
                    printk( KERN_ERR "stheno_request: write sect(%d) cur_count(%d) count(%d) ERROR\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret = __blk_end_request_cur( req, -EIO );
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if( ret ) goto next_segment;
                    continue;
                }
            }
            else
            {
                stheno_printk( KERN_NOTICE "stheno_request: read sect(%d) cur_count(%d) count(%d) start\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                ret = euryale_read_process(blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer);
                stheno_printk( KERN_NOTICE "stheno_request: read sect(%d) cur_count(%d) count(%d) end\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                if( ret < 0 )
                {
                    printk( KERN_ERR "stheno_request: read sect(%d) cur_count(%d) count(%d) ERROR\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret = __blk_end_request_cur( req, -EIO );
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if( ret ) goto next_segment;
                    continue;
                }
            }

            spin_lock_irq( stheno_queue->queue_lock );
            ret = __blk_end_request_cur( req, 0 );
            spin_unlock_irq( stheno_queue->queue_lock );
            if( ret ) goto next_segment;
        }
        while(1);

    }
    
    return 0;
}
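
Every exit path in stheno_request above repeats the same lock/complete/unlock sequence. Under the same legacy (pre-blk-mq) block API it could be factored into one helper; a minimal sketch with a hypothetical name:

/* Hypothetical helper wrapping the sequence repeated above: complete the
 * current chunk of req under the queue lock.  __blk_end_request_cur()
 * returns true while the request still has sectors pending, i.e. the
 * caller should go pick up the next segment. */
static bool stheno_end_cur(struct request *req, int error)
{
	bool pending;

	spin_lock_irq(stheno_queue->queue_lock);
	pending = __blk_end_request_cur(req, error);
	spin_unlock_irq(stheno_queue->queue_lock);

	return pending;
}

Each inline block above would then collapse to a single call, e.g. if (stheno_end_cur(req, -EIO)) goto next_segment; followed by continue;.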
Code example #26
File: hostap.c  Project: AkyZero/wrapfs-latest
/*
 * Description:
 *      vt6655_hostap_ioctl - main ioctl entry point for the hostap daemon.
 *
 * Parameters:
 *  In:
 *      pDevice   - device private data
 *      iw_point  - ioctl request (length and user-space buffer pointer)
 *  Out:
 *      p->pointer buffer is updated for "get" commands
 *
 * Return Value:
 *      0 on success, negative errno on failure
 */
int vt6655_hostap_ioctl(PSDevice pDevice, struct iw_point *p)
{
	struct viawget_hostapd_param *param;
	int ret = 0;
	int ap_ioctl = 0;

	if (p->length < sizeof(struct viawget_hostapd_param) ||
	    p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
		return -EINVAL;

	param = kmalloc((int)p->length, GFP_KERNEL);
	if (param == NULL)
		return -ENOMEM;

	if (copy_from_user(param, p->pointer, p->length)) {
		ret = -EFAULT;
		goto out;
	}

	switch (param->cmd) {
	case VIAWGET_HOSTAPD_SET_ENCRYPTION:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_ENCRYPTION\n");
		spin_lock_irq(&pDevice->lock);
		ret = hostap_set_encryption(pDevice, param, p->length);
		spin_unlock_irq(&pDevice->lock);
		break;
	case VIAWGET_HOSTAPD_GET_ENCRYPTION:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_GET_ENCRYPTION\n");
		spin_lock_irq(&pDevice->lock);
		ret = hostap_get_encryption(pDevice, param, p->length);
		spin_unlock_irq(&pDevice->lock);
		break;
	case VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR\n");
		ret = -EOPNOTSUPP;
		goto out;
	case VIAWGET_HOSTAPD_FLUSH:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_FLUSH\n");
		spin_lock_irq(&pDevice->lock);
		hostap_flush_sta(pDevice);
		spin_unlock_irq(&pDevice->lock);
		break;
	case VIAWGET_HOSTAPD_ADD_STA:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_ADD_STA\n");
		spin_lock_irq(&pDevice->lock);
		ret = hostap_add_sta(pDevice, param);
		spin_unlock_irq(&pDevice->lock);
		break;
	case VIAWGET_HOSTAPD_REMOVE_STA:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_REMOVE_STA\n");
		spin_lock_irq(&pDevice->lock);
		ret = hostap_remove_sta(pDevice, param);
		spin_unlock_irq(&pDevice->lock);
		break;
	case VIAWGET_HOSTAPD_GET_INFO_STA:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_GET_INFO_STA\n");
		ret = hostap_get_info_sta(pDevice, param);
		ap_ioctl = 1;
		break;
	case VIAWGET_HOSTAPD_SET_FLAGS_STA:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_FLAGS_STA\n");
		ret = hostap_set_flags_sta(pDevice, param);
		break;
	case VIAWGET_HOSTAPD_MLME:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_MLME\n");
		ret = -EOPNOTSUPP;
		goto out;
	case VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT\n");
		ret = hostap_set_generic_element(pDevice, param);
		break;
	case VIAWGET_HOSTAPD_SCAN_REQ:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SCAN_REQ\n");
		ret = -EOPNOTSUPP;
		goto out;
	case VIAWGET_HOSTAPD_STA_CLEAR_STATS:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_STA_CLEAR_STATS\n");
		ret = -EOPNOTSUPP;
		goto out;
	default:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vt6655_hostap_ioctl: unknown cmd=%d\n",
			(int)param->cmd);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if ((ret == 0) && ap_ioctl) {
		if (copy_to_user(p->pointer, param, p->length))
			ret = -EFAULT;
	}

out:
	kfree(param);
	return ret;
}
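
Stripped of the driver specifics, the handler above is the standard bounce-buffer ioctl shape: validate the user-supplied length, copy the parameter block into kernel memory, dispatch on the command, and copy results back only for "get" style commands. A minimal sketch of that skeleton (names hypothetical):

/* Hypothetical skeleton of the bounce-buffer ioctl pattern used above. */
static int example_hostap_ioctl(struct iw_point *p)
{
	struct viawget_hostapd_param *param;
	int ret = 0;
	int wants_reply = 0;

	/* Validate before allocating: bounded, non-NULL user buffer. */
	if (p->length < sizeof(*param) ||
	    p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
		return -EINVAL;

	param = kmalloc(p->length, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	if (copy_from_user(param, p->pointer, p->length)) {
		ret = -EFAULT;
		goto out;
	}

	/* ... switch (param->cmd), setting wants_reply for "get" cmds ... */

	if (ret == 0 && wants_reply &&
	    copy_to_user(p->pointer, param, p->length))
		ret = -EFAULT;
out:
	kfree(param);
	return ret;
}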
Code example #27
File: rtc-mrst.c  Project: 33d/linux-2.6.21-hh20
static int __devinit
vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, int rtc_irq)
{
	int retval = 0;
	unsigned char rtc_control;

	/* There can be only one ... */
	if (mrst_rtc.dev)
		return -EBUSY;

	if (!iomem)
		return -ENODEV;

	iomem = request_mem_region(iomem->start, resource_size(iomem),
				   driver_name);
	if (!iomem) {
		dev_dbg(dev, "i/o mem already in use.\n");
		return -EBUSY;
	}

	mrst_rtc.irq = rtc_irq;
	mrst_rtc.iomem = iomem;
	mrst_rtc.dev = dev;
	dev_set_drvdata(dev, &mrst_rtc);

	mrst_rtc.rtc = rtc_device_register(driver_name, dev,
				&mrst_rtc_ops, THIS_MODULE);
	if (IS_ERR(mrst_rtc.rtc)) {
		retval = PTR_ERR(mrst_rtc.rtc);
		goto cleanup0;
	}

	rename_region(iomem, dev_name(&mrst_rtc.rtc->dev));

	spin_lock_irq(&rtc_lock);
	mrst_irq_disable(&mrst_rtc, RTC_PIE | RTC_AIE);
	rtc_control = vrtc_cmos_read(RTC_CONTROL);
	spin_unlock_irq(&rtc_lock);

	if (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))
		dev_dbg(dev, "TODO: support more than 24-hr BCD mode\n");

	if (rtc_irq) {
		retval = request_irq(rtc_irq, mrst_rtc_irq,
				IRQF_DISABLED, dev_name(&mrst_rtc.rtc->dev),
				mrst_rtc.rtc);
		if (retval < 0) {
			dev_dbg(dev, "IRQ %d is already in use, err %d\n",
				rtc_irq, retval);
			goto cleanup1;
		}
	}
	dev_dbg(dev, "initialised\n");
	return 0;

cleanup1:
	rtc_device_unregister(mrst_rtc.rtc);
cleanup0:
	dev_set_drvdata(dev, NULL);
	mrst_rtc.dev = NULL;
	release_mem_region(iomem->start, resource_size(iomem));
	dev_err(dev, "rtc-mrst: unable to initialise\n");
	return retval;
}
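
The error handling above follows the kernel's goto-ladder idiom: each successfully acquired resource gets a cleanup label, and a later failure jumps to the label that unwinds everything acquired so far, in reverse order. Reduced to a sketch with hypothetical example_get/example_put resources:

/* Sketch of the goto-ladder unwind used by the probe above
 * (example_get_xxx / example_put_xxx are hypothetical). */
static int example_probe(void)
{
	int err;

	err = example_get_region();     /* like request_mem_region() */
	if (err)
		return err;

	err = example_get_rtc();        /* like rtc_device_register() */
	if (err)
		goto cleanup0;

	err = example_get_irq();        /* like request_irq() */
	if (err)
		goto cleanup1;

	return 0;

cleanup1:
	example_put_rtc();
cleanup0:
	example_put_region();
	return err;
}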
Code example #28
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
{
	struct k_sigaction *ka;
	siginfo_t info;
	int single_stepping;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 0;

	if (!oldset)
		oldset = &current->blocked;

	single_stepping = ptrace_cancel_bpt(current);

	for (;;) {
		unsigned long signr;

		spin_lock_irq (&current->sigmask_lock);
		signr = dequeue_signal(&current->blocked, &info);
		spin_unlock_irq (&current->sigmask_lock);

		if (!signr)
			break;

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			/* Let the debugger run.  */
			current->exit_code = signr;
			current->state = TASK_STOPPED;
			notify_parent(current, SIGCHLD);
			schedule();
			single_stepping |= ptrace_cancel_bpt(current);

			/* We're back.  Did the debugger cancel the sig?  */
			if (!(signr = current->exit_code))
				continue;
			current->exit_code = 0;

			/* The debugger continued.  Ignore SIGSTOP.  */
			if (signr == SIGSTOP)
				continue;

			/* Update the siginfo structure.  Is this good? */
			if (signr != info.si_signo) {
				info.si_signo = signr;
				info.si_errno = 0;
				info.si_code = SI_USER;
				info.si_pid = current->p_pptr->pid;
				info.si_uid = current->p_pptr->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				send_sig_info(signr, &info, current);
				continue;
			}
		}

		ka = &current->sig->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) {
			if (signr != SIGCHLD)
				continue;
			/* Check for SIGCHLD: it's special.  */
			while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
				/* nothing */;
			continue;
		}

		if (ka->sa.sa_handler == SIG_DFL) {
			int exit_code = signr;

			/* Init gets no signals it doesn't want.  */
			if (current->pid == 1)
				continue;

			switch (signr) {
			case SIGCONT: case SIGCHLD: case SIGWINCH: case SIGURG:
				continue;

			case SIGTSTP: case SIGTTIN: case SIGTTOU:
				if (is_orphaned_pgrp(current->pgrp))
					continue;
				/* FALLTHRU */

			case SIGSTOP: {
				struct signal_struct *sig;
				current->state = TASK_STOPPED;
				current->exit_code = signr;
				sig = current->p_pptr->sig;
				if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
					notify_parent(current, SIGCHLD);
				schedule();
				single_stepping |= ptrace_cancel_bpt(current);
				continue;
			}

			case SIGQUIT: case SIGILL: case SIGTRAP:
			case SIGABRT: case SIGFPE: case SIGSEGV:
			case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
				if (do_coredump(signr, regs))
					exit_code |= 0x80;
				/* FALLTHRU */

			default:
				sigaddset(&current->pending.signal, signr);
				recalc_sigpending(current);
				current->flags |= PF_SIGNALED;
				do_exit(exit_code);
				/* NOTREACHED */
			}
		}

		/* Are we from a system call? */
		if (syscall) {
			switch (regs->ARM_r0) {
			case -ERESTARTNOHAND:
				regs->ARM_r0 = -EINTR;
				break;

			case -ERESTARTSYS:
				if (!(ka->sa.sa_flags & SA_RESTART)) {
					regs->ARM_r0 = -EINTR;
					break;
				}
				/* fallthrough */
			case -ERESTARTNOINTR:
				regs->ARM_r0 = regs->ARM_ORIG_r0;
				regs->ARM_pc -= 4;
			}
		}
		/* Whee!  Actually deliver the signal.  */
		handle_signal(signr, ka, &info, oldset, regs);
		if (single_stepping)
		    	ptrace_set_bpt(current);
		return 1;
	}

	if (syscall &&
	    (regs->ARM_r0 == -ERESTARTNOHAND ||
	     regs->ARM_r0 == -ERESTARTSYS ||
	     regs->ARM_r0 == -ERESTARTNOINTR)) {
		regs->ARM_r0 = regs->ARM_ORIG_r0;
		regs->ARM_pc -= 4;
	}
	if (single_stepping)
		ptrace_set_bpt(current);
	return 0;
}
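
The syscall-restart handling above amounts to a three-way decision on the error value left in r0; isolated as a standalone sketch (function name hypothetical, logic taken from the code above):

/* Sketch of the syscall-restart decision applied above.  Rewinding the
 * PC by one instruction (4 bytes on ARM) re-executes the SWI, so the
 * syscall is reissued after the signal handler returns. */
static void restart_syscall_sketch(struct pt_regs *regs,
				   struct k_sigaction *ka)
{
	switch (regs->ARM_r0) {
	case -ERESTARTNOHAND:           /* never restart across a handler */
		regs->ARM_r0 = -EINTR;
		break;

	case -ERESTARTSYS:              /* restart only if SA_RESTART set */
		if (!(ka->sa.sa_flags & SA_RESTART)) {
			regs->ARM_r0 = -EINTR;
			break;
		}
		/* fallthrough */
	case -ERESTARTNOINTR:           /* always restart */
		regs->ARM_r0 = regs->ARM_ORIG_r0;
		regs->ARM_pc -= 4;      /* back up to the SWI instruction */
	}
}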
Code example #29
File: main.c  Project: Excito/compat-wireless
/**
 *  lbs_thread - handles the major jobs in the LBS driver.
 *  It handles all events generated by firmware, RX data received
 *  from firmware and TX data sent from kernel.
 *
 *  @data:	A pointer to &lbs_thread structure
 *  returns:	0
 */
static int lbs_thread(void *data)
{
	struct net_device *dev = data;
	struct lbs_private *priv = dev->ml_priv;
	wait_queue_t wait;

	lbs_deb_enter(LBS_DEB_THREAD);

	init_waitqueue_entry(&wait, current);

	for (;;) {
		int shouldsleep;
		u8 resp_idx;

		lbs_deb_thread("1: currenttxskb %p, dnld_sent %d\n",
				priv->currenttxskb, priv->dnld_sent);

		add_wait_queue(&priv->waitq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irq(&priv->driver_lock);

		if (kthread_should_stop())
			shouldsleep = 0;	/* Bye */
		else if (priv->surpriseremoved)
			shouldsleep = 1;	/* We need to wait until we're _told_ to die */
		else if (priv->psstate == PS_STATE_SLEEP)
			shouldsleep = 1;	/* Sleep mode. Nothing we can do till it wakes */
		else if (priv->cmd_timed_out)
			shouldsleep = 0;	/* Command timed out. Recover */
		else if (!priv->fw_ready)
			shouldsleep = 1;	/* Firmware not ready. We're waiting for it */
		else if (priv->dnld_sent)
			shouldsleep = 1;	/* Something is en route to the device already */
		else if (priv->tx_pending_len > 0)
			shouldsleep = 0;	/* We've a packet to send */
		else if (priv->resp_len[priv->resp_idx])
			shouldsleep = 0;	/* We have a command response */
		else if (priv->cur_cmd)
			shouldsleep = 1;	/* Can't send a command; one already running */
		else if (!list_empty(&priv->cmdpendingq) &&
					!(priv->wakeup_dev_required))
			shouldsleep = 0;	/* We have a command to send */
		else if (kfifo_len(&priv->event_fifo))
			shouldsleep = 0;	/* We have an event to process */
		else
			shouldsleep = 1;	/* No command */

		if (shouldsleep) {
			lbs_deb_thread("sleeping, connect_status %d, "
				"psmode %d, psstate %d\n",
				priv->connect_status,
				priv->psmode, priv->psstate);
			spin_unlock_irq(&priv->driver_lock);
			schedule();
		} else
			spin_unlock_irq(&priv->driver_lock);

		lbs_deb_thread("2: currenttxskb %p, dnld_send %d\n",
			       priv->currenttxskb, priv->dnld_sent);

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&priv->waitq, &wait);

		lbs_deb_thread("3: currenttxskb %p, dnld_sent %d\n",
			       priv->currenttxskb, priv->dnld_sent);

		if (kthread_should_stop()) {
			lbs_deb_thread("break from main thread\n");
			break;
		}

		if (priv->surpriseremoved) {
			lbs_deb_thread("adapter removed; waiting to die...\n");
			continue;
		}

		lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n",
		       priv->currenttxskb, priv->dnld_sent);

		/* Process any pending command response */
		spin_lock_irq(&priv->driver_lock);
		resp_idx = priv->resp_idx;
		if (priv->resp_len[resp_idx]) {
			spin_unlock_irq(&priv->driver_lock);
			lbs_process_command_response(priv,
				priv->resp_buf[resp_idx],
				priv->resp_len[resp_idx]);
			spin_lock_irq(&priv->driver_lock);
			priv->resp_len[resp_idx] = 0;
		}
		spin_unlock_irq(&priv->driver_lock);

		/* Process hardware events, e.g. card removed, link lost */
		spin_lock_irq(&priv->driver_lock);
		while (kfifo_len(&priv->event_fifo)) {
			u32 event;

			if (kfifo_out(&priv->event_fifo,
				(unsigned char *) &event, sizeof(event)) !=
				sizeof(event))
					break;
			spin_unlock_irq(&priv->driver_lock);
			lbs_process_event(priv, event);
			spin_lock_irq(&priv->driver_lock);
		}
		spin_unlock_irq(&priv->driver_lock);

		if (priv->wakeup_dev_required) {
			lbs_deb_thread("Waking up device...\n");
			/* Wake up device */
			if (priv->exit_deep_sleep(priv))
				lbs_deb_thread("Wakeup device failed\n");
			continue;
		}

		/* command timeout stuff */
		if (priv->cmd_timed_out && priv->cur_cmd) {
			struct cmd_ctrl_node *cmdnode = priv->cur_cmd;

			netdev_info(dev, "Timeout submitting command 0x%04x\n",
				    le16_to_cpu(cmdnode->cmdbuf->command));
			lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
			if (priv->reset_card)
				priv->reset_card(priv);
		}
		priv->cmd_timed_out = 0;

		if (!priv->fw_ready)
			continue;

		/* Check if we need to confirm Sleep Request received previously */
		if (priv->psstate == PS_STATE_PRE_SLEEP &&
		    !priv->dnld_sent && !priv->cur_cmd) {
			if (priv->connect_status == LBS_CONNECTED) {
				lbs_deb_thread("pre-sleep, currenttxskb %p, "
					"dnld_sent %d, cur_cmd %p\n",
					priv->currenttxskb, priv->dnld_sent,
					priv->cur_cmd);

				lbs_ps_confirm_sleep(priv);
			} else {
				/* workaround for firmware sending
				 * deauth/linkloss event immediately
				 * after sleep request; remove this
				 * after firmware fixes it
				 */
				priv->psstate = PS_STATE_AWAKE;
				netdev_alert(dev,
					     "ignore PS_SleepConfirm in non-connected state\n");
			}
		}

		/* The PS state is changed during processing of Sleep Request
		 * event above
		 */
		if ((priv->psstate == PS_STATE_SLEEP) ||
		    (priv->psstate == PS_STATE_PRE_SLEEP))
			continue;

		if (priv->is_deep_sleep)
			continue;

		/* Execute the next command */
		if (!priv->dnld_sent && !priv->cur_cmd)
			lbs_execute_next_command(priv);

		spin_lock_irq(&priv->driver_lock);
		if (!priv->dnld_sent && priv->tx_pending_len > 0) {
			int ret = priv->hw_host_to_card(priv, MVMS_DAT,
							priv->tx_pending_buf,
							priv->tx_pending_len);
			if (ret) {
				lbs_deb_tx("host_to_card failed %d\n", ret);
				priv->dnld_sent = DNLD_RES_RECEIVED;
			} else {
				mod_timer(&priv->tx_lockup_timer,
					  jiffies + (HZ * 5));
			}
			priv->tx_pending_len = 0;
			if (!priv->currenttxskb) {
				/* We can wake the queues immediately if we aren't
				   waiting for TX feedback */
				if (priv->connect_status == LBS_CONNECTED)
					netif_wake_queue(priv->dev);
				if (priv->mesh_dev &&
				    netif_running(priv->mesh_dev))
					netif_wake_queue(priv->mesh_dev);
			}
		}
		spin_unlock_irq(&priv->driver_lock);
	}

	del_timer(&priv->command_timer);
	del_timer(&priv->tx_lockup_timer);
	del_timer(&priv->auto_deepsleep_timer);

	lbs_deb_leave(LBS_DEB_THREAD);
	return 0;
}
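
The top of the loop above is the manual wait-queue idiom: register on the queue and mark the task interruptible before testing the wake-up condition, so a wake-up arriving between the test and schedule() is not lost (it simply makes schedule() return immediately). A stripped-down sketch, with the condition and private struct hypothetical:

/* Skeleton of lbs_thread's sleep/wake loop
 * (struct example_priv and have_work() are hypothetical). */
static int example_thread(void *data)
{
	struct example_priv *priv = data;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);
	while (!kthread_should_stop()) {
		int sleep;

		/* Enqueue and change state *before* checking the
		 * condition, so a concurrent wake_up() is never missed. */
		add_wait_queue(&priv->waitq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&priv->driver_lock);
		sleep = !have_work(priv);       /* hypothetical condition */
		spin_unlock_irq(&priv->driver_lock);

		if (sleep)
			schedule();     /* woken via wake_up(&priv->waitq) */

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&priv->waitq, &wait);

		/* ... process the pending work ... */
	}
	return 0;
}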
Code example #30
static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}
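
list_add_tail_rcu() implies the list has lock-free RCU readers: writers serialize on cgwb_lock (with IRQs disabled, hence spin_lock_irq), while a reader only needs rcu_read_lock(). A matching reader would look roughly like this (loop body hypothetical):

/* Sketch of the RCU reader side matching the registration above. */
struct bdi_writeback *wb;

rcu_read_lock();
list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
	process_wb(wb);         /* hypothetical per-entry work */
rcu_read_unlock();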