Example #1
static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
	if (in_irq() || irqs_disabled())
		printk("in_irq()%d  irqs_disabled()%d\n", in_irq(), irqs_disabled());

#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
 	 */
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
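The arithmetic above relies on the preempt_count bit layout: the softirq-disable count lives in its own byte, and subtracting SOFTIRQ_DISABLE_OFFSET - 1 leaves one ordinary preemption-disable reference in place while pending softirqs run, released afterwards by dec_preempt_count(). A minimal user-space sketch of that bookkeeping, assuming the mainline layout of 8 preempt bits followed by 8 softirq bits (constants redefined here for illustration, not taken from kernel headers):

#include <stdio.h>

/* Assumed layout mirroring mainline: preempt count in bits 0-7,
 * softirq count in bits 8-15. */
#define PREEMPT_BITS		8
#define SOFTIRQ_BITS		8
#define SOFTIRQ_SHIFT		PREEMPT_BITS
#define SOFTIRQ_OFFSET		(1UL << SOFTIRQ_SHIFT)
#define SOFTIRQ_MASK		(((1UL << SOFTIRQ_BITS) - 1) << SOFTIRQ_SHIFT)
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

int main(void)
{
	unsigned long preempt_count = 0;

	preempt_count += SOFTIRQ_DISABLE_OFFSET;	/* local_bh_disable() */
	printf("softirq_count: %#lx\n", preempt_count & SOFTIRQ_MASK);

	/* enable path: drop all but one reference, run softirqs... */
	preempt_count -= SOFTIRQ_DISABLE_OFFSET - 1;
	printf("while softirqs run: %#lx (preemption still off)\n",
	       preempt_count);

	preempt_count -= 1;				/* dec_preempt_count() */
	printf("after enable: %#lx\n", preempt_count);
	return 0;
}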
Example #2
static void falcon_get_lock(void)
{
	unsigned long flags;

	if (IS_A_TT())
		return;

	local_irq_save(flags);

	while (!in_irq() && falcon_got_lock && stdma_others_waiting())
		sleep_on(&falcon_fairness_wait);

	while (!falcon_got_lock) {
		if (in_irq())
			panic("Falcon SCSI hasn't ST-DMA lock in interrupt");
		if (!falcon_trying_lock) {
			falcon_trying_lock = 1;
			stdma_lock(scsi_falcon_intr, NULL);
			falcon_got_lock = 1;
			falcon_trying_lock = 0;
			wake_up(&falcon_try_wait);
		} else {
			sleep_on(&falcon_try_wait);
		}
	}

	local_irq_restore(flags);
	if (!falcon_got_lock)
		panic("Falcon SCSI: someone stole the lock :-(\n");
}
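A note on the sleep_on() calls above: sleep_on() has an inherent lost-wakeup race (the condition can change between the test and the sleep) and was eventually removed from mainline. A purely illustrative re-expression of the first fairness wait with wait_event(), which re-checks the condition atomically with respect to wake-ups (the queue name here is hypothetical, not the driver's):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(fairness_wq);	/* hypothetical queue */

/* Illustrative equivalent of the first while/sleep_on loop above. */
static void falcon_wait_fairness(void)
{
	wait_event(fairness_wq,
		   in_irq() || !falcon_got_lock || !stdma_others_waiting());
}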
Example #3
static int write_ep0_fifo(struct rt_ep_struct *rt_ep, struct rt_request *req)
{
	u8	*buf;
	int	length, i;
	u32 maxpacket;

DBG;
	xprintk("q.l=%d, q.a=%d, maxp=%d\n", req->req.length, req->req.actual, rt_ep->ep.maxpacket);

	buf = req->req.buf + req->req.actual;
	maxpacket = (u32)(rt_ep->ep.maxpacket);
	length = min(req->req.length - req->req.actual, maxpacket);

	req->req.actual += length;

	if (!length && req->req.zero)
		FATAL_ERROR("zlp");

	if (!in_irq())
		FATAL_ERROR("Not in irq context");

	// write to ep0in fifo
	for (i = 0; i < length; i++)
		usb_write(EP0INDAT + i, *buf++);

	// arm ep0in
	usb_write(IN0BC, length);
	if (length != rt_ep->ep.maxpacket)
		usb_write(EP0CS, 0x2);		// clear NAK bit to ACK host.

	return length;
}
Example #4
/*
 *******************************************************************************
 * Data tansfer over USB functions
 *******************************************************************************
 */
static int read_ep0_fifo(struct rt_ep_struct *rt_ep, struct rt_request *req)
{
	u8	*buf;
	int	byte_count, req_bufferspace, count, i;

DBG;
	if (!in_irq())
		FATAL_ERROR("not irq context.");

	byte_count = read_outbc(EP_NO(rt_ep));
	req_bufferspace = req->req.length - req->req.actual;

	buf = req->req.buf + req->req.actual;

	if (!req_bufferspace)
		FATAL_ERROR("zlp");

	if (byte_count > req_bufferspace)
		FATAL_ERROR("buffer overflow, byte_count=%d, req->req.length=%d, req->req.actual=%d\n",
			    byte_count, req->req.length, req->req.actual);

	count = min(byte_count, req_bufferspace);

	for (i = 0; i < count; i++) {
		*buf = usb_read(EP0OUTDAT + i);
		buf++;
	}
	req->req.actual += count;

	return count;
}
Example #5
File: erst.c  Project: bpopovych/xen
static int erst_exec_move_data(struct apei_exec_context *ctx,
			       struct acpi_whea_header *entry)
{
	int rc;
	u64 offset;
	void *src, *dst;

	/* ioremap does not work in interrupt context */
	if (in_irq()) {
		printk(KERN_WARNING
		       "MOVE_DATA cannot be used in interrupt context\n");
		return -EBUSY;
	}

	rc = __apei_exec_read_register(entry, &offset);
	if (rc)
		return rc;

	src = ioremap(ctx->src_base + offset, ctx->var2);
	if (!src)
		return -ENOMEM;

	dst = ioremap(ctx->dst_base + offset, ctx->var2);
	if (dst) {
		memmove(dst, src, ctx->var2);
		iounmap(dst);
	} else
		rc = -ENOMEM;

	iounmap(src);

	return rc;
}
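The in_irq() guard exists because ioremap() may allocate and modify kernel page tables, which can sleep. A hypothetical deferral (all names below are mine, not from erst.c) would punt the copy to process context via a workqueue:

#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/string.h>

struct move_ctx {			/* hypothetical */
	struct work_struct work;
	phys_addr_t src, dst;
	size_t len;
};

static void move_data_fn(struct work_struct *w)
{
	struct move_ctx *m = container_of(w, struct move_ctx, work);
	void *s = ioremap(m->src, m->len);
	void *d = ioremap(m->dst, m->len);

	if (s && d)
		memmove(d, s, m->len);	/* as in the function above */
	if (d)
		iounmap(d);
	if (s)
		iounmap(s);
}
/* enqueue from atomic context:
 *	INIT_WORK(&m->work, move_data_fn);
 *	schedule_work(&m->work);
 */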
Example #6
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
Example #7
void __kfree_skb(struct sk_buff *skb)
{
	BUG_ON(skb->list != NULL);

	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
	skb->tc_classid = 0;
#endif
#endif

	kfree_skbmem(skb);
}
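The WARN_ON(in_irq()) fires because skb destructors (sock_wfree() and friends) take locks that are not hardirq-safe. Drivers that may free from any context therefore use the deferring helpers; the sketch below is essentially what dev_kfree_skb_any() does:

#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: pick the deferred variant when freeing from hardirq or with
 * IRQs off, so the destructor runs later from softirq context. */
static void free_skb_ctx_safe(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);		/* queue for softirq free */
	else
		dev_kfree_skb(skb);		/* free (and destruct) now */
}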
Example #8
static void inline procfile_context_print(void)
{
    printk_d("preemptible 0x%x\n", preemptible());
    printk_d("in_atomic_preempt_off 0x%x\n", in_atomic_preempt_off());
    printk_d("in_atomic 0x%x\n", in_atomic());
    printk_d("in_nmi 0x%lx\n", in_nmi());
    printk_d("in_serving_softirq 0x%lx\n", in_serving_softirq());
    printk_d("in_interrupt 0x%lx\n", in_interrupt());
    printk_d("in_softirq 0x%lx\n", in_softirq());
    printk_d("in_irq 0x%lx\n", in_irq());
    printk_d("preempt_count 0x%x\n", preempt_count());
    printk_d("irqs_disabled 0x%x\n", irqs_disabled());
    if (current) {
        printk_d("task->comm %s\n", current->comm);
        printk_d("task->flags 0x%x\n", current->flags);
        printk_d("task->state %lu\n", current->state);
        printk_d("task->usage %d\n", atomic_read(&(current->usage)));
        printk_d("task->prio %d\n", current->prio);
        printk_d("task->static_prio %d\n", current->static_prio);
        printk_d("task->normal_prio %d\n", current->normal_prio);
        printk_d("task->rt_priority %d\n", current->rt_priority);
        printk_d("task->policy %d\n", current->policy);
        printk_d("task->pid %d\n", current->pid);
        printk_d("task->tgid %d\n", current->tgid);
    }
    else
        printk_d("task pointer NULL\n");
}
Example #9
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	add_preempt_count_notrace(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
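__local_bh_disable() above is the backend of local_bh_disable(); the softirq_count() == cnt test means "softirqs were enabled until this call", so the tracepoint fires only on the outermost disable. A minimal usage sketch of the public pair (the per-CPU variable is hypothetical):

#include <linux/bottom_half.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);	/* hypothetical */

/* Softirqs on this CPU cannot run between the disable/enable pair,
 * so the per-CPU update cannot race with softirq handlers. */
static void bump_counter(void)
{
	local_bh_disable();
	this_cpu_inc(my_counter);
	local_bh_enable();
}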
Example #10
static int snd_dummy_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol);
	int change, addr = kcontrol->private_value;
	int left, right;

	if (in_irq())
		printk("dummy: line %d we are in HARDWARE IRQ\n", __LINE__);
	left = ucontrol->value.integer.value[0];
	if (left < -50)
		left = -50;
	if (left > 100)
		left = 100;
	right = ucontrol->value.integer.value[1];
	if (right < -50)
		right = -50;
	if (right > 100)
		right = 100;
	spin_lock_bh(&dummy->mixer_lock);
	change = dummy->mixer_volume[addr][0] != left || dummy->mixer_volume[addr][1] != right;
	dummy->mixer_volume[addr][0] = left;
	dummy->mixer_volume[addr][1] = right;
	spin_unlock_bh(&dummy->mixer_lock);
	return change;
}
Example #11
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return -EACCES;
#endif

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}
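The walker refuses hardirq context because it may touch user pages and scratch state that are only softirq-safe. A hedged sketch of a caller-side guard using the same legacy blkcipher API (the wrapper name is mine):

#include <linux/crypto.h>

/* Refuse hardirq context up front, mirroring the -EDEADLK above,
 * before entering the blkcipher walk machinery. */
static int encrypt_not_in_irq(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;
	return crypto_blkcipher_encrypt(desc, dst, src, nbytes);
}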
Example #12
static int update(struct hash_desc *desc,
		  struct scatterlist *sg, unsigned int nbytes)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;
	return update2(desc, sg, nbytes);
}
Example #13
void __kfree_skb(struct sk_buff *skb)
{
	if (skb->list) {
	 	printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
		       "on a list (from %p).\n", NET_CALLER(skb));
		BUG();
	}

	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if(skb->destructor) {
		if (in_irq())
			printk(KERN_WARNING "Warning: kfree_skb on "
					    "hard IRQ %p\n", NET_CALLER(skb));
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
	skb->tc_classid = 0;
#endif
#endif

	kfree_skbmem(skb);
}
Example #14
void iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
{
	struct exynos_vm_region *region;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	size_t unmapped_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());

	if (WARN_ON(phys & ~PAGE_MASK))
		phys = round_down(phys, PAGE_SIZE);

	spin_lock(&vmm->lock);

	region = find_region(vmm, (dma_addr_t)phys);
	if (WARN_ON(!region)) {
		spin_unlock(&vmm->lock);
		return;
	}

	list_del(&region->node);

	spin_unlock(&vmm->lock);

	unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);

	exynos_sysmmu_tlb_invalidate(dev);

	WARN_ON(unmapped_size != region->size);
	dev_dbg(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",
					unmapped_size, region->start);

	kfree(region);
}
Example #15
/*
 * Run the irq_work entries on this cpu. Requires to be ran from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *list, **head;

	head = &__get_cpu_var(irq_work_list);
	if (*head == NULL)
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	list = xchg(head, NULL);
	while (list != NULL) {
		struct irq_work *entry = list;

		list = irq_work_next(list);

		/*
		 * Clear the PENDING bit, after this point the @entry
		 * can be re-used.
		 */
		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
		entry->func(entry);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL);
	}
}
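The BUG_ON(!in_irq()) pins down the contract: entries are queued from contexts where almost nothing else is safe (e.g. NMIs) and executed here, from hardirq with IRQs off. A producer-side sketch under that contract (names hypothetical):

#include <linux/irq_work.h>

static void my_irq_work_fn(struct irq_work *work)
{
	/* runs with in_irq() true and IRQs disabled */
}

static struct irq_work my_work;

static void setup_and_poke(void)
{
	init_irq_work(&my_work, my_irq_work_fn);
	irq_work_queue(&my_work);	/* NMI-safe enqueue; runs later */
}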
Example #16
static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
								int err)
{
	struct sock *sk = (struct sock *) context;

	BUG_ON(in_irq());

	nfc_dbg("sk=%p err=%d", sk, err);

	if (err)
		goto error;

	err = rawsock_add_header(skb);
	if (err)
		goto error;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		goto error;

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (!skb_queue_empty(&sk->sk_write_queue))
		schedule_work(&nfc_rawsock(sk)->tx_work);
	else
		nfc_rawsock(sk)->tx_work_scheduled = false;
	spin_unlock_bh(&sk->sk_write_queue.lock);

	sock_put(sk);
	return;

error:
	rawsock_report_error(sk, err);
	sock_put(sk);
}
Example #17
void __kfree_skb(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif

	kfree_skbmem(skb);
}
Example #18
/*
 * Run the irq_work entries on this cpu. Requires to be ran from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
Example #19
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}
Example #20
static int cmc221_idpram_wakeup(struct dpram_link_device *dpld)
{
	struct link_device *ld = &dpld->ld;
	struct modem_data *mdm_data = ld->mdm_data;
	int cnt = 0;

	gpio_set_value(mdm_data->gpio_dpram_wakeup, 1);

	while (!gpio_get_value(mdm_data->gpio_dpram_status)) {
		if (cnt++ > 10) {
			if (in_irq())
				mif_err("ERR! gpio_dpram_status == 0 in IRQ\n");
			else
				mif_err("ERR! gpio_dpram_status == 0\n");
			return -EACCES;
		}

		mif_info("gpio_dpram_status == 0 (cnt %d)\n", cnt);
		if (in_interrupt())
			udelay(1000);
		else
			usleep_range(1000, 2000);
	}

	return 0;
}
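The udelay()/usleep_range() split above is the standard context-aware wait: atomic context must busy-wait, while process context should sleep and yield the CPU. As a reusable sketch (assuming a 1 ms wait, helper name mine):

#include <linux/delay.h>
#include <linux/hardirq.h>

static void wait_1ms_ctx_aware(void)
{
	if (in_interrupt() || irqs_disabled())
		mdelay(1);			/* cannot sleep here */
	else
		usleep_range(1000, 2000);	/* hrtimer-backed sleep */
}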
Example #21
static void
#ifdef HAVE_SK_DATA_READY_ONE_ARG
ksocknal_data_ready(struct sock *sk)
#else
ksocknal_data_ready(struct sock *sk, int n)
#endif
{
	ksock_conn_t  *conn;
	ENTRY;

	/* interleave correctly with closing sockets... */
	LASSERT(!in_irq());
	read_lock(&ksocknal_data.ksnd_global_lock);

	conn = sk->sk_user_data;
	if (conn == NULL) {	/* raced with ksocknal_terminate_conn */
		LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
#ifdef HAVE_SK_DATA_READY_ONE_ARG
		sk->sk_data_ready(sk);
#else
		sk->sk_data_ready(sk, n);
#endif
	} else
		ksocknal_read_callback(conn);

	read_unlock(&ksocknal_data.ksnd_global_lock);

	EXIT;
}
Example #22
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
Example #23
void local_bh_enable_ip(unsigned long ip)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
 	 */
 	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
Example #24
static int usb_send(struct link_device *ld, struct io_device *iod,
			struct sk_buff *skb)
{
	struct sk_buff_head *txq;
	size_t tx_size;
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct link_pm_data *pm_data = usb_ld->link_pm_data;

	switch (iod->format) {
	case IPC_RAW:
		txq = &ld->sk_raw_tx_q;

		if (unlikely(ld->raw_tx_suspended)) {
			/* Unlike misc_write, vnet_xmit is in interrupt.
			 * Despite call netif_stop_queue on CMD_SUSPEND,
			 * packets can be reached here.
			 */
			if (in_irq()) {
				mif_err("raw tx is suspended, "
						"drop packet. size=%d",
						skb->len);
				return -EBUSY;
			}

			mif_err("wait RESUME CMD...\n");
			INIT_COMPLETION(ld->raw_tx_resumed_by_cp);
			wait_for_completion(&ld->raw_tx_resumed_by_cp);
			mif_err("resumed done.\n");
		}
		break;
	case IPC_BOOT:
	case IPC_FMT:
	case IPC_RFS:
	default:
		txq = &ld->sk_fmt_tx_q;
		break;
	}
	/* store the tx size before run the tx_delayed_work*/
	tx_size = skb->len;

	/* drop packet, when link is not online */
	if (ld->com_state == COM_BOOT && iod->format != IPC_BOOT) {
		mif_err("%s: drop packet, size=%d, com_state=%d\n",
				iod->name, skb->len, ld->com_state);
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* en queue skb data */
	skb_queue_tail(txq, skb);
	/* Hold wake_lock for getting schedule the tx_work */
	wake_lock(&pm_data->tx_async_wake);

	if (!work_pending(&ld->tx_delayed_work.work))
		queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work, 0);

	return tx_size;
}
Example #25
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}
Example #26
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(cnt);
}
Example #27
static inline int kmap_atomic_idx_push(void)
{
	int idx = __get_cpu_var(__kmap_atomic_idx)++;
#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);
#endif
	return idx;
}
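kmap_atomic_idx_push() maintains a per-CPU stack of fixmap slots; taking a slot from hardirq while IRQs are still enabled could let a nested interrupt reuse it, which is what the WARN_ON_ONCE(in_irq() && !irqs_disabled()) catches. A typical consumer, for context (a sketch assuming the one-argument kmap_atomic() of the same era; helper name mine):

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_highpage(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page);	/* pushes a per-CPU slot */

	memcpy(dst, src, len);
	kunmap_atomic(src);		/* pops it again */
}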
Example #28
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on(_RET_IP_);
	sub_preempt_count(cnt);
}
Example #29
void kernel_neon_end(void)
{
	if (in_interrupt()) {
		struct fpsimd_partial_state *s = this_cpu_ptr(
			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
		fpsimd_load_partial_state(s);
	} else {
		preempt_enable();
	}
}
Example #30
/**
 * usb_serial_generic_write_start - kick off an URB write
 * @port:	Pointer to the &struct usb_serial_port data
 *
 * Returns zero on success, or a negative errno value
 */
static int usb_serial_generic_write_start(struct usb_serial_port *port)
{
	struct urb *urb;
	int count, result;
	unsigned long flags;
	int i;

	if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags))
		return 0;
retry:
	spin_lock_irqsave(&port->lock, flags);
	if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) {
		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		spin_unlock_irqrestore(&port->lock, flags);
		return 0;
	}
	i = (int)find_first_bit(&port->write_urbs_free,
						ARRAY_SIZE(port->write_urbs));
	spin_unlock_irqrestore(&port->lock, flags);

	urb = port->write_urbs[i];
	count = port->serial->type->prepare_write_buffer(port,
						urb->transfer_buffer,
						port->bulk_out_size);
	urb->transfer_buffer_length = count;
	usb_serial_debug_data(debug, &port->dev, __func__, count,
						urb->transfer_buffer);
	spin_lock_irqsave(&port->lock, flags);
	port->tx_bytes += count;
	spin_unlock_irqrestore(&port->lock, flags);

	clear_bit(i, &port->write_urbs_free);
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		dev_err(&port->dev, "%s - error submitting urb: %d\n",
						__func__, result);
		set_bit(i, &port->write_urbs_free);
		spin_lock_irqsave(&port->lock, flags);
		port->tx_bytes -= count;
		spin_unlock_irqrestore(&port->lock, flags);

		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		return result;
	}

	/* Try sending off another urb, unless in irq context (in which case
	 * there will be no free urb). */
	if (!in_irq())
		goto retry;

	clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);

	return 0;
}
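A related context rule shows up in the usb_submit_urb(urb, GFP_ATOMIC) call above: paths that may run from completion (hardirq) context cannot sleep in the allocator. A common helper-style sketch of that rule (not from this driver):

#include <linux/gfp.h>
#include <linux/hardirq.h>

/* Choose allocation flags by context: atomic contexts must not sleep. */
static inline gfp_t ctx_gfp_flags(void)
{
	return (in_interrupt() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
}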