Example #1
0
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dbg("%s susp_count: %d", __func__, acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else
			usb_autopm_put_interface_async(acm->control);
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;

}
Example #2
0
void vfs_entry_put(struct fs_entry *entry)
{
	struct fs_entry *dir = entry->parent;

	const bool enabled = spin_lock_irqsave(&entry->lock);
	const int refcount = --entry->refcount;
	spin_unlock_irqrestore(&entry->lock, enabled);

	if (refcount != 0)
		return;

	spin_lock(&dir->lock);
	spin_lock(&entry->lock);

	if (entry->refcount) {
		spin_unlock_irqrestore(&entry->lock, false);
		spin_unlock_irqrestore(&dir->lock, enabled);
		return;
	}
	__vfs_entry_detach(entry, dir);

	spin_unlock_irqrestore(&entry->lock, false);
	spin_unlock_irqrestore(&dir->lock, enabled);

	vfs_entry_put(entry->parent);
	vfs_node_put(entry->node);
	kmem_cache_free(fs_entry_cache, entry);
}
Example #3
0
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
    struct usb_request	*req;
    unsigned long		flags;
    int			req_cnt = 0;

    /* fill unused rxq slots with some skb */
    spin_lock_irqsave(&dev->req_lock, flags);
    while (!list_empty(&dev->rx_reqs)) {
        /* break the nexus of continuous completion and re-submission */
        if (++req_cnt > qlen(dev->gadget))
            break;

        req = container_of(dev->rx_reqs.next,
                           struct usb_request, list);
        list_del_init(&req->list);
        spin_unlock_irqrestore(&dev->req_lock, flags);

        if (rx_submit(dev, req, gfp_flags) < 0) {
            spin_lock_irqsave(&dev->req_lock, flags);
            list_add(&req->list, &dev->rx_reqs);
            spin_unlock_irqrestore(&dev->req_lock, flags);
            defer_kevent(dev, WORK_RX_MEMORY);
            return;
        }

        spin_lock_irqsave(&dev->req_lock, flags);
    }
    spin_unlock_irqrestore(&dev->req_lock, flags);
}
Example #4
0
void usb_serial_generic_read_bulk_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	unsigned char *data = urb->transfer_buffer;
	int status = urb->status;
	unsigned long flags;

	dbg("%s - port %d", __func__, port->number);

	if (unlikely(status != 0)) {
		dbg("%s - nonzero read bulk status received: %d",
		    __func__, status);
		return;
	}

	usb_serial_debug_data(debug, &port->dev, __func__,
						urb->actual_length, data);
	port->serial->type->process_read_urb(urb);

	/* Throttle the device if requested by tty */
	spin_lock_irqsave(&port->lock, flags);
	port->throttled = port->throttle_req;
	if (!port->throttled) {
		spin_unlock_irqrestore(&port->lock, flags);
		usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}
Example #5
0
/* Sysfs entry to shutdown a virtual connection */
static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
{
	struct vhci_device *vdev = &vhci_hcd->vdev[rhport];
	struct vhci *vhci = vhci_hcd->vhci;
	unsigned long flags;

	usbip_dbg_vhci_sysfs("enter\n");

	/* lock */
	spin_lock_irqsave(&vhci->lock, flags);
	spin_lock(&vdev->ud.lock);

	if (vdev->ud.status == VDEV_ST_NULL) {
		pr_err("not connected %d\n", vdev->ud.status);

		/* unlock */
		spin_unlock(&vdev->ud.lock);
		spin_unlock_irqrestore(&vhci->lock, flags);

		return -EINVAL;
	}

	/* unlock */
	spin_unlock(&vdev->ud.lock);
	spin_unlock_irqrestore(&vhci->lock, flags);

	usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);

	return 0;
}
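In the kernel this helper is only reachable through a sysfs attribute; the sketch below shows roughly what such a store handler could look like. The attribute name, the dev_to_vhci_hcd() helper and the VHCI_HC_PORTS bound are illustrative assumptions, not part of the example above.

#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical sysfs store handler driving vhci_port_disconnect(). */
static ssize_t detach_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct vhci_hcd *vhci_hcd = dev_to_vhci_hcd(dev);	/* assumed helper */
	__u32 rhport;
	int ret;

	/* parse the roothub port number written by userspace */
	if (kstrtou32(buf, 10, &rhport) || rhport >= VHCI_HC_PORTS)
		return -EINVAL;

	ret = vhci_port_disconnect(vhci_hcd, rhport);
	if (ret < 0)
		return ret;

	return count;
}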
Example #6
0
/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
    struct uv_irq_2_mmr_pnode *e;
    struct rb_node *n;
    unsigned long irqflags;

    spin_lock_irqsave(&uv_irq_lock, irqflags);
    n = uv_irq_root.rb_node;
    while (n) {
        e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

        if (e->irq == irq) {
            *offset = e->offset;
            *pnode = e->pnode;
            spin_unlock_irqrestore(&uv_irq_lock, irqflags);
            return 0;
        }

        if (irq < e->irq)
            n = n->rb_left;
        else
            n = n->rb_right;
    }
    spin_unlock_irqrestore(&uv_irq_lock, irqflags);
    return -1;
}
Example #7
0
/********************************************************************************************************************
 *function:  I still do not understand this function, so wait for further implementation
 *   input:  unsigned long	 data		//actually we send struct tx_ts_record or struct rx_ts_record to this timer
 *  return:  NULL
 *  notice:
 ********************************************************************************************************************/
static void RxPktPendingTimeout(struct timer_list *t)
{
	struct rx_ts_record     *pRxTs = from_timer(pRxTs, t, rx_pkt_pending_timer);
	struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);

	PRX_REORDER_ENTRY	pReorderEntry = NULL;

	//u32 flags = 0;
	unsigned long flags = 0;
	u8 index = 0;
	bool bPktInBuf = false;

	spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
	IEEE80211_DEBUG(IEEE80211_DL_REORDER, "==================>%s()\n", __func__);
	if(pRxTs->rx_timeout_indicate_seq != 0xffff) {
		// Indicate the pending packets sequentially according to SeqNum until meet the gap.
		while(!list_empty(&pRxTs->rx_pending_pkt_list)) {
			pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTs->rx_pending_pkt_list.prev, RX_REORDER_ENTRY, List);
			if(index == 0)
				pRxTs->rx_indicate_seq = pReorderEntry->SeqNum;

			if( SN_LESS(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq) ||
				SN_EQUAL(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq)	) {
				list_del_init(&pReorderEntry->List);

				if(SN_EQUAL(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq))
					pRxTs->rx_indicate_seq = (pRxTs->rx_indicate_seq + 1) % 4096;

				IEEE80211_DEBUG(IEEE80211_DL_REORDER, "RxPktPendingTimeout(): IndicateSeq: %d\n", pReorderEntry->SeqNum);
				ieee->stats_IndicateArray[index] = pReorderEntry->prxb;
				index++;

				list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List);
			} else {
				bPktInBuf = true;
				break;
			}
		}
	}

	if(index>0) {
		// Set rx_timeout_indicate_seq to 0xffff to indicate no pending packets in buffer now.
		pRxTs->rx_timeout_indicate_seq = 0xffff;

		// Indicate packets
		if(index > REORDER_WIN_SIZE) {
			IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
			spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
			return;
		}
		ieee80211_indicate_packets(ieee, ieee->stats_IndicateArray, index);
	}

	if(bPktInBuf && (pRxTs->rx_timeout_indicate_seq == 0xffff)) {
		pRxTs->rx_timeout_indicate_seq = pRxTs->rx_indicate_seq;
		mod_timer(&pRxTs->rx_pkt_pending_timer,
			  jiffies + msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime));
	}
	spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
Example #8
0
/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
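The comment block above spells out when a driver should make this call; here is a rough sketch of such a call site in a completion interrupt. struct my_dev, its fields and the idea of saving the running context in device_run() are assumptions for illustration only.

#include <linux/interrupt.h>
#include <media/v4l2-mem2mem.h>

struct my_dev {					/* hypothetical driver state */
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_ctx *curr_m2m_ctx;	/* remembered in device_run() */
};

static irqreturn_t my_device_irq(int irq, void *priv)
{
	struct my_dev *dev = priv;

	/* ... acknowledge the hardware and mark the buffers done ... */

	/* yield the device back so another queued job may be scheduled */
	v4l2_m2m_job_finish(dev->m2m_dev, dev->curr_m2m_ctx);

	return IRQ_HANDLED;
}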
Example #9
0
static void
hfc_l1_timer(struct hfc4s8s_l1 *l1)
{
	u_long flags;

	if (!l1->enabled)
		return;

	spin_lock_irqsave(&l1->lock, flags);
	if (l1->nt_mode) {
		l1->l1_state = 1;
		Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
		Write_hfc8(l1->hw, A_ST_WR_STA, 0x11);
		spin_unlock_irqrestore(&l1->lock, flags);
		l1->d_if.ifc.l1l2(&l1->d_if.ifc,
				  PH_DEACTIVATE | INDICATION, NULL);
		spin_lock_irqsave(&l1->lock, flags);
		l1->l1_state = 1;
		Write_hfc8(l1->hw, A_ST_WR_STA, 0x1);
		spin_unlock_irqrestore(&l1->lock, flags);
	} else {
		/* activation timed out */
		Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
		Write_hfc8(l1->hw, A_ST_WR_STA, 0x13);
		spin_unlock_irqrestore(&l1->lock, flags);
		l1->d_if.ifc.l1l2(&l1->d_if.ifc,
				  PH_DEACTIVATE | INDICATION, NULL);
		spin_lock_irqsave(&l1->lock, flags);
		Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
		Write_hfc8(l1->hw, A_ST_WR_STA, 0x3);
		spin_unlock_irqrestore(&l1->lock, flags);
	}
}				/* hfc_l1_timer */
Example #10
0
static int alarm_release(struct inode *inode, struct file *file)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&alarm_slock, flags);
	if (file->private_data) {
		for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
			uint32_t alarm_type_mask = 1U << i;
			if (alarm_enabled & alarm_type_mask) {
				alarm_dbg(INFO,
					  "%s: clear alarm, pending %d\n",
					  __func__,
					  !!(alarm_pending & alarm_type_mask));
				alarm_enabled &= ~alarm_type_mask;
			}
			spin_unlock_irqrestore(&alarm_slock, flags);
			devalarm_cancel(&alarms[i]);
			spin_lock_irqsave(&alarm_slock, flags);
		}
		if (alarm_pending | wait_pending) {
			if (alarm_pending)
				alarm_dbg(INFO, "%s: clear pending alarms %x\n",
					  __func__, alarm_pending);
			__pm_relax(&alarm_wake_lock);
			wait_pending = 0;
			alarm_pending = 0;
		}
		alarm_opened = 0;
	}
	spin_unlock_irqrestore(&alarm_slock, flags);
	return 0;
}
Example #11
0
/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
		wait_event(m2m_ctx->finished,
				!(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}
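Because the helper is static, its callers live in the same file (the streamoff and release paths in mainline). A heavily elided sketch of such a caller follows; the function name and the cleanup comment are placeholders.

/* Sketch: before tearing a context down, make sure no job for it is
 * still queued or running (buffer/queue cleanup details elided). */
static void my_ctx_teardown(struct v4l2_m2m_ctx *m2m_ctx)
{
	v4l2_m2m_cancel_job(m2m_ctx);
	/* ... drop the context's buffers from the ready queues ... */
}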
Example #12
0
int msm_sdio_dmux_open(uint32_t id, void *priv,
                       void (*receive_cb)(void *, struct sk_buff *),
                       void (*write_done)(void *, struct sk_buff *))
{
    unsigned long flags;

    DBG("%s: opening ch %d\n", __func__, id);
    if (!sdio_mux_initialized)
        return -ENODEV;
    if (id >= SDIO_DMUX_NUM_CHANNELS)
        return -EINVAL;

    spin_lock_irqsave(&sdio_ch[id].lock, flags);
    if (sdio_ch_is_local_open(id)) {
        pr_info("%s: Already opened %d\n", __func__, id);
        spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
        goto open_done;
    }

    sdio_ch[id].receive_cb = receive_cb;
    sdio_ch[id].write_done = write_done;
    sdio_ch[id].priv = priv;
    sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
    sdio_ch[id].num_tx_pkts = 0;
    sdio_ch[id].use_wm = 0;
    spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

    sdio_mux_send_open_cmd(id);

open_done:
    pr_info("%s: opened ch %d\n", __func__, id);
    return 0;
}
Example #13
0
/**
 * p9_client_cb - call back from transport to client
 * @c: client state
 * @req: request received
 *
 */
void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
{
	struct p9_req_t *other_req;
	unsigned long flags;

	P9_DPRINTK(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);

	if (req->status == REQ_STATUS_ERROR)
		wake_up(req->wq);

	if (req->flush_tag) { 			/* flush receive path */
		P9_DPRINTK(P9_DEBUG_9P, "<<< RFLUSH %d\n", req->tc->tag);
		spin_lock_irqsave(&c->lock, flags);
		other_req = p9_tag_lookup(c, req->flush_tag);
		if (other_req->status != REQ_STATUS_FLSH) /* stale flush */
			spin_unlock_irqrestore(&c->lock, flags);
		else {
			other_req->status = REQ_STATUS_FLSHD;
			spin_unlock_irqrestore(&c->lock, flags);
			wake_up(other_req->wq);
		}
		p9_free_req(c, req);
	} else { 				/* normal receive path */
		P9_DPRINTK(P9_DEBUG_MUX, "normal: tag %d\n", req->tc->tag);
		spin_lock_irqsave(&c->lock, flags);
		if (req->status != REQ_STATUS_FLSHD)
			req->status = REQ_STATUS_RCVD;
		spin_unlock_irqrestore(&c->lock, flags);
		wake_up(req->wq);
		P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
	}
}
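Per the kernel-doc, this is the transport's hand-off of a completed request back to the client core; below is a minimal sketch of that hand-off, with all transport-specific receive work assumed away.

#include <net/9p/client.h>

/* Hypothetical transport completion path: the reply for @req has been
 * received and parsed, so notify the client core. */
static void my_trans_rx_done(struct p9_client *client, struct p9_req_t *req)
{
	p9_client_cb(client, req);
}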
Example #14
0
static int acm_tty_write(struct tty_struct *tty,
					const unsigned char *buf, int count)
{
	struct acm *acm = tty->driver_data;
	int stat;
	unsigned long flags;
	int wbn;
	struct acm_wb *wb;

	dbg("Entering acm_tty_write to write %d bytes,", count);

	if (!ACM_READY(acm))
		return -EINVAL;
	if (!count)
		return 0;

	spin_lock_irqsave(&acm->write_lock, flags);
	wbn = acm_wb_alloc(acm);
	if (wbn < 0) {
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;
	}
	wb = &acm->wb[wbn];

	count = (count > acm->writesize) ? acm->writesize : count;
	dbg("Get %d bytes...", count);
	memcpy(wb->buf, buf, count);
	wb->len = count;
	spin_unlock_irqrestore(&acm->write_lock, flags);

	stat = acm_write_start(acm, wbn);
	if (stat < 0)
		return stat;
	return count;
}
Example #15
0
static int host_write(struct sscape_info *devc, unsigned char *data, int count)
{
    unsigned long flags;
    int i, timeout_val;

    spin_lock_irqsave(&devc->lock,flags);
    /*
     * Send the command and data bytes
     */

    for (i = 0; i < count; i++)
    {
        for (timeout_val = 10000; timeout_val > 0; timeout_val--)
            if (inb(PORT(HOST_CTRL)) & TX_READY)
                break;

        if (timeout_val <= 0)
        {
            spin_unlock_irqrestore(&devc->lock,flags);
            return 0;
        }
        outb(data[i], PORT(HOST_DATA));
    }
    spin_unlock_irqrestore(&devc->lock,flags);
    return 1;
}
Example #16
0
/**
 * bfq_cic_link - add @cic to @ioc.
 * @bfqd: bfq_data @cic refers to.
 * @ioc: io_context @cic belongs to.
 * @cic: the cic to link.
 * @gfp_mask: the mask to use for radix tree preallocations.
 *
 * Add @cic to @ioc, using @bfqd as the search key.  This enables us to
 * lookup the process specific cfq io context when entered from the block
 * layer.  Also adds @cic to a per-bfqd list, used when this queue is
 * removed.
 */
static int bfq_cic_link(struct bfq_data *bfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (ret == 0) {
		cic->ioc = ioc;

		/* No write-side locking, cic is not published yet. */
		rcu_assign_pointer(cic->key, bfqd);

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->bfq_radix_root,
					bfqd->cic_index, cic);
		if (ret == 0)
			hlist_add_head_rcu(&cic->cic_list, &ioc->bfq_cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (ret == 0) {
			spin_lock_irqsave(bfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &bfqd->cic_list);
			spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
		}
	}

	if (ret != 0)
		printk(KERN_ERR "bfq: cic link failed!\n");

	return ret;
}
Example #17
0
static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
{
    unsigned long flags;
    struct sk_buff *skb;

    spin_lock_irqsave(&adapter->rx_proc_lock, flags);
    if (adapter->rx_processing || adapter->rx_locked) {
        spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
        goto exit_rx_proc;
    } else {
        adapter->rx_processing = true;
        spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
    }

    /* Check for Rx data */
    while ((skb = skb_dequeue(&adapter->rx_data_q))) {
        atomic_dec(&adapter->rx_pending);
        if ((adapter->delay_main_work ||
                adapter->iface_type == MWIFIEX_USB) &&
                (atomic_read(&adapter->rx_pending) < LOW_RX_PENDING)) {
            if (adapter->if_ops.submit_rem_rx_urbs)
                adapter->if_ops.submit_rem_rx_urbs(adapter);
            adapter->delay_main_work = false;
            queue_work(adapter->workqueue, &adapter->main_work);
        }
        mwifiex_handle_rx_packet(adapter, skb);
    }
    spin_lock_irqsave(&adapter->rx_proc_lock, flags);
    adapter->rx_processing = false;
    spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);

exit_rx_proc:
    return 0;
}
Example #18
0
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-NULL and
 *	dereferences to a non-zero value, then this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
    DECLARE_WAITQUEUE(wait, current);
    unsigned long flags;
    int stop;

    might_sleep();

    add_wait_queue(&host->wq, &wait);
    spin_lock_irqsave(&host->lock, flags);
    while (1) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        stop = abort ? atomic_read(abort) : 0;
        if (stop || !host->claimed || host->claimer == current)
            break;
        spin_unlock_irqrestore(&host->lock, flags);
        schedule();
        spin_lock_irqsave(&host->lock, flags);
    }
    set_current_state(TASK_RUNNING);
    if (!stop) {
        host->claimed = 1;
        host->claimer = current;
        host->claim_cnt += 1;
    } else
        wake_up(&host->wq);
    spin_unlock_irqrestore(&host->lock, flags);
    remove_wait_queue(&host->wq, &wait);
    if (!stop)
        mmc_host_enable(host);
    return stop;
}
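A minimal usage sketch of the claim/release pairing described above. mmc_release_host() is the usual counterpart in the MMC core; the caller name and the operation in between are placeholders.

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

static void my_card_op(struct mmc_host *host)	/* hypothetical caller */
{
	__mmc_claim_host(host, NULL);	/* NULL abort: sleep until the host is ours */

	/* ... issue requests while the host is exclusively claimed ... */

	mmc_release_host(host);		/* wake the next waiter */
}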
Example #19
0
/**
 * percpu_ida_for_each_free - iterate free ids of a pool
 * @pool: pool to iterate
 * @fn: callback invoked for each free id
 * @data: parameter for @fn
 *
 * Note, this doesn't guarantee to iterate over all free ids strictly. Some free
 * ids might be missed, some might be iterated more than once, and some might
 * be iterated yet no longer be free by the time the callback sees them.
 */
int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
	void *data)
{
	unsigned long flags;
	struct percpu_ida_cpu *remote;
	unsigned cpu, i, err = 0;

	for_each_possible_cpu(cpu) {
		remote = per_cpu_ptr(pool->tag_cpu, cpu);
		spin_lock_irqsave(&remote->lock, flags);
		for (i = 0; i < remote->nr_free; i++) {
			err = fn(remote->freelist[i], data);
			if (err)
				break;
		}
		spin_unlock_irqrestore(&remote->lock, flags);
		if (err)
			goto out;
	}

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < pool->nr_free; i++) {
		err = fn(pool->freelist[i], data);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return err;
}
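A small sketch of what the @fn callback might look like; the bitmap passed through @data is purely illustrative.

#include <linux/bitops.h>
#include <linux/percpu_ida.h>

/* illustrative callback: record every free id the pool reports */
static int collect_free_tag(unsigned id, void *data)
{
	unsigned long *bitmap = data;	/* caller-provided scratch bitmap */

	__set_bit(id, bitmap);
	return 0;			/* non-zero would stop the iteration */
}

A caller would then invoke percpu_ida_for_each_free(&pool, collect_free_tag, bitmap) and treat a non-zero return value as the callback's early-stop code.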
Example #20
0
static void
sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
{
	unsigned long flags;
	void *page;

	do {
		page = sclp_unmake_buffer(buffer);
		spin_lock_irqsave(&sclp_con_lock, flags);

		/* remove the buffer from the out queue and return its page to the pool */
		list_del(&buffer->list);
		list_add_tail((struct list_head *) page, &sclp_con_pages);

		/* check if there is a pending buffer on the out queue */
		buffer = NULL;
		if (!list_empty(&sclp_con_outqueue))
			buffer = list_first_entry(&sclp_con_outqueue,
						  struct sclp_buffer, list);
		if (!buffer || sclp_con_suspended) {
			sclp_con_queue_running = 0;
			spin_unlock_irqrestore(&sclp_con_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&sclp_con_lock, flags);
	} while (sclp_emit_buffer(buffer, sclp_conbuf_callback));
}
Example #21
0
/**
 * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
 * the ordinal of the ring allocated, else returns -ENODEV if no rings
 * are available.
 * @ctrldev: points to the controller level dev (parent) that
 *           owns rings available for use.
 * @rdev:    points to where a pointer to the newly allocated queue's
 *           dev can be written to if successful.
 **/
int caam_jr_register(struct device *ctrldev, struct device **rdev)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_drv_private_jr *jrpriv = NULL;
	unsigned long flags;
	int ring;

	/* Lock, if free ring - assign, unlock */
	spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
		jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
		if (jrpriv->assign == JOBR_UNASSIGNED) {
			jrpriv->assign = JOBR_ASSIGNED;
			*rdev = ctrlpriv->jrdev[ring];
			spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
			return ring;
		}
	}

	/* If assigned, write dev where caller needs it */
	spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
	*rdev = NULL;

	return -ENODEV;
}
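A short sketch of a caller using the return-value convention documented above; the wrapper name and its error handling are assumptions, only the -ENODEV/ordinal semantics come from the function itself.

/* Hypothetical helper: grab any free job ring from the controller. */
static int my_driver_get_ring(struct device *ctrldev, struct device **jrdev)
{
	int ring;

	ring = caam_jr_register(ctrldev, jrdev);
	if (ring < 0) {
		dev_err(ctrldev, "no job ring available\n");
		return ring;		/* -ENODEV when all rings are taken */
	}

	dev_info(ctrldev, "using job ring %d\n", ring);
	return ring;			/* ordinal of the assigned ring */
}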
Example #22
0
static void sclp_conbuf_emit(void)
{
	struct sclp_buffer* buffer;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_con_lock, flags);
	if (sclp_conbuf)
		list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
	sclp_conbuf = NULL;
	if (sclp_con_queue_running || sclp_con_suspended)
		goto out_unlock;
	if (list_empty(&sclp_con_outqueue))
		goto out_unlock;
	buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer,
				  list);
	sclp_con_queue_running = 1;
	spin_unlock_irqrestore(&sclp_con_lock, flags);

	rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
	if (rc)
		sclp_conbuf_callback(buffer, rc);
	return;
out_unlock:
	spin_unlock_irqrestore(&sclp_con_lock, flags);
}
Example #23
0
void usb_serial_generic_write_bulk_callback(struct urb *urb)
{
	unsigned long flags;
	struct usb_serial_port *port = urb->context;
	int status = urb->status;
	int i;

	dbg("%s - port %d", __func__, port->number);

	for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
		if (port->write_urbs[i] == urb)
			break;

	spin_lock_irqsave(&port->lock, flags);
	port->tx_bytes -= urb->transfer_buffer_length;
	set_bit(i, &port->write_urbs_free);
	spin_unlock_irqrestore(&port->lock, flags);

	if (status) {
		dbg("%s - non-zero urb status: %d", __func__, status);

		spin_lock_irqsave(&port->lock, flags);
		kfifo_reset_out(&port->write_fifo);
		spin_unlock_irqrestore(&port->lock, flags);
	} else {
		usb_serial_generic_write_start(port);
	}

	usb_serial_port_softint(port);
}
Example #24
0
static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
	struct ehca_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);

	while (!list_empty(&cct->cq_list)) {
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
		spin_unlock_irqrestore(&cct->task_lock, flags);

		comp_event_callback(cq);
		if (atomic_dec_and_test(&cq->nr_events))
			wake_up(&cq->wait_completion);

		spin_lock_irqsave(&cct->task_lock, flags);
		spin_lock(&cq->task_lock);
		cq->nr_callbacks--;
		if (!cq->nr_callbacks) {
			list_del_init(cct->cq_list.next);
			cct->cq_jobs--;
		}
		spin_unlock(&cq->task_lock);
	}

	spin_unlock_irqrestore(&cct->task_lock, flags);
}
Example #25
0
//FIXME: May have to swap out with define directive. Also, remove excessive overhead.
void curse_trigger (_Bool defer_action, curse_id_t cid)
{
	struct task_curse_struct *cur_struct;
	unsigned long spinf;
	int index;

//	debug("Trigger on %lld\n", cid);
	index = index_from_curse_id(cid);

	cur_struct = &(current->curse_data);

	if (!unlikely(defer_action)) {
		uint64_t proc_active;

		spin_lock_irqsave(&((current->curse_data).protection), spinf);	//Check if curse is  active.
		proc_active = curse_list_pointer[index].curse_bit;
		spin_unlock_irqrestore(&((current->curse_data).protection), spinf);
		if (!(proc_active &= current->curse_data.curse_field))
			return;
		(curse_list_pointer[index].functions)->fun_inject(curse_list_pointer[index].curse_bit);
	} else {
		spin_lock_irqsave(&(cur_struct->protection), spinf);
		cur_struct->triggered |= (curse_list_pointer[index].curse_bit);
		spin_unlock_irqrestore(&(cur_struct->protection), spinf);
	}

}
Example #26
0
/**
 * gelic_net_xmit - transmits a frame over the device
 * @skb: packet to send out
 * @netdev: interface device structure
 *
 * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY when no descriptor is available
 */
int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct gelic_card *card = netdev_card(netdev);
	struct gelic_descr *descr;
	int result;
	unsigned long flags;

	spin_lock_irqsave(&card->tx_lock, flags);

	gelic_card_release_tx_chain(card, 0);

	descr = gelic_card_get_next_tx_descr(card);
	if (!descr) {
		/*
		 * no more descriptors free
		 */
		gelic_card_stop_queues(card);
		spin_unlock_irqrestore(&card->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	result = gelic_descr_prepare_tx(card, descr, skb);
	if (result) {
		/*
		 * DMA map failed.  As chances are that the failure
		 * would continue, just release the skb and return
		 */
		netdev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&card->tx_lock, flags);
		return NETDEV_TX_OK;
	}
	/*
	 * link this prepared descriptor to previous one
	 * to achieve high performance
	 */
	descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
	/*
	 * as hardware descriptor is modified in the above lines,
	 * ensure that the hardware sees it
	 */
	wmb();
	if (gelic_card_kick_txdma(card, descr)) {
		/*
		 * kick failed.
		 * release descriptors which were just prepared
		 */
		netdev->stats.tx_dropped++;
		gelic_descr_release_tx(card, descr);
		gelic_descr_release_tx(card, descr->next);
		card->tx_chain.tail = descr->next->next;
		dev_info(ctodev(card), "%s: kick failure\n", __func__);
	} else {
		/* OK, DMA started/reserved */
		netdev->trans_start = jiffies;
	}

	spin_unlock_irqrestore(&card->tx_lock, flags);
	return NETDEV_TX_OK;
}
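For context, this routine is meant to sit behind the networking core as the device's transmit hook. A sketch of that wiring follows; the companion callbacks are assumed to exist elsewhere in the driver.

#include <linux/netdevice.h>

static const struct net_device_ops gelic_netdevice_ops = {
	.ndo_open	= gelic_net_open,	/* assumed companion callbacks */
	.ndo_stop	= gelic_net_stop,
	.ndo_start_xmit	= gelic_net_xmit,	/* the function shown above */
};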
Example #27
0
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	unsigned long flags;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped",
			dev->name);
		return 0;
	}

	spin_lock_irqsave(&p->lock, flags);
	smd_enable_read_intr(ch);
	if (smd_write_avail(ch) < skb->len) {
		netif_stop_queue(dev);
		p->skb = skb;
		spin_unlock_irqrestore(&p->lock, flags);
		return 0;
	}
	smd_disable_read_intr(ch);
	spin_unlock_irqrestore(&p->lock, flags);

	_rmnet_xmit(skb, dev);

	return 0;
}
Example #28
0
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
    struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
    struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
    unsigned long flags;

    switch (flow_type) {
    case IRQ_TYPE_EDGE_FALLING:
        spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
        setbits32(mm->regs + GPIO_ICR,
                  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
        spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
        break;

    case IRQ_TYPE_EDGE_BOTH:
        spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
        clrbits32(mm->regs + GPIO_ICR,
                  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
        spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
        break;

    default:
        return -EINVAL;
    }

    return 0;
}
Example #29
0
static void
rpcrdma_run_tasklet(unsigned long data)
{
	struct rpcrdma_rep *rep;
	void (*func)(struct rpcrdma_rep *);
	unsigned long flags;

	data = data;
	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		func = rep->rr_func;
		rep->rr_func = NULL;
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		if (func)
			func(rep);
		else
			rpcrdma_recv_buffer_put(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}
Example #30
0
static ssize_t xhci_port_write(struct file *file,  const char __user *ubuf,
			       size_t count, loff_t *ppos)
{
	struct seq_file         *s = file->private_data;
	struct xhci_port	*port = s->private;
	struct xhci_hcd		*xhci = hcd_to_xhci(port->rhub->hcd);
	char                    buf[32];
	u32			portsc;
	unsigned long		flags;

	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;

	if (!strncmp(buf, "compliance", 10)) {
		/* If CTC is clear, compliance is enabled by default */
		if (!HCC2_CTC(xhci->hcc_params2))
			return count;
		spin_lock_irqsave(&xhci->lock, flags);
		/* compliance mode can only be enabled on ports in RxDetect */
		portsc = readl(port->addr);
		if ((portsc & PORT_PLS_MASK) != XDEV_RXDETECT) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			return -EPERM;
		}
		portsc = xhci_port_state_to_neutral(portsc);
		portsc &= ~PORT_PLS_MASK;
		portsc |= PORT_LINK_STROBE | XDEV_COMP_MODE;
		writel(portsc, port->addr);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		return -EINVAL;
	}
	return count;
}