Example #1
0
/*
 * Called upon a sleep-indication from the device
 */
static void brcm_device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct brcm_struct *brcm = hu->priv;

	BT_DBG("hu %p", hu);

	/* lock hcibrcm state */
	spin_lock_irqsave(&brcm->hcibrcm_lock, flags);

	/* sanity check */
	if (brcm->hcibrcm_state != HCIBRCM_AWAKE)
		BT_ERR("ERR: HCIBRCM_GO_TO_SLEEP_IND in state %ld", brcm->hcibrcm_state);

	/* acknowledge device sleep */
	if (send_hcibrcm_cmd(HCIBRCM_GO_TO_SLEEP_ACK, hu) < 0) {
		BT_ERR("cannot acknowledge device sleep");
		goto out;
	}

	/* update state */
	brcm->hcibrcm_state = HCIBRCM_ASLEEP;

out:
	spin_unlock_irqrestore(&brcm->hcibrcm_lock, flags);

	/* actually send the sleep ack packet */
	hci_uart_tx_wakeup(hu);

	spin_lock_irqsave(&brcm->hcibrcm_lock, flags);
	if (brcm->hcibrcm_state == HCIBRCM_ASLEEP)
		__brcm_msm_serial_clock_request_off(hu->tty);
	spin_unlock_irqrestore(&brcm->hcibrcm_lock, flags);
}
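Most examples in this collection share one shape, shown above: update state under a spinlock, drop the lock before calling out to code that may sleep or re-enter the driver, then re-take the lock and re-check the state, which may have changed in the meantime. A minimal sketch of that shape (all names hypothetical):

/* All names below are hypothetical, for illustration only. */
struct foo_dev {
	spinlock_t lock;
	int state;
};

static void foo_update_and_notify(struct foo_dev *foo)
{
	unsigned long flags;

	spin_lock_irqsave(&foo->lock, flags);
	foo->state = FOO_STATE_ASLEEP;		/* publish the new state */
	spin_unlock_irqrestore(&foo->lock, flags);

	foo_notify_upper_layer(foo);		/* may sleep or call back in */

	spin_lock_irqsave(&foo->lock, flags);
	if (foo->state == FOO_STATE_ASLEEP)	/* may have changed meanwhile */
		foo_enter_power_saving(foo);
	spin_unlock_irqrestore(&foo->lock, flags);
}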
Example #2
0
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}
Example #3
0
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
    struct usb_request	*req;
    unsigned long		flags;
    int			req_cnt = 0;

    /* fill unused rxq slots with some skb */
    spin_lock_irqsave(&dev->req_lock, flags);
    while (!list_empty(&dev->rx_reqs)) {
        /* break the cycle of continuous completion and re-submission */
        if (++req_cnt > qlen(dev->gadget))
            break;

        req = container_of(dev->rx_reqs.next,
                           struct usb_request, list);
        list_del_init(&req->list);
        spin_unlock_irqrestore(&dev->req_lock, flags);

        if (rx_submit(dev, req, gfp_flags) < 0) {
            spin_lock_irqsave(&dev->req_lock, flags);
            list_add(&req->list, &dev->rx_reqs);
            spin_unlock_irqrestore(&dev->req_lock, flags);
            defer_kevent(dev, WORK_RX_MEMORY);
            return;
        }

        spin_lock_irqsave(&dev->req_lock, flags);
    }
    spin_unlock_irqrestore(&dev->req_lock, flags);
}
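rx_fill() above is an instance of the detach-work-retry idiom: take one entry off the list under the lock, drop the lock to work on it, and give the entry back if the work fails. In outline (hypothetical types and helpers; only the locking shape matters here):

struct entry {
	struct list_head list;
	/* payload */
};

static void drain_free_list(struct pool *p)
{
	struct entry *e;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	while (!list_empty(&p->free_list)) {
		e = list_first_entry(&p->free_list, struct entry, list);
		list_del_init(&e->list);	/* detach under the lock */
		spin_unlock_irqrestore(&p->lock, flags);

		if (use_entry(e) < 0) {		/* may sleep or allocate */
			spin_lock_irqsave(&p->lock, flags);
			list_add(&e->list, &p->free_list);	/* give it back */
			spin_unlock_irqrestore(&p->lock, flags);
			return;
		}

		spin_lock_irqsave(&p->lock, flags);
	}
	spin_unlock_irqrestore(&p->lock, flags);
}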
Example #4
0
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
    struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
    struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
    unsigned long flags;

    switch (flow_type) {
    case IRQ_TYPE_EDGE_FALLING:
        spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
        setbits32(mm->regs + GPIO_ICR,
                  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
        spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
        break;

    case IRQ_TYPE_EDGE_BOTH:
        spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
        clrbits32(mm->regs + GPIO_ICR,
                  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
        spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
        break;

    default:
        return -EINVAL;
    }

    return 0;
}
Example #5
0
/**
 * percpu_ida_for_each_free - iterate free ids of a pool
 * @pool: pool to iterate
 * @fn: callback invoked for each free id
 * @data: parameter for @fn
 *
 * Note: this does not guarantee that every free id is iterated, nor that ids
 * are iterated only once. Some free ids might be missed, some might be
 * visited more than once, and some might be visited while no longer free.
 */
int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
	void *data)
{
	unsigned long flags;
	struct percpu_ida_cpu *remote;
	unsigned cpu, i;
	int err = 0;

	for_each_possible_cpu(cpu) {
		remote = per_cpu_ptr(pool->tag_cpu, cpu);
		spin_lock_irqsave(&remote->lock, flags);
		for (i = 0; i < remote->nr_free; i++) {
			err = fn(remote->freelist[i], data);
			if (err)
				break;
		}
		spin_unlock_irqrestore(&remote->lock, flags);
		if (err)
			goto out;
	}

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < pool->nr_free; i++) {
		err = fn(pool->freelist[i], data);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return err;
}
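A usage sketch for the iterator above, assuming only the percpu_ida_cb signature, int (*)(unsigned id, void *data); the callback and helper names are illustrative:

static int count_free_id(unsigned id, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* a non-zero return stops the iteration */
}

static unsigned int approx_free_count(struct percpu_ida *pool)
{
	unsigned int n_free = 0;

	percpu_ida_for_each_free(pool, count_free_id, &n_free);
	return n_free;	/* approximate, per the caveats above */
}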
Example #6
0
static void
hfc_l1_timer(struct hfc4s8s_l1 *l1)
{
	u_long flags;

	if (!l1->enabled)
		return;

	spin_lock_irqsave(&l1->lock, flags);
	if (l1->nt_mode) {
		l1->l1_state = 1;
		Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
		Write_hfc8(l1->hw, A_ST_WR_STA, 0x11);
		spin_unlock_irqrestore(&l1->lock, flags);
		l1->d_if.ifc.l1l2(&l1->d_if.ifc,
				  PH_DEACTIVATE | INDICATION, NULL);
		spin_lock_irqsave(&l1->lock, flags);
		l1->l1_state = 1;
		Write_hfc8(l1->hw, A_ST_WR_STA, 0x1);
		spin_unlock_irqrestore(&l1->lock, flags);
	} else {
		/* activation timed out */
		Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
		Write_hfc8(l1->hw, A_ST_WR_STA, 0x13);
		spin_unlock_irqrestore(&l1->lock, flags);
		l1->d_if.ifc.l1l2(&l1->d_if.ifc,
				  PH_DEACTIVATE | INDICATION, NULL);
		spin_lock_irqsave(&l1->lock, flags);
		Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
		Write_hfc8(l1->hw, A_ST_WR_STA, 0x3);
		spin_unlock_irqrestore(&l1->lock, flags);
	}
}				/* hfc_l1_timer */
Example #7
0
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-NULL and
 *	points to a non-zero value, this returns prematurely with that
 *	non-zero value without claiming the host.  Otherwise it returns
 *	zero with the host claimed.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
    DECLARE_WAITQUEUE(wait, current);
    unsigned long flags;
    int stop;

    might_sleep();

    add_wait_queue(&host->wq, &wait);
    spin_lock_irqsave(&host->lock, flags);
    while (1) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        stop = abort ? atomic_read(abort) : 0;
        if (stop || !host->claimed || host->claimer == current)
            break;
        spin_unlock_irqrestore(&host->lock, flags);
        schedule();
        spin_lock_irqsave(&host->lock, flags);
    }
    set_current_state(TASK_RUNNING);
    if (!stop) {
        host->claimed = 1;
        host->claimer = current;
        host->claim_cnt += 1;
    } else
        wake_up(&host->wq);
    spin_unlock_irqrestore(&host->lock, flags);
    remove_wait_queue(&host->wq, &wait);
    if (!stop)
        mmc_host_enable(host);
    return stop;
}
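A caller-side sketch of the contract documented above; apart from __mmc_claim_host() and its usual counterpart mmc_release_host(), the names are hypothetical:

static int do_card_operation(struct mmc_host *host, atomic_t *abort)
{
	int err;

	err = __mmc_claim_host(host, abort);
	if (err)
		return err;	/* aborted; the host was never claimed */

	/* the host is now held exclusively by this task */
	err = issue_requests(host);	/* hypothetical operation */

	mmc_release_host(host);
	return err;
}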
Example #8
0
static int pm_power_set_property(struct power_supply *psy,
                                 enum power_supply_property psp,
                                 const union power_supply_propval *val)
{
    struct isl9519q_struct *isl_chg = container_of(psy,
                                      struct isl9519q_struct,
                                      dc_psy);
    unsigned long flags;
    int rc;

    switch (psp) {
    case POWER_SUPPLY_PROP_ONLINE:
        if (val->intval) {
            isl_chg->present = val->intval;
        } else {
            isl_chg->present = 0;
            if (isl_chg->charging)
                goto stop_charging;
        }
        break;
    case POWER_SUPPLY_PROP_CURRENT_MAX:
        if (val->intval) {
            if (isl_chg->chgcurrent != val->intval)
                return -EINVAL;
        }
        break;
    case POWER_SUPPLY_PROP_CHARGE_TYPE:
        if (val->intval && isl_chg->present) {
            if (val->intval == POWER_SUPPLY_CHARGE_TYPE_FAST)
                goto start_charging;
            if (val->intval == POWER_SUPPLY_CHARGE_TYPE_NONE)
                goto stop_charging;
        } else {
            return -EINVAL;
        }
        break;
    default:
        return -EINVAL;
    }
    power_supply_changed(&isl_chg->dc_psy);
    return 0;

start_charging:
    spin_lock_irqsave(&isl_chg->lock, flags);
    rc = isl9519q_start_charging(isl_chg, 0, isl_chg->chgcurrent);
    if (rc)
        pr_err("Failed to start charging rc=%d\n", rc);
    spin_unlock_irqrestore(&isl_chg->lock, flags);
    power_supply_changed(&isl_chg->dc_psy);
    return rc;

stop_charging:
    spin_lock_irqsave(&isl_chg->lock, flags);
    rc = isl9519q_stop_charging(isl_chg);
    if (rc)
        pr_err("Failed to start charging rc=%d\n", rc);
    spin_unlock_irqrestore(&isl_chg->lock, flags);
    power_supply_changed(&isl_chg->dc_psy);
    return rc;
}
Example #9
0
static int
Asus_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
    u_long flags;

    switch (mt) {
    case CARD_RESET:
        spin_lock_irqsave(&cs->lock, flags);
        reset_asuscom(cs);
        spin_unlock_irqrestore(&cs->lock, flags);
        return(0);
    case CARD_RELEASE:
        release_io_asuscom(cs);
        return(0);
    case CARD_INIT:
        spin_lock_irqsave(&cs->lock, flags);
        cs->debug |= L1_DEB_IPAC;
        inithscxisac(cs, 3);
        spin_unlock_irqrestore(&cs->lock, flags);
        return(0);
    case CARD_TEST:
        return(0);
    }
    return(0);
}
Example #10
0
void kbase_backend_ctx_count_changed(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
	unsigned long flags;

	lockdep_assert_held(&js_devdata->runpool_mutex);

	if (!timer_callback_should_run(kbdev)) {
		/* Take spinlock to force synchronisation with timer */
		spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
		backend->timer_running = false;
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		/* From now on, return value of timer_callback_should_run() will
		 * also cause the timer to not requeue itself. Its return value
		 * cannot change, because it depends on variables updated with
		 * the runpool_mutex held, which the caller of this must also
		 * hold */
		hrtimer_cancel(&backend->scheduling_timer);
	}

	if (timer_callback_should_run(kbdev) && !backend->timer_running) {
		/* Take spinlock to force synchronisation with timer */
		spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
		backend->timer_running = true;
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		hrtimer_start(&backend->scheduling_timer,
			HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
							HRTIMER_MODE_REL);

		KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u,
									0u);
	}
}
Example #11
0
/* Old_termios contains the original termios settings and
 * tty->termios contains the new setting to be used.
 */
static void ch341_set_termios(struct tty_struct *tty,
		struct usb_serial_port *port, struct ktermios *old_termios)
{
	struct ch341_private *priv = usb_get_serial_port_data(port);
	unsigned baud_rate;
	unsigned long flags;

	baud_rate = tty_get_baud_rate(tty);

	priv->baud_rate = baud_rate;

	if (baud_rate) {
		spin_lock_irqsave(&priv->lock, flags);
		priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
		spin_unlock_irqrestore(&priv->lock, flags);
		ch341_set_baudrate(port->serial->dev, priv);
	} else {
		spin_lock_irqsave(&priv->lock, flags);
		priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	ch341_set_handshake(port->serial->dev, priv->line_control);

	/* Unimplemented:
	 * (cflag & CSIZE) : data bits [5, 8]
	 * (cflag & PARENB) : parity {NONE, EVEN, ODD}
	 * (cflag & CSTOPB) : stop bits [1, 2]
	 */
}
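For reference, the character-format bits listed in the "Unimplemented" note are conventionally decoded as below; this is a generic termios sketch, not CH341 register programming, and whether tty->termios is a pointer or an embedded struct depends on the kernel version:

	unsigned int cflag = tty->termios->c_cflag;
	unsigned int data_bits, stop_bits;
	bool parity = cflag & PARENB;		/* parity enabled at all? */
	bool parity_odd = cflag & PARODD;	/* odd vs. even */

	switch (cflag & CSIZE) {
	case CS5: data_bits = 5; break;
	case CS6: data_bits = 6; break;
	case CS7: data_bits = 7; break;
	default:  data_bits = 8; break;
	}
	stop_bits = (cflag & CSTOPB) ? 2 : 1;
	/* these values would then be mapped onto device-specific registers */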
Example #12
0
int vpe_disable(void)
{
	int rc = 0;
	unsigned long flags = 0;
	CDBG("%s", __func__);
	spin_lock_irqsave(&vpe_ctrl->lock, flags);
	if (vpe_ctrl->state == VPE_STATE_IDLE) {
		CDBG("%s: VPE already disabled", __func__);
		spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
		return rc;
	}
	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);

	disable_irq(vpe_ctrl->vpeirq->start);
	tasklet_kill(&vpe_tasklet);
	msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
			vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);

	regulator_disable(vpe_ctrl->fs_vpe);
	regulator_put(vpe_ctrl->fs_vpe);
	vpe_ctrl->fs_vpe = NULL;
	spin_lock_irqsave(&vpe_ctrl->lock, flags);
	vpe_ctrl->state = VPE_STATE_IDLE;
	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
	return rc;
}
Example #13
0
void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);

	part->act_state = XPC_P_AS_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}
Example #14
0
static int exemple_read(struct file * filp, char * buffer,
                        size_t length, loff_t * offset)
{
	char chaine[80];
	unsigned long masque;

	spin_lock_irqsave(& spl_data_exemple, masque);

	while (lg_data_exemple == 0) {
		spin_unlock_irqrestore(& spl_data_exemple, masque);
		if (wait_event_interruptible(wq_data_exemple,
		                    (lg_data_exemple != 0)) != 0) {
			printk(KERN_INFO "Sortie sur signal\n");
			return -ERESTARTSYS;
		}
		spin_lock_irqsave(& spl_data_exemple, masque);
	}
	
	snprintf(chaine, 80, "%ld\n", data_exemple[0]);
	if (length < (strlen(chaine)+1)) {
		spin_unlock_irqrestore(& spl_data_exemple, masque);
		return -ENOMEM;
	}

	lg_data_exemple --;
	if (lg_data_exemple > 0)
		memmove(data_exemple, & (data_exemple[1]), lg_data_exemple * sizeof(long int));
	
	spin_unlock_irqrestore(& spl_data_exemple, masque);

	if (copy_to_user(buffer, chaine, strlen(chaine)+1) != 0)
		return -EFAULT;
	return strlen(chaine)+1;
}
Example #15
0
/**
 * p9_client_cb - call back from transport to client
 * @c: client state
 * @req: request received
 *
 */
void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
{
	struct p9_req_t *other_req;
	unsigned long flags;

	P9_DPRINTK(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);

	if (req->status == REQ_STATUS_ERROR)
		wake_up(req->wq);

	if (req->flush_tag) { 			/* flush receive path */
		P9_DPRINTK(P9_DEBUG_9P, "<<< RFLUSH %d\n", req->tc->tag);
		spin_lock_irqsave(&c->lock, flags);
		other_req = p9_tag_lookup(c, req->flush_tag);
		if (other_req->status != REQ_STATUS_FLSH) /* stale flush */
			spin_unlock_irqrestore(&c->lock, flags);
		else {
			other_req->status = REQ_STATUS_FLSHD;
			spin_unlock_irqrestore(&c->lock, flags);
			wake_up(other_req->wq);
		}
		p9_free_req(c, req);
	} else { 				/* normal receive path */
		P9_DPRINTK(P9_DEBUG_MUX, "normal: tag %d\n", req->tc->tag);
		spin_lock_irqsave(&c->lock, flags);
		if (req->status != REQ_STATUS_FLSHD)
			req->status = REQ_STATUS_RCVD;
		spin_unlock_irqrestore(&c->lock, flags);
		wake_up(req->wq);
		P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
	}
}
Example #16
0
/*******************************************************************************
 *
 *	Lock/Synchronization related functions.
 *
 *******************************************************************************/
int ral_spin_lock(KSPIN_LOCK *pLock, unsigned long *irq_flag)
{
	unsigned long local_flag;

	if (!pLock->pOSLock) {
		printk("Error, invalid lock structure!\n");
		dump_stack();
		return FALSE;
	}

	spin_lock_irqsave(&g_reslock, local_flag);
	if (pLock->state == RES_INVALID) {
		spin_unlock_irqrestore(&g_reslock, local_flag);
		printk("Error, try to lock a invalid structure!\n");
		return FALSE;
	}
	else {
		pLock->ref_cnt++;
		pLock->state = RES_INUSE;
	}
	spin_unlock_irqrestore(&g_reslock, local_flag);
	
	spin_lock_irqsave((spinlock_t *)pLock->pOSLock, *irq_flag);

	return TRUE;
}
Example #17
0
static int alarm_release(struct inode *inode, struct file *file)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&alarm_slock, flags);
	if (file->private_data) {
		for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
			uint32_t alarm_type_mask = 1U << i;
			if (alarm_enabled & alarm_type_mask) {
				alarm_dbg(INFO,
					  "%s: clear alarm, pending %d\n",
					  __func__,
					  !!(alarm_pending & alarm_type_mask));
				alarm_enabled &= ~alarm_type_mask;
			}
			spin_unlock_irqrestore(&alarm_slock, flags);
			devalarm_cancel(&alarms[i]);
			spin_lock_irqsave(&alarm_slock, flags);
		}
		if (alarm_pending | wait_pending) {
			if (alarm_pending)
				alarm_dbg(INFO, "%s: clear pending alarms %x\n",
					  __func__, alarm_pending);
			__pm_relax(&alarm_wake_lock);
			wait_pending = 0;
			alarm_pending = 0;
		}
		alarm_opened = 0;
	}
	spin_unlock_irqrestore(&alarm_slock, flags);
	return 0;
}
Example #18
0
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
    struct usb_request	*req;
    unsigned long		flags;
    int			req_cnt = 0;

    spin_lock_irqsave(&dev->req_lock, flags);
    while (!list_empty(&dev->rx_reqs)) {

        if (++req_cnt > qlen(dev->gadget))
            break;

        req = container_of(dev->rx_reqs.next,
                           struct usb_request, list);
        list_del_init(&req->list);
        spin_unlock_irqrestore(&dev->req_lock, flags);

        if (rx_submit(dev, req, gfp_flags) < 0) {
            spin_lock_irqsave(&dev->req_lock, flags);
            list_add(&req->list, &dev->rx_reqs);
            spin_unlock_irqrestore(&dev->req_lock, flags);
            defer_kevent(dev, WORK_RX_MEMORY);
            return;
        }

        spin_lock_irqsave(&dev->req_lock, flags);
    }
    spin_unlock_irqrestore(&dev->req_lock, flags);
}
Example #19
0
/**
 * bfq_cic_link - add @cic to @ioc.
 * @bfqd: bfq_data @cic refers to.
 * @ioc: io_context @cic belongs to.
 * @cic: the cic to link.
 * @gfp_mask: the mask to use for radix tree preallocations.
 *
 * Add @cic to @ioc, using @bfqd as the search key.  This enables us to
 * lookup the process specific cfq io context when entered from the block
 * layer.  Also adds @cic to a per-bfqd list, used when this queue is
 * removed.
 */
static int bfq_cic_link(struct bfq_data *bfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (ret == 0) {
		cic->ioc = ioc;

		/* No write-side locking, cic is not published yet. */
		rcu_assign_pointer(cic->key, bfqd);

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->bfq_radix_root,
					bfqd->cic_index, cic);
		if (ret == 0)
			hlist_add_head_rcu(&cic->cic_list, &ioc->bfq_cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (ret == 0) {
			spin_lock_irqsave(bfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &bfqd->cic_list);
			spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
		}
	}

	if (ret != 0)
		printk(KERN_ERR "bfq: cic link failed!\n");

	return ret;
}
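bfq_cic_link() relies on the radix-tree preload idiom: radix_tree_preload() preallocates per-CPU nodes in a context where gfp_mask may allow sleeping, so that the later radix_tree_insert() under a spinlock cannot fail for lack of memory. The skeleton, with hypothetical lock and root names:

static int insert_under_lock(struct radix_tree_root *root, unsigned long index,
			     void *item, spinlock_t *lock, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);	/* may sleep, per gfp_mask */
	if (ret)
		return ret;			/* preload failed; nothing to undo */

	spin_lock_irqsave(lock, flags);
	ret = radix_tree_insert(root, index, item);	/* draws on the preload */
	spin_unlock_irqrestore(lock, flags);

	radix_tree_preload_end();		/* re-enables preemption */
	return ret;
}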
Example #20
0
static int
NETjet_S_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
    u_long flags;

    switch (mt) {
    case CARD_RESET:
        spin_lock_irqsave(&cs->lock, flags);
        reset_netjet_s(cs);
        spin_unlock_irqrestore(&cs->lock, flags);
        return(0);
    case CARD_RELEASE:
        release_io_netjet(cs);
        return(0);
    case CARD_INIT:
        reset_netjet_s(cs);
        inittiger(cs);
        spin_lock_irqsave(&cs->lock, flags);
        clear_pending_isac_ints(cs);
        initisac(cs);
        /* Reenable all IRQ */
        cs->writeisac(cs, ISAC_MASK, 0);
        spin_unlock_irqrestore(&cs->lock, flags);
        return(0);
    case CARD_TEST:
        return(0);
    }
    return(0);
}
Example #21
0
static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
	struct ehca_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);

	while (!list_empty(&cct->cq_list)) {
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
		spin_unlock_irqrestore(&cct->task_lock, flags);

		comp_event_callback(cq);
		if (atomic_dec_and_test(&cq->nr_events))
			wake_up(&cq->wait_completion);

		spin_lock_irqsave(&cct->task_lock, flags);
		spin_lock(&cq->task_lock);
		cq->nr_callbacks--;
		if (!cq->nr_callbacks) {
			list_del_init(cct->cq_list.next);
			cct->cq_jobs--;
		}
		spin_unlock(&cq->task_lock);
	}

	spin_unlock_irqrestore(&cct->task_lock, flags);
}
Example #22
0
ssize_t mdp4_dsi_video_show_event(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
    int cndx;
    struct vsycn_ctrl *vctrl;
    ssize_t ret = 0;
    u64 vsync_tick;
    ktime_t timestamp;
    unsigned long flags;

    cndx = 0;
    vctrl = &vsync_ctrl_db[0];

    spin_lock_irqsave(&vctrl->spin_lock, flags);
    timestamp = vctrl->vsync_time;
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);

    ret = wait_event_interruptible(vctrl->wait_queue,
                                   !ktime_equal(timestamp, vctrl->vsync_time) &&
                                   vctrl->vsync_irq_enabled);
    if (ret == -ERESTARTSYS)
        return ret;

    spin_lock_irqsave(&vctrl->spin_lock, flags);
    vsync_tick = ktime_to_ns(vctrl->vsync_time);
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);

    ret = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
    buf[strlen(buf) + 1] = '\0';
    return ret;
}
Example #23
0
static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
{
    unsigned long flags;
    struct sk_buff *skb;

    spin_lock_irqsave(&adapter->rx_proc_lock, flags);
    if (adapter->rx_processing || adapter->rx_locked) {
        spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
        goto exit_rx_proc;
    } else {
        adapter->rx_processing = true;
        spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
    }

    /* Check for Rx data */
    while ((skb = skb_dequeue(&adapter->rx_data_q))) {
        atomic_dec(&adapter->rx_pending);
        if ((adapter->delay_main_work ||
                adapter->iface_type == MWIFIEX_USB) &&
                (atomic_read(&adapter->rx_pending) < LOW_RX_PENDING)) {
            if (adapter->if_ops.submit_rem_rx_urbs)
                adapter->if_ops.submit_rem_rx_urbs(adapter);
            adapter->delay_main_work = false;
            queue_work(adapter->workqueue, &adapter->main_work);
        }
        mwifiex_handle_rx_packet(adapter, skb);
    }
    spin_lock_irqsave(&adapter->rx_proc_lock, flags);
    adapter->rx_processing = false;
    spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);

exit_rx_proc:
    return 0;
}
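The rx_processing flag in mwifiex_process_rx() is a lock-protected reentrancy guard: the lock only protects the flag itself, and the actual work runs unlocked. In outline (hypothetical names):

struct work_ctx {
	spinlock_t lock;
	bool busy;
};

static int process_once(struct work_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->busy) {			/* another caller is already in */
		spin_unlock_irqrestore(&ctx->lock, flags);
		return 0;
	}
	ctx->busy = true;
	spin_unlock_irqrestore(&ctx->lock, flags);

	do_work(ctx);				/* runs without the lock held */

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->busy = false;
	spin_unlock_irqrestore(&ctx->lock, flags);
	return 0;
}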
Example #24
0
static void
setva(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	int len;
	u_long flags;

	spin_lock_irqsave(&l2->lock, flags);
	while (l2->va != nr) {
		(l2->va)++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		len = l2->windowar[l2->sow]->len;
		if (PACKET_NOACK == l2->windowar[l2->sow]->pkt_type)
			len = -1;
		dev_kfree_skb(l2->windowar[l2->sow]);
		l2->windowar[l2->sow] = NULL;
		l2->sow = (l2->sow + 1) % l2->window;
		spin_unlock_irqrestore(&l2->lock, flags);
		if (test_bit(FLG_LLI_L2WAKEUP, &st->lli.flag) && (len >= 0))
			lli_writewakeup(st, len);
		spin_lock_irqsave(&l2->lock, flags);
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}
Example #25
0
void usb_serial_generic_write_bulk_callback(struct urb *urb)
{
	unsigned long flags;
	struct usb_serial_port *port = urb->context;
	int status = urb->status;
	int i;

	dbg("%s - port %d", __func__, port->number);

	for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
		if (port->write_urbs[i] == urb)
			break;

	spin_lock_irqsave(&port->lock, flags);
	port->tx_bytes -= urb->transfer_buffer_length;
	set_bit(i, &port->write_urbs_free);
	spin_unlock_irqrestore(&port->lock, flags);

	if (status) {
		dbg("%s - non-zero urb status: %d", __func__, status);

		spin_lock_irqsave(&port->lock, flags);
		kfifo_reset_out(&port->write_fifo);
		spin_unlock_irqrestore(&port->lock, flags);
	} else {
		usb_serial_generic_write_start(port);
	}

	usb_serial_port_softint(port);
}
Example #26
0
static int
Sportster_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	switch (mt) {
		case CARD_RESET:
			spin_lock_irqsave(&cs->lock, flags);
			reset_sportster(cs);
			spin_unlock_irqrestore(&cs->lock, flags);
			return(0);
		case CARD_RELEASE:
			release_io_sportster(cs);
			return(0);
		case CARD_INIT:
			spin_lock_irqsave(&cs->lock, flags);
			reset_sportster(cs);
			inithscxisac(cs, 1);
			cs->hw.spt.res_irq |= SPORTSTER_INTE; /* IRQ On */
			byteout(cs->hw.spt.cfg_reg + SPORTSTER_RES_IRQ, cs->hw.spt.res_irq);
			inithscxisac(cs, 2);
			spin_unlock_irqrestore(&cs->lock, flags);
			return(0);
		case CARD_TEST:
			return(0);
	}
	return(0);
}
Example #27
0
//FIXME: May have to swap out with define directive. Also, remove excessive overhead.
void curse_trigger (_Bool defer_action, curse_id_t cid)
{
	struct task_curse_struct *cur_struct;
	unsigned long spinf;
	int index;

//	debug("Trigger on %lld\n", cid);
	index = index_from_curse_id(cid);

	cur_struct = &(current->curse_data);

	if (likely(!defer_action)) {
		uint64_t proc_active;

		spin_lock_irqsave(&((current->curse_data).protection), spinf);	//Check if curse is active.
		proc_active = curse_list_pointer[index].curse_bit;
		spin_unlock_irqrestore(&((current->curse_data).protection), spinf);
		proc_active &= current->curse_data.curse_field;
		if (!proc_active)
			return;
		(curse_list_pointer[index].functions)->fun_inject(curse_list_pointer[index].curse_bit);
	} else {
		spin_lock_irqsave(&(cur_struct->protection), spinf);
		cur_struct->triggered |= (curse_list_pointer[index].curse_bit);
		spin_unlock_irqrestore(&(cur_struct->protection), spinf);
	}

}
Example #28
0
static irqreturn_t if_cs_interrupt(int irq, void *data)
{
	struct if_cs_card *card = data;
	struct lbs_private *priv = card->priv;
	u16 cause;

	lbs_deb_enter(LBS_DEB_CS);

	/* Ask card interrupt cause register if there is something for us */
	cause = if_cs_read16(card, IF_CS_CARD_INT_CAUSE);
	lbs_deb_cs("cause 0x%04x\n", cause);

	if (cause == 0) {
		/* Not for us */
		return IRQ_NONE;
	}

	if (cause == 0xffff) {
		/* Read in junk, the card has probably been removed */
		card->priv->surpriseremoved = 1;
		return IRQ_HANDLED;
	}

	if (cause & IF_CS_BIT_RX) {
		struct sk_buff *skb;
		lbs_deb_cs("rx packet\n");
		skb = if_cs_receive_data(priv);
		if (skb)
			lbs_process_rxed_packet(priv, skb);
	}

	if (cause & IF_CS_BIT_TX) {
		lbs_deb_cs("tx done\n");
		lbs_host_to_card_done(priv);
	}

	if (cause & IF_CS_BIT_RESP) {
		unsigned long flags;
		u8 i;

		lbs_deb_cs("cmd resp\n");
		spin_lock_irqsave(&priv->driver_lock, flags);
		i = (priv->resp_idx == 0) ? 1 : 0;
		spin_unlock_irqrestore(&priv->driver_lock, flags);

		BUG_ON(priv->resp_len[i]);
		if_cs_receive_cmdres(priv, priv->resp_buf[i],
			&priv->resp_len[i]);

		spin_lock_irqsave(&priv->driver_lock, flags);
		lbs_notify_command_response(priv, i);
		spin_unlock_irqrestore(&priv->driver_lock, flags);
	}

	if (cause & IF_CS_BIT_EVENT) {
		u16 status = if_cs_read16(priv->card, IF_CS_CARD_STATUS);
		if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE,
			IF_CS_BIT_EVENT);
		lbs_queue_event(priv, (status & IF_CS_CARD_STATUS_MASK) >> 8);
	}

	return IRQ_HANDLED;
}
Example #29
0
static void
rpcrdma_run_tasklet(unsigned long data)
{
	struct rpcrdma_rep *rep;
	void (*func)(struct rpcrdma_rep *);
	unsigned long flags;

	data = data;	/* touch the unused tasklet argument to silence warnings */
	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		func = rep->rr_func;
		rep->rr_func = NULL;
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		if (func)
			func(rep);
		else
			rpcrdma_recv_buffer_put(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}
Example #30
0
void zd_usb_disable_rx(struct zd_usb *usb)
{
    int i;
    unsigned long flags;
    struct urb **urbs;
    unsigned int count;
    struct zd_usb_rx *rx = &usb->rx;

    spin_lock_irqsave(&rx->lock, flags);
    urbs = rx->urbs;
    count = rx->urbs_count;
    spin_unlock_irqrestore(&rx->lock, flags);
    if (!urbs)
        return;

    for (i = 0; i < count; i++) {
        usb_kill_urb(urbs[i]);
        free_urb(urbs[i]);
    }
    kfree(urbs);

    spin_lock_irqsave(&rx->lock, flags);
    rx->urbs = NULL;
    rx->urbs_count = 0;
    spin_unlock_irqrestore(&rx->lock, flags);
}
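zd_usb_disable_rx() closes the collection with a snapshot teardown idiom: copy the resource pointers out under the lock, do the sleeping work (usb_kill_urb() may sleep) with the lock dropped, then publish the cleared state under the lock. The same shape with hypothetical names:

struct res_ctx {
	spinlock_t lock;
	struct item **items;
	unsigned int count;
};

static void teardown_items(struct res_ctx *ctx)
{
	struct item **items;
	unsigned int i, count;
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	items = ctx->items;		/* take a private snapshot */
	count = ctx->count;
	spin_unlock_irqrestore(&ctx->lock, flags);

	if (!items)
		return;

	for (i = 0; i < count; i++)
		release_item(items[i]);	/* may sleep; lock must not be held */
	kfree(items);

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->items = NULL;		/* publish the torn-down state */
	ctx->count = 0;
	spin_unlock_irqrestore(&ctx->lock, flags);
}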