Example #1
/*
 * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 */
static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				     struct iwl_rx_mem_buffer
				     *out[RX_CLAIM_REQ_ALLOC])
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return -ENOMEM, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return -ENOMEM;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		out[i] = list_first_entry(&rba->rbd_allocated,
			       struct iwl_rx_mem_buffer, list);
		list_del(&out[i]->list);
	}
	spin_unlock(&rba->lock);

	return 0;
}
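
The comment above is the whole contract of atomic_dec_if_positive(): it always returns the old value minus one, and performs the decrement only when the old value was positive. A minimal non-atomic model of that behaviour (for illustration only; the real primitive does this in a single atomic read-modify-write):

/* Non-atomic model of atomic_dec_if_positive(), for illustration only. */
static int dec_if_positive_model(int *v)
{
	int old = *v;		/* snapshot the counter */

	if (old > 0)
		*v = old - 1;	/* decrement only when the old value was positive */

	return old - 1;		/* so 0 maps to -1: "no ready requests" */
}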
Example #2
void gelic_card_down(struct gelic_card *card)
{
	u64 mask;
	pr_debug("%s: called\n", __func__);
	mutex_lock(&card->updown_lock);
	if (atomic_dec_if_positive(&card->users) == 0) {
		pr_debug("%s: real do\n", __func__);
		napi_disable(&card->napi);
		/*
		 * Disable irq. Wireless interrupts will
		 * be disabled later if any
		 */
		mask = card->irq_mask & (GELIC_CARD_WLAN_EVENT_RECEIVED |
					 GELIC_CARD_WLAN_COMMAND_COMPLETED);
		gelic_card_set_irq_mask(card, mask);
		/* stop rx */
		gelic_card_disable_rxdmac(card);
		gelic_card_reset_chain(card, &card->rx_chain,
				       card->descr + GELIC_NET_TX_DESCRIPTORS);
		/* stop tx */
		gelic_card_disable_txdmac(card);
	}
	mutex_unlock(&card->updown_lock);
	pr_debug("%s: done\n", __func__);
}
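
The == 0 test makes this a last-user-down pattern: teardown runs only when users drops from 1 to 0, while a call with users already at 0 returns -1 and changes nothing. The matching up path would mirror it under the same updown_lock, which is also what makes the read/inc pair below safe. A sketch under that assumption, not the driver's actual gelic_card_up():

void gelic_card_up_sketch(struct gelic_card *card)
{
	mutex_lock(&card->updown_lock);
	if (atomic_read(&card->users) == 0) {
		/* first user: undo the teardown done in gelic_card_down() */
		gelic_card_set_irq_mask(card, card->irq_mask);
		napi_enable(&card->napi);
		/* ... restart rx/tx DMA here ... */
	}
	atomic_inc(&card->users);
	mutex_unlock(&card->updown_lock);
}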
Example #3
/* /dev/watchdog operations */
static int wdt_gpi_open(struct inode *inode, struct file *file)
{
	int res;

	if (unlikely(atomic_dec_if_positive(&opencnt) < 0))
		return -EBUSY;

	expect_close = 0;
	if (locked) {
		module_put(THIS_MODULE);
		free_irq(wd_irq, &miscdev);
		locked = 0;
	}

	res = request_irq(wd_irq, wdt_gpi_irqhdl, IRQF_SHARED | IRQF_DISABLED,
			  wdt_gpi_name, &miscdev);
	if (unlikely(res))
		return res;

	wdt_gpi_set_timeout(timeout);
	wdt_gpi_start();

	printk(KERN_INFO "%s: watchdog started, timeout = %u seconds\n",
		wdt_gpi_name, timeout);
	return nonseekable_open(inode, file);
}
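
Here opencnt is a single-open gate: assuming it is initialised to 1, the first open drops it to 0 and any concurrent open sees -1 and fails with -EBUSY. The release side then has to hand the slot back; a sketch under that assumption (hypothetical name, watchdog cleanup elided):

static int wdt_gpi_release_sketch(struct inode *inode, struct file *file)
{
	/* ... stop or keep the watchdog depending on expect_close ... */
	atomic_inc(&opencnt);	/* 0 -> 1: the device may be opened again */
	return 0;
}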
Example #4
static void __octeon_i2c_irq_disable(atomic_t *cnt, int irq)
{
	int count;

	/*
	 * The interrupt can be disabled in two places, but we only
	 * want to make the disable_irq_nosync() call once, so keep
	 * track with the atomic variable.
	 */
	count = atomic_dec_if_positive(cnt);
	if (count >= 0)
		disable_irq_nosync(irq);
}
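
Because atomic_dec_if_positive() returns >= 0 only when it actually decremented, disable_irq_nosync() runs at most once per prior enable no matter how many paths race into this helper. A plausible mirror on the enable side (a sketch, not the driver's actual code) caps the count at 1 the same way:

static void __octeon_i2c_irq_enable_sketch(atomic_t *cnt, int irq)
{
	/* Move 0 -> 1 at most once; enabling twice in a row is a no-op. */
	if (atomic_add_unless(cnt, 1, 1))
		enable_irq(irq);
}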
Example #5
static void dma_stop_watchdog(struct work_struct *work)
{
	struct jz_pcm_runtime_data *prtd =
		container_of(work, struct jz_pcm_runtime_data, dwork_stop_dma.work);
	struct snd_pcm_substream *substream = prtd->substream;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;

	if (!atomic_dec_if_positive(&prtd->stopped_pending)) {
		DMA_SUBSTREAM_MSG(substream,"stop real\n");
		dmaengine_terminate_all(prtd->dma_chan);
		if (cpu_dai->driver->ops->trigger)
			cpu_dai->driver->ops->trigger(substream, prtd->stopped_cmd, cpu_dai);
	}
}
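
The !atomic_dec_if_positive(...) test is true only when the call returns 0, i.e. when stopped_pending was exactly 1 and this caller consumed it; if the DMA callback (Example #7) got there first, the watchdog sees -1 and does nothing. The arming side is not shown here, but it presumably sets the flag and schedules this work as a timeout fallback; a sketch of that assumed trigger path (the 100 ms delay is an arbitrary placeholder):

static void jz_pcm_arm_stop_sketch(struct jz_pcm_runtime_data *prtd, int cmd)
{
	prtd->stopped_cmd = cmd;
	/* exactly one of the DMA callback and the watchdog will consume this */
	atomic_set(&prtd->stopped_pending, 1);
	schedule_delayed_work(&prtd->dwork_stop_dma, msecs_to_jiffies(100));
}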
Example #6
static struct viocharlpevent *viocons_get_cfu_buffer(void)
{
	int i;

	/*
	 * Grab the first available buffer.  It doesn't matter if we
	 * are interrupted during this array traversal as long as we
	 * get an available space.
	 */
	for (i = 0; i < VIOCHAR_NUM_CFU_BUFFERS; i++)
		if (atomic_dec_if_positive(&viocons_cfu_buffer_available[i])
				== 0)
			return &viocons_cfu_buffer[i];
	hvlog("\n\rviocons: viocons_get_cfu_buffer : no free buffers found");
	return NULL;
}
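
Each slot flag in viocons_cfu_buffer_available presumably starts at 1, so a successful claim is the unique 1 -> 0 transition and a racing taker sees -1 and moves on to the next slot. The matching free (a sketch under that assumption, with a hypothetical name) simply marks the slot available again:

static void viocons_free_cfu_buffer_sketch(struct viocharlpevent *buf)
{
	int i = buf - viocons_cfu_buffer;	/* recover the slot index */

	atomic_set(&viocons_cfu_buffer_available[i], 1);
}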
Example #7
static void jz_asoc_dma_callback(void *data)
{
	struct snd_pcm_substream *substream = data;
	struct jz_pcm_runtime_data *prtd = substream->runtime->private_data;
	void *old_pos_addr = snd_pcm_get_ptr(substream, prtd->pos);

	DMA_SUBSTREAM_MSG(substream,"%s enter stopped_pending == %d\n", __func__,
			atomic_read(&prtd->stopped_pending));
	if (!atomic_dec_if_positive(&prtd->stopped_pending)) {
		struct snd_soc_pcm_runtime *rtd = substream->private_data;
		struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
		DMA_SUBSTREAM_MSG(substream,"stop real\n");
		cancel_delayed_work(&prtd->dwork_stop_dma);
		dmaengine_terminate_all(prtd->dma_chan);
		if (cpu_dai->driver->ops->trigger)
			cpu_dai->driver->ops->trigger(substream, prtd->stopped_cmd, cpu_dai);
		return;
	}

	if (!IS_ERR_OR_NULL(prtd->file) && !work_pending(&prtd->debug_work)) {
		prtd->copy_start = old_pos_addr;
		prtd->copy_length = snd_pcm_lib_period_bytes(substream);
		schedule_work(&prtd->debug_work);
	} else {
#if defined(CONFIG_JZ_ASOC_DMA_AUTO_CLR_DRT_MEM)
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			DMA_DEBUG_MSG("dma start %x pos %p size %d\n",
					substream->runtime->dma_addr,
					old_pos_addr,
					snd_pcm_lib_period_bytes(substream));
			memset(old_pos_addr, 0,
					snd_pcm_lib_period_bytes(substream));
		}
#endif
	}

	prtd->pos += snd_pcm_lib_period_bytes(substream);
	if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream))
		prtd->pos = 0;
	snd_pcm_period_elapsed(substream);
}
Example #8
static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
{
	int state_val;
	int report_val;
	s32 poll_value = 0;

	if (state) {
		if (!atomic_read(&st->user_requested_state))
			return 0;
		if (sensor_hub_device_open(st->hsdev))
			return -EIO;

		atomic_inc(&st->data_ready);

		state_val = hid_sensor_get_usage_index(st->hsdev,
			st->power_state.report_id,
			st->power_state.index,
			HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM);
		report_val = hid_sensor_get_usage_index(st->hsdev,
			st->report_state.report_id,
			st->report_state.index,
			HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);

		poll_value = hid_sensor_read_poll_value(st);
	} else {
		int val;

		val = atomic_dec_if_positive(&st->data_ready);
		if (val < 0)
			return 0;

		sensor_hub_device_close(st->hsdev);
		state_val = hid_sensor_get_usage_index(st->hsdev,
			st->power_state.report_id,
			st->power_state.index,
			HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM);
		report_val = hid_sensor_get_usage_index(st->hsdev,
			st->report_state.report_id,
			st->report_state.index,
			HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM);
	}

	if (state_val >= 0) {
		state_val += st->power_state.logical_minimum;
		sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
				       st->power_state.index, sizeof(state_val),
				       &state_val);
	}

	if (report_val >= 0) {
		report_val += st->report_state.logical_minimum;
		sensor_hub_set_feature(st->hsdev, st->report_state.report_id,
				       st->report_state.index,
				       sizeof(report_val),
				       &report_val);
	}

	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
			       st->power_state.index,
			       sizeof(state_val), &state_val);
	if (state && poll_value)
		msleep_interruptible(poll_value * 2);

	return 0;
}
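
Note the deliberate asymmetry: atomic_inc() on the power-up path is unconditional, while atomic_dec_if_positive() on the power-down path refuses to go below zero, so repeated or spurious power-offs can never make sensor_hub_device_close() outnumber sensor_hub_device_open(). A compressed model of just that guard (hypothetical names, for illustration):

static atomic_t ref = ATOMIC_INIT(0);

static void model_open(void)
{
	atomic_inc(&ref);		/* every open counts */
}

static void model_close(void)
{
	if (atomic_dec_if_positive(&ref) < 0)
		return;			/* already closed: ignore the extra close */
	/* real teardown runs here, exactly once per open */
}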
Example #9
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
	atomic_dec_if_positive(&afu->configured_state);
}
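
A put with no visible get only makes sense against a counterpart that increments configured_state, most plausibly with atomic_inc_unless_negative(), so that a negative value means "deconfigured, refuse new references" while the put above can never push a zero count negative. A sketch of such an assumed counterpart (not taken from the driver):

static bool cxl_afu_configured_get_sketch(struct cxl_afu *afu)
{
	/* succeed only while configured_state has not gone negative */
	return atomic_inc_unless_negative(&afu->configured_state);
}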