static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
{
	int sta_id;

	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++)
		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	return IWL_MVM_STATION_COUNT;
}
Example #2
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
	int msdu_id;

	lockdep_assert_held(&htt->tx_lock);

	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
				      htt->max_num_pending_tx);
	if (msdu_id == htt->max_num_pending_tx)
		return -ENOBUFS;

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
	__set_bit(msdu_id, htt->used_msdu_ids);
	return msdu_id;
}
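A hypothetical caller sketch (not from the ath10k tree), assuming tx_lock is the spinlock the driver normally takes with spin_lock_bh() to satisfy the assertion above:

static int example_alloc_msdu_id(struct ath10k_htt *htt)
{
	int msdu_id;

	spin_lock_bh(&htt->tx_lock);	/* satisfies lockdep_assert_held() */
	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
	spin_unlock_bh(&htt->tx_lock);

	return msdu_id;	/* -ENOBUFS when every msdu_id is in use */
}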
static void __put_rmid(u32 rmid)
{
	struct cqm_rmid_entry *entry;

	lockdep_assert_held(&cache_mutex);

	WARN_ON(!__rmid_valid(rmid));
	entry = __rmid_entry(rmid);

	entry->queue_time = jiffies;
	entry->state = RMID_YOUNG;
	__mbm_reset_stats(rmid);

	list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
}
/*
 * Returns INVALID_RMID on failure.
 *
 * We expect to be called with cache_mutex held.
 */
static u32 __get_rmid(void)
{
	struct cqm_rmid_entry *entry;

	lockdep_assert_held(&cache_mutex);

	if (list_empty(&cqm_rmid_free_lru))
		return INVALID_RMID;

	entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list);
	list_del(&entry->list);
	__mbm_reset_stats(entry->rmid);

	return entry->rmid;
}
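A hypothetical allocation sketch (not from the kernel tree), assuming cache_mutex is the mutex both RMID helpers assert:

static u32 example_get_rmid(void)
{
	u32 rmid;

	mutex_lock(&cache_mutex);	/* satisfies lockdep_assert_held() */
	rmid = __get_rmid();		/* INVALID_RMID if the free list is empty */
	mutex_unlock(&cache_mutex);

	return rmid;
}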
Example #5
int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
{
	struct iwl_mvm_vif *mvmvif;
	struct iwl_power_vifs vifs = {};
	bool ba_enable;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_power_set_pm(mvm, &vifs);

	/* disable PS if CAM */
	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
		mvm->ps_disabled = true;
	} else {
		/* don't update device power state unless we add / remove monitor */
		if (vifs.monitor_vif) {
			if (vifs.monitor_active)
				mvm->ps_disabled = true;
			ret = iwl_mvm_power_update_device(mvm);
			if (ret)
				return ret;
		}
	}

	if (vifs.bss_vif) {
		ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
		if (ret)
			return ret;
	}

	if (vifs.p2p_vif) {
		ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
		if (ret)
			return ret;
	}

	if (!vifs.bf_vif)
		return 0;

	mvmvif = iwl_mvm_vif_from_mac80211(vifs.bf_vif);

	ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
		      !vifs.bf_vif->bss_conf.ps ||
		      iwl_mvm_vif_low_latency(mvmvif));

	return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable);
}
Example #6
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}
void kbase_pm_update_cores_state_nolock(kbase_device *kbdev)
{
	u64 desired_bitmap;
	mali_bool cores_are_available;

	lockdep_assert_held(&kbdev->pm.power_change_lock);

	if (kbdev->pm.pm_current_policy == NULL)
		return;

	desired_bitmap = kbdev->pm.pm_current_policy->get_core_mask(kbdev); 
	desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);

	/* Enable core 0 if tiler required, regardless of core availability */
	if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
		desired_bitmap |= 1;

	if (kbdev->pm.desired_shader_state != desired_bitmap)
		KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u, (u32)desired_bitmap);

	/* Are any cores being powered on? */
	if (~kbdev->pm.desired_shader_state & desired_bitmap ||
	    kbdev->pm.ca_in_transition != MALI_FALSE) {
		kbdev->pm.desired_shader_state = desired_bitmap;

		/* If any cores are being powered on, transition immediately */
		cores_are_available = kbase_pm_check_transitions_nolock(kbdev);

		/* Ensure timer does not power off wanted cores */
		if (kbdev->pm.shader_poweroff_pending != 0) {
			kbdev->pm.shader_poweroff_pending &= ~kbdev->pm.desired_shader_state;
			if (kbdev->pm.shader_poweroff_pending == 0)
				kbdev->pm.shader_poweroff_pending_time = 0;
		}
	} else if (kbdev->pm.desired_shader_state & ~desired_bitmap) {
		/* Start timer to power off cores */
		kbdev->pm.shader_poweroff_pending |= (kbdev->pm.desired_shader_state & ~desired_bitmap);
		kbdev->pm.shader_poweroff_pending_time = kbdev->pm.poweroff_shader_ticks;
	} else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 && kbdev->pm.poweroff_timer_running) {
		/* If power policy is keeping cores on despite there being no active contexts
		 * then disable poweroff timer as it isn't required */
		kbdev->pm.poweroff_timer_running = MALI_FALSE;
		hrtimer_cancel(&kbdev->pm.gpu_poweroff_timer);
	}

	/* Don't need 'cores_are_available', because we don't return anything */
	CSTD_UNUSED(cores_are_available);
}
Example #8
int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
{
	struct intel_render_state *so;
	int ret;

	lockdep_assert_held(&req->i915->drm.struct_mutex);

	so = req->engine->render_state;
	if (!so)
		return 0;

	/* Recreate the page after shrinking */
	if (!so->vma->obj->mm.pages)
		so->batch_offset = -1;

	ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		return ret;

	if (so->vma->node.start != so->batch_offset) {
		ret = render_state_setup(so, req->i915);
		if (ret)
			goto err_unpin;
	}

	ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
	if (ret)
		goto err_unpin;

	ret = req->engine->emit_bb_start(req,
					 so->batch_offset, so->batch_size,
					 I915_DISPATCH_SECURE);
	if (ret)
		goto err_unpin;

	if (so->aux_size > 8) {
		ret = req->engine->emit_bb_start(req,
						 so->aux_offset, so->aux_size,
						 I915_DISPATCH_SECURE);
		if (ret)
			goto err_unpin;
	}

	i915_vma_move_to_active(so->vma, req, 0);
err_unpin:
	i915_vma_unpin(so->vma);
	return ret;
}
Example #9
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		engine->legacy_active_context = NULL;

		if (!engine->last_retired_context)
			continue;

		engine->context_unpin(engine, engine->last_retired_context);
		engine->last_retired_context = NULL;
	}
}
Example #10
void iwl_force_scan_end(struct iwl_priv *priv)
{
	lockdep_assert_held(&priv->mutex);

	if (!test_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
		return;
	}

	IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
	clear_bit(STATUS_SCANNING, &priv->status);
	clear_bit(STATUS_SCAN_HW, &priv->status);
	clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	clear_bit(STATUS_SCAN_COMPLETE, &priv->status);
	iwl_complete_scan(priv, true);
}
u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->pm.power_change_lock);

	/* All cores must be enabled when instrumentation is in use */
	if (kbdev->pm.backend.instr_enabled)
		return kbdev->gpu_props.props.raw_props.shader_present &
		       kbdev->pm.debug_core_mask;

	if (kbdev->pm.backend.ca_current_policy == NULL)
		return kbdev->gpu_props.props.raw_props.shader_present &
		       kbdev->pm.debug_core_mask;

	return kbdev->pm.backend.ca_current_policy->get_core_mask(kbdev) &
	       kbdev->pm.debug_core_mask;
}
Example #12
void kbase_pm_do_poweron(kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off() */
	kbase_pm_clock_on(kbdev);

	/* Update core status as required by the policy */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway */
}
Example #13
void kbase_timeline_job_slot_submit(kbase_device *kbdev, kbase_context *kctx,
                                    kbase_jd_atom *katom, int js)
{
	lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);

	if (kbdev->timeline.slot_atoms_submitted[js] > 0) {
		KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 1);
	} else {
		base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);
		KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 1);
		KBASE_TIMELINE_JOB_START(kctx, js, atom_number);
	}
	++kbdev->timeline.slot_atoms_submitted[js];

	KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
}
Example #14
static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	lockdep_assert_held(&priv->mutex);

	IWL_DEBUG_INFO(priv, "Starting scan...\n");
	set_bit(STATUS_SCANNING, &priv->status);
	priv->is_internal_short_scan = false;
	priv->scan_start = jiffies;

	if (WARN_ON(!priv->cfg->ops->utils->request_scan))
		return -EOPNOTSUPP;

	priv->cfg->ops->utils->request_scan(priv, vif);

	return 0;
}
Example #15
/* see cgroup_stat_flush() */
static void cgroup_stat_flush_locked(struct cgroup *cgrp)
{
	int cpu;

	lockdep_assert_held(&cgroup_stat_mutex);

	for_each_possible_cpu(cpu) {
		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_cpu_stat_lock, cpu);
		struct cgroup *pos = NULL;

		raw_spin_lock_irq(cpu_lock);
		while ((pos = cgroup_cpu_stat_pop_updated(pos, cgrp, cpu)))
			cgroup_cpu_stat_flush_one(pos, cpu);
		raw_spin_unlock_irq(cpu_lock);
	}
}
Example #16
static void __rport_fail_io_fast(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i;

	lockdep_assert_held(&rport->mutex);

	if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
		return;
	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);

	/* Involve the LLD if possible to terminate all I/O on the rport. */
	i = to_srp_internal(shost->transportt);
	if (i->f->terminate_rport_io)
		i->f->terminate_rport_io(rport);
}
Example #17
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta,
				   struct napi_struct *napi,
				   struct iwl_mvm_reorder_buffer *reorder_buf,
				   u16 nssn)
{
	u16 ssn = reorder_buf->head_sn;

	lockdep_assert_held(&reorder_buf->lock);

	/* ignore nssn smaller than head sn - this can happen due to timeout */
	if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
		return;

	while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
		int index = ssn % reorder_buf->buf_size;
		struct sk_buff_head *skb_list = &reorder_buf->entries[index];
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/* holes are valid since nssn indicates frames were received. */
		if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
			continue;
		/* Empty the list. Will have more than one frame for A-MSDU */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;

	if (reorder_buf->num_stored && !reorder_buf->removed) {
		u16 index = reorder_buf->head_sn % reorder_buf->buf_size;

		while (!skb_peek_tail(&reorder_buf->entries[index]))
			index = (index + 1) % reorder_buf->buf_size;
		/* modify timer to match next frame's expiration time */
		mod_timer(&reorder_buf->reorder_timer,
			  reorder_buf->reorder_time[index] + 1 +
			  RX_REORDER_BUF_TIMEOUT_MQ);
	} else {
		del_timer(&reorder_buf->reorder_timer);
	}
}
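A hypothetical wrapper sketch (not from the iwlwifi tree), assuming reorder_buf->lock is the spinlock the helper asserts and is normally taken with spin_lock_bh():

static void example_release_frames(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta,
				   struct napi_struct *napi,
				   struct iwl_mvm_reorder_buffer *buf,
				   u16 nssn)
{
	spin_lock_bh(&buf->lock);	/* satisfies lockdep_assert_held() */
	iwl_mvm_release_frames(mvm, sta, napi, buf, nssn);
	spin_unlock_bh(&buf->lock);
}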
Example #18
static void isert_device_release(struct isert_device *isert_dev)
{
	int err, i;

	TRACE_ENTRY();

	lockdep_assert_held(&dev_list_mutex);

	isert_dev_list_remove(isert_dev); /* remove from global list */

	for (i = 0; i < isert_dev->num_cqs; ++i) {
		struct isert_cq *cq_desc = &isert_dev->cq_desc[i];

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
		/*
		 * cancel_work_sync() was introduced in 2.6.22. We can
		 * only wait until all scheduled work is done.
		 */
		flush_workqueue(cq_desc->cq_workqueue);
#else
		cancel_work_sync(&cq_desc->cq_comp_work);
#endif

		err = ib_destroy_cq(cq_desc->cq);
		if (unlikely(err))
			pr_err("Failed to destroy cq, err:%d\n", err);

		destroy_workqueue(cq_desc->cq_workqueue);
	}

	err = ib_dereg_mr(isert_dev->mr);
	if (unlikely(err))
		pr_err("Failed to destroy mr, err:%d\n", err);
	err = ib_dealloc_pd(isert_dev->pd);
	if (unlikely(err))
		pr_err("Failed to destroy pd, err:%d\n", err);

	vfree(isert_dev->cq_desc);
	isert_dev->cq_desc = NULL;

	kfree(isert_dev->cq_qps);
	isert_dev->cq_qps = NULL;

	kfree(isert_dev);

	TRACE_EXIT();
}
Example #19
int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
		       bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&priv->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
			priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan complete use sleep_power_next, need to be updated */
	memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
		IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
		return 0;
	}

	if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(STATUS_POWER_PMI, &priv->status);

	ret = iwl_set_power(priv, cmd);
	if (!ret) {
		if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(STATUS_POWER_PMI, &priv->status);

		if (priv->cfg->ops->lib->update_chain_flags && update_chains)
			priv->cfg->ops->lib->update_chain_flags(priv);
		else if (priv->cfg->ops->lib->update_chain_flags)
			IWL_DEBUG_POWER(priv,
					"Cannot update the power, chain noise "
					"calibration running: %d\n",
					priv->chain_noise_data.state);

		memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IWL_ERR(priv, "set power fail, ret = %d", ret);

	return ret;
}
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
		freed++;
	}
	return freed;
}
Example #21
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

#ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
	iwl_dnt_start(mvm->trans);
#endif
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
Example #22
/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	lockdep_assert_held(&ch->lock);

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}
Example #23
/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	int n;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct drm_i915_gem_object *obj, *next;

		list_for_each_entry_safe(obj, next,
					 &pool->cache_list[n],
					 batch_pool_link)
			i915_gem_object_put(obj);

		INIT_LIST_HEAD(&pool->cache_list[n]);
	}
}
Example #24
static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
/*
 * Hold the runpool_mutex for this
 */
static inline bool timer_callback_should_run(struct kbase_device *kbdev)
{
	s8 nr_running_ctxs;

	lockdep_assert_held(&kbdev->js_data.runpool_mutex);

	/* nr_contexts_pullable is updated with the runpool_mutex. However, the
	 * locking in the caller gives us a barrier that ensures
	 * nr_contexts_pullable is up-to-date for reading */
	nr_running_ctxs = atomic_read(&kbdev->js_data.nr_contexts_runnable);

#ifdef CONFIG_MALI_DEBUG
	if (kbdev->js_data.softstop_always) {
		/* Debug support for allowing soft-stop on a single context */
		return true;
	}
#endif				/* CONFIG_MALI_DEBUG */

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) {
		/* Timeouts would have to be 4x longer (due to micro-
		 * architectural design) to support OpenCL conformance tests, so
		 * only run the timer when there's:
		 * - 2 or more CL contexts
		 * - 1 or more GLES contexts
		 *
		 * NOTE: A context that has both Compute and Non-Compute jobs
		 * will be treated as an OpenCL context (hence, we don't check
		 * KBASEP_JS_CTX_ATTR_NON_COMPUTE).
		 */
		{
			s8 nr_compute_ctxs =
				kbasep_js_ctx_attr_count_on_runpool(kbdev,
						KBASEP_JS_CTX_ATTR_COMPUTE);
			s8 nr_noncompute_ctxs = nr_running_ctxs -
							nr_compute_ctxs;

			return (bool) (nr_compute_ctxs >= 2 ||
							nr_noncompute_ctxs > 0);
		}
	} else {
		/* Run the timer callback whenever you have at least 1 context
		 */
		return (bool) (nr_running_ctxs > 0);
	}
}
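A hypothetical caller sketch (not from the kbase sources), assuming runpool_mutex is an ordinary mutex taken with mutex_lock():

static void example_evaluate_scheduling_timer(struct kbase_device *kbdev)
{
	bool run_timer;

	mutex_lock(&kbdev->js_data.runpool_mutex);	/* satisfies the assert */
	run_timer = timer_callback_should_run(kbdev);
	mutex_unlock(&kbdev->js_data.runpool_mutex);

	if (run_timer)
		pr_debug("scheduling timer should keep running\n");
}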
Example #26
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}
Example #27
File: rx.c Project: andy-shev/linux
static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rba->lock);

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!rba->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
		rba->pool[i].page = NULL;
	}
}
Example #28
/**
 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
 * @priv: driver private data
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 *
 */
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&priv->mutex);

	IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");

	iwl_do_scan_abort(priv);

	while (time_before_eq(jiffies, timeout)) {
		if (!test_bit(STATUS_SCAN_HW, &priv->status))
			break;
		msleep(20);
	}

	return test_bit(STATUS_SCAN_HW, &priv->status);
}
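A hypothetical caller sketch (not from the iwlwifi tree); the 100 ms timeout is an arbitrary illustration value, and priv->mutex is the mutex the function asserts:

static void example_cancel_scan(struct iwl_priv *priv)
{
	int hw_still_scanning;

	mutex_lock(&priv->mutex);	/* satisfies lockdep_assert_held() */
	hw_still_scanning = iwl_scan_cancel_timeout(priv, 100);
	mutex_unlock(&priv->mutex);

	if (hw_still_scanning)
		IWL_DEBUG_SCAN(priv, "HW scan did not stop within the timeout\n");
}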
void kbase_pm_cancel_deferred_poweroff(kbase_device *kbdev)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	hrtimer_cancel(&kbdev->pm.gpu_poweroff_timer);

	/* If wq is already running but is held off by pm.lock, make sure it has no effect */
	kbdev->pm.gpu_poweroff_pending = 0;

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	kbdev->pm.shader_poweroff_pending = 0;
	kbdev->pm.shader_poweroff_pending_time = 0;

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
Example #30
File: rx.c Project: 383530895/linux
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}