Example #1
/**
 *	tty_flush_to_ldisc
 *	@tty: tty to push
 *
 *	Push the terminal flip buffers to the line discipline.
 *
 *	Must not be called from IRQ context.
 */
void tty_flush_to_ldisc(struct tty_struct *tty)
{
	flush_work(&tty->buf.work);
}
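
The kerneldoc above states the core flush_work() contract: the call sleeps until the queued work item has finished executing, which is why it must not be used from IRQ (or any atomic) context. A minimal, hypothetical module sketch of that contract follows; none of these identifiers come from the examples on this page.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

/* Hypothetical work handler: pretends to push buffered data; may sleep. */
static void demo_fn(struct work_struct *work)
{
	msleep(10);
}

static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	schedule_work(&demo_work);
	/* Process context, so sleeping is fine; returns once demo_fn() ran. */
	flush_work(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* On teardown, cancel_work_sync() also removes a pending instance. */
	cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");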
Example #2
static void __exit boost_exit(void)
{
	/* Stop new input events first, then wait for a queued boost to
	 * finish, and only then remove the PM QoS request the work updates;
	 * flushing before unregistering would let an event requeue the work.
	 */
	input_unregister_handler(&touchboost_handler);
	flush_work(&touchboost_wk);
	pm_qos_remove_request(&touchboost_cpu_qos_min);
}
Example #3
int smc_close_active(struct smc_sock *smc)
{
	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int old_state;
	long timeout;
	int rc = 0;

	timeout = current->flags & PF_EXITING ?
		  0 : sock_flag(sk, SOCK_LINGER) ?
		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

	old_state = sk->sk_state;
again:
	switch (sk->sk_state) {
	case SMC_INIT:
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_LISTEN:
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk); /* wake up accept */
		if (smc->clcsock && smc->clcsock->sk) {
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
			/* wake up kernel_accept of smc_tcp_listen_worker */
			smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
		}
		smc_close_cleanup_listen(sk);
		release_sock(sk);
		flush_work(&smc->tcp_listen_work);
		lock_sock(sk);
		break;
	case SMC_ACTIVE:
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state == SMC_ACTIVE) {
			/* send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
			sk->sk_state = SMC_PEERCLOSEWAIT1;
		} else {
			/* peer event has changed the state */
			goto again;
		}
		break;
	case SMC_APPFINCLOSEWAIT:
		/* socket already shutdown wr or both (active close) */
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
		}
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		if (!smc_cdc_rxed_any_close(conn))
			smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state != SMC_APPCLOSEWAIT1 &&
		    sk->sk_state != SMC_APPCLOSEWAIT2)
			goto again;
		/* confirm close from peer */
		rc = smc_close_final(conn);
		if (rc)
			break;
		if (smc_cdc_rxed_any_close(conn)) {
			/* peer has closed the socket already */
			sk->sk_state = SMC_CLOSED;
			sock_put(sk); /* postponed passive closing */
		} else {
			/* peer has just issued a shutdown write */
			sk->sk_state = SMC_PEERFINCLOSEWAIT;
		}
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
		}
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PEERFINCLOSEWAIT:
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PROCESSABORT:
		smc_close_abort(conn);
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_PEERABORTWAIT:
	case SMC_CLOSED:
		/* nothing to do, add tracing in future patch */
		break;
	}

	if (old_state != sk->sk_state)
		sk->sk_state_change(sk);
	return rc;
}
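
Note how smc_close_active() drops the socket lock (release_sock()) around flush_work() and cancel_delayed_work_sync() and re-takes it afterwards: tx_work and tcp_listen_work themselves take the socket lock, so waiting for them with the lock held would deadlock, and sk_state must be re-checked because it may have changed while unlocked. A stripped-down, hypothetical sketch of the same rule:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct conn {
	struct mutex lock;
	struct delayed_work tx_work;	/* its handler takes conn->lock */
};

static void conn_close(struct conn *c)
{
	mutex_lock(&c->lock);
	/* ... state transitions under the lock ... */

	/*
	 * Wrong here: cancel_delayed_work_sync(&c->tx_work) would wait
	 * for a handler that may be blocked on mutex_lock(&c->lock).
	 */
	mutex_unlock(&c->lock);
	cancel_delayed_work_sync(&c->tx_work);	/* safe: lock dropped */
	mutex_lock(&c->lock);

	/* ... re-check state: it may have changed while unlocked ... */
	mutex_unlock(&c->lock);
}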
Example #4
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);

	/*
	 * Disable CRTCs directly since we want to preserve sw state
	 * for _thaw. Also, power gate the CRTC power wells.
	 */
	drm_modeset_lock_all(dev);
	intel_display_suspend(dev);
	drm_modeset_unlock_all(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	if (HAS_CSR(dev_priv))
		flush_work(&dev_priv->csr.work);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}
Example #5
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct armada_private *priv;
	struct resource *mem = NULL;
	int ret, n;

	for (n = 0; ; n++) {
		struct resource *r = platform_get_resource(dev->platformdev,
							   IORESOURCE_MEM, n);
		if (!r)
			break;

		/* Resources above 64K are graphics memory */
		if (resource_size(r) > SZ_64K)
			mem = r;
		else
			return -EINVAL;
	}

	if (!mem)
		return -ENXIO;

	if (!devm_request_mem_region(dev->dev, mem->start,
			resource_size(mem), "armada-drm"))
		return -EBUSY;

	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate private\n");
		return -ENOMEM;
	}

	platform_set_drvdata(dev->platformdev, dev);
	dev->dev_private = priv;

	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
	INIT_KFIFO(priv->fb_unref);

	/* Mode setting support */
	drm_mode_config_init(dev);
	dev->mode_config.min_width = 320;
	dev->mode_config.min_height = 200;

	/*
	 * With vscale enabled, the maximum width is 1920 due to the
	 * 1920 by 3 lines RAM
	 */
	dev->mode_config.max_width = 1920;
	dev->mode_config.max_height = 2048;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
	drm_mm_init(&priv->linear, mem->start, resource_size(mem));
	mutex_init(&priv->linear_lock);

	ret = component_bind_all(dev->dev, dev);
	if (ret)
		goto err_kms;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_comp;

	dev->irq_enabled = true;

	ret = armada_fbdev_init(dev);
	if (ret)
		goto err_comp;

	drm_kms_helper_poll_init(dev);

	return 0;

 err_comp:
	component_unbind_all(dev->dev, dev);
 err_kms:
	drm_mode_config_cleanup(dev);
	drm_mm_takedown(&priv->linear);
	flush_work(&priv->fb_unref_work);

	return ret;
}
Example #6
int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
{
#ifdef CONFIG_LGE_DM_APP
	int i;
	uint8_t found = 0;
	unsigned long flags;
	struct diag_usb_info *usb_info = NULL;
#endif

	if (proc < 0 || proc >= NUM_MUX_PROC)
		return -EINVAL;

#ifdef CONFIG_LGE_DM_APP
	if (driver->logging_mode == DM_APP_MODE) {
		/* only diag cmd #250 for supporting testmode tool */
		if ((GET_BUF_PERIPHERAL(ctx) == APPS_DATA) && (*buf == 0xFA)) {
			usb_info = &diag_usb[lge_dm_tty->id];
			lge_dm_tty->dm_usb_req =
				diagmem_alloc(driver,
					      sizeof(struct diag_request),
					      usb_info->mempool);
			if (lge_dm_tty->dm_usb_req) {
				lge_dm_tty->dm_usb_req->buf = buf;
				lge_dm_tty->dm_usb_req->length = len;
				lge_dm_tty->dm_usb_req->context =
					(void *)(uintptr_t)ctx;

				queue_work(lge_dm_tty->dm_wq,
					   &lge_dm_tty->dm_usb_work);
				flush_work(&lge_dm_tty->dm_usb_work);
			}

			return 0;
		}

		for (i = 0; i < lge_dm_tty->num_tbl_entries && !found; i++) {
			spin_lock_irqsave(&lge_dm_tty->tbl[i].lock, flags);
			if (lge_dm_tty->tbl[i].len == 0) {
				lge_dm_tty->tbl[i].buf = buf;
				lge_dm_tty->tbl[i].len = len;
				lge_dm_tty->tbl[i].ctx = ctx;
				found = 1;
				diag_ws_on_read(DIAG_WS_MD, len);
			}
			spin_unlock_irqrestore(&lge_dm_tty->tbl[i].lock, flags);
		}

		lge_dm_tty->set_logging = 1;
		wake_up_interruptible(&lge_dm_tty->waitq);

		return 0;
	}
#endif

#ifdef CONFIG_LGE_DIAG_BYPASS
	if (diag_bypass_response(buf, len, proc, ctx, logger) > 0)
		return 0;
#endif

	if (logger && logger->log_ops && logger->log_ops->write)
		return logger->log_ops->write(proc, buf, len, ctx);
	return 0;
}
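
In the DM_APP branch above, queue_work() immediately followed by flush_work() makes the USB request handling effectively synchronous: the caller queues the item on the dedicated workqueue and then sleeps until that submission has run. The idiom in isolation (hypothetical helper, not from the driver):

#include <linux/workqueue.h>

/* Queue @work on @wq and wait for this submission to finish executing. */
static void run_work_sync(struct workqueue_struct *wq,
			  struct work_struct *work)
{
	queue_work(wq, work);
	flush_work(work);
}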
Example #7
static void flush_request_submodules(struct saa7134_dev *dev)
{
	flush_work(&dev->request_module_wk);
}
Example #8
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
	const struct platform_device_id *id;
	const struct armada_variant *variant;
	struct armada_private *priv;
	struct resource *res[ARRAY_SIZE(priv->dcrtc)];
	struct resource *mem = NULL;
	int ret, n, i;

	memset(res, 0, sizeof(res));

	for (n = i = 0; ; n++) {
		struct resource *r = platform_get_resource(dev->platformdev,
							   IORESOURCE_MEM, n);
		if (!r)
			break;

		/* Resources above 64K are graphics memory */
		if (resource_size(r) > SZ_64K)
			mem = r;
		else if (i < ARRAY_SIZE(priv->dcrtc))
			res[i++] = r;
		else
			return -EINVAL;
	}

	if (!mem)
		return -ENXIO;

	if (!devm_request_mem_region(dev->dev, mem->start,
			resource_size(mem), "armada-drm"))
		return -EBUSY;

	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate private\n");
		return -ENOMEM;
	}

	platform_set_drvdata(dev->platformdev, dev);
	dev->dev_private = priv;

	/* Get the implementation specific driver data. */
	id = platform_get_device_id(dev->platformdev);
	if (!id)
		return -ENXIO;

	variant = (const struct armada_variant *)id->driver_data;

	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
	INIT_KFIFO(priv->fb_unref);

	/* Mode setting support */
	drm_mode_config_init(dev);
	dev->mode_config.min_width = 320;
	dev->mode_config.min_height = 200;

	/*
	 * With vscale enabled, the maximum width is 1920 due to the
	 * 1920 by 3 lines RAM
	 */
	dev->mode_config.max_width = 1920;
	dev->mode_config.max_height = 2048;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
	drm_mm_init(&priv->linear, mem->start, resource_size(mem));

	/* Create all LCD controllers */
	for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
		int irq;

		if (!res[n])
			break;

		irq = platform_get_irq(dev->platformdev, n);
		if (irq < 0)
			goto err_kms;

		ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
					     variant, NULL);
		if (ret)
			goto err_kms;
	}

	if (is_componentized(dev->dev)) {
		ret = component_bind_all(dev->dev, dev);
		if (ret)
			goto err_kms;
	} else {
#ifdef CONFIG_DRM_ARMADA_TDA1998X
		ret = armada_drm_connector_slave_create(dev, &tda19988_config);
		if (ret)
			goto err_kms;
#endif
	}

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_comp;

	dev->irq_enabled = true;
	dev->vblank_disable_allowed = 1;

	ret = armada_fbdev_init(dev);
	if (ret)
		goto err_comp;

	drm_kms_helper_poll_init(dev);

	return 0;

 err_comp:
	if (is_componentized(dev->dev))
		component_unbind_all(dev->dev, dev);
 err_kms:
	drm_mode_config_cleanup(dev);
	drm_mm_takedown(&priv->linear);
	flush_work(&priv->fb_unref_work);

	return ret;
}
Example #9
static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mcast_filter_cmd mcast_filter_cmd = {
		.pass_all = 1,
	};

	memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN);

	return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC,
				    sizeof(mcast_filter_cmd),
				    &mcast_filter_cmd);
}

static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
					     struct ieee80211_vif *vif,
					     struct ieee80211_bss_conf *bss_conf,
					     u32 changes)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
	if (ret)
		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);

	if (changes & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			/* add quota for this interface */
			ret = iwl_mvm_update_quotas(mvm, vif);
			if (ret) {
				IWL_ERR(mvm, "failed to update quotas\n");
				return;
			}
			iwl_mvm_bt_coex_vif_assoc(mvm, vif);
			iwl_mvm_configure_mcast_filter(mvm, vif);
		} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
			/* remove AP station now that the MAC is unassoc */
			ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
			if (ret)
				IWL_ERR(mvm, "failed to remove AP station\n");
			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
			/* remove quota for this interface */
			ret = iwl_mvm_update_quotas(mvm, NULL);
			if (ret)
				IWL_ERR(mvm, "failed to update quotas\n");
		}
	} else if (changes & BSS_CHANGED_DTIM_PERIOD) {
		/*
		 * We received a beacon _after_ association so
		 * remove the session protection.
		 */
		iwl_mvm_remove_time_event(mvm, mvmvif,
					  &mvmvif->time_event_data);
	} else if (changes & BSS_CHANGED_PS) {
		/*
		 * TODO: remove this temporary code.
		 * Currently MVM FW supports power management only on single
		 * MAC. Avoid power mode update if more than one interface
		 * is active.
		 */
		IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
				   mvm->vif_count);
		if (mvm->vif_count == 1) {
			ret = iwl_mvm_power_update_mode(mvm, vif);
			if (ret)
				IWL_ERR(mvm, "failed to update power mode\n");
		}
	}
}

static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	mutex_lock(&mvm->mutex);

	/* Send the beacon template */
	ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
	if (ret)
		goto out_unlock;

	/* Add the mac context */
	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		goto out_unlock;

	/* Perform the binding */
	ret = iwl_mvm_binding_add_vif(mvm, vif);
	if (ret)
		goto out_remove;

	mvmvif->ap_active = true;

	/* Send the bcast station. At this stage the TBTT and DTIM time events
	 * are added and applied to the scheduler */
	ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
	if (ret)
		goto out_unbind;

	ret = iwl_mvm_update_quotas(mvm, vif);
	if (ret)
		goto out_rm_bcast;

	/* Need to update the P2P Device MAC */
	if (vif->p2p && mvm->p2p_device_vif)
		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);

	mutex_unlock(&mvm->mutex);
	return 0;

out_rm_bcast:
	iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
out_unbind:
	iwl_mvm_binding_remove_vif(mvm, vif);
out_remove:
	iwl_mvm_mac_ctxt_remove(mvm, vif);
out_unlock:
	mutex_unlock(&mvm->mutex);
	return ret;
}

static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_prepare_mac_removal(mvm, vif);

	mutex_lock(&mvm->mutex);

	mvmvif->ap_active = false;

	/* Need to update the P2P Device MAC */
	if (vif->p2p && mvm->p2p_device_vif)
		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);

	iwl_mvm_update_quotas(mvm, NULL);
	iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
	iwl_mvm_binding_remove_vif(mvm, vif);
	iwl_mvm_mac_ctxt_remove(mvm, vif);

	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changes)
{
	/* Need to send a new beacon template to the FW */
	if (changes & BSS_CHANGED_BEACON) {
		if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
			IWL_WARN(mvm, "Failed updating beacon data\n");
	}
}

static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
		break;
	case NL80211_IFTYPE_AP:
		iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes);
		break;
	default:
		/* shouldn't happen */
		WARN_ON_ONCE(1);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct cfg80211_scan_request *req)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS)
		return -EINVAL;

	mutex_lock(&mvm->mutex);

	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
		ret = iwl_mvm_scan_request(mvm, vif, req);
	else
		ret = -EBUSY;

	mutex_unlock(&mvm->mutex);

	return ret;
}

static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);

	iwl_mvm_cancel_scan(mvm);

	mutex_unlock(&mvm->mutex);
}

static void
iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta, u16 tid,
				  int num_frames,
				  enum ieee80211_frame_release_type reason,
				  bool more_data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* TODO: how do we tell the fw to send frames for a specific TID */

	/*
	 * The fw will send EOSP notification when the last frame will be
	 * transmitted.
	 */
	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames);
}

static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum sta_notify_cmd cmd,
				   struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
			ieee80211_sta_block_awake(hw, sta, true);
		/*
		 * The fw updates the STA to be asleep. Tx packets on the Tx
		 * queues to this station will not be transmitted. The fw will
		 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
		 */
		break;
	case STA_NOTIFY_AWAKE:
		if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
			break;
		iwl_mvm_sta_modify_ps_wake(mvm, sta);
		break;
	default:
		break;
	}
}

static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 enum ieee80211_sta_state old_state,
				 enum ieee80211_sta_state new_state)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
			   sta->addr, old_state, new_state);

	/* this would be a mac80211 bug ... but don't crash */
	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
		return -EINVAL;

	/* if a STA is being removed, reuse its ID */
	flush_work(&mvm->sta_drained_wk);

	mutex_lock(&mvm->mutex);
	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		ret = iwl_mvm_add_sta(mvm, vif, sta);
	} else if (old_state == IEEE80211_STA_NONE &&
		   new_state == IEEE80211_STA_AUTH) {
		ret = 0;
	} else if (old_state == IEEE80211_STA_AUTH &&
		   new_state == IEEE80211_STA_ASSOC) {
		ret = iwl_mvm_update_sta(mvm, vif, sta);
		if (ret == 0)
			iwl_mvm_rs_rate_init(mvm, sta,
					     mvmvif->phy_ctxt->channel->band);
	} else if (old_state == IEEE80211_STA_ASSOC &&
		   new_state == IEEE80211_STA_AUTHORIZED) {
		ret = 0;
	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
		   new_state == IEEE80211_STA_ASSOC) {
		ret = 0;
	} else if (old_state == IEEE80211_STA_ASSOC &&
		   new_state == IEEE80211_STA_AUTH) {
		ret = 0;
	} else if (old_state == IEEE80211_STA_AUTH &&
		   new_state == IEEE80211_STA_NONE) {
		ret = 0;
	} else if (old_state == IEEE80211_STA_NONE &&
		   new_state == IEEE80211_STA_NOTEXIST) {
		ret = iwl_mvm_rm_sta(mvm, vif, sta);
	} else {
		ret = -EIO;
	}
	mutex_unlock(&mvm->mutex);

	return ret;
}

static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mvm->rts_threshold = value;

	return 0;
}

static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif, u16 ac,
			       const struct ieee80211_tx_queue_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->queue_params[ac] = *params;

	/*
	 * No need to update right away, we'll get BSS_CHANGED_QOS
	 * The exception is P2P_DEVICE interface which needs immediate update.
	 */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		int ret;

		mutex_lock(&mvm->mutex);
		ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
		mutex_unlock(&mvm->mutex);
		return ret;
	}
	return 0;
}

static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
			   200 + vif->bss_conf.beacon_int);
	u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
			       100 + vif->bss_conf.beacon_int);

	if (WARN_ON_ONCE(vif->bss_conf.assoc))
		return;

	mutex_lock(&mvm->mutex);
	/* Try really hard to protect the session and hear a beacon */
	iwl_mvm_protect_session(mvm, vif, duration, min_duration);
	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
			       enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	if (iwlwifi_mod_params.sw_crypto) {
		IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		/* fall-through */
	case WLAN_CIPHER_SUITE_CCMP:
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		WARN_ON_ONCE(!(hw->flags & IEEE80211_HW_MFP_CAPABLE));
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/*
		 * Support for TX only, at least for now, so accept
		 * the key and do nothing else. Then mac80211 will
		 * pass it for TX but we don't have to use it for RX.
		 */
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&mvm->mutex);

	switch (cmd) {
	case SET_KEY:
		if (vif->type == NL80211_IFTYPE_AP && !sta) {
			/* GTK on AP interface is a TX-only key, return 0 */
			ret = 0;
			key->hw_key_idx = STA_KEY_IDX_INVALID;
			break;
		}

		IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
		if (ret) {
			IWL_WARN(mvm, "set key failed\n");
			/*
			 * can't add key for RX, but we don't need it
			 * in the device for TX so still return 0
			 */
			key->hw_key_idx = STA_KEY_IDX_INVALID;
			ret = 0;
		}

		break;
	case DISABLE_KEY:
		if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
			ret = 0;
			break;
		}

		IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
		ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&mvm->mutex);
	return ret;
}

static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_key_conf *keyconf,
					struct ieee80211_sta *sta,
					u32 iv32, u16 *phase1key)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
}


static int iwl_mvm_roc(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif,
		       struct ieee80211_channel *channel,
		       int duration,
		       enum ieee80211_roc_type type)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct cfg80211_chan_def chandef;
	int ret;

	if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
		IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type);
		return -EINVAL;
	}

	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
			   duration, type);

	mutex_lock(&mvm->mutex);

	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
	ret = iwl_mvm_phy_ctxt_changed(mvm, &mvm->phy_ctxt_roc,
				       &chandef, 1, 1);
	if (ret)
		goto out_unlock;

	/* Schedule the time events */
	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);

out_unlock:
	mutex_unlock(&mvm->mutex);
	IWL_DEBUG_MAC80211(mvm, "leave\n");

	return ret;
}

static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	IWL_DEBUG_MAC80211(mvm, "enter\n");

	mutex_lock(&mvm->mutex);
	iwl_mvm_stop_p2p_roc(mvm);
	mutex_unlock(&mvm->mutex);

	IWL_DEBUG_MAC80211(mvm, "leave\n");
	return 0;
}

static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
			       struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_MAC80211(mvm, "Add PHY context\n");
	ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, &ctx->def,
				   ctx->rx_chains_static,
				   ctx->rx_chains_dynamic);
	mutex_unlock(&mvm->mutex);
	return ret;
}

static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
				   struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;

	mutex_lock(&mvm->mutex);
	iwl_mvm_phy_ctxt_remove(mvm, phy_ctxt);
	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
				   struct ieee80211_chanctx_conf *ctx,
				   u32 changed)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;

	mutex_lock(&mvm->mutex);
	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
				 ctx->rx_chains_static,
				 ctx->rx_chains_dynamic);
	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_phy_ctxt *phyctx = (void *)ctx->drv_priv;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	mutex_lock(&mvm->mutex);

	mvmvif->phy_ctxt = phyctx;

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		/*
		 * The AP binding flow is handled as part of the start_ap flow
		 * (in bss_info_changed).
		 */
		ret = 0;
		goto out_unlock;
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MONITOR:
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = iwl_mvm_binding_add_vif(mvm, vif);
	if (ret)
		goto out_unlock;

	/*
	 * Setting the quota at this stage is only required for monitor
	 * interfaces. For the other types, the bss_info changed flow
	 * will handle quota settings.
	 */
	if (vif->type == NL80211_IFTYPE_MONITOR) {
		mvmvif->monitor_active = true;
		ret = iwl_mvm_update_quotas(mvm, vif);
		if (ret)
			goto out_remove_binding;
	}

	goto out_unlock;

 out_remove_binding:
	iwl_mvm_binding_remove_vif(mvm, vif);
 out_unlock:
	mutex_unlock(&mvm->mutex);
	if (ret)
		mvmvif->phy_ctxt = NULL;
	return ret;
}

static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mutex_lock(&mvm->mutex);

	iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);

	if (vif->type == NL80211_IFTYPE_AP)
		goto out_unlock;

	switch (vif->type) {
	case NL80211_IFTYPE_MONITOR:
		mvmvif->monitor_active = false;
		iwl_mvm_update_quotas(mvm, NULL);
		break;
	default:
		break;
	}

	iwl_mvm_binding_remove_vif(mvm, vif);
out_unlock:
	mvmvif->phy_ctxt = NULL;
	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
			   struct ieee80211_sta *sta,
			   bool set)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;

	if (!mvm_sta || !mvm_sta->vif) {
		IWL_ERR(mvm, "Station is not associated to a vif\n");
		return -EINVAL;
	}

	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
}

static void iwl_mvm_mac_rssi_callback(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum ieee80211_rssi_event rssi_event)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	iwl_mvm_bt_rssi_event(mvm, vif, rssi_event);
}

struct ieee80211_ops iwl_mvm_hw_ops = {
	.tx = iwl_mvm_mac_tx,
	.ampdu_action = iwl_mvm_mac_ampdu_action,
	.start = iwl_mvm_mac_start,
	.restart_complete = iwl_mvm_mac_restart_complete,
	.stop = iwl_mvm_mac_stop,
	.add_interface = iwl_mvm_mac_add_interface,
	.remove_interface = iwl_mvm_mac_remove_interface,
	.config = iwl_mvm_mac_config,
	.configure_filter = iwl_mvm_configure_filter,
	.bss_info_changed = iwl_mvm_bss_info_changed,
	.hw_scan = iwl_mvm_mac_hw_scan,
	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
	.sta_state = iwl_mvm_mac_sta_state,
	.sta_notify = iwl_mvm_mac_sta_notify,
	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
	.conf_tx = iwl_mvm_mac_conf_tx,
	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
	.set_key = iwl_mvm_mac_set_key,
	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
	.remain_on_channel = iwl_mvm_roc,
	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
	.rssi_callback = iwl_mvm_mac_rssi_callback,

	.add_chanctx = iwl_mvm_add_chanctx,
	.remove_chanctx = iwl_mvm_remove_chanctx,
	.change_chanctx = iwl_mvm_change_chanctx,
	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,

	.start_ap = iwl_mvm_start_ap,
	.stop_ap = iwl_mvm_stop_ap,

	.set_tim = iwl_mvm_set_tim,

#ifdef CONFIG_PM_SLEEP
	/* look at d3.c */
	.suspend = iwl_mvm_suspend,
	.resume = iwl_mvm_resume,
	.set_wakeup = iwl_mvm_set_wakeup,
	.set_rekey_data = iwl_mvm_set_rekey_data,
#if IS_ENABLED(CONFIG_IPV6)
	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
#endif
	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
#endif
};
Example #10
/**
 * @brief Disable HW counters collection
 *
 * Note: might sleep, waiting for an ongoing dump to complete
 */
mali_error kbase_instr_hwcnt_disable_sec(struct kbase_context *kctx)
{
	unsigned long flags, pm_flags;
	mali_error err = MALI_ERROR_FUNCTION_FAILED;
	u32 irq_mask;
	struct kbase_device *kbdev;

	KBASE_DEBUG_ASSERT(NULL != kctx);
	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	/* MALI_SEC 140925 */
	flush_work(&kbdev->hwcnt.cache_clean_work);

	while (1) {
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

		if (kbdev->hwcnt.state == KBASE_INSTR_STATE_DISABLED) {
			/* Instrumentation is not enabled */
			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
			goto out;
		}

		if (kbdev->hwcnt.kctx != kctx) {
			/* Instrumentation has been setup for another context */
			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
			goto out;
		}

		if (kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE)
			break;

		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

		/* Ongoing dump/setup - wait for its completion */
		if (wait_event_timeout(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0, kbdev->hwcnt.timeout) == 0)
			kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
	}

	kbdev->hwcnt.state = KBASE_INSTR_STATE_DISABLED;
	kbdev->hwcnt.triggered = 0;

	/* Disable interrupt */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask & ~PRFCNT_SAMPLE_COMPLETED, NULL);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);

	/* Disable the counters */
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0, kctx);

	kbdev->hwcnt.kctx = NULL;
	kbdev->hwcnt.addr = 0ULL;

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", kctx);

	err = MALI_ERROR_NONE;

 out:

	kbdev->hwcnt.trig_exception = 0;

	return err;
}
Example #11
static void rpmsg_echo_test_kern_app_remove(struct rpmsg_channel *rpdev)
{
	struct _rpmsg_dev_params *local = dev_get_drvdata(&rpdev->dev);

	flush_work(&local->rpmsg_work);
}
Example #12
/**
 *	tty_flush_to_ldisc
 *	@tty: tty to push
 *
 *	Push the terminal flip buffers to the line discipline.
 *
 *	Must not be called from IRQ context.
 */
void tty_flush_to_ldisc(struct tty_struct *tty)
{
	if (!tty->low_latency)
		flush_work(&tty->port->buf.work);
}
Example #13
static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
{
	/* flush all pending status updates */
	flush_work(&bat_work);
	return 0;
}
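
tosa_bat_suspend() above, like wm97xx_bat_suspend() below, flushes the shared status work from the suspend callback so a pending update completes before the hardware powers down. A hypothetical suspend hook of the same shape; note that flush_work() does not prevent future queueing, so the event source must be quiesced separately (or use cancel_work_sync()).

#include <linux/device.h>
#include <linux/workqueue.h>

static void status_fn(struct work_struct *work)
{
	/* Hypothetical: poll the battery and report status; may sleep. */
}

static DECLARE_WORK(status_work, status_fn);

static int mydev_suspend(struct device *dev)
{
	flush_work(&status_work);	/* let a queued update finish */
	return 0;
}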
Example #14
void o2quo_exit(void)
{
	struct o2quo_state *qs = &o2quo_state;

	flush_work(&qs->qs_work);
}
Example #15
File: rtasd.c  Project: 1800alex/linux
void prrn_schedule_update(u32 scope)
{
	flush_work(&prrn_work);
	prrn_update_scope = scope;
	schedule_work(&prrn_work);
}
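
prrn_schedule_update() flushes the work item before writing prrn_update_scope so a currently running instance cannot observe a half-updated parameter, then requeues the work to process the new scope. The same flush-update-requeue idiom in hypothetical form:

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/workqueue.h>

static u32 param;			/* consumed by param_fn() */

static void param_fn(struct work_struct *work)
{
	u32 scope = READ_ONCE(param);
	/* ... act on scope ... */
	(void)scope;
}

static DECLARE_WORK(param_work, param_fn);

static void update_param(u32 new_value)
{
	flush_work(&param_work);	/* running instance uses old value */
	WRITE_ONCE(param, new_value);
	schedule_work(&param_work);	/* next run sees the new value */
}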
Example #16
static int wm97xx_bat_suspend(struct device *dev)
{
	flush_work(&bat_work);
	return 0;
}
Example #17
static void flush_request_modules(struct cx18 *dev)
{
	flush_work(&dev->request_module_wk);
}
Example #18
static inline int
nvif_notify_put_(struct nvif_notify *notify)
{
	struct nvif_object *object = notify->object;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_put_v0 ntfy;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_NTFY_PUT,
		.ntfy.index = notify->index,
	};

	if (atomic_inc_return(&notify->putcnt) != 1)
		return 0;

	return nvif_object_ioctl(object, &args, sizeof(args), NULL);
}

int
nvif_notify_put(struct nvif_notify *notify)
{
	if (likely(notify->object) &&
	    test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
		int ret = nvif_notify_put_(notify);
		if (test_bit(NVIF_NOTIFY_WORK, &notify->flags))
			flush_work(&notify->work);
		return ret;
	}
	return 0;
}

static inline int
nvif_notify_get_(struct nvif_notify *notify)
{
	struct nvif_object *object = notify->object;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_get_v0 ntfy;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_NTFY_GET,
		.ntfy.index = notify->index,
	};

	if (atomic_dec_return(&notify->putcnt) != 0)
		return 0;

	return nvif_object_ioctl(object, &args, sizeof(args), NULL);
}

int
nvif_notify_get(struct nvif_notify *notify)
{
	if (likely(notify->object) &&
	    !test_and_set_bit(NVIF_NOTIFY_USER, &notify->flags))
		return nvif_notify_get_(notify);
	return 0;
}

static inline int
nvif_notify_func(struct nvif_notify *notify, bool keep)
{
	int ret = notify->func(notify);
	if (ret == NVIF_NOTIFY_KEEP ||
	    !test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
		if (!keep)
			atomic_dec(&notify->putcnt);
		else
			nvif_notify_get_(notify);
	}
	return ret;
}

static void
nvif_notify_work(struct work_struct *work)
{
	struct nvif_notify *notify = container_of(work, typeof(*notify), work);
	nvif_notify_func(notify, true);
}

int
nvif_notify(const void *header, u32 length, const void *data, u32 size)
{
	struct nvif_notify *notify = NULL;
	const union {
		struct nvif_notify_rep_v0 v0;
	} *args = header;
	int ret = NVIF_NOTIFY_DROP;

	if (length == sizeof(args->v0) && args->v0.version == 0) {
		if (WARN_ON(args->v0.route))
			return NVIF_NOTIFY_DROP;
		notify = (void *)(unsigned long)args->v0.token;
	}

	if (!WARN_ON(notify == NULL)) {
		struct nvif_client *client = nvif_client(notify->object);
		if (!WARN_ON(notify->size != size)) {
			atomic_inc(&notify->putcnt);
			if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
				memcpy((void *)notify->data, data, size);
				schedule_work(&notify->work);
				return NVIF_NOTIFY_DROP;
			}
			notify->data = data;
			ret = nvif_notify_func(notify, client->driver->keep);
			notify->data = NULL;
		}
	}

	return ret;
}

int
nvif_notify_fini(struct nvif_notify *notify)
{
	struct nvif_object *object = notify->object;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_del_v0 ntfy;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_NTFY_DEL,
		.ntfy.index = notify->index,
	};
	int ret = nvif_notify_put(notify);
	if (ret >= 0 && object) {
		ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
		if (ret == 0) {
			nvif_object_ref(NULL, &notify->object);
			kfree((void *)notify->data);
		}
	}
	return ret;
}

int
nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
		 int (*func)(struct nvif_notify *), bool work, u8 event,
		 void *data, u32 size, u32 reply, struct nvif_notify *notify)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_new_v0 ntfy;
		struct nvif_notify_req_v0 req;
	} *args;
	int ret = -ENOMEM;

	notify->object = NULL;
	nvif_object_ref(object, &notify->object);
	notify->flags = 0;
	atomic_set(&notify->putcnt, 1);
	notify->dtor = dtor;
	notify->func = func;
	notify->data = NULL;
	notify->size = reply;
	if (work) {
		INIT_WORK(&notify->work, nvif_notify_work);
		set_bit(NVIF_NOTIFY_WORK, &notify->flags);
		notify->data = kmalloc(notify->size, GFP_KERNEL);
		if (!notify->data)
			goto done;
	}

	if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
		goto done;
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_NTFY_NEW;
	args->ntfy.version = 0;
	args->ntfy.event = event;
	args->req.version = 0;
	args->req.reply = notify->size;
	args->req.route = 0;
	args->req.token = (unsigned long)(void *)notify;

	memcpy(args->req.data, data, size);
	ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
	notify->index = args->ntfy.index;
	kfree(args);
done:
	if (ret)
		nvif_notify_fini(notify);
	return ret;
}

static void
nvif_notify_del(struct nvif_notify *notify)
{
	nvif_notify_fini(notify);
	kfree(notify);
}

void
nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
{
	BUG_ON(notify != NULL);
	if (*pnotify)
		(*pnotify)->dtor(*pnotify);
	*pnotify = notify;
}
Example #19
/**
 *	tty_ldisc_flush_works	-	flush all works of a tty
 *	@tty: tty device to flush works for
 *
 *	Sync flush all works belonging to @tty.
 */
static void tty_ldisc_flush_works(struct tty_struct *tty)
{
	flush_work(&tty->hangup_work);
	flush_work(&tty->SAK_work);
	flush_work(&tty->port->buf.work);
}
Example #20
/*
 * vtpm_proxy_work_stop: make sure the work has finished
 *
 * This function is useful when user space closed the fd
 * while the driver still determines timeouts.
 */
static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev)
{
	vtpm_proxy_fops_undo_open(proxy_dev);
	flush_work(&proxy_dev->work);
}
Example #21
static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 tfd_msk = 0, ac;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
			tfd_msk |= BIT(vif->hw_queue[ac]);

	if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
		tfd_msk |= BIT(vif->cab_queue);

	if (tfd_msk) {
		mutex_lock(&mvm->mutex);
		iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
		mutex_unlock(&mvm->mutex);
	}

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		/*
		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
		 * We assume here that all the packets sent to the OFFCHANNEL
		 * queue are sent in ROC session.
		 */
		flush_work(&mvm->roc_done_wk);
	} else {
		/*
		 * By now, all the AC queues are empty. The AGG queues are
		 * empty too. We already got all the Tx responses for all the
		 * packets in the queues. The drain work can have been
		 * triggered. Flush it. This work item takes the mutex, so kill
		 * it before we take it.
		 */
		flush_work(&mvm->sta_drained_wk);
	}

	mutex_lock(&mvm->mutex);

	/*
	 * For AP/GO interface, the tear down of the resources allocated to the
	 * interface should be handled as part of the bss_info_changed flow.
	 */
	if (vif->type == NL80211_IFTYPE_AP) {
		iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
		goto out_release;
	}

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		mvm->p2p_device_vif = NULL;
		iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
		iwl_mvm_binding_remove_vif(mvm, vif);
		iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
		mvmvif->phy_ctxt = NULL;
	}

	/*
	 * TODO: remove this temporary code.
	 * Currently MVM FW supports power management only on single MAC.
	 * Check if only one additional interface remains after removing
	 * current one. Update power mode on the remaining interface.
	 */
	if (mvm->vif_count)
		mvm->vif_count--;
	IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
			   mvm->vif_count);
	if (mvm->vif_count == 1) {
		ieee80211_iterate_active_interfaces(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_power_update_iterator, mvm);
	}

	iwl_mvm_mac_ctxt_remove(mvm, vif);

out_release:
	iwl_mvm_mac_ctxt_release(mvm, vif);
	mutex_unlock(&mvm->mutex);
}
Example #22
void pid_wait_quiescent(void)
{
	flush_work(&put_pid_work);
}