Example #1
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}
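All of these examples revolve around WARN_ONCE(). As a rough mental model, here is a minimal sketch of its semantics (the real kernel macro lives in include/asm-generic/bug.h and also dumps a stack trace): it evaluates the condition, prints the formatted warning only the first time the condition is true, and evaluates to the condition's value so it can gate recovery code inside an if, as Examples 12 and 13 below do.

#define MY_WARN_ONCE(condition, format...) ({		\
	static bool __warned;				\
	int __ret_warn_once = !!(condition);		\
							\
	if (unlikely(__ret_warn_once && !__warned)) {	\
		__warned = true;			\
		printk(KERN_WARNING format);		\
	}						\
	unlikely(__ret_warn_once);			\
})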
Example #2
static void brw_emit_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   GLuint i, nr_elements;

   brw_prepare_vertices(brw);

   brw_emit_query_begin(brw);

   nr_elements = brw->vb.nr_enabled + brw->vs.prog_data->uses_vertexid;

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but it doesn't do anything unless
    * a VE loads from it.
    */
   if (nr_elements == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
      if (brw->gen >= 6) {
	 OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
		   GEN6_VE0_VALID |
		   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
		   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
	 OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
		   BRW_VE0_VALID |
		   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
		   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
		(BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
		(BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
		(BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      ADVANCE_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets.
    */

   if (brw->vb.nr_buffers) {
      if (brw->gen >= 6) {
	 assert(brw->vb.nr_buffers <= 33);
      } else {
	 assert(brw->vb.nr_buffers <= 17);
      }

      BEGIN_BATCH(1 + 4*brw->vb.nr_buffers);
      OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4*brw->vb.nr_buffers - 1));
      for (i = 0; i < brw->vb.nr_buffers; i++) {
	 struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
	 uint32_t dw0;

	 if (brw->gen >= 6) {
	    dw0 = buffer->step_rate
	             ? GEN6_VB0_ACCESS_INSTANCEDATA
	             : GEN6_VB0_ACCESS_VERTEXDATA;
	    dw0 |= i << GEN6_VB0_INDEX_SHIFT;
	 } else {
	    dw0 = buffer->step_rate
	             ? BRW_VB0_ACCESS_INSTANCEDATA
	             : BRW_VB0_ACCESS_VERTEXDATA;
	    dw0 |= i << BRW_VB0_INDEX_SHIFT;
	 }

	 if (brw->gen >= 7)
	    dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;

         if (brw->gen == 7)
	    dw0 |= GEN7_MOCS_L3 << 16;

         WARN_ONCE(buffer->stride >= (brw->gen >= 5 ? 2048 : 2047),
                   "VBO stride %d too large, bad rendering may occur\n",
                   buffer->stride);
	 OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT));
	 OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
	 if (brw->gen >= 5) {
	    OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1);
	 } else
	    OUT_BATCH(0);
	 OUT_BATCH(buffer->step_rate);
      }
      ADVANCE_BATCH();
   }

   /* The hardware allows one more VERTEX_ELEMENT than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
   if (brw->gen >= 6) {
      assert(nr_elements <= 34);
   } else {
      assert(nr_elements <= 18);
   }

   struct brw_vertex_element *gen6_edgeflag_input = NULL;

   BEGIN_BATCH(1 + nr_elements * 2);
   OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = brw_get_vertex_surface_type(brw, input->glarray);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      /* The gen4 driver expects edgeflag to come in as a float, and passes
       * that float on to the tests in the clipper.  Mesa's current vertex
       * attribute value for EdgeFlag is stored as a float, which works out.
       * glEdgeFlagPointer, on the other hand, gives us an unnormalized
       * integer ubyte.  Just rewrite that to convert to a float.
       */
      if (input->attrib == VERT_ATTRIB_EDGEFLAG) {
         /* Gen6+ passes edgeflag as sideband along with the vertex, instead
          * of in the VUE.  We have to upload it sideband as the last vertex
          * element according to the B-Spec.
          */
         if (brw->gen >= 6) {
            gen6_edgeflag_input = input;
            continue;
         }

         if (format == BRW_SURFACEFORMAT_R8_UINT)
            format = BRW_SURFACEFORMAT_R8_SSCALED;
      }

      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 3: comp3 = input->glarray->Integer ? BRW_VE1_COMPONENT_STORE_1_INT
                                              : BRW_VE1_COMPONENT_STORE_1_FLT;
	 break;
      }

      if (brw->gen >= 6) {
	 OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
		   GEN6_VE0_VALID |
		   (format << BRW_VE0_FORMAT_SHIFT) |
		   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
	 OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
		   BRW_VE0_VALID |
		   (format << BRW_VE0_FORMAT_SHIFT) |
		   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      }

      if (brw->gen >= 5)
          OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                    (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                    (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                    (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
          OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                    (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                    (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                    (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                    ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }

   if (brw->gen >= 6 && gen6_edgeflag_input) {
      uint32_t format =
         brw_get_vertex_surface_type(brw, gen6_edgeflag_input->glarray);

      OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
                GEN6_VE0_VALID |
                GEN6_VE0_EDGE_FLAG_ENABLE |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (gen6_edgeflag_input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
   }

   if (brw->vs.prog_data->uses_vertexid) {
      uint32_t dw0 = 0, dw1 = 0;

      dw1 = ((BRW_VE1_COMPONENT_STORE_VID << BRW_VE1_COMPONENT_0_SHIFT) |
	     (BRW_VE1_COMPONENT_STORE_IID << BRW_VE1_COMPONENT_1_SHIFT) |
	     (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
	     (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));

      if (brw->gen >= 6) {
	 dw0 |= GEN6_VE0_VALID;
      } else {
	 dw0 |= BRW_VE0_VALID;
	 dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
      }

      /* Note that for gl_VertexID, gl_InstanceID, and gl_PrimitiveID values,
       * the format is ignored and the value is always int.
       */

      OUT_BATCH(dw0);
      OUT_BATCH(dw1);
   }

   ADVANCE_BATCH();
}
Example #3
static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}
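For context, a hedged sketch of how this handler gets used: kernel/irq/chip.c installs a catch-all action on chained interrupts so that any code path that mistakenly treats a chained irq like a normal one trips the warning.

/* Sketch: chained interrupts carry a dummy action whose only job is to
 * warn; the real handling happens in the chained flow handler. */
static struct irqaction chained_action = {
	.handler = bad_chained_irq,
};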
Example #4
void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	WARN_ONCE(1, "not implemented");
}
Example #5
int qxl_gem_prime_mmap(struct drm_gem_object *obj,
		       struct vm_area_struct *area)
{
	WARN_ONCE(1, "not implemented");
	return -ENOSYS;
}
Example #6
/**
 * omap_abb_set_opp() - program ABB ldo based on new voltage
 * @voltdm:	voltage domain that just finished scaling voltage
 * @opp_sel:	target ABB ldo operating mode
 *
 * Program the ABB ldo to the new state (if necessary), clearing the
 * PRM_IRQSTATUS bit before and after the transition.  Returns 0 on
 * success, -ETIMEDOUT otherwise.
 */
static int omap_abb_set_opp(struct voltagedomain *voltdm, u8 opp_sel)
{
	struct omap_abb_instance *abb = voltdm->abb;
	int ret = 0;

	/* bail early if no transition is necessary */
	if (opp_sel == abb->_opp_sel)
		return ret;

	/* clear interrupt status */
	ret = omap_abb_clear_tranx(voltdm, abb);
	if (ret)
		goto out;

	/* program the setup register */
	switch (opp_sel) {
	case OMAP_ABB_NOMINAL_OPP:
		voltdm->rmw(abb->common->active_fbb_sel_mask |
				abb->common->active_rbb_sel_mask,
				0x0,
				abb->setup_offs);
		break;
	case OMAP_ABB_SLOW_OPP:
		voltdm->rmw(abb->common->active_fbb_sel_mask |
				abb->common->active_rbb_sel_mask,
				abb->common->active_rbb_sel_mask,
				abb->setup_offs);
		break;
	case OMAP_ABB_FAST_OPP:
		voltdm->rmw(abb->common->active_fbb_sel_mask |
				abb->common->active_rbb_sel_mask,
				abb->common->active_fbb_sel_mask,
				abb->setup_offs);
		break;
	default:
		/* Should never get here! */
		WARN_ONCE(1, "%s: voltage domain %s: opp_sel %d!!!\n",
			  __func__, voltdm->name, opp_sel);
		return -EINVAL;
	}

	/* program next state of ABB ldo */
	voltdm->rmw(abb->common->opp_sel_mask,
			opp_sel << __ffs(abb->common->opp_sel_mask),
			abb->ctrl_offs);

	/* initiate ABB ldo change */
	voltdm->rmw(abb->common->opp_change_mask,
			abb->common->opp_change_mask,
			abb->ctrl_offs);

	/* Wait for conversion completion */
	ret = omap_abb_wait_tranx(voltdm, abb);
	WARN_ONCE(ret, "%s: voltdm %s ABB TRANXDONE was not set on time:%d\n",
		  __func__, voltdm->name, ret);
	/* clear interrupt status */
	ret |= omap_abb_clear_tranx(voltdm, abb);

out:
	if (ret) {
		pr_warning("%s: %s: failed scale: opp_sel=%d (%d)\n",
			   __func__, voltdm->name, opp_sel, ret);
	} else {
		/* track internal state */
		abb->_opp_sel = opp_sel;
		pr_debug("%s: %s: scaled - opp_sel=%d\n",
			 __func__, voltdm->name, opp_sel);
	}
	return ret;
}
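The voltdm->rmw() callback used throughout omap_abb_set_opp() is a read-modify-write register helper. A hypothetical sketch of the pattern (the real OMAP implementation goes through the PRM accessor layer; the names here are illustrative only):

/* Hypothetical rmw(): clear the masked field, then set the requested
 * bits within it, at byte offset 'offs' from the domain's register base. */
static u32 example_rmw(void __iomem *base, u32 mask, u32 bits, u8 offs)
{
	u32 val = readl(base + offs);

	val &= ~mask;
	val |= bits & mask;
	writel(val, base + offs);
	return val;
}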
Example #7
File: core.c Project: 020gzh/linux
/**
 * dwc3_core_init - Low-level initialization of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_core_init(struct dwc3 *dwc)
{
	u32			hwparams4 = dwc->hwparams.hwparams4;
	u32			reg;
	int			ret;

	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
	/* This should read as U3 followed by revision number */
	if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) {
		/* Detected DWC_usb3 IP */
		dwc->revision = reg;
	} else if ((reg & DWC3_GSNPSID_MASK) == 0x33310000) {
		/* Detected DWC_usb31 IP */
		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
		dwc->revision |= DWC3_REVISION_IS_DWC31;
	} else {
		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
		ret = -ENODEV;
		goto err0;
	}

	/*
	 * Write Linux Version Code to our GUID register so it's easy to
	 * figure out in which kernel version a bug was found.
	 */
	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);

	/* Handle USB2.0-only core configuration */
	if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
			DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
		if (dwc->maximum_speed == USB_SPEED_SUPER)
			dwc->maximum_speed = USB_SPEED_HIGH;
	}

	/* issue device SoftReset too */
	ret = dwc3_soft_reset(dwc);
	if (ret)
		goto err0;

	ret = dwc3_core_soft_reset(dwc);
	if (ret)
		goto err0;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;

	switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
		/**
		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
		 * issue which would cause xHCI compliance tests to fail.
		 *
		 * Because of that we cannot enable clock gating on such
		 * configurations.
		 *
		 * Refers to:
		 *
		 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
		 * SOF/ITP Mode Used
		 */
		if ((dwc->dr_mode == USB_DR_MODE_HOST ||
				dwc->dr_mode == USB_DR_MODE_OTG) &&
				(dwc->revision >= DWC3_REVISION_210A &&
				dwc->revision <= DWC3_REVISION_250A))
			reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
		else
			reg &= ~DWC3_GCTL_DSBLCLKGTNG;
		break;
	case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
		/* enable hibernation here */
		dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);

		/*
		 * REVISIT Enabling this bit so that host-mode hibernation
		 * will work. Device-mode hibernation is not yet implemented.
		 */
		reg |= DWC3_GCTL_GBLHIBERNATIONEN;
		break;
	default:
		dwc3_trace(trace_dwc3_core, "No power optimization available\n");
	}

	/* check if current dwc3 is on simulation board */
	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
		dwc3_trace(trace_dwc3_core,
				"running on FPGA platform\n");
		dwc->is_fpga = true;
	}

	WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
			"disable_scramble cannot be used on non-FPGA builds\n");

	if (dwc->disable_scramble_quirk && dwc->is_fpga)
		reg |= DWC3_GCTL_DISSCRAMBLE;
	else
		reg &= ~DWC3_GCTL_DISSCRAMBLE;

	if (dwc->u2exit_lfps_quirk)
		reg |= DWC3_GCTL_U2EXIT_LFPS;

	/*
	 * WORKAROUND: DWC3 revisions <1.90a have a bug
	 * where the device can fail to connect at SuperSpeed
	 * and falls back to high-speed mode which causes
	 * the device to enter a Connect/Disconnect loop
	 */
	if (dwc->revision < DWC3_REVISION_190A)
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_core_num_eps(dwc);

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);

	ret = dwc3_alloc_scratch_buffers(dwc);
	if (ret)
		goto err1;

	ret = dwc3_setup_scratch_buffers(dwc);
	if (ret)
		goto err2;

	return 0;

err2:
	dwc3_free_scratch_buffers(dwc);

err1:
	usb_phy_shutdown(dwc->usb2_phy);
	usb_phy_shutdown(dwc->usb3_phy);
	phy_exit(dwc->usb2_generic_phy);
	phy_exit(dwc->usb3_generic_phy);

err0:
	return ret;
}
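The revision range checks above work on raw register values: dwc->revision keeps the GSNPSID contents, whose upper half is the 0x5533 ('U3') magic and whose lower half encodes the release, so plain integer comparison orders releases correctly. For illustration, the constants compared against are defined along these lines (per drivers/usb/dwc3/core.h):

#define DWC3_REVISION_190A	0x5533190a
#define DWC3_REVISION_210A	0x5533210a
#define DWC3_REVISION_250A	0x5533250a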
Example #8
int qxl_gem_prime_pin(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return -ENOSYS;
}
Example #9
void qxl_gem_prime_unpin(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
}
Example #10
/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_cmd *dev_cmd = NULL;
	struct iwl_tx_cmd *tx_cmd;

	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	unsigned long flags;
	bool is_agg = false;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->shrd->lock, flags);
	if (iwl_is_rfkill(priv->shrd)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			memcpy(skb_put(skb, noa_data->length),
			       noa_data->data, noa_data->length);
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use the broadcast id so as not to break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (info->control.sta)
		sta_priv = (void *)info->control.sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		is_agg = true;

	/* irqs already disabled/saved above when locking priv->shrd->lock */
	spin_lock(&priv->shrd->sta_lock);

	dev_cmd = kmem_cache_alloc(priv->tx_cmd_pool, GFP_ATOMIC);

	if (unlikely(!dev_cmd))
		goto drop_unlock_sta;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);

	memset(&info->status, 0, sizeof(info->status));

	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
				" Tx flags = 0x%08x, agg.state = %d",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
				sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
		    tid_data->agg.state != IWL_AGG_OFF,
		    "Tx while agg.state = %d", tid_data->agg.state))
			goto drop_unlock_sta;

		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid))
		goto drop_unlock_sta;

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
	    !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->shrd->sta_lock);
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		kmem_cache_free(priv->tx_cmd_pool, dev_cmd);
	spin_unlock(&priv->shrd->sta_lock);
drop_unlock_priv:
	spin_unlock_irqrestore(&priv->shrd->lock, flags);
	return -1;
}
Example #11
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status = P9_LOCK_ERROR;
	int res = 0;
	unsigned char fl_type;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
		BUG();

	res = locks_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
	flock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	/*
	 * if it's a blocking request and we get P9_LOCK_BLOCKED as the
	 * status for the lock request, keep on trying
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			goto out_unlock;

		if (status != P9_LOCK_BLOCKED)
			break;
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
		if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
			break;
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	default:
		WARN_ONCE(1, "unknown lock status code: %d\n", status);
		/* fallthrough */
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	}

out_unlock:
	/*
	 * in case the server returned an error for the lock request,
	 * revert it locally
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		/* Even if this fails we want to return the remote error */
		locks_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
out:
	return res;
}
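From userspace, the path into v9fs_file_do_lock() is an ordinary POSIX fcntl() lock taken on a 9p mount. A hedged sketch of a blocking whole-file write lock; note that l_len == 0 (lock to EOF) becomes fl_end == OFFSET_MAX in the VFS, which the code above maps to flock.length == 0:

#include <fcntl.h>

/* Sketch: blocking whole-file write lock on a file from a 9p mount. */
struct flock fl = {
	.l_type   = F_WRLCK,
	.l_whence = SEEK_SET,
	.l_start  = 0,
	.l_len    = 0,		/* 0 == lock through end of file */
};
fcntl(fd, F_SETLKW, &fl);	/* SETLKW maps to P9_LOCK_FLAGS_BLOCK above */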
Example #12
static int perf_flag_probe(void)
{
	/* use 'safest' configuration as used in perf_evsel__fallback() */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.exclude_kernel = 1,
	};
	int fd;
	int err;
	int cpu;
	pid_t pid = -1;
	char sbuf[STRERR_BUFSIZE];

	cpu = sched_getcpu();
	if (cpu < 0)
		cpu = 0;

	/*
	 * Using -1 for the pid is a workaround to avoid gratuitous jump label
	 * changes.
	 */
	while (1) {
		/* check cloexec flag */
		fd = sys_perf_event_open(&attr, pid, cpu, -1,
					 PERF_FLAG_FD_CLOEXEC);
		if (fd < 0 && pid == -1 && errno == EACCES) {
			pid = 0;
			continue;
		}
		break;
	}
	err = errno;

	if (fd >= 0) {
		close(fd);
		return 1;
	}

	WARN_ONCE(err != EINVAL && err != EBUSY,
		  "perf_event_open(..., PERF_FLAG_FD_CLOEXEC) failed with unexpected error %d (%s)\n",
		  err, strerror_r(err, sbuf, sizeof(sbuf)));

	/* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
	while (1) {
		fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
		if (fd < 0 && pid == -1 && errno == EACCES) {
			pid = 0;
			continue;
		}
		break;
	}
	err = errno;

	if (fd >= 0)
		close(fd);

	if (WARN_ONCE(fd < 0 && err != EBUSY,
		      "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
		      err, strerror_r(err, sbuf, sizeof(sbuf))))
		return -1;

	return 0;
}

static unsigned long flag = PERF_FLAG_FD_CLOEXEC;

unsigned long perf_event_open_cloexec_flag(void)
{
	static bool probed;

	if (!probed) {
		if (perf_flag_probe() <= 0)
			flag = 0;
		probed = true;
	}

	return flag;
}
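A hedged usage sketch: callers never invoke perf_flag_probe() directly, they just pass the cached flag to sys_perf_event_open() and so fall back transparently to no-CLOEXEC on kernels that predate PERF_FLAG_FD_CLOEXEC (group_fd here is illustrative):

/* Sketch: request close-on-exec when the running kernel supports it. */
fd = sys_perf_event_open(&attr, pid, cpu, group_fd,
			 perf_event_open_cloexec_flag());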
Example #13
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned int random = 0, irq = desc->irq_data.irq;
#ifdef CONFIG_MSM_SM_EVENT
	sm_msm_irq_data_t sm_irq;
#endif

	do {
		irqreturn_t res;

		trace_irq_handler_entry(irq, action);
#ifdef CONFIG_MSM_SM_EVENT
		sm_irq.func_addr = (unsigned int)action->handler;
		sm_irq.irq_num = irq;
		sm_add_event(SM_IRQ_EVENT | IRQ_EVENT_ENTER,
			SM_EVENT_START, 0, &sm_irq, sizeof(sm_msm_irq_data_t));
#endif
		res = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, res);

/*
#ifdef CONFIG_MSM_SM_EVENT
		sm_add_event(SM_IRQ_EVENT | IRQ_EVENT_LEAVE,
			SM_EVENT_END, 0, &sm_irq, sizeof(sm_msm_irq_data_t));
#endif
*/
		if (WARN_ONCE(!irqs_disabled(), "irq %u handler %pF enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			irq_wake_thread(desc, action);

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			random |= action->flags;
			break;

		default:
			break;
		}

		retval |= res;
		action = action->next;
	} while (action);

	if (random & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	if (!noirqdebug)
		note_interrupt(irq, desc, retval);
	return retval;
}
Example #14
void reiserfs_lock_check_recursive(struct super_block *sb)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);

	WARN_ONCE((sb_i->lock_depth > 0), "Unwanted recursive reiserfs lock!\n");
}
Example #15
File: motif.c Project: E-LLP/QuIP
void make_pullright(QSP_ARG_DECL  Screen_Obj *mip, Screen_Obj *pr,
	Screen_Obj *parent)
{
	WARN_ONCE("menus not implemented in this version");
}
Example #16
struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
Example #17
File: tx.c Project: 7799/linux
/*
 * Sets the fields in the Tx cmd that are rate related
 */
static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
				    struct iwl_tx_cmd *tx_cmd,
				    struct ieee80211_tx_info *info,
				    struct ieee80211_sta *sta,
				    __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rate_plcp;

	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc)) {
		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
		tx_cmd->rts_retry_limit =
			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
	} else {
		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
	}

	/*
	 * for data packets, rate info comes from the table inside the fw. This
	 * table is controlled by LINK_QUALITY commands
	 */

	if (ieee80211_is_data(fc) && sta) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
		return;
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
	}

	/* HT rate doesn't make sense for a non data frame */
	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
		  "Got an HT rate for a non data frame 0x%x\n",
		  info->control.rates[0].flags);

	rate_idx = info->control.rates[0].idx;
	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/* For the 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;

	/* For the 2.4 GHz band, check that there is no need to remap */
	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

	mvm->mgmt_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
				     mvm->mgmt_last_antenna_idx);
	rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
}
Example #18
void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
Example #19
/**
 * async_xor_val - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @src_list: array of source pages
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK
 *
 * src_list note: if the dest is also a source it must be at index zero.
 * The contents of this array will be overwritten if a scribble region
 * is not specified.
 */
struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
	      int src_cnt, size_t len, enum sum_check_flags *result,
	      struct async_submit_ctl *submit)
{
	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;
	dma_addr_t *dma_src = NULL;

	BUG_ON(src_cnt <= 1);

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) src_list;

	if (dma_src && device && src_cnt <= device->max_xor &&
	    is_dma_xor_aligned(device, offset, 0, len)) {
		unsigned long dma_prep_flags = 0;
		int i;

		pr_debug("%s: (async) len: %zu\n", __func__, len);

		if (submit->cb_fn)
			dma_prep_flags |= DMA_PREP_INTERRUPT;
		if (submit->flags & ASYNC_TX_FENCE)
			dma_prep_flags |= DMA_PREP_FENCE;
		for (i = 0; i < src_cnt; i++)
			dma_src[i] = dma_map_page(device->dev, src_list[i],
						  offset, len, DMA_TO_DEVICE);

		tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
						     len, result,
						     dma_prep_flags);
		if (unlikely(!tx)) {
			async_tx_quiesce(&submit->depend_tx);

			while (!tx) {
				dma_async_issue_pending(chan);
				tx = device->device_prep_dma_xor_val(chan,
					dma_src, src_cnt, len, result,
					dma_prep_flags);
			}
		}

		async_tx_submit(chan, tx, submit);
	} else {
		enum async_tx_flags flags_orig = submit->flags;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);
		WARN_ONCE(device && src_cnt <= device->max_xor,
			  "%s: no space for dma address conversion\n",
			  __func__);

		submit->flags |= ASYNC_TX_XOR_DROP_DST;
		submit->flags &= ~ASYNC_TX_ACK;

		tx = async_xor(dest, src_list, offset, src_cnt, len, submit);

		async_tx_quiesce(&tx);

		*result = !page_is_zero(dest, offset, len) << SUM_CHECK_P;

		async_tx_sync_epilog(submit);
		submit->flags = flags_orig;
	}

	return tx;
}
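The WARN_ONCE in the synchronous fallback fires when a capable DMA device exists but there was nowhere to build the dma_addr_t list. A hedged sketch of how a caller avoids that by handing a scribble region to init_async_submit(), per the async_tx API; the allocation details are illustrative:

/* Sketch: provide scratch space for address conversion so the DMA path
 * of async_xor_val() always has room for its dma_addr_t array. */
struct async_submit_ctl submit;
enum sum_check_flags result;
addr_conv_t *scribble = kmalloc_array(src_cnt, sizeof(*scribble),
				      GFP_KERNEL);

init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
tx = async_xor_val(dest, src_list, 0, src_cnt, len, &result, &submit);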
Example #20
static int sprd_pm_deepsleep(suspend_state_t state)
{
    int ret_val = 0;
    unsigned long flags;
    u32 battery_time, cur_time;
    unsigned int cpu = smp_processor_id();

    battery_time = cur_time = get_sys_cnt();

    /* added for debug & statistics */
    clr_sleep_mode();
    time_statisic_begin();

#if defined(CONFIG_NKERNEL) && !defined(CONFIG_NKERNEL_PM_MASTER)
    hw_local_irq_disable();
    os_ctx->suspend_to_memory(os_ctx);
    hw_local_irq_enable();
#else
    /*
     * when we get here, only the boot cpu is still alive
     */
    if (smp_processor_id()) {
        __WARN();
        goto enter_exit;
    }

    while(1) {
        hw_local_irq_disable();
        local_fiq_disable();
        local_irq_save(flags);

        if (arch_local_irq_pending()) {
            /* added for debug & statistics */
            irq_wakeup_set();

            local_irq_restore(flags);
            hw_local_irq_enable();
            local_fiq_enable();
            break;
        } else {
            local_irq_restore(flags);
            WARN_ONCE(!irqs_disabled(), "#####: Interrupts enabled in pm_enter()!\n");
#if defined(CONFIG_NKERNEL)
            /*
             * a return value of 0 means the other guest OSes are all idle
             */
            ret_val = os_ctx->idle(os_ctx);
            if (0 == ret_val) {
#ifdef CONFIG_NKERNEL_PM_MASTER
                os_ctx->smp_cpu_stop(0);
#endif
                sprd_cpu_deep_sleep(cpu);
#ifdef CONFIG_NKERNEL_PM_MASTER
                os_ctx->smp_cpu_start(0, 0); /* the 2nd parameter is meaningless */
#endif
            } else {
                printk("******** os_ctx->idle return %d ********\n", ret_val);
            }
#else
            sprd_cpu_deep_sleep(cpu);
#endif
            hw_local_irq_enable();
            local_fiq_enable();
        }

        battery_sleep();
        cur_time = get_sys_cnt();
        if ((cur_time - battery_time) > BATTERY_CHECK_INTERVAL) {
            battery_time = cur_time;
            if (sprd_check_battery()) {
                printk("###: battery low!\n");
                break;
            }
        }
    } /* end while */

    time_statisic_end();
#endif

enter_exit:
    return ret_val;
}