static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
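All of these helpers rely on the i915 convention of "masked" GT registers: the upper 16 bits of the written value select which bits are affected, and the lower 16 bits carry the new values, so a single write can set or clear individual bits without a read-modify-write. Below is a simplified sketch of the helper macros, modelled on their i915_reg.h definitions (the real ones add compile-time sanity checks):

/* Simplified sketch of the i915 masked-write helpers (see i915_reg.h).
 * Upper halfword = write-enable mask, lower halfword = new bit values. */
#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define _MASKED_BIT_ENABLE(a)		({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)

/*
 * _MASKED_BIT_DISABLE(0xffff)           -> 0xffff0000: clear all 16 low bits
 * _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL) -> clear only the kernel wake bit
 * _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)  -> set it, leaving other bits untouched
 */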
Example #3
static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
			_MASKED_BIT_DISABLE(0xffff));

	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
			_MASKED_BIT_DISABLE(0xffff));

	__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
			_MASKED_BIT_DISABLE(0xffff));
}
Example #4
void
__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}
Example #6
static void
__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
}
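Since fw_engine is a bitmask of forcewake domains, a caller can release several domains with one call. A hypothetical usage, using the domain flags the helpers above test for:

/* Hypothetical caller: drop render and media forcewake in one call. */
__gen9_force_wake_put(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA);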
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{

	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
					_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));


	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);

}
Example #8
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{

	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
					_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));


	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, irqs;

	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MODE_GEN7(ring), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);
}
Example #10
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/* tell all command streamers NOT to forward interrupts or vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);
}
Example #11
static void bdw_forcewake_get(void __iomem *mmio)
{
	WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);

	RD(ECOBUS);

	if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
		gvt_err("fail to wait forcewake idle\n");

	WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);

	if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
		gvt_err("fail to wait forcewake ack\n");

	if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
		      GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
		gvt_err("fail to wait c0 wake up\n");
}
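The handshake above leans on the driver's wait_for() polling macro. A behavioural sketch, not the kernel definition (the real macro also sleeps between polls and re-checks the condition once after the deadline):

/* Behavioural sketch of wait_for(COND, MS): poll until COND is true or
 * MS milliseconds elapse; returns 0 on success, -ETIMEDOUT on timeout. */
#define wait_for(COND, MS) ({						\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		cpu_relax();						\
	}								\
	ret__;								\
})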
Example #12
/**
 * huc_fw_xfer() - DMA's the firmware
 * @huc_fw: the firmware descriptor
 * @vma: the firmware image (bound into the GGTT)
 *
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Return: 0 on success, non-zero on failure
 */
static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{
	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
	struct drm_i915_private *dev_priv = huc_to_i915(huc);
	unsigned long offset = 0;
	u32 size;
	int ret;

	GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
		 huc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/* Hardware doesn't look at destination address for HuC. Set it to 0,
	 * but still program the correct address space.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	size = huc_fw->header_size + huc_fw->ucode_size;
	I915_WRITE(DMA_COPY_SIZE, size);

	/* Start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);

	DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);

	/* Disable the bits once DMA is over */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
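In the i915 of this era the transfer routine is not called directly; it is handed as a callback to the generic uC firmware loader, which pins the image into the GGTT and then invokes it. Roughly as below (treat the exact wrapper names and signatures as an assumption):

/* Hedged sketch of the usual wiring: the generic loader supplies the vma. */
int intel_huc_fw_upload(struct intel_huc *huc)
{
	return intel_uc_fw_upload(&huc->fw, huc_fw_xfer);
}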
Example #13
int nugpgpu_ringbuffer_render_init(struct nugpgpu_private *gpu_priv)
{
  int ret;
  u32 head;

  printk(LOG_INFO "nugpgpu_ringbuffer_render_init\n" LOG_END);
  TRACE_IN

  RING->mmio_base = RENDER_RING_BASE;
  RING->size = PAGE_SIZE * RING_PAGES;

  /* Allocate the status page. */
  ret = allocate_object(gpu_priv, &RING->status_obj, 1);
  if (ret) {
    printk(LOG_ERR "Failed to allocate the status page\n" LOG_END);
    return 1;
  }

  RING->gva_status = nugpgpu_gtt_insert(gpu_priv, RING->status_obj.pg_list, 
                                        NUGPGPU_CACHE_LLC);
  if (RING->gva_status == (unsigned int)-1) {
    printk(LOG_ERR "Failed to insert the status page in gtt\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "RING->gva_status : 0x%x\n" LOG_END, (unsigned int) RING->gva_status);

  RING->page_status = kmap(sg_page(RING->status_obj.pg_list->sgl));
  if (RING->page_status == NULL) {
    printk(LOG_ERR "Failed to map page_status\n" LOG_END);
    return 1;
  }
  memset(RING->page_status, 0, PAGE_SIZE);
  printk(LOG_INFO "RING->page_status : 0x%lx\n" LOG_END, (unsigned long) RING->page_status);

  /* Allocate the ringbuffer object */
  ret = allocate_object(gpu_priv, &RING->ringbuf_obj, RING_PAGES);
  if (ret) {
    printk(LOG_ERR "Failed to allocate the ringbuffer object\n" LOG_END);
    return 1;
  }

  RING->gva_ringbuffer = nugpgpu_gtt_insert(gpu_priv, RING->ringbuf_obj.pg_list, 
                                            NUGPGPU_CACHE_LLC);
  if (RING->gva_ringbuffer == (unsigned int)-1) {
    printk(LOG_ERR "Failed to insert the ringbuffer in gtt\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "RING->gva_ringbuffer : 0x%x\n" LOG_END, (unsigned int) RING->gva_ringbuffer);

  RING->page_ringbuffer = kmap(sg_page(RING->ringbuf_obj.pg_list->sgl));
  if (RING->page_ringbuffer == NULL) {
    printk(LOG_ERR "Failed to map page_ringbuffer\n" LOG_END);
    return 1;
  }

  RING->virtual_start = ioremap_wc(gpu_priv->gtt.mappable_base + PAGE_SIZE, RING->size);
  if (RING->virtual_start == NULL) {
    printk(LOG_ERR "Problem while mapping virtual start ioremap_wc\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "Allocated the ringbuffer\n" LOG_END);

  /* Initialize the ring now.*/

  gpu_forcewake_get(gpu_priv);

  /* Write status page register */
  printk(LOG_INFO "writing status page register\n" LOG_END);

  NUGPGPU_WRITE(RENDER_HWS_PGA_GEN7, RING->gva_status);
  NUGPGPU_READ(RENDER_HWS_PGA_GEN7);

  flushtlb(gpu_priv);

  // Stop ring
  printk(LOG_INFO "stopping ring\n" LOG_END);

  RING_WRITE_CTL(RING, 0);
  RING_WRITE_HEAD(RING, 0);
  RING_WRITE_TAIL(RING, 0);

  // The doc says this enforces ordering between multiple writes
  head = RING_READ_HEAD(RING) & RING_HEAD_ADDR;
  if (head != 0) {
    printk(LOG_ERR "failed to set head to zero\n" LOG_END);
    RING_WRITE_HEAD(RING, 0);

    if (RING_READ_HEAD(RING) & RING_HEAD_ADDR) {
      printk(LOG_ERR "failed to set ring head to zero "
                     "ctl %08x head %08x tail %08x start %08x\n"
             LOG_END,
             RING_READ_CTL(RING),
             RING_READ_HEAD(RING),
             RING_READ_TAIL(RING),
             RING_READ_START(RING));
    }
  }

  /* As in the i915 driver: enforce ordering by reading the HEAD register back */
  RING_READ_HEAD(RING);

  /* Comment taken directly from i915 driver */
  /* Initialize the ring. This must happen _after_ we've cleared the ring
   * registers with the above sequence (the readback of the HEAD registers
   * also enforces ordering), otherwise the hw might lose the new ring
   * register values. */
  RING_WRITE_START(RING, RING->gva_ringbuffer);

  RING_WRITE_CTL(RING, (((RING->size - PAGE_SIZE) &
                          RING_NR_PAGES) |
                          RING_VALID));

  /* If the head is still not zero, the ring is dead */
  if (wait_for((RING_READ_CTL(RING) & RING_VALID) != 0 &&
                RING_READ_START(RING) == RING->gva_ringbuffer &&
                (RING_READ_HEAD(RING) & RING_HEAD_ADDR) == 0, 50)) {
    printk(LOG_ERR "failed to start ring\n" LOG_END);
    return -EIO;
  }

  RING->head = RING_READ_HEAD(RING);
  RING->tail = RING_READ_TAIL(RING) & RING_TAIL_ADDR;
  RING->space = ring_space(RING);

  printk(LOG_INFO "ring->space = %d\n" LOG_END, RING->space);

  gpu_forcewake_put(gpu_priv);

  RING_WRITE_MODE(RING, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
  RING_WRITE_MODE(RING, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
  RING_WRITE_MODE_GEN7(RING, _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                       _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
  RING_WRITE_INSTPM(RING, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

  dword_check(gpu_priv, RING, temp);

  TRACE_OUT
  return 0;
}
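The init path ends by computing the free space with a ring_space() helper that is not shown in this snippet. A hedged sketch modelled on the classic i915 calculation (the struct name and the reserved byte count are assumptions):

/* Sketch of ring_space(): free bytes between tail and head, keeping a
 * small reserve so that tail never catches up with head exactly. */
static int ring_space(struct nugpgpu_ring *ring)
{
  int space = ring->head - (ring->tail + 64 /* reserve, assumption */);
  if (space < 0)
    space += ring->size;
  return space;
}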
Example #14
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}