static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

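	/* Wait for any previous kernel forcewake request to be acknowledged as cleared. */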
	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

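	/* Raise the kernel's forcewake request via the multi-threaded (MT) register. */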
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}
Example #2
/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 * Returns:
 * Zero on success, -ENOMEM if the type array cannot be allocated.
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/* vGPU type names follow the form GVTg_Vx_y, where 'x' encodes
	 * the physical GPU generation and 'y' is the maximum number of
	 * vGPU instances of this type a user can create on one
	 * physical GPU.
	 *
	 * Depending on the physical SKU resources, you may see vGPU
	 * types such as GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different
	 * types of vGPU can be created on the same physical GPU,
	 * subject to the available resources. Each vGPU type carries
	 * an "avail_instance" count indicating how many instances of
	 * that type can be created.
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = 4;

	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
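	/* Walk low GM sizes from 32MB upward, doubling each step, until the aperture cannot host even one instance. */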
	for (i = 0; i < num_types; ++i) {
		if (low_avail / min_low == 0)
			break;
		gvt->types[i].low_gm_size = min_low;
		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
		gvt->types[i].fence = 4;
		gvt->types[i].max_instance = min(low_avail / min_low,
						 high_avail / gvt->types[i].high_gm_size);
		gvt->types[i].avail_instance = gvt->types[i].max_instance;

		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%u",
						gvt->types[i].max_instance);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%u",
						gvt->types[i].max_instance);

		min_low <<= 1;
		gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name, gvt->types[i].max_instance,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}

	gvt->num_types = i;
	return 0;
}
Example #3
/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 * Returns:
 * Zero on success, -ENOMEM if the type array cannot be allocated.
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;

	/* vGPU type names follow the form GVTg_Vx_y, where 'x' encodes
	 * the physical GPU generation (e.g. V4 for a BDW server, V5
	 * for a SKL server).
	 *
	 * Depending on the physical SKU resources, you may see vGPU
	 * types such as GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different
	 * types of vGPU can be created on the same physical GPU,
	 * subject to the available resources. Each vGPU type carries
	 * an "avail_instance" count indicating how many instances of
	 * that type can be created.
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);

	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

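		/* Copy this predefined type's sizes, fence count and resolution into the runtime table. */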
		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
						vgpu_types[i].name);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
						vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}
Example #4
media_fillfunc_t get_media_fillfunc(int devid)
{
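	/* Pick the fill function for the device's GPU generation; unsupported generations return NULL. */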
	media_fillfunc_t fill = NULL;

	if (IS_GEN8(devid))
		fill = gen8_media_fillfunc;
	else if (IS_GEN7(devid))
		fill = gen7_media_fillfunc;
	else if (IS_GEN9(devid))
		fill = gen9_media_fillfunc;

	return fill;
}
Example #5
static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	del_timer_sync(&dev_priv->uncore.force_wake_timer);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	} else {
		dev_priv->uncore.forcewake_count = 0;
		dev_priv->uncore.fw_rendercount = 0;
		dev_priv->uncore.fw_mediacount = 0;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
Example #6
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
Example #7
static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_hw_ppgtt *ppgtt)
{
	u32 address_mode;
	u32 desc;

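	/* All context descriptors start out valid and privileged. */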
	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN8(i915))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers
	 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
	 */

	return desc;
}
Example #8
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* See the comment at the drm_mm_init() call for more about this check.
	 * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
	 */
	if (start < 4096 && (IS_GEN8(dev_priv) ||
			     IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}
Example #9
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
			  gen6_force_wake_work);

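	/* Select the forcewake get/put callbacks for this platform. */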
	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

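	/* Hook up the raw MMIO accessors by generation; the default case handles gen8+. */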
	switch (INTEL_INFO(dev)->gen) {
	default:
		dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
		break;
	}
}
Example #10
BOOL
media_driver_data_init (VADriverContextP ctx)
{
  MEDIA_DRV_CONTEXT *drv_ctx = NULL;
  MEDIA_DRV_ASSERT (ctx);
  drv_ctx = ctx->pDriverData;
  if (IS_GEN75 (drv_ctx->drv_data.device_id))
    drv_ctx->codec_info = &gen75_hw_codec_info;
  else if (IS_GEN7 (drv_ctx->drv_data.device_id))
    drv_ctx->codec_info = &gen7_hw_codec_info;
  else if (IS_GEN8(drv_ctx->drv_data.device_id))
    drv_ctx->codec_info = &gen8_hw_codec_info;
  else if (IS_CHERRYVIEW(drv_ctx->drv_data.device_id))
    drv_ctx->codec_info = &chv_hw_codec_info;
  else
    return false;

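  /* Create the driver object heaps; on failure, unwind whatever was initialized before the failing heap. */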
  if (object_heap_init (&drv_ctx->config_heap,
			sizeof (struct object_config), CONFIG_ID_OFFSET))
    goto err_config_heap;
  if (object_heap_init (&drv_ctx->context_heap,
			sizeof (struct object_context), CONTEXT_ID_OFFSET))
    goto err_context_heap;

  if (object_heap_init (&drv_ctx->surface_heap,
			sizeof (struct object_surface), SURFACE_ID_OFFSET))
    goto err_surface_heap;
  if (object_heap_init (&drv_ctx->buffer_heap,
			sizeof (struct object_buffer), BUFFER_ID_OFFSET))
    goto err_buffer_heap;
  if (object_heap_init (&drv_ctx->image_heap,
			sizeof (struct object_image), IMAGE_ID_OFFSET))
    goto err_image_heap;

  if (object_heap_init (&drv_ctx->subpic_heap,
                        sizeof (struct object_subpic), IMAGE_ID_OFFSET))
    goto err_subpic_heap;

  drv_ctx->batch =
    media_batchbuffer_new (&drv_ctx->drv_data, I915_EXEC_RENDER, 0);
  drv_ctx->pp_batch =
    media_batchbuffer_new (&drv_ctx->drv_data, I915_EXEC_RENDER, 0);
  drv_ctx->render_batch =
    media_batchbuffer_new (&drv_ctx->drv_data, I915_EXEC_RENDER, 0);
  media_drv_mutex_init (&drv_ctx->render_mutex);
  media_drv_mutex_init (&drv_ctx->pp_mutex);

  return true;

err_subpic_heap:
  object_heap_destroy (&drv_ctx->image_heap);

err_image_heap:
  object_heap_destroy (&drv_ctx->buffer_heap);
err_buffer_heap:
  object_heap_destroy (&drv_ctx->surface_heap);
err_surface_heap:
  object_heap_destroy (&drv_ctx->context_heap);
err_context_heap:
  object_heap_destroy (&drv_ctx->config_heap);
err_config_heap:
  return false;
}
Example #11
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
    struct intel_driver_data *intel = batch->intel; 

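    /* Gen6-Gen8 flush with PIPE_CONTROL on the render ring and MI_FLUSH_DW on BLT/VEBOX/BSD; older generations use MI_FLUSH. */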
    if (IS_GEN6(intel->device_info) ||
        IS_GEN7(intel->device_info) ||
        IS_GEN8(intel->device_info)) {
        if (batch->flag == I915_EXEC_RENDER) {
            if (IS_GEN8(intel->device_info)) {
                BEGIN_BATCH(batch, 6);
                OUT_BATCH(batch, CMD_PIPE_CONTROL | (6 - 2));

                OUT_BATCH(batch,
                          CMD_PIPE_CONTROL_CS_STALL |
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_DC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);
                OUT_BATCH(batch, 0); /* write address */
                OUT_BATCH(batch, 0);
                OUT_BATCH(batch, 0); /* write data */
                OUT_BATCH(batch, 0);
                ADVANCE_BATCH(batch);
            } else if (IS_GEN6(intel->device_info)) {
                assert(batch->wa_render_bo);

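                /* SNB workaround: issue a CS-stall PIPE_CONTROL and a qword write to wa_render_bo before the real flush. */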
                BEGIN_BATCH(batch, 4 * 3);

                OUT_BATCH(batch, CMD_PIPE_CONTROL | (4 - 2));
                OUT_BATCH(batch,
                          CMD_PIPE_CONTROL_CS_STALL |
                          CMD_PIPE_CONTROL_STALL_AT_SCOREBOARD);
                OUT_BATCH(batch, 0); /* address */
                OUT_BATCH(batch, 0); /* write data */

                OUT_BATCH(batch, CMD_PIPE_CONTROL | (4 - 2));
                OUT_BATCH(batch, CMD_PIPE_CONTROL_WRITE_QWORD);
                OUT_RELOC(batch,
                          batch->wa_render_bo,
                          I915_GEM_DOMAIN_INSTRUCTION,
                          I915_GEM_DOMAIN_INSTRUCTION,
                          0);
                OUT_BATCH(batch, 0); /* write data */

                /* now, finally, the real flush */
                OUT_BATCH(batch, CMD_PIPE_CONTROL | (4 - 2));
                OUT_BATCH(batch,
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);
                OUT_BATCH(batch, 0); /* write address */
                OUT_BATCH(batch, 0); /* write data */
                ADVANCE_BATCH(batch);
            } else {
                BEGIN_BATCH(batch, 4);
                OUT_BATCH(batch, CMD_PIPE_CONTROL | (4 - 2));

                OUT_BATCH(batch, 
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_DC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);
                OUT_BATCH(batch, 0); /* write address */
                OUT_BATCH(batch, 0); /* write data */
                ADVANCE_BATCH(batch);
            }

        } else {
            if (batch->flag == I915_EXEC_BLT) {
                BEGIN_BLT_BATCH(batch, 4);
                OUT_BLT_BATCH(batch, MI_FLUSH_DW);
                OUT_BLT_BATCH(batch, 0);
                OUT_BLT_BATCH(batch, 0);
                OUT_BLT_BATCH(batch, 0);
                ADVANCE_BLT_BATCH(batch);
            } else if (batch->flag == I915_EXEC_VEBOX) {
                BEGIN_VEB_BATCH(batch, 4);
                OUT_VEB_BATCH(batch, MI_FLUSH_DW);
                OUT_VEB_BATCH(batch, 0);
                OUT_VEB_BATCH(batch, 0);
                OUT_VEB_BATCH(batch, 0);
                ADVANCE_VEB_BATCH(batch);
            } else {
                assert(batch->flag == I915_EXEC_BSD);
                BEGIN_BCS_BATCH(batch, 4);
                OUT_BCS_BATCH(batch, MI_FLUSH_DW | MI_FLUSH_DW_VIDEO_PIPELINE_CACHE_INVALIDATE);
                OUT_BCS_BATCH(batch, 0);
                OUT_BCS_BATCH(batch, 0);
                OUT_BCS_BATCH(batch, 0);
                ADVANCE_BCS_BATCH(batch);
            }
        }
    } else {
        if (batch->flag == I915_EXEC_RENDER) {
            BEGIN_BATCH(batch, 1);
            OUT_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
            ADVANCE_BATCH(batch);
        } else {
            assert(batch->flag == I915_EXEC_BSD);
            BEGIN_BCS_BATCH(batch, 1);
            OUT_BCS_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
            ADVANCE_BCS_BATCH(batch);
        }
    }
}