Example 1
static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
					 struct whitelist *w)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	w->count = 0;
	w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));

	if (INTEL_GEN(i915) < 8)
		return NULL;
	else if (IS_BROADWELL(i915))
		bdw_whitelist_build(w);
	else if (IS_CHERRYVIEW(i915))
		chv_whitelist_build(w);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(w);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(w);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(w);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(w);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(w);
	else if (IS_CANNONLAKE(i915))
		cnl_whitelist_build(w);
	else if (IS_ICELAKE(i915))
		icl_whitelist_build(w);
	else
		MISSING_CASE(INTEL_GEN(i915));

	return w;
}
Example 2
void intel_img_view_init(struct intel_dev *dev,
                         const VkImageViewCreateInfo *info,
                         struct intel_img_view *view)
{
    VkComponentMapping state_swizzles;
    uint32_t mip_levels, array_size;
    struct intel_img *img = intel_img(info->image);

    mip_levels = info->subresourceRange.levelCount;
    if (mip_levels > img->mip_levels - info->subresourceRange.baseMipLevel)
        mip_levels = img->mip_levels - info->subresourceRange.baseMipLevel;

    array_size = info->subresourceRange.layerCount;
    if (array_size > img->array_size - info->subresourceRange.baseArrayLayer)
        array_size = img->array_size - info->subresourceRange.baseArrayLayer;

    view->obj.destroy = img_view_destroy;

    view->img = img;

    if (!(img->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
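        /*
         * Gen7.5+ surface states support shader channel select, so the
         * swizzle can be applied directly in state_swizzles; earlier gens
         * would need a shader-side swizzle, which is not implemented (see
         * the warning below).
         */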
        if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7.5)) {
            state_swizzles = info->components;
            view->shader_swizzles.r = VK_COMPONENT_SWIZZLE_R;
            view->shader_swizzles.g = VK_COMPONENT_SWIZZLE_G;
            view->shader_swizzles.b = VK_COMPONENT_SWIZZLE_B;
            view->shader_swizzles.a = VK_COMPONENT_SWIZZLE_A;
        } else {
            state_swizzles.r = VK_COMPONENT_SWIZZLE_R;
            state_swizzles.g = VK_COMPONENT_SWIZZLE_G;
            state_swizzles.b = VK_COMPONENT_SWIZZLE_B;
            state_swizzles.a = VK_COMPONENT_SWIZZLE_A;
            view->shader_swizzles = info->components;
        }

        /* shader_swizzles is ignored by the compiler */
        if (view->shader_swizzles.r != VK_COMPONENT_SWIZZLE_R ||
            view->shader_swizzles.g != VK_COMPONENT_SWIZZLE_G ||
            view->shader_swizzles.b != VK_COMPONENT_SWIZZLE_B ||
            view->shader_swizzles.a != VK_COMPONENT_SWIZZLE_A) {
            intel_dev_log(dev, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                          (struct intel_base*)view, 0, 0,
                          "image data swizzling is ignored");
        }

        if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
            surface_state_tex_gen7(dev->gpu, img, info->viewType, info->format,
                    info->subresourceRange.baseMipLevel, mip_levels,
                    info->subresourceRange.baseArrayLayer, array_size,
                    state_swizzles, false, view->cmd);
            view->cmd_len = 8;
        } else {
            surface_state_tex_gen6(dev->gpu, img, info->viewType, info->format,
                    info->subresourceRange.baseMipLevel, mip_levels,
                    info->subresourceRange.baseArrayLayer, array_size,
                    false, view->cmd);
            view->cmd_len = 6;
        }
    }
}
Example 3
static int gpu_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_i915_gem_request *rq;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	err = i915_gem_object_set_to_gtt_domain(obj, true);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
	if (IS_ERR(rq)) {
		i915_vma_unpin(vma);
		return PTR_ERR(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		__i915_add_request(rq, false);
		i915_vma_unpin(vma);
		return PTR_ERR(cs);
	}

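	/*
	 * Store v at the object's GGTT offset: bit 22 selects GGTT
	 * addressing (MI_USE_GGTT/MI_MEM_VIRTUAL); Gen8+ takes a 64-bit
	 * address, Gen4..7 a zero dword followed by a 32-bit address, and
	 * older gens use the legacy layout padded with MI_NOOP.
	 */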
	if (INTEL_GEN(i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = v;
	} else if (INTEL_GEN(i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | 1 << 22;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(rq, cs);

	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unpin(vma);

	reservation_object_lock(obj->resv, NULL);
	reservation_object_add_excl_fence(obj->resv, &rq->fence);
	reservation_object_unlock(obj->resv);

	__i915_add_request(rq, true);

	return 0;
}
Example 4
int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
	int err = 0;

	dev_priv->workarounds.count = 0;

	if (INTEL_GEN(dev_priv) < 8)
		err = 0;
	else if (IS_BROADWELL(dev_priv))
		err = bdw_ctx_workarounds_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_ctx_workarounds_init(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_ctx_workarounds_init(dev_priv);
	else if (IS_BROXTON(dev_priv))
		err = bxt_ctx_workarounds_init(dev_priv);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_ctx_workarounds_init(dev_priv);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_ctx_workarounds_init(dev_priv);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_ctx_workarounds_init(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_ctx_workarounds_init(dev_priv);
	else if (IS_ICELAKE(dev_priv))
		err = icl_ctx_workarounds_init(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));
	if (err)
		return err;

	DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
			 dev_priv->workarounds.count);
	return 0;
}
Example 5
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 8)
		return;
	else if (IS_BROADWELL(dev_priv))
		bdw_gt_workarounds_apply(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		chv_gt_workarounds_apply(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		skl_gt_workarounds_apply(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_gt_workarounds_apply(dev_priv);
	else if (IS_KABYLAKE(dev_priv))
		kbl_gt_workarounds_apply(dev_priv);
	else if (IS_GEMINILAKE(dev_priv))
		glk_gt_workarounds_apply(dev_priv);
	else if (IS_COFFEELAKE(dev_priv))
		cfl_gt_workarounds_apply(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_gt_workarounds_apply(dev_priv);
	else if (IS_ICELAKE(dev_priv))
		icl_gt_workarounds_apply(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));
}
Example 6
static void
layout_init_layer_height(struct intel_layout *layout,
                         struct intel_layout_params *params)
{
   const VkImageCreateInfo *info = params->info;
   unsigned num_layers;

   if (layout->walk != INTEL_LAYOUT_WALK_LAYER)
      return;

   num_layers = layout_get_num_layers(layout, params);
   if (num_layers <= 1)
      return;

   /*
    * From the Sandy Bridge PRM, volume 1 part 1, page 115:
    *
    *     "The following equation is used for surface formats other than
    *      compressed textures:
    *
    *        QPitch = (h0 + h1 + 11j)"
    *
    *     "The equation for compressed textures (BC* and FXT1 surface formats)
    *      follows:
    *
    *        QPitch = (h0 + h1 + 11j) / 4"
    *
    *     "[DevSNB] Errata: Sampler MSAA Qpitch will be 4 greater than the
    *      value calculated in the equation above, for every other odd Surface
    *      Height starting from 1 i.e. 1,5,9,13"
    *
    * From the Ivy Bridge PRM, volume 1 part 1, page 111-112:
    *
    *     "If Surface Array Spacing is set to ARYSPC_FULL (note that the depth
    *      buffer and stencil buffer have an implied value of ARYSPC_FULL):
    *
    *        QPitch = (h0 + h1 + 12j)
    *        QPitch = (h0 + h1 + 12j) / 4 (compressed)
    *
    *      (There are many typos or missing words here...)"
    *
    * To access the N-th slice, an offset of (Stride * QPitch * N) is added to
    * the base address.  The PRM divides QPitch by 4 for compressed formats
    * because the block height for those formats is 4, and it wants QPitch to
    * mean the number of memory rows, as opposed to texel rows, between
    * slices.  Since we use texel rows everywhere, we do not need to divide
    * QPitch by 4.
    */
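   /*
    * For illustration: with h0 = 8, h1 = 4 and align_j = 4 on Gen7, this
    * gives 8 + 4 + 12 * 4 = 60 texel rows per layer.
    */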
   layout->layer_height = params->h0 + params->h1 +
      ((intel_gpu_gen(params->gpu) >= INTEL_GEN(7)) ? 12 : 11) * layout->align_j;

   if (intel_gpu_gen(params->gpu) == INTEL_GEN(6) &&
       info->samples != VK_SAMPLE_COUNT_1_BIT &&
       layout->height0 % 4 == 1)
      layout->layer_height += 4;

   params->max_y += layout->layer_height * (num_layers - 1);
}
Example 7
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					struct intel_fbc_state_cache *cache)
{
	int lines;

	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	if (INTEL_GEN(dev_priv) == 7)
		lines = min(lines, 2048);
	else if (INTEL_GEN(dev_priv) >= 8)
		lines = min(lines, 2560);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * cache->fb.stride;
}
Example 8
static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = true;

	if (INTEL_GEN(dev_priv) >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}
Example 9
static void att_view_init_for_input(struct intel_att_view *view,
                                    const struct intel_gpu *gpu,
                                    const struct intel_img *img,
                                    VkImageViewType view_type,
                                    VkFormat format, unsigned level,
                                    unsigned first_layer, unsigned num_layers)
{
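    /*
     * The texture paths below are intentionally disabled ("if (false)"):
     * input attachment views are always emitted as null surface states.
     */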
    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        if (false) {
            surface_state_tex_gen7(gpu, img, view_type, format,
                    level, 1, first_layer, num_layers,
                    identity_channel_mapping, false, view->cmd);
        } else {
            surface_state_null_gen7(gpu, view->cmd);
        }

        view->cmd_len = 8;
    } else {
        if (false) {
            surface_state_tex_gen6(gpu, img, view_type, format,
                    level, 1, first_layer, num_layers, false, view->cmd);
        } else {
            surface_state_null_gen6(gpu, view->cmd);
        }

        view->cmd_len = 6;
    }
}
Example 10
static int get_context_size(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 reg;

	switch (INTEL_GEN(dev_priv)) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev_priv))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}
Example 11
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;
	fbc->work.scheduled = false;

	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->has_fbc = false;

	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
		      i915_modparams.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* This value was pulled out of someone's hat */
	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}
Example 12
static bool
layout_want_hiz(const struct intel_layout *layout,
                const struct intel_layout_params *params)
{
   const VkImageCreateInfo *info = params->info;

   if (intel_debug & INTEL_DEBUG_NOHIZ)
       return false;

   if (!(info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT))
      return false;

   if (!intel_format_has_depth(params->gpu, info->format))
      return false;

   /*
    * HiZ implies separate stencil on Gen6.  We do not want to copy stencil
    * values between combined and separate stencil buffers when HiZ is enabled
    * or disabled.
    */
   if (intel_gpu_gen(params->gpu) == INTEL_GEN(6))
       return false;

   return true;
}
Example 13
static bool
layout_want_mcs(struct intel_layout *layout,
                struct intel_layout_params *params)
{
   const VkImageCreateInfo *info = params->info;
   bool want_mcs = false;

   /* MCS is for RT on GEN7+ */
   if (intel_gpu_gen(params->gpu) < INTEL_GEN(7))
      return false;

   if (info->imageType != VK_IMAGE_TYPE_2D ||
       !(info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT))
      return false;

   /*
    * From the Ivy Bridge PRM, volume 4 part 1, page 77:
    *
    *     "For Render Target and Sampling Engine Surfaces:If the surface is
    *      multisampled (Number of Multisamples any value other than
    *      MULTISAMPLECOUNT_1), this field (MCS Enable) must be enabled."
    *
    *     "This field must be set to 0 for all SINT MSRTs when all RT channels
    *      are not written"
    */
   if (info->samples != VK_SAMPLE_COUNT_1_BIT &&
       !icd_format_is_int(info->format)) {
      want_mcs = true;
   } else if (info->samples == VK_SAMPLE_COUNT_1_BIT) {
      /*
       * From the Ivy Bridge PRM, volume 2 part 1, page 326:
       *
       *     "When MCS is buffer is used for color clear of non-multisampler
       *      render target, the following restrictions apply.
       *      - Support is limited to tiled render targets.
       *      - Support is for non-mip-mapped and non-array surface types
       *        only.
       *      - Clear is supported only on the full RT; i.e., no partial clear
       *        or overlapping clears.
       *      - MCS buffer for non-MSRT is supported only for RT formats
       *        32bpp, 64bpp and 128bpp.
       *      ..."
       */
      if (layout->tiling != GEN6_TILING_NONE &&
          info->mipLevels == 1 && info->arrayLayers == 1) {
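         /* block_size is in bytes: 4/8/16 match the 32bpp/64bpp/128bpp formats permitted above */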
         switch (layout->block_size) {
         case 4:
         case 8:
         case 16:
            want_mcs = true;
            break;
         default:
            break;
         }
      }
   }

   return want_mcs;
}
Example 14
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 5)
		return ilk_fbc_is_active(dev_priv);
	else if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);
	else
		return i8xx_fbc_is_active(dev_priv);
}
Example 15
static void
layout_init_walk(struct intel_layout *layout,
                 struct intel_layout_params *params)
{
   if (intel_gpu_gen(params->gpu) >= INTEL_GEN(7))
      layout_init_walk_gen7(layout, params);
   else
      layout_init_walk_gen6(layout, params);
}
Example 16
void intel_buf_view_init(const struct intel_dev *dev,
                         const VkBufferViewCreateInfo *info,
                         struct intel_buf_view *view,
                         bool raw)
{
    struct intel_buf *buf = intel_buf(info->buffer);
    /* TODO: Is transfer destination the only shader write operation? */
    const bool will_write = (buf->usage & (VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
                             VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
    VkFormat format;
    VkDeviceSize stride;
    uint32_t *cmd;
    int i;

    view->obj.destroy = buf_view_destroy;

    view->buf = buf;

    /*
     * The compiler expects uniform buffers to have a pitch of
     * 4 for fragment shaders, but 16 for other stages.  The format
     * must be VK_FORMAT_R32G32B32A32_SFLOAT.
     */
    if (raw) {
        format = VK_FORMAT_R32G32B32A32_SFLOAT;
        stride = 16;
    } else {
        format = info->format;
        stride = icd_format_get_size(format);
    }
    cmd = view->cmd;

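    /*
     * Two passes: the first fills view->cmd; for raw views a second pass
     * fills view->fs_cmd with the fragment-shader pitch of 4, otherwise
     * fs_cmd is a plain copy of cmd.
     */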
    for (i = 0; i < 2; i++) {
        if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
            surface_state_buf_gen7(dev->gpu, info->offset,
                    info->range, stride, format,
                    will_write, will_write, cmd);
            view->cmd_len = 8;
        } else {
            surface_state_buf_gen6(dev->gpu, info->offset,
                    info->range, stride, format,
                    will_write, will_write, cmd);
            view->cmd_len = 6;
        }

        /* switch to view->fs_cmd */
        if (raw) {
            cmd = view->fs_cmd;
            stride = 4;
        } else {
            memcpy(view->fs_cmd, view->cmd, sizeof(uint32_t) * view->cmd_len);
            break;
        }
    }
}
Example 17
void intel_null_view_init(struct intel_null_view *view,
                          struct intel_dev *dev)
{
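    /* RENDER_SURFACE_STATE is 8 dwords on Gen7+ and 6 on Gen6, hence cmd_len. */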
    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_null_gen7(dev->gpu, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_null_gen6(dev->gpu, view->cmd);
        view->cmd_len = 6;
    }
}
Example 18
static int devid_to_gen(int devid)
{
    int gen;

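    /* INTEL_GEN() encodes fractional generations here, e.g. INTEL_GEN(7.5) for Haswell. */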
    if (gen_is_hsw(devid))
        gen = INTEL_GEN(7.5);
    else if (gen_is_ivb(devid))
        gen = INTEL_GEN(7);
    else if (gen_is_snb(devid))
        gen = INTEL_GEN(6);
    else
        gen = -1;

#ifdef INTEL_GEN_SPECIALIZED
    if (gen != INTEL_GEN(INTEL_GEN_SPECIALIZED))
        gen = -1;
#endif

    return gen;
}
Example 19
static u32 get_core_family(struct drm_i915_private *dev_priv)
{
	u32 gen = INTEL_GEN(dev_priv);

	switch (gen) {
	case 9:
		return GFXCORE_FAMILY_GEN9;

	default:
		WARN(1, "GEN%d does not support GuC operation!\n", gen);
		return GFXCORE_FAMILY_UNKNOWN;
	}
}
Example 20
/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
	if (i915_modparams.enable_fbc >= 0)
		return !!i915_modparams.enable_fbc;

	if (!HAS_FBC(dev_priv))
		return 0;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
		return 1;

	return 0;
}
Example 21
static u32 get_core_family(struct drm_i915_private *dev_priv)
{
	u32 gen = INTEL_GEN(dev_priv);

	switch (gen) {
	case 9:
		return GUC_CORE_FAMILY_GEN9;

	default:
		MISSING_CASE(gen);
		return GUC_CORE_FAMILY_UNKNOWN;
	}
}
Example 22
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
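	/* memset() returns its destination, so seqno points at the 0xff-filled status page. */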
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
Example 23
static void
layout_init_size_and_format(struct intel_layout *layout,
                            struct intel_layout_params *params)
{
   const VkImageCreateInfo *info = params->info;
   VkFormat format = info->format;
   bool require_separate_stencil = false;

   layout->width0 = info->extent.width;
   layout->height0 = info->extent.height;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 317:
    *
    *     "This field (Separate Stencil Buffer Enable) must be set to the same
    *      value (enabled or disabled) as Hierarchical Depth Buffer Enable."
    *
    * GEN7+ requires separate stencil buffers.
    */
   if (info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
      if (intel_gpu_gen(params->gpu) >= INTEL_GEN(7))
         require_separate_stencil = true;
      else
         require_separate_stencil = (layout->aux == INTEL_LAYOUT_AUX_HIZ);
   }

   switch (format) {
   case VK_FORMAT_D24_UNORM_S8_UINT:
      if (require_separate_stencil) {
         format = VK_FORMAT_X8_D24_UNORM_PACK32;
         layout->separate_stencil = true;
      }
      break;
   case VK_FORMAT_D32_SFLOAT_S8_UINT:
      if (require_separate_stencil) {
         format = VK_FORMAT_D32_SFLOAT;
         layout->separate_stencil = true;
      }
      break;
   default:
      break;
   }

   layout->format = format;
   layout->block_width = icd_format_get_block_width(format);
   layout->block_height = layout->block_width;
   layout->block_size = icd_format_get_size(format);

   params->compressed = icd_format_is_compressed(format);
}
Example 24
static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
			       enum pipe pipe,
			       enum intel_pipe_crc_source *source, u32 *val)
{
	if (IS_GEN2(dev_priv))
		return i8xx_pipe_crc_ctl_reg(source, val);
	else if (INTEL_GEN(dev_priv) < 5)
		return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
	else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
		return ilk_pipe_crc_ctl_reg(source, val);
	else
		return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
}
Example 25
/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}
Example 26
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

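	/* Each retry halves the allocation and doubles the threshold. */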
again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}
Example 27
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct drm_i915_gem_request *req)
{
	switch (INTEL_GEN(req->i915)) {
	case 6:
		return &gen6_null_state;
	case 7:
		return &gen7_null_state;
	case 8:
		return &gen8_null_state;
	case 9:
		return &gen9_null_state;
	}

	return NULL;
}
Example 28
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 6:
		return &gen6_null_state;
	case 7:
		return &gen7_null_state;
	case 8:
		return &gen8_null_state;
	case 9:
		return &gen9_null_state;
	}

	return NULL;
}
Example 29
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}
Example 30
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* See the comment at the drm_mm_init() call for more about this check.
	 * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
	 */
	if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}