Example #1
0
/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer on failure
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = __create_hw_context(to_i915(dev), NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx); /* not user accessible */
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!i915.enable_guc_submission)
		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}
Example #2
0
static int gpu_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_i915_gem_request *rq;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	err = i915_gem_object_set_to_gtt_domain(obj, true);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
	if (IS_ERR(rq)) {
		i915_vma_unpin(vma);
		return PTR_ERR(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		__i915_add_request(rq, false);
		i915_vma_unpin(vma);
		return PTR_ERR(cs);
	}

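	/*
	 * Emit a single MI_STORE_DWORD_IMM that writes @v into the object at
	 * @offset. The layout differs per generation: gen8+ takes a 64-bit
	 * GGTT address split over two dwords, gen4-gen7 take a 32-bit address
	 * preceded by a zero dword, and older parts use the legacy encoding
	 * padded with MI_NOOP, so every path emits the four dwords reserved
	 * by intel_ring_begin(rq, 4) above.
	 */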
	if (INTEL_GEN(i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = v;
	} else if (INTEL_GEN(i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | 1 << 22;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(rq, cs);

	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unpin(vma);

	reservation_object_lock(obj->resv, NULL);
	reservation_object_add_excl_fence(obj->resv, &rq->fence);
	reservation_object_unlock(obj->resv);

	__i915_add_request(rq, true);

	return 0;
}
Example #3
0
static int intelfb_alloc(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct drm_framebuffer *fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj = NULL;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
				    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	mutex_lock(&dev->struct_mutex);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = PAGE_ALIGN(size);

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size * 2 < ggtt->stolen_usable_size)
		obj = i915_gem_object_create_stolen(dev_priv, size);
	if (obj == NULL)
		obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = PTR_ERR(obj);
		goto out;
	}

	fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb)) {
		i915_gem_object_put(obj);
		ret = PTR_ERR(fb);
		goto out;
	}

	mutex_unlock(&dev->struct_mutex);

	ifbdev->fb = to_intel_framebuffer(fb);

	return 0;

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Example #4
0
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_reg_params old_params;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (!fbc->enabled || fbc->crtc != crtc)
		return;

	if (!intel_fbc_can_activate(crtc)) {
		WARN_ON(fbc->active);
		return;
	}

	old_params = fbc->params;
	intel_fbc_get_reg_params(crtc, &fbc->params);

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (fbc->active &&
	    intel_fbc_reg_params_equal(&old_params, &fbc->params))
		return;

	intel_fbc_deactivate(dev_priv);
	intel_fbc_schedule_activation(crtc);
	fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
}
Example #5
0
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(to_i915(vma->vm->dev));

	lockdep_assert_held(&vma->vm->dev->struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

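	/*
	 * Hold an extra pin so the vma (and the cached iomap) stays bound
	 * while the caller uses the mapping; the pin is dropped again by
	 * i915_vma_unpin_iomap().
	 */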
	__i915_vma_pin(vma);
	return ptr;
}
Example #6
0
void intel_fbc_pre_update(struct intel_crtc *crtc,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (!multiple_pipes_ok(crtc, plane_state)) {
		fbc->no_fbc_reason = "more than one pipe active";
		goto deactivate;
	}

	if (!fbc->enabled || fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);

deactivate:
	intel_fbc_deactivate(dev_priv);
unlock:
	mutex_unlock(&fbc->lock);
}
Example #7
0
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
			const struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	DRM_DEBUG_KMS("\n");

	mutex_lock(&dev_priv->sb_lock);

	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
		      config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);

	/* Wait at least 0.5 us after ungating before enabling the VCO;
	 * the wide usleep_range allows the hrtimer subsystem to relax
	 * timing and coalesce wakeups.
	 */
	usleep_range(10, 50);

	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);

	if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
						DSI_PLL_LOCK, 20)) {

		mutex_unlock(&dev_priv->sb_lock);
		DRM_ERROR("DSI PLL lock failed\n");
		return;
	}
	mutex_unlock(&dev_priv->sb_lock);

	DRM_DEBUG_KMS("DSI PLL locked\n");
}
Example #8
0
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
	u32 tmp;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Clear old configurations */
	if (IS_BROXTON(dev_priv)) {
		tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
		tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
		tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
		tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
		tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
		I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
	} else {
		tmp = I915_READ(MIPIO_TXESC_CLK_DIV1);
		tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
		I915_WRITE(MIPIO_TXESC_CLK_DIV1, tmp);

		tmp = I915_READ(MIPIO_TXESC_CLK_DIV2);
		tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
		I915_WRITE(MIPIO_TXESC_CLK_DIV2, tmp);
	}
	I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
Example #9
0
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
		     struct intel_crtc_state *config)
{
	u32 pclk;
	u32 dsi_clk;
	u32 dsi_ratio;
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* Guard against division by zero below */
	if (!pipe_bpp) {
		DRM_ERROR("Invalid BPP(0)\n");
		return 0;
	}

	config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);

	dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;

	dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;

	/* pixel_format and pipe_bpp should agree */
	assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);

	pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp);
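	/*
	 * Illustrative numbers (not from the source), assuming
	 * BXT_REF_CLOCK_KHZ is 19200: a PLL ratio of 40 gives
	 * dsi_clk = 40 * 19200 / 2 = 384000 kHz; with 4 lanes at 24 bpp,
	 * pclk = 384000 * 4 / 24 = 64000 kHz.
	 */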

	DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
	return pclk;
}
Example #10
0
static void intel_mst_enable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *pipe_config,
				const struct drm_connector_state *conn_state)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum port port = intel_dig_port->base.port;

	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);

	if (intel_wait_for_register(&dev_priv->uncore,
				    DP_TP_STATUS(port),
				    DP_TP_STATUS_ACT_SENT,
				    DP_TP_STATUS_ACT_SENT,
				    1))
		DRM_ERROR("Timed out waiting for ACT sent\n");

	drm_dp_check_act_status(&intel_dp->mst_mgr);

	drm_dp_update_payload_part2(&intel_dp->mst_mgr);
	if (pipe_config->has_audio)
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
Example #11
0
/*
 * XXX: The muxing and gating is hard coded for now. Need to add support for
 * sharing PLLs with two DSI outputs.
 */
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
			struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	int ret;
	u32 dsi_clk;

	dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
				    intel_dsi->lane_count);

	ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
	if (ret) {
		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
		return ret;
	}

	if (intel_dsi->ports & (1 << PORT_A))
		config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;

	if (intel_dsi->ports & (1 << PORT_C))
		config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;

	config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;

	DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
		      config->dsi_pll.div, config->dsi_pll.ctrl);

	return 0;
}
Example #12
0
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				const struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
	 * the video DIP being updated before programming the video DIP data
	 * buffer registers for the DIP being updated.
	 */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

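	/*
	 * Copy the VSC SDP payload into the DIP data registers one dword at
	 * a time, then zero the remainder of the buffer so no stale data
	 * from a previous packet is transmitted.
	 */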
	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), *data);
		data++;
	}
	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), 0);

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
Example #13
0
static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
			struct drm_display_mode *mode)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_connector->mst_port;
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (drm_connector_is_unregistered(connector))
		return MODE_ERROR;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
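	/* Check required bandwidth assuming the lowest bpc (6 bpc = 18 bpp). */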
	mode_rate = intel_dp_link_required(mode->clock, 18);

	/* TODO - validate mode against available PBN for link */
	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	if (mode_rate > max_rate || mode->clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}
Example #14
0
static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	if (WARN_ON(!fbc->enabled))
		return;

	if (drm_crtc_vblank_get(&crtc->base)) {
		DRM_ERROR("vblank not available for FBC on pipe %c\n",
			  pipe_name(crtc->pipe));
		return;
	}

	/* It is useless to call intel_fbc_cancel_work() or cancel_work() in
	 * this function since we're not releasing fbc.lock, so it won't have an
	 * opportunity to grab it to discover that it was cancelled. So we just
	 * update the expected vblank count. */
	work->scheduled = true;
	work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
	drm_crtc_vblank_put(&crtc->base);

	schedule_work(&work->work);
}
Example #15
0
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->vma = cache->vma;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
		params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
						32 * fbc->threshold) * 8;
}
Example #16
0
static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
			       const struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	enum port port;
	u32 val;

	DRM_DEBUG_KMS("\n");

	/* Configure PLL values */
	I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
	POSTING_READ(BXT_DSI_PLL_CTL);

	/* Program TX, RX, Dphy clocks */
	for_each_dsi_port(port, intel_dsi->ports)
		bxt_dsi_program_clocks(encoder->base.dev, port, config);

	/* Enable DSI PLL */
	val = I915_READ(BXT_DSI_PLL_ENABLE);
	val |= BXT_DSI_PLL_DO_ENABLE;
	I915_WRITE(BXT_DSI_PLL_ENABLE, val);

	/* Timeout and fail if PLL not locked */
	if (intel_wait_for_register(dev_priv,
				    BXT_DSI_PLL_ENABLE,
				    BXT_DSI_PLL_LOCKED,
				    BXT_DSI_PLL_LOCKED,
				    1)) {
		DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
		return;
	}

	DRM_DEBUG_KMS("DSI PLL locked\n");
}
Example #17
0
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}
Example #18
0
static struct intel_shared_dpll *
ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->shared_dplls[i];

		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
			      crtc->base.base.id, pll->name);
	} else {
		pll = intel_find_shared_dpll(crtc, crtc_state,
					     DPLL_ID_PCH_PLL_A,
					     DPLL_ID_PCH_PLL_B);
	}

	/* reference the pll */
	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}
Example #19
0
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_ggtt_size(dev_priv,
					    vma->size,
					    i915_gem_object_get_tiling(obj));
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
						      vma->size,
						      i915_gem_object_get_tiling(obj),
						      true);

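	/*
	 * "Fenceable" means the binding has exactly the size and alignment a
	 * fence register would require for this tiling; "mappable" means the
	 * whole fence window fits inside the CPU-visible part of the GGTT
	 * aperture.
	 */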
	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    dev_priv->ggtt.mappable_end);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (mappable && fenceable &&
	    vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}
Example #20
0
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}
Example #21
0
/* Program BXT Mipi clocks and dividers */
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
				   const struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;
	u32 dsi_rate = 0;
	u32 pll_ratio = 0;
	u32 rx_div;
	u32 tx_div;
	u32 rx_div_upper;
	u32 rx_div_lower;
	u32 mipi_8by3_divider;

	/* Clear old configurations */
	tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
	tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
	tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
	tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
	tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));

	/* Get the current (actual) DSI rate */
	pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
	dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;

	/*
	 * The tx escape clock must be <= 20 MHz, and per bspec the divider
	 * is programmed as the computed value minus 1.
	 */
	tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;
	/*
	 * The rx escape clock must be <= 150 MHz, and per bspec the divider
	 * is likewise programmed as the computed value minus 1.
	 */
	rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1;
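	/*
	 * Illustrative numbers (not from the source), assuming
	 * BXT_REF_CLOCK_KHZ is 19200: a PLL ratio of 34 gives
	 * dsi_rate = 19200 * 34 / 2 = 326400 kHz, so
	 * tx_div = DIV_ROUND_UP(326400, 20000) - 1 = 16 and
	 * rx_div = DIV_ROUND_UP(326400, 150000) - 1 = 2.
	 */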

	/*
	 * The rx divider value is programmed into two different bit fields
	 * of the register, so split it accordingly.
	 */
	rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
	rx_div_upper = (rx_div & RX_DIVIDER_BIT_3_4) >> 2;

	/* As per bspec, program the 8/3X clock divider to the value below. */
	if (dev_priv->vbt.dsi.config->is_cmd_mode)
		mipi_8by3_divider = 0x2;
	else
		mipi_8by3_divider = 0x3;

	tmp |= BXT_MIPI_8X_BY3_DIVIDER(port, mipi_8by3_divider);
	tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div);
	tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
	tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);

	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
Example #22
0
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
{
	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_connector *intel_connector;
	struct drm_connector *connector;
	enum pipe pipe;
	int ret;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		return NULL;

	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
	intel_connector->mst_port = intel_dp;
	intel_connector->port = port;
	drm_dp_mst_get_port_malloc(port);

	connector = &intel_connector->base;
	ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		intel_connector_free(intel_connector);
		return NULL;
	}

	drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);

	for_each_pipe(dev_priv, pipe) {
		struct drm_encoder *enc =
			&intel_dp->mst_encoders[pipe]->base.base;

		ret = drm_connector_attach_encoder(&intel_connector->base, enc);
		if (ret)
			goto err;
	}

	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);

	ret = drm_connector_set_path_property(connector, pathprop);
	if (ret)
		goto err;

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	drm_connector_attach_max_bpc_property(connector, 6, 12);

	return connector;

err:
	drm_connector_cleanup(connector);
	return NULL;
}
Example #23
0
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
Example #24
0
void
intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
			     struct intel_shared_dpll *pll,
			     struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);

	config[id].crtc_mask &= ~(1 << crtc->pipe);
}
Example #25
0
static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	if (dev_priv->fbdev)
		drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
						   &connector->base);
#endif
}
Example #26
0
static void intel_dp_register_mst_connector(struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	if (dev_priv->fbdev)
		drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
						connector);

	drm_connector_register(connector);
}
Example #27
0
static bool
intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
	return true;
}
Example #28
0
static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	spin_lock(&i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &i915->mm.bound_list);
	spin_unlock(&i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid  */
}
Example #29
0
static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}
Example #30
0
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
	u32 temp;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);

	temp = I915_READ(MIPI_CTRL(port));
	temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
	I915_WRITE(MIPI_CTRL(port), temp |
			intel_dsi->escape_clk_div <<
			ESCAPE_CLOCK_DIVIDER_SHIFT);
}