Example #1
0
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_obj;
	u64 pitch, size;

	if (!args || !dev || !file)
		return -EINVAL;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = pitch * args->height;

	if (!size)
		return -EINVAL;

	gem_obj = vkms_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_obj))
		return PTR_ERR(gem_obj);

	args->size = gem_obj->size;
	args->pitch = pitch;

	DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

	return 0;
}
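For context, here is a minimal userspace sketch of driving this path through the dumb-buffer ioctl. The device node, resolution and bpp are illustrative assumptions, not part of the driver above; the kernel fills in handle, pitch and size using the same pitch = width * bytes-per-pixel arithmetic (1024 * 4 = 4096, so size = 4096 * 768 = 3145728).

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
	struct drm_mode_create_dumb creq = {
		.width = 1024, .height = 768, .bpp = 32,
	};
	int fd = open("/dev/dri/card0", O_RDWR); /* assumed KMS node */

	if (fd < 0)
		return 1;

	/* Ask the driver for a dumb buffer; it reports pitch and size back. */
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
		return 1;

	printf("handle=%u pitch=%u size=%llu\n",
	       creq.handle, creq.pitch, (unsigned long long)creq.size);
	return 0;
}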
Example #2
0
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > dev_priv->gtt.stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}
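The comment above is the whole trick: the single scatterlist entry carries a DMA address that never came from the DMA API. As a rough illustration only (this helper is made up, not part of i915), a consumer would walk the returned table like any other, and with one entry the loop body runs exactly once:

/* Hypothetical consumer of the table built above, for illustration. */
static void stolen_show_mapping(struct sg_table *st)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(st->sgl, sg, st->nents, i)
		DRM_DEBUG_DRIVER("entry %d: dma %pad, len %u\n",
				 i, &sg_dma_address(sg), sg_dma_len(sg));
}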
Example #3
/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static int create_default_context(struct drm_i915_private *dev_priv)
{
	struct i915_hw_context *ctx;
	int ret;

	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	ctx = create_hw_context(dev_priv->dev, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* We may need to do things with the shrinker which require us to
	 * immediately switch back to the default context. This can cause a
	 * problem as pinning the default context also requires GTT space which
	 * may not be available. To avoid this we always pin the
	 * default context.
	 */
	dev_priv->ring[RCS].default_context = ctx;
	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
	if (ret) {
		do_destroy(ctx);
		return ret;
	}

	ret = do_switch(NULL, ctx, 0);
	if (ret) {
		i915_gem_object_unpin(ctx->obj);
		do_destroy(ctx);
	} else {
		DRM_DEBUG_DRIVER("Default HW context loaded\n");
	}

	return ret;
}
Example #4
0
void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
			   const struct drm_encoder *encoder,
			   bool enabled)
{
	int channel;

	switch (encoder->encoder_type) {
	case DRM_MODE_ENCODER_NONE:
		channel = 0;
		break;
	case DRM_MODE_ENCODER_TMDS:
	case DRM_MODE_ENCODER_TVDAC:
		channel = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n");
		return;
	}

	regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
			   SUN4I_TCON_GCTL_TCON_ENABLE,
			   enabled ? SUN4I_TCON_GCTL_TCON_ENABLE : 0);

	sun4i_tcon_channel_set_status(tcon, channel, enabled);
}
Example #5
0
/*
 * Typically, a channel looks like: DMA-->clip-->scale-->ctrans-->compositor
 */
static void ade_update_channel(struct ade_plane *aplane,
			       struct drm_framebuffer *fb, int crtc_x,
			       int crtc_y, unsigned int crtc_w,
			       unsigned int crtc_h, u32 src_x,
			       u32 src_y, u32 src_w, u32 src_h)
{
	struct ade_hw_ctx *ctx = aplane->ctx;
	void __iomem *base = ctx->base;
	u32 fmt = ade_get_format(fb->format->format);
	u32 ch = aplane->ch;
	u32 in_w;
	u32 in_h;

	DRM_DEBUG_DRIVER("channel%d: src:(%d, %d)-%dx%d, crtc:(%d, %d)-%dx%d",
			 ch + 1, src_x, src_y, src_w, src_h,
			 crtc_x, crtc_y, crtc_w, crtc_h);

	/* 1) DMA setting */
	in_w = src_w;
	in_h = src_h;
	ade_rdma_set(base, fb, ch, src_y, in_h, fmt);

	/* 2) clip setting */
	ade_clip_set(base, ch, fb->width, src_x, in_w, in_h);

	/* 3) TODO: scale setting for overlay planes */

	/* 4) TODO: ctran/csc setting for overlay planes */

	/* 5) compositor routing setting */
	ade_compositor_routing_set(base, ch, crtc_x, crtc_y, in_w, in_h, fmt);
}
Example #6
0
/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}
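A small, hypothetical caller (it would have to live in the same file, since the tokenizer is static) makes the in-place behaviour concrete: the buffer must be writable because each word is NUL-terminated where the following whitespace used to be.

static void tokenize_demo(void)
{
	char cmd[] = "pipe A plane1";	/* writable copy, as required */
	char *words[3];
	int n = display_crc_ctl_tokenize(cmd, words, ARRAY_SIZE(words));

	/* n == 3: words[0] = "pipe", words[1] = "A", words[2] = "plane1" */
	DRM_DEBUG_DRIVER("parsed %d words\n", n);
}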
Example #7
0
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	if (dev_priv->hw_contexts_disabled)
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = create_hw_context(dev, file_priv, &ctx);
	DRM_UNLOCK(dev);
	if (ret != 0)
		return (ret);

	args->ctx_id = ctx->id;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}
Example #8
0
static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
Example #9
0
static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
{
	DRM_DEBUG_DRIVER("\n");

	drm_plane_helper_disable(drm_plane);
	drm_plane_cleanup(drm_plane);
}
Example #10
0
static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}
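To make the last step concrete with made-up numbers: if the stolen region ends at 0x7bffffff, stolen_top is 0x7c000000, and a reported 1 MiB reservation puts *base at 0x7bf00000.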
Example #11
0
static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
Example #12
0
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
				CTG_STOLEN_RESERVED :
				ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
	     reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}
Example #13
0
void lspcon_write_infoframe(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    unsigned int type,
			    const void *frame, ssize_t len)
{
	bool ret;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);

	/* LSPCON only needs AVI IF */
	if (type != HDMI_INFOFRAME_TYPE_AVI)
		return;

	if (lspcon->vendor == LSPCON_VENDOR_MCA)
		ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
						      frame, len);
	else
		ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
							 frame, len);

	if (!ret) {
		DRM_ERROR("Failed to write AVI infoframes\n");
		return;
	}

	DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
}
Example #14
0
static int guc_wait_ucode(struct intel_guc *guc)
{
	u32 status;
	int ret;

	/*
	 * Wait for the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms, so a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver may decide to reset the GuC and
	 * attempt the ucode load again if this happens.)
	 */
	ret = wait_for(guc_ready(guc, &status), 100);
	DRM_DEBUG_DRIVER("GuC status %#x\n", status);

	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	if (ret == 0 && !guc_xfer_completed(guc, &status)) {
		DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
			  status);
		ret = -ENXIO;
	}

	return ret;
}
Example #15
0
int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
	int err = 0;

	dev_priv->workarounds.count = 0;

	if (INTEL_GEN(dev_priv) < 8)
		err = 0;
	else if (IS_BROADWELL(dev_priv))
		err = bdw_ctx_workarounds_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_ctx_workarounds_init(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_ctx_workarounds_init(dev_priv);
	else if (IS_BROXTON(dev_priv))
		err = bxt_ctx_workarounds_init(dev_priv);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_ctx_workarounds_init(dev_priv);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_ctx_workarounds_init(dev_priv);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_ctx_workarounds_init(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_ctx_workarounds_init(dev_priv);
	else if (IS_ICELAKE(dev_priv))
		err = icl_ctx_workarounds_init(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));
	if (err)
		return err;

	DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
			 dev_priv->workarounds.count);
	return 0;
}
Example #16
0
static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = memdup_user_nul(ubuf, len);
	if (IS_ERR(tmpbuf))
		return PTR_ERR(tmpbuf);

	ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);

	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}
Example #17
0
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
		     struct intel_crtc_state *config)
{
	u32 pclk;
	u32 dsi_clk;
	u32 dsi_ratio;
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* Divide by zero */
	if (!pipe_bpp) {
		DRM_ERROR("Invalid BPP(0)\n");
		return 0;
	}

	config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);

	dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;

	dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;

	/* pixel_format and pipe_bpp should agree */
	assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);

	pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp);

	DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
	return pclk;
}
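As a worked example with assumed register contents: the Broxton reference clock is 19.2 MHz (BXT_REF_CLOCK_KHZ = 19200), so a PLL ratio of 34 gives dsi_clk = 34 * 19200 / 2 = 326400 kHz; with four lanes and pipe_bpp = 24 the result is pclk = DIV_ROUND_CLOSEST(326400 * 4, 24) = 54400 kHz.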
Example #18
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	if (!HAS_HW_CONTEXTS(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = create_hw_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->id;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}
Example #19
0
/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static int create_default_context(struct drm_i915_private *dev_priv)
{
	struct i915_hw_context *ctx;
	int ret;

	DRM_LOCK_ASSERT(dev_priv->dev);

	ret = create_hw_context(dev_priv->dev, NULL, &ctx);
	if (ret != 0)
		return (ret);

	/* We may need to do things with the shrinker which require us to
	 * immediately switch back to the default context. This can cause a
	 * problem as pinning the default context also requires GTT space which
	 * may not be available. To avoid this we always pin the
	 * default context.
	 */
	dev_priv->rings[RCS].default_context = ctx;
	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
	if (ret)
		goto err_destroy;

	ret = do_switch(ctx);
	if (ret)
		goto err_unpin;

	DRM_DEBUG_DRIVER("Default HW context loaded\n");
	return 0;

err_unpin:
	i915_gem_object_unpin(ctx->obj);
err_destroy:
	do_destroy(ctx);
	return ret;
}
Example #20
0
static void sun8i_mixer_commit(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Committing changes\n");

	regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
		     SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
}
Example #21
0
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (!ctx) {
		DRM_UNLOCK(dev);
		return -ENOENT;
	}

	do_destroy(ctx);

	DRM_UNLOCK(dev);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}
Example #22
0
static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
{
	bool enabled;
	u32 val;
	u32 mask;

	mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
	val = I915_READ(BXT_DSI_PLL_ENABLE);
	enabled = (val & mask) == mask;

	if (!enabled)
		return false;

	/*
	 * Both dividers must be programmed with valid values even if only one
	 * of the PLL is used, see BSpec/Broxton Clocks. Check this here for
	 * paranoia, since BIOS is known to misconfigure PLLs in this way at
	 * times, and since accessing DSI registers with invalid dividers
	 * causes a system hang.
	 */
	val = I915_READ(BXT_DSI_PLL_CTL);
	if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
		DRM_DEBUG_DRIVER("PLL is enabled with invalid divider settings (%08x)\n",
				 val);
		enabled = false;
	}

	return enabled;
}
Example #23
0
static void ade_clip_set(void __iomem *base, u32 ch, u32 fb_w, u32 x,
			 u32 in_w, u32 in_h)
{
	u32 disable_val;
	u32 clip_left;
	u32 clip_right;

	/*
	 * clip width, no need to clip height
	 */
	if (fb_w == in_w) { /* bypass */
		disable_val = 1;
		clip_left = 0;
		clip_right = 0;
	} else {
		disable_val = 0;
		clip_left = x;
		clip_right = fb_w - (x + in_w) - 1;
	}

	DRM_DEBUG_DRIVER("clip%d: clip_left=%d, clip_right=%d\n",
			 ch + 1, clip_left, clip_right);

	writel(disable_val, base + ADE_CLIP_DISABLE(ch));
	writel((fb_w - 1) << 16 | (in_h - 1), base + ADE_CLIP_SIZE0(ch));
	writel(clip_left << 16 | clip_right, base + ADE_CLIP_SIZE1(ch));
	ade_update_reload_bit(base, CLIP_OFST + ch, 0);
}
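A quick worked example with made-up numbers: for fb_w = 1920, x = 100 and in_w = 1280, clipping stays enabled with clip_left = 100 and clip_right = 1920 - (100 + 1280) - 1 = 539, so everything outside the 1280-pixel source window is cropped.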
Example #24
0
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (!ctx) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->id);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}
Example #25
/*
 * only 2 interrupts may occur: screen plug/unplug and EDID read
 */
static irqreturn_t tda998x_irq_thread(int irq, void *data)
{
	struct tda998x_priv *priv = data;
	u8 sta, cec, lvl, flag0, flag1, flag2;
	bool handled = false;

	sta = cec_read(priv, REG_CEC_INTSTATUS);
	cec = cec_read(priv, REG_CEC_RXSHPDINT);
	lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
	flag0 = reg_read(priv, REG_INT_FLAGS_0);
	flag1 = reg_read(priv, REG_INT_FLAGS_1);
	flag2 = reg_read(priv, REG_INT_FLAGS_2);
	DRM_DEBUG_DRIVER(
		"tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
		sta, cec, lvl, flag0, flag1, flag2);

	if (cec & CEC_RXSHPDINT_HPD) {
		if (lvl & CEC_RXSHPDLEV_HPD)
			tda998x_edid_delay_start(priv);
		else
			schedule_work(&priv->detect_work);

		handled = true;
	}

	if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
		priv->wq_edid_wait = 0;
		wake_up(&priv->wq_edid);
		handled = true;
	}

	return IRQ_RETVAL(handled);
}
Example #26
0
static void psb_intel_opregion_asle_work(struct work_struct *work)
{
	struct psb_intel_opregion *opregion =
		container_of(work, struct psb_intel_opregion, asle_work);
	struct drm_psb_private *dev_priv =
		container_of(opregion, struct drm_psb_private, opregion);
	struct opregion_asle *asle = opregion->asle;
	u32 asle_stat = 0;
	u32 asle_req;

	if (!asle)
		return;

	asle_req = asle->aslc & ASLE_REQ_MSK;
	if (!asle_req) {
		DRM_DEBUG_DRIVER("non asle set request??\n");
		return;
	}

	if (asle_req & ASLE_SET_BACKLIGHT)
		asle_stat |= asle_set_backlight(dev_priv->dev, asle->bclp);

	asle->aslc = asle_stat;
}
Example #27
int psb_get_brightness(struct backlight_device *bd)
{
	DRM_DEBUG_DRIVER("brightness = 0x%x \n", psb_brightness);

	/* return locally cached var instead of HW read (due to DPST etc.) */
	return psb_brightness;
}
Example #28
0
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		/* Reject all old ums/dri params. */
		return -ENODEV;

	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}
Example #29
void i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ctx_size;

	if (!HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_contexts_disabled = true;
		return;
	}

	/* If called from reset, or thaw... we've been here already */
	if (dev_priv->hw_contexts_disabled ||
	    dev_priv->ring[RCS].default_context)
		return;

	ctx_size = get_context_size(dev);
	dev_priv->hw_context_size = get_context_size(dev);
	dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);

	if (ctx_size <= 0 || ctx_size > (1<<20)) {
		dev_priv->hw_contexts_disabled = true;
		return;
	}

	if (create_default_context(dev_priv)) {
		dev_priv->hw_contexts_disabled = true;
		return;
	}

	DRM_DEBUG_DRIVER("HW context support initialized\n");
}
Example #30
0
void bochs_hw_setmode(struct bochs_device *bochs,
		      struct drm_display_mode *mode)
{
	bochs->xres = mode->hdisplay;
	bochs->yres = mode->vdisplay;
	bochs->bpp = 32;
	bochs->stride = mode->hdisplay * (bochs->bpp / 8);
	bochs->yres_virtual = bochs->fb_size / bochs->stride;

	DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n",
			 bochs->xres, bochs->yres, bochs->bpp,
			 bochs->yres_virtual);

	bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */

	bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP,         bochs->bpp);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES,        bochs->xres);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES,        bochs->yres);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK,        0);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH,  bochs->xres);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT,
			  bochs->yres_virtual);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET,    0);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET,    0);

	bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
			  VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
}
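For a feel for the numbers (the mode and framebuffer size are illustrative, only the 32 bpp is fixed by the driver): a 1024x768 mode gives stride = 1024 * 4 = 4096 bytes, and with a 16 MiB framebuffer yres_virtual = 16777216 / 4096 = 4096 lines of virtual height.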