Example #1
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!i915_try_reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	ret = -ENODEV;
	if (get_seconds() - dev_priv->last_gpu_reset < 5)
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	else
		ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = get_seconds();
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		/*
		 * It would make sense to re-init all the other hw state, at
		 * least the rps/rc6/emon init done within modeset_init_hw. For
		 * some unknown reason, this blows up my ilk, so don't.
		 */

		mutex_unlock(&dev->struct_mutex);

		drm_irq_uninstall(dev);
		drm_irq_install(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}
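
For orientation, a minimal caller sketch follows; it is not part of the driver code above. The i915_handle_hang name and the use of i915_capture_error_state() are assumptions for illustration; only the i915_reset() call and its zero-on-success contract come from the function above.

/* Hypothetical hang handler (illustrative sketch only). */
static void i915_handle_hang(struct drm_device *dev)
{
	/* Assumed helper: snapshot GPU state for debugging first. */
	i915_capture_error_state(dev);

	/* i915_reset() returns 0 on success or a negative errno. */
	if (i915_reset(dev))
		DRM_ERROR("GPU reset failed, leaving the chip wedged\n");
}
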
Example #2
/*
 * Init DSI DPI encoder.
 * Allocate an mdfld_dsi_encoder and attach it to the given @dsi_connector.
 * Returns a pointer to the newly allocated DPI encoder, or NULL on error.
 */
struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
				struct mdfld_dsi_connector *dsi_connector,
				const struct panel_funcs *p_funcs)
{
	struct mdfld_dsi_dpi_output *dpi_output = NULL;
	struct mdfld_dsi_config *dsi_config;
	struct drm_connector *connector = NULL;
	struct drm_encoder *encoder = NULL;
	int pipe;
	u32 data;
	int ret;

	pipe = dsi_connector->pipe;

	if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
		dsi_config = mdfld_dsi_get_config(dsi_connector);

		/* panel hard-reset */
		if (p_funcs->reset) {
			ret = p_funcs->reset(pipe);
			if (ret) {
				DRM_ERROR("Panel %d hard-reset failed\n", pipe);
				return NULL;
			}
		}

		/* panel drvIC init */
		if (p_funcs->drv_ic_init)
			p_funcs->drv_ic_init(dsi_config, pipe);

		/* panel power mode detect */
		ret = mdfld_dsi_get_power_mode(dsi_config, &data, false);
		if (ret) {
			DRM_ERROR("Panel %d get power mode failed\n", pipe);
			dsi_connector->status = connector_status_disconnected;
		} else {
			DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
			dsi_connector->status = connector_status_connected;
		}
	}

	dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
	if (!dpi_output) {
		DRM_ERROR("No memory\n");
		return NULL;
	}

	dpi_output->panel_on = 0;
	dpi_output->dev = dev;
	if (mdfld_get_panel_type(dev, pipe) != TC35876X)
		dpi_output->p_funcs = p_funcs;
	dpi_output->first_boot = 1;

	/* get fixed mode */
	dsi_config = mdfld_dsi_get_config(dsi_connector);

	/* create drm encoder object */
	connector = &dsi_connector->base.base;
	encoder = &dpi_output->base.base.base;
	drm_encoder_init(dev,
			encoder,
			p_funcs->encoder_funcs,
			DRM_MODE_ENCODER_LVDS, NULL);
	drm_encoder_helper_add(encoder,
				p_funcs->encoder_helper_funcs);

	/* attach to given connector */
	drm_connector_attach_encoder(connector, encoder);

	/* set possible crtcs and clones */
	if (dsi_connector->pipe) {
		encoder->possible_crtcs = (1 << 2);
		encoder->possible_clones = (1 << 1);
	} else {
		encoder->possible_crtcs = (1 << 0);
		encoder->possible_clones = (1 << 0);
	}

	dsi_connector->base.encoder = &dpi_output->base.base;

	return &dpi_output->base;
}
void mdfld_panel_generic_dsi_dbi_update_fb(
	struct mdfld_dsi_dbi_output *dbi_output,
	int pipe)
{
	struct mdfld_dsi_pkg_sender *sender =
		mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
	struct drm_device *dev = dbi_output->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dbi_output->base.base.crtc;
	struct psb_intel_crtc *psb_crtc =
		(crtc) ? to_psb_intel_crtc(crtc) : NULL;
	u32 dpll_reg = MRST_DPLL_A;
	u32 dspcntr_reg = DSPACNTR;
	u32 pipeconf_reg = PIPEACONF;
	u32 dsplinoff_reg = DSPALINOFF;
	u32 dspsurf_reg = DSPASURF;

	if (!dev_priv->dsi_init_done)
		return;

	/* if mode setting on-going, back off */
	if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
			(psb_crtc &&
			 (psb_crtc->mode_flags & MODE_SETTING_ON_GOING)) ||
			!(dbi_output->mode_flags & MODE_SETTING_ENCODER_DONE))
		return;

	if (pipe == 2) {
		dspcntr_reg = DSPCCNTR;
		pipeconf_reg = PIPECCONF;
		dsplinoff_reg = DSPCLINOFF;
		dspsurf_reg = DSPCSURF;
	}

	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
				OSPM_UHB_FORCE_POWER_ON)) {
		DRM_ERROR("hw begin failed\n");
		return;
	}

	/* check DBI FIFO status */
	if (!(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
	   !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
	   !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE)) {
		goto update_fb_out0;
	}

	/* refresh plane changes */
	REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
	REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
	REG_READ(dspsurf_reg);

	mdfld_dsi_send_dcs(sender,
			   write_mem_start,
			   NULL,
			   0,
			   CMD_DATA_SRC_PIPE,
			   MDFLD_DSI_SEND_PACKAGE);

	dbi_output->dsr_fb_update_done = true;


update_fb_out0:
	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
}
Example #4
static int armada_fb_create(struct drm_fb_helper *fbh,
	struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fbh->dev;
	struct drm_mode_fb_cmd2 mode;
	struct armada_framebuffer *dfb;
	struct armada_gem_object *obj;
	struct fb_info *info;
	int size, ret;
	void *ptr;

	memset(&mode, 0, sizeof(mode));
	mode.width = sizes->surface_width;
	mode.height = sizes->surface_height;
	mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
	mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
					sizes->surface_depth);

	size = mode.pitches[0] * mode.height;
	obj = armada_gem_alloc_private_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate fb memory\n");
		return -ENOMEM;
	}

	ret = armada_gem_linear_back(dev, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(&obj->obj);
		return ret;
	}

	ptr = armada_gem_map_object(dev, obj);
	if (!ptr) {
		drm_gem_object_unreference_unlocked(&obj->obj);
		return -ENOMEM;
	}

	dfb = armada_framebuffer_create(dev, &mode, obj);

	/*
	 * A reference is now held by the framebuffer object if
	 * successful, otherwise this drops the ref for the error path.
	 */
	drm_gem_object_unreference_unlocked(&obj->obj);

	if (IS_ERR(dfb))
		return PTR_ERR(dfb);

	info = drm_fb_helper_alloc_fbi(fbh);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_fballoc;
	}

	strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
	info->par = fbh;
	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &armada_fb_ops;
	info->fix.smem_start = obj->phys_addr;
	info->fix.smem_len = obj->obj.size;
	info->screen_size = obj->obj.size;
	info->screen_base = ptr;
	fbh->fb = &dfb->fb;

	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);

	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
		dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
		(unsigned long long)obj->phys_addr);

	return 0;

 err_fballoc:
	dfb->fb.funcs->destroy(&dfb->fb);
	return ret;
}
Example #5
static int render_state_setup(struct render_state *so)
{
	struct drm_device *dev = so->vma->vm->dev;
	const struct intel_renderstate_rodata *rodata = so->rodata;
	const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
	unsigned int i = 0, reloc_index = 0;
	struct page *page;
	u32 *d;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
	if (ret)
		return ret;

	page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
	d = kmap(page);

	while (i < rodata->batch_items) {
		u32 s = rodata->batch[i];

		if (i * 4  == rodata->reloc[reloc_index]) {
			u64 r = s + so->vma->node.start;
			s = lower_32_bits(r);
			if (has_64bit_reloc) {
				if (i + 1 >= rodata->batch_items ||
				    rodata->batch[i + 1] != 0) {
					ret = -EINVAL;
					goto err_out;
				}

				d[i++] = s;
				s = upper_32_bits(r);
			}

			reloc_index++;
		}

		d[i++] = s;
	}

	while (i % CACHELINE_DWORDS)
		OUT_BATCH(d, i, MI_NOOP);

	so->aux_batch_offset = i * sizeof(u32);

	if (HAS_POOLED_EU(dev)) {
		/*
		 * We always program 3x6 pool config but depending upon which
		 * subslice is disabled HW drops down to appropriate config
		 * shown below.
		 *
		 * In the below table 2x6 config always refers to
		 * fused-down version, native 2x6 is not available and can
		 * be ignored
		 *
		 * SNo  subslices config                eu pool configuration
		 * -----------------------------------------------------------
		 * 1    3 subslices enabled (3x6)  -    0x00777000  (9+9)
		 * 2    ss0 disabled (2x6)         -    0x00777000  (3+9)
		 * 3    ss1 disabled (2x6)         -    0x00770000  (6+6)
		 * 4    ss2 disabled (2x6)         -    0x00007000  (9+3)
		 */
		u32 eu_pool_config = 0x00777000;

		OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
		OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
		OUT_BATCH(d, i, eu_pool_config);
		OUT_BATCH(d, i, 0);
		OUT_BATCH(d, i, 0);
		OUT_BATCH(d, i, 0);
	}

	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
	so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;

	/*
	 * Since we are sending length, we need to strictly conform to
	 * all requirements. For Gen2 this must be a multiple of 8.
	 */
	so->aux_batch_size = ALIGN(so->aux_batch_size, 8);

	kunmap(page);

	ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
	if (ret)
		return ret;

	if (rodata->reloc[reloc_index] != -1) {
		DRM_ERROR("only %d relocs resolved\n", reloc_index);
		return -EINVAL;
	}

	return 0;

err_out:
	kunmap(page);
	return ret;
}
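
render_state_setup() above emits each dword through an OUT_BATCH() helper. A minimal sketch of such a macro, assuming it only stores the value into the kmapped page and advances the index with a bounds check (the WARN_ON limit is an assumption for this sketch), could look like this:

/* Illustrative sketch of an OUT_BATCH()-style helper: write one dword
 * into the mapped batch page and advance the running index. */
#define OUT_BATCH(batch, i, val)				\
	do {							\
		if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32)))	\
			break;					\
		(batch)[(i)++] = (val);				\
	} while (0)
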
static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
{
	bool clock_recovery;
	u8 voltage;
	int i;

	radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
	memset(dp_info->train_set, 0, 4);
	radeon_dp_update_vs_emph(dp_info);

	udelay(400);

	/* clock recovery loop */
	clock_recovery = false;
	dp_info->tries = 0;
	voltage = 0xff;
	while (1) {
		drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);

		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
			DRM_ERROR("displayport link status failed\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
			clock_recovery = true;
			break;
		}

		for (i = 0; i < dp_info->dp_lane_count; i++) {
			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		}
		if (i == dp_info->dp_lane_count) {
			DRM_ERROR("clock recovery reached max voltage\n");
			break;
		}

		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++dp_info->tries;
			if (dp_info->tries == 5) {
				DRM_ERROR("clock recovery tried 5 times\n");
				break;
			}
		} else
			dp_info->tries = 0;

		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new train_set as requested by sink */
		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);

		radeon_dp_update_vs_emph(dp_info);
	}
	if (!clock_recovery) {
		DRM_ERROR("clock recovery failed\n");
		return -1;
	} else {
		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
		return 0;
	}
}
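
Clock recovery is only the first phase of DisplayPort link training; a channel-equalization pass normally follows. A hedged caller sketch is shown below; the radeon_dp_link_train_ce() name is an assumption here, only radeon_dp_link_train_cr() comes from the code above.

/* Illustrative sketch: run clock recovery, then channel equalization. */
static int radeon_dp_link_train(struct radeon_dp_link_train_info *dp_info)
{
	int ret;

	ret = radeon_dp_link_train_cr(dp_info);
	if (ret)
		return ret;

	/* Assumed counterpart that trains with TRAINING_PATTERN_2. */
	return radeon_dp_link_train_ce(dp_info);
}
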
static int init_ring_common(struct intel_ring_buffer *ring)
{
    drm_i915_private_t *dev_priv = ring->dev->dev_private;
    struct drm_i915_gem_object *obj = ring->obj;
    u32 head;

    /* Stop the ring if it's running. */
    I915_WRITE_CTL(ring, 0);
    I915_WRITE_HEAD(ring, 0);
    ring->write_tail(ring, 0);

    /* Initialize the ring. */
    I915_WRITE_START(ring, obj->gtt_offset);
    head = I915_READ_HEAD(ring) & HEAD_ADDR;

    /* G45 ring initialization fails to reset head to zero */
    if (head != 0) {
        DRM_DEBUG_KMS("%s head not reset to zero "
                      "ctl %08x head %08x tail %08x start %08x\n",
                      ring->name,
                      I915_READ_CTL(ring),
                      I915_READ_HEAD(ring),
                      I915_READ_TAIL(ring),
                      I915_READ_START(ring));

        I915_WRITE_HEAD(ring, 0);

        if (I915_READ_HEAD(ring) & HEAD_ADDR) {
            DRM_ERROR("failed to set %s head to zero "
                      "ctl %08x head %08x tail %08x start %08x\n",
                      ring->name,
                      I915_READ_CTL(ring),
                      I915_READ_HEAD(ring),
                      I915_READ_TAIL(ring),
                      I915_READ_START(ring));
        }
    }

    I915_WRITE_CTL(ring,
                   ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                   | RING_REPORT_64K | RING_VALID);

    /* If the head is still not zero, the ring is dead */
    if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
            I915_READ_START(ring) != obj->gtt_offset ||
            (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
        DRM_ERROR("%s initialization failed "
                  "ctl %08x head %08x tail %08x start %08x\n",
                  ring->name,
                  I915_READ_CTL(ring),
                  I915_READ_HEAD(ring),
                  I915_READ_TAIL(ring),
                  I915_READ_START(ring));
        return -EIO;
    }

    if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
        i915_kernel_lost_context(ring->dev);
    else {
        ring->head = I915_READ_HEAD(ring);
        ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
        ring->space = ring_space(ring);
    }

    return 0;
}
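
init_ring_common() above ends by recomputing the free space with a ring_space() helper. A minimal sketch of that helper, assuming only the head, tail and size fields used above (the 8-byte guard is an assumption for this sketch), is:

/* Illustrative sketch: free bytes between tail and head, wrapping at
 * the ring size. */
static int ring_space(struct intel_ring_buffer *ring)
{
    int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);

    if (space < 0)
        space += ring->size;
    return space;
}
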
Example #8
int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
    struct drm_update_draw *update = data;
    unsigned long irqflags;
    struct drm_clip_rect *rects;
    struct drm_drawable_info *info;
    int err;

    info = idr_find(&dev->drw_idr, update->handle);
    if (!info) {
        info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
        if (!info)
            return -ENOMEM;
        if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
            DRM_ERROR("No such drawable %d\n", update->handle);
            drm_free(info, sizeof(*info), DRM_MEM_BUFS);
            return -EINVAL;
        }
    }

    switch (update->type) {
    case DRM_DRAWABLE_CLIPRECTS:
        if (update->num != info->num_rects) {
            rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
                     DRM_MEM_BUFS);
        } else
            rects = info->rects;

        if (update->num && !rects) {
            DRM_ERROR("Failed to allocate cliprect memory\n");
            err = -ENOMEM;
            goto error;
        }

        if (update->num && DRM_COPY_FROM_USER(rects,
                             (struct drm_clip_rect __user *)
                             (unsigned long)update->data,
                             update->num *
                             sizeof(*rects))) {
            DRM_ERROR("Failed to copy cliprects from userspace\n");
            err = -EFAULT;
            goto error;
        }

        spin_lock_irqsave(&dev->drw_lock, irqflags);

        if (rects != info->rects) {
            drm_free(info->rects, info->num_rects *
                 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
        }

        info->rects = rects;
        info->num_rects = update->num;

        spin_unlock_irqrestore(&dev->drw_lock, irqflags);

        DRM_DEBUG("Updated %d cliprects for drawable %d\n",
              info->num_rects, update->handle);
        break;
    default:
        DRM_ERROR("Invalid update type %d\n", update->type);
        return -EINVAL;
    }

    return 0;

error:
    if (rects != info->rects)
        drm_free(rects, update->num * sizeof(struct drm_clip_rect),
             DRM_MEM_BUFS);

    return err;
}
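
For completeness, the consumer side of the drawable IDR is tiny; a hedged sketch of a lookup helper (the name is an assumption here) that returns the info installed above is:

/* Illustrative sketch: look a drawable up by handle; callers are
 * expected to hold drw_lock while they use the cliprects. */
struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
                                                drm_drawable_t id)
{
    return idr_find(&dev->drw_idr, id);
}
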
Example #9
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
Example #10
/* dm_helpers_parse_edid_caps
 *
 * Parse EDID capabilities
 *
 * @edid:	[in] pointer to the EDID
 * @edid_caps:	[out] pointer to the parsed EDID caps
 * @return
 *	the resulting dc_edid_status
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_context *ctx,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct edid *edid_buf = (struct edid *) edid->raw_edid;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	int j = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
					((uint16_t) edid_buf->mfg_id[1])<<8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
					((uint16_t) edid_buf->prod_code[1])<<8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	/* One of the four detailed_timings stores the monitor name. It's
	 * stored in an array of length 13. */
	for (i = 0; i < 4; i++) {
		if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
			while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
				if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
					break;

				edid_caps->display_name[j] =
					edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
				j++;
			}
		}
	}

	edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
			(struct edid *) edid->raw_edid);

	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0) {
		DRM_INFO("SADs count is: %d, don't need to read it\n",
				sad_count);
		return result;
	}

	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	return result;
}
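
A hedged usage sketch follows; the dc_sink fields and the helper name are assumptions here, only the dm_helpers_parse_edid_caps() signature and the EDID_OK status come from the code above.

/* Illustrative caller sketch: parse a raw EDID blob into sink caps. */
static bool fill_sink_edid_caps(struct dc_context *ctx, struct dc_sink *sink)
{
	enum dc_edid_status status;

	status = dm_helpers_parse_edid_caps(ctx, &sink->dc_edid,
					    &sink->edid_caps);
	if (status != EDID_OK) {
		DRM_ERROR("EDID parsing failed: %d\n", status);
		return false;
	}
	return true;
}
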
Example #11
/**
 * i965_reset - reset chip after a hang
 * @dev: drm device to reset
 * @flags: reset domains
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;
	int ret;

	if (!i915_try_reset)
		return 0;

	if (!mutex_trylock(&dev->struct_mutex))
		return -EBUSY;

	i915_gem_reset(dev);

	ret = -ENODEV;
	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		ret = gen6_do_reset(dev, flags);
		break;
	case 5:
		ret = ironlake_do_reset(dev, flags);
		break;
	case 4:
		ret = i965_do_reset(dev, flags);
		break;
	case 2:
		ret = i8xx_do_reset(dev, flags);
		break;
	}
	dev_priv->last_gpu_reset = get_seconds();
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		dev_priv->mm.suspended = 0;

		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
		if (HAS_BSD(dev))
		    dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
		if (HAS_BLT(dev))
		    dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);

		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_mode_config_reset(dev);
		drm_irq_install(dev);
		mutex_lock(&dev->struct_mutex);
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
	 * need to retrain the display link and cannot just restore the register
	 * values.
	 */
	if (need_display) {
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);
	}

	return 0;
}
Example #12
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
			      const struct firmware *fw)
{
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header *dmc_header;
	struct intel_csr *csr = &dev_priv->csr;
	const struct stepping_info *si = intel_get_stepping_info(dev_priv);
	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
	uint32_t i;
	uint32_t *dmc_payload;
	uint32_t required_version;

	if (!fw)
		return NULL;

	/* Extract CSS Header information */
	css_header = (struct intel_css_header *)fw->data;
	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
			  (css_header->header_len * 4));
		return NULL;
	}

	csr->version = css_header->version;

	if (IS_GEMINILAKE(dev_priv)) {
		required_version = GLK_CSR_VERSION_REQUIRED;
	} else if (IS_KABYLAKE(dev_priv)) {
		required_version = KBL_CSR_VERSION_REQUIRED;
	} else if (IS_SKYLAKE(dev_priv)) {
		required_version = SKL_CSR_VERSION_REQUIRED;
	} else if (IS_BROXTON(dev_priv)) {
		required_version = BXT_CSR_VERSION_REQUIRED;
	} else {
		MISSING_CASE(INTEL_REVID(dev_priv));
		required_version = 0;
	}

	if (csr->version != required_version) {
		DRM_INFO("Refusing to load DMC firmware v%u.%u,"
			 " please use v%u.%u [" FIRMWARE_URL "].\n",
			 CSR_VERSION_MAJOR(csr->version),
			 CSR_VERSION_MINOR(csr->version),
			 CSR_VERSION_MAJOR(required_version),
			 CSR_VERSION_MINOR(required_version));
		return NULL;
	}

	readcount += sizeof(struct intel_css_header);

	/* Extract Package Header information */
	package_header = (struct intel_package_header *)
		&fw->data[readcount];
	if (sizeof(struct intel_package_header) !=
	    (package_header->header_len * 4)) {
		DRM_ERROR("Firmware has wrong package header length %u bytes\n",
			  (package_header->header_len * 4));
		return NULL;
	}
	readcount += sizeof(struct intel_package_header);

	/* Search for dmc_offset to find the firmware binary. */
	for (i = 0; i < package_header->num_entries; i++) {
		if (package_header->fw_info[i].substepping == '*' &&
		    si->stepping == package_header->fw_info[i].stepping) {
			dmc_offset = package_header->fw_info[i].offset;
			break;
		} else if (si->stepping == package_header->fw_info[i].stepping &&
			   si->substepping == package_header->fw_info[i].substepping) {
			dmc_offset = package_header->fw_info[i].offset;
			break;
		} else if (package_header->fw_info[i].stepping == '*' &&
			   package_header->fw_info[i].substepping == '*')
			dmc_offset = package_header->fw_info[i].offset;
	}
	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
		DRM_ERROR("Firmware not supported for %c stepping\n",
			  si->stepping);
		return NULL;
	}
	readcount += dmc_offset;

	/* Extract dmc_header information. */
	dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
	if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
		DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
			  (dmc_header->header_len));
		return NULL;
	}
	readcount += sizeof(struct intel_dmc_header);

	/* Cache the dmc header info. */
	if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
		DRM_ERROR("Firmware has wrong mmio count %u\n",
			  dmc_header->mmio_count);
		return NULL;
	}
	csr->mmio_count = dmc_header->mmio_count;
	for (i = 0; i < dmc_header->mmio_count; i++) {
		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
		    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
			DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
				  dmc_header->mmioaddr[i]);
			return NULL;
		}
		csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
		csr->mmiodata[i] = dmc_header->mmiodata[i];
	}

	/* fw_size is in dwords, so multiply by 4 to convert to bytes. */
	nbytes = dmc_header->fw_size * 4;
	if (nbytes > CSR_MAX_FW_SIZE) {
		DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
		return NULL;
	}
	csr->dmc_fw_size = dmc_header->fw_size;

	dmc_payload = kmalloc(nbytes, GFP_KERNEL);
	if (!dmc_payload) {
		DRM_ERROR("Memory allocation failed for dmc payload\n");
		return NULL;
	}

	return memcpy(dmc_payload, &fw->data[readcount], nbytes);
}
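
A hedged sketch of the surrounding load path is shown below; the work-function name and the csr.fw_path / dmc_payload fields are assumptions here, only parse_csr_fw() itself comes from the code above.

/* Illustrative sketch: fetch the firmware blob, parse it, and stash the
 * payload for later programming into the DMC. */
static void csr_load_work_fn(struct drm_i915_private *dev_priv)
{
	const struct firmware *fw = NULL;

	if (request_firmware(&fw, dev_priv->csr.fw_path,
			     &dev_priv->drm.pdev->dev))
		return;

	dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
	if (!dev_priv->csr.dmc_payload)
		DRM_ERROR("Failed to parse DMC firmware\n");

	release_firmware(fw);
}
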
Example #13
static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
    DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
              reg, v);
    BUG_ON(1);
}
Example #14
/*
 * Registers accessors functions.
 */
static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
    DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
    BUG_ON(1);
    return 0;
}
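
These two stubs are typically installed as the default register accessors before an ASIC-specific table overrides them. A hedged sketch follows; the function name and the exact set of rdev callback fields are assumptions here.

/* Illustrative sketch: point every indirect-register accessor at the
 * invalid stubs until the ASIC setup fills in real callbacks. */
static void radeon_register_accessor_init(struct radeon_device *rdev)
{
    rdev->mc_rreg = &radeon_invalid_rreg;
    rdev->mc_wreg = &radeon_invalid_wreg;
    rdev->pll_rreg = &radeon_invalid_rreg;
    rdev->pll_wreg = &radeon_invalid_wreg;
    rdev->pciep_rreg = &radeon_invalid_rreg;
    rdev->pciep_wreg = &radeon_invalid_wreg;
}
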
/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_INFO(dev)->gen >= 5) {
		/* On Ironlake, the GPU always uses the same swizzling setup
		 * regardless of DRAM configuration.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev)) {
		uint32_t dcc;

		/* On mobile 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC.  For single-channel, neither the CPU
		 * nor the GPU do swizzling.  For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled.  The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}
		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR.  "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration.  It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B    1-ch   2-ch
		 * 512  0    0    0     512    0     O
		 * 512  0    512  0     16     1008  X
		 * 512  0    0    512   16     1008  X
		 * 0    512  0    512   16     1008  X
		 * 1024 1024 1024 0     2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 */
		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
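
To illustrate what the detected mode means in practice, here is a hedged sketch of how bit 6 of a linear offset would be swizzled for the 9, 9/10 and 9/10/17 modes; the helper name is an assumption, and the real driver applies this per page when copying tiled data.

/* Illustrative sketch only: XOR bit 6 of an offset with the address
 * bits named by the detected swizzle mode. */
static unsigned long swizzle_addr_bit_6(unsigned long addr, uint32_t swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		return addr ^ ((addr >> 3) & 64);		/* bit 9 */
	case I915_BIT_6_SWIZZLE_9_10:
		return addr ^ ((addr >> 3) & 64) ^
			      ((addr >> 4) & 64);		/* bits 9, 10 */
	case I915_BIT_6_SWIZZLE_9_10_17:
		return addr ^ ((addr >> 3) & 64) ^
			      ((addr >> 4) & 64) ^
			      ((addr >> 11) & 64);		/* plus bit 17 */
	default:
		return addr;
	}
}
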
Example #16
void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
{
    struct radeon_fence *fence1 = NULL, *fence2 = NULL;
    struct radeon_semaphore *semaphore = NULL;
    int r;

    r = radeon_semaphore_create(rdev, &semaphore);
    if (r) {
        DRM_ERROR("Failed to create semaphore\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringA, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
        goto out_cleanup;
    }
    radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
    r = radeon_fence_emit(rdev, &fence1, ringA->idx);
    if (r) {
        DRM_ERROR("Failed to emit fence 1\n");
        radeon_ring_unlock_undo(rdev, ringA);
        goto out_cleanup;
    }
    radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
    r = radeon_fence_emit(rdev, &fence2, ringA->idx);
    if (r) {
        DRM_ERROR("Failed to emit fence 2\n");
        radeon_ring_unlock_undo(rdev, ringA);
        goto out_cleanup;
    }
    radeon_ring_unlock_commit(rdev, ringA);

    DRM_MDELAY(1000);

    if (radeon_fence_signaled(fence1)) {
        DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringB, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring B %p\n", ringB);
        goto out_cleanup;
    }
    radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
    radeon_ring_unlock_commit(rdev, ringB);

    r = radeon_fence_wait(fence1, false);
    if (r) {
        DRM_ERROR("Failed to wait for sync fence 1\n");
        goto out_cleanup;
    }

    DRM_MDELAY(1000);

    if (radeon_fence_signaled(fence2)) {
        DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringB, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring B %p\n", ringB);
        goto out_cleanup;
    }
    radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
    radeon_ring_unlock_commit(rdev, ringB);

    r = radeon_fence_wait(fence2, false);
    if (r) {
        DRM_ERROR("Failed to wait for sync fence 1\n");
        goto out_cleanup;
    }

out_cleanup:
    radeon_semaphore_free(rdev, &semaphore, NULL);

    if (fence1)
        radeon_fence_unref(&fence1);

    if (fence2)
        radeon_fence_unref(&fence2);

    if (r)
        DRM_ERROR("Error while testing ring sync (%d).\n", r);
}
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
			 u8 write_byte, u8 *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
	u16 address = algo_data->address;
	u8 msg[5];
	u8 reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes = 1;
	int ret;
	u8 ack;

	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[2] = AUX_I2C_READ << 4;
	else
		msg[2] = AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[2] |= AUX_I2C_MOT << 4;

	msg[0] = address;
	msg[1] = address >> 8;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg_bytes = 5;
		msg[3] = msg_bytes << 4;
		msg[4] = write_byte;
		break;
	case MODE_I2C_READ:
		msg_bytes = 4;
		msg[3] = msg_bytes << 4;
		break;
	default:
		msg_bytes = 4;
		msg[3] = 3 << 4;
		break;
	}

	for (retry = 0; retry < 4; retry++) {
		ret = radeon_process_aux_ch(auxch,
					    msg, msg_bytes, reply, reply_bytes, 0, &ack);
		if (ret == -EBUSY)
			continue;
		else if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (ack & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_ch native defer\n");
			udelay(400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
			return -EREMOTEIO;
		}

		switch (ack & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ)
				*read_byte = reply[0];
			return ret;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(400);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
			return -EREMOTEIO;
		}
	}

	DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
	return -EREMOTEIO;
}
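
The routine above is meant to be plugged in as the aux_ch callback of the i2c-over-AUX algorithm. A hedged registration sketch follows; the field layout of radeon_i2c_chan is an assumption here.

/* Illustrative sketch: hook radeon_dp_i2c_aux_ch() up as the AUX
 * channel callback and register the adapter with the DP helper. */
static int radeon_dp_i2c_init(struct radeon_i2c_chan *chan)
{
	chan->algo.dp.running = false;
	chan->algo.dp.address = 0;
	chan->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
	chan->adapter.algo_data = &chan->algo.dp;

	return i2c_dp_aux_add_bus(&chan->adapter);
}
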
Example #18
static void radeon_test_ring_sync2(struct radeon_device *rdev,
                                   struct radeon_ring *ringA,
                                   struct radeon_ring *ringB,
                                   struct radeon_ring *ringC)
{
    struct radeon_fence *fenceA = NULL, *fenceB = NULL;
    struct radeon_semaphore *semaphore = NULL;
    bool sigA, sigB;
    int i, r;

    r = radeon_semaphore_create(rdev, &semaphore);
    if (r) {
        DRM_ERROR("Failed to create semaphore\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringA, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
        goto out_cleanup;
    }
    radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
    r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
    if (r) {
        DRM_ERROR("Failed to emit sync fence 1\n");
        radeon_ring_unlock_undo(rdev, ringA);
        goto out_cleanup;
    }
    radeon_ring_unlock_commit(rdev, ringA);

    r = radeon_ring_lock(rdev, ringB, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
        goto out_cleanup;
    }
    radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
    r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
    if (r) {
        DRM_ERROR("Failed to create sync fence 2\n");
        radeon_ring_unlock_undo(rdev, ringB);
        goto out_cleanup;
    }
    radeon_ring_unlock_commit(rdev, ringB);

    DRM_MDELAY(1000);

    if (radeon_fence_signaled(fenceA)) {
        DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
        goto out_cleanup;
    }
    if (radeon_fence_signaled(fenceB)) {
        DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringC, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring B %p\n", ringC);
        goto out_cleanup;
    }
    radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
    radeon_ring_unlock_commit(rdev, ringC);

    for (i = 0; i < 30; ++i) {
        DRM_MDELAY(100);
        sigA = radeon_fence_signaled(fenceA);
        sigB = radeon_fence_signaled(fenceB);
        if (sigA || sigB)
            break;
    }

    if (!sigA && !sigB) {
        DRM_ERROR("Neither fence A nor B has been signaled\n");
        goto out_cleanup;
    } else if (sigA && sigB) {
        DRM_ERROR("Both fence A and B has been signaled\n");
        goto out_cleanup;
    }

    DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

    r = radeon_ring_lock(rdev, ringC, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring B %p\n", ringC);
        goto out_cleanup;
    }
    radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
    radeon_ring_unlock_commit(rdev, ringC);

    DRM_MDELAY(1000);

    r = radeon_fence_wait(fenceA, false);
    if (r) {
        DRM_ERROR("Failed to wait for sync fence A\n");
        goto out_cleanup;
    }
    r = radeon_fence_wait(fenceB, false);
    if (r) {
        DRM_ERROR("Failed to wait for sync fence B\n");
        goto out_cleanup;
    }

out_cleanup:
    radeon_semaphore_free(rdev, &semaphore, NULL);

    if (fenceA)
        radeon_fence_unref(&fenceA);

    if (fenceB)
        radeon_fence_unref(&fenceB);

    if (r)
        DRM_ERROR("Error while testing ring sync (%d).\n", r);
}
Example #19
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
	const struct platform_device_id *id;
	const struct armada_variant *variant;
	struct armada_private *priv;
	struct resource *res[ARRAY_SIZE(priv->dcrtc)];
	struct resource *mem = NULL;
	int ret, n, i;

	memset(res, 0, sizeof(res));

	for (n = i = 0; ; n++) {
		struct resource *r = platform_get_resource(dev->platformdev,
							   IORESOURCE_MEM, n);
		if (!r)
			break;

		/* Resources above 64K are graphics memory */
		if (resource_size(r) > SZ_64K)
			mem = r;
		else if (i < ARRAY_SIZE(priv->dcrtc))
			res[i++] = r;
		else
			return -EINVAL;
	}

	if (!mem)
		return -ENXIO;

	if (!devm_request_mem_region(dev->dev, mem->start,
			resource_size(mem), "armada-drm"))
		return -EBUSY;

	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate private\n");
		return -ENOMEM;
	}

	platform_set_drvdata(dev->platformdev, dev);
	dev->dev_private = priv;

	/* Get the implementation specific driver data. */
	id = platform_get_device_id(dev->platformdev);
	if (!id)
		return -ENXIO;

	variant = (const struct armada_variant *)id->driver_data;

	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
	INIT_KFIFO(priv->fb_unref);

	/* Mode setting support */
	drm_mode_config_init(dev);
	dev->mode_config.min_width = 320;
	dev->mode_config.min_height = 200;

	/*
	 * With vscale enabled, the maximum width is 1920 due to the
	 * 1920 by 3 lines RAM
	 */
	dev->mode_config.max_width = 1920;
	dev->mode_config.max_height = 2048;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
	drm_mm_init(&priv->linear, mem->start, resource_size(mem));

	/* Create all LCD controllers */
	for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
		int irq;

		if (!res[n])
			break;

		irq = platform_get_irq(dev->platformdev, n);
		if (irq < 0)
			goto err_kms;

		ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
					     variant, NULL);
		if (ret)
			goto err_kms;
	}

	if (is_componentized(dev->dev)) {
		ret = component_bind_all(dev->dev, dev);
		if (ret)
			goto err_kms;
	} else {
#ifdef CONFIG_DRM_ARMADA_TDA1998X
		ret = armada_drm_connector_slave_create(dev, &tda19988_config);
		if (ret)
			goto err_kms;
#endif
	}

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_comp;

	dev->irq_enabled = true;
	dev->vblank_disable_allowed = 1;

	ret = armada_fbdev_init(dev);
	if (ret)
		goto err_comp;

	drm_kms_helper_poll_init(dev);

	return 0;

 err_comp:
	if (is_componentized(dev->dev))
		component_unbind_all(dev->dev, dev);
 err_kms:
	drm_mode_config_cleanup(dev);
	drm_mm_takedown(&priv->linear);
	flush_work(&priv->fb_unref_work);

	return ret;
}
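
A hedged teardown sketch mirroring the error path above is shown here; the armada_fbdev_fini() name is an assumption chosen to undo armada_fbdev_init().

/* Illustrative sketch: unwind armada_drm_load() in reverse order. */
static int armada_drm_unload(struct drm_device *dev)
{
	struct armada_private *priv = dev->dev_private;

	drm_kms_helper_poll_fini(dev);
	armada_fbdev_fini(dev);

	if (is_componentized(dev->dev))
		component_unbind_all(dev->dev, dev);

	drm_mode_config_cleanup(dev);
	drm_mm_takedown(&priv->linear);
	flush_work(&priv->fb_unref_work);
	dev->dev_private = NULL;

	return 0;
}
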
Example #20
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
    struct radeon_bo *vram_obj = NULL;
    struct radeon_bo **gtt_obj = NULL;
    struct radeon_fence *fence = NULL;
    uint64_t gtt_addr, vram_addr;
    unsigned i, n, size;
    int r, ring;

    switch (flag) {
    case RADEON_TEST_COPY_DMA:
        ring = radeon_copy_dma_ring_index(rdev);
        break;
    case RADEON_TEST_COPY_BLIT:
        ring = radeon_copy_blit_ring_index(rdev);
        break;
    default:
        DRM_ERROR("Unknown copy method\n");
        return;
    }

    size = 1024 * 1024;

    /* Number of tests =
     * (Total GTT - IB pool - writeback page - ring buffers) / test size
     */
    n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
    for (i = 0; i < RADEON_NUM_RINGS; ++i)
        n -= rdev->ring[i].ring_size;
    if (rdev->wb.wb_obj)
        n -= RADEON_GPU_PAGE_SIZE;
    if (rdev->ih.ring_obj)
        n -= rdev->ih.ring_size;
    n /= size;

    gtt_obj = malloc(n * sizeof(*gtt_obj), DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
    if (!gtt_obj) {
        DRM_ERROR("Failed to allocate %d pointers\n", n);
        r = 1;
        goto out_cleanup;
    }

    r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                         NULL, &vram_obj);
    if (r) {
        DRM_ERROR("Failed to create VRAM object\n");
        goto out_cleanup;
    }
    r = radeon_bo_reserve(vram_obj, false);
    if (unlikely(r != 0))
        goto out_cleanup;
    r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
    if (r) {
        DRM_ERROR("Failed to pin VRAM object\n");
        goto out_cleanup;
    }
    for (i = 0; i < n; i++) {
        void *gtt_map, *vram_map;
        void **gtt_start, **gtt_end;
        void **vram_start, **vram_end;

        r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
        if (r) {
            DRM_ERROR("Failed to create GTT object %d\n", i);
            goto out_cleanup;
        }

        r = radeon_bo_reserve(gtt_obj[i], false);
        if (unlikely(r != 0))
            goto out_cleanup;
        r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
        if (r) {
            DRM_ERROR("Failed to pin GTT object %d\n", i);
            goto out_cleanup;
        }

        r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
        if (r) {
            DRM_ERROR("Failed to map GTT object %d\n", i);
            goto out_cleanup;
        }

        for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size);
                gtt_start < gtt_end;
                gtt_start++)
            *gtt_start = gtt_start;

        radeon_bo_kunmap(gtt_obj[i]);

        if (ring == R600_RING_TYPE_DMA_INDEX)
            r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
        else
            r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
        if (r) {
            DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
            goto out_cleanup;
        }

        r = radeon_fence_wait(fence, false);
        if (r) {
            DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
            goto out_cleanup;
        }

        radeon_fence_unref(&fence);

        r = radeon_bo_kmap(vram_obj, &vram_map);
        if (r) {
            DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
            goto out_cleanup;
        }

        for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size),
                vram_start = vram_map, vram_end = (void *)((uintptr_t)vram_map + size);
                vram_start < vram_end;
                gtt_start++, vram_start++) {
            if (*vram_start != gtt_start) {
                DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
                          "expected 0x%p (GTT/VRAM offset "
                          "0x%16llx/0x%16llx)\n",
                          i, *vram_start, gtt_start,
                          (unsigned long long)
                          ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
                           (uintptr_t)gtt_start - (uintptr_t)gtt_map),
                          (unsigned long long)
                          ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
                           (uintptr_t)gtt_start - (uintptr_t)gtt_map));
                radeon_bo_kunmap(vram_obj);
                goto out_cleanup;
            }
            *vram_start = vram_start;
        }

        radeon_bo_kunmap(vram_obj);

        if (ring == R600_RING_TYPE_DMA_INDEX)
            r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
        else
            r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
        if (r) {
            DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
            goto out_cleanup;
        }

        r = radeon_fence_wait(fence, false);
        if (r) {
            DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
            goto out_cleanup;
        }

        radeon_fence_unref(&fence);

        r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
        if (r) {
            DRM_ERROR("Failed to map GTT object after copy %d\n", i);
            goto out_cleanup;
        }

        for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size),
                vram_start = vram_map, vram_end = (void *)((uintptr_t)vram_map + size);
                gtt_start < gtt_end;
                gtt_start++, vram_start++) {
            if (*gtt_start != vram_start) {
                DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
                          "expected 0x%p (VRAM/GTT offset "
                          "0x%16llx/0x%16llx)\n",
                          i, *gtt_start, vram_start,
                          (unsigned long long)
                          ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
                           (uintptr_t)vram_start - (uintptr_t)vram_map),
                          (unsigned long long)
                          ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
                           (uintptr_t)vram_start - (uintptr_t)vram_map));
                radeon_bo_kunmap(gtt_obj[i]);
                goto out_cleanup;
            }
        }

        radeon_bo_kunmap(gtt_obj[i]);

        DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%jx\n",
                 (uintmax_t)gtt_addr - rdev->mc.gtt_start);
    }

out_cleanup:
    if (vram_obj) {
        if (radeon_bo_is_reserved(vram_obj)) {
            radeon_bo_unpin(vram_obj);
            radeon_bo_unreserve(vram_obj);
        }
        radeon_bo_unref(&vram_obj);
    }
    if (gtt_obj) {
        for (i = 0; i < n; i++) {
            if (gtt_obj[i]) {
                if (radeon_bo_is_reserved(gtt_obj[i])) {
                    radeon_bo_unpin(gtt_obj[i]);
                    radeon_bo_unreserve(gtt_obj[i]);
                }
                radeon_bo_unref(&gtt_obj[i]);
            }
        }
        free(gtt_obj, DRM_MEM_DRIVER);
    }
    if (fence) {
        radeon_fence_unref(&fence);
    }
    if (r) {
        DRM_ERROR("Error while testing BO move.\n");
    }
}
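
A hedged wrapper sketch that would exercise both copy paths through the helper above; how the driver detects DMA/blit support (the asic->copy callbacks) is an assumption here, and an older single-method variant of radeon_test_moves appears later in this listing.

/* Illustrative sketch: run the move test once per available copy method. */
void radeon_test_moves(struct radeon_device *rdev)
{
    if (rdev->asic->copy.dma)
        radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
    if (rdev->asic->copy.blit)
        radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}
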
Example #21
static int enter_dsr_locked(struct mdfld_dsi_config *dsi_config, int level)
{
	struct mdfld_dsi_hw_registers *regs;
	struct mdfld_dsi_hw_context *ctx;
	struct drm_psb_private *dev_priv;
	struct drm_device *dev;
	struct mdfld_dsi_pkg_sender *sender;
	int err;
	pm_message_t state;

	PSB_DEBUG_ENTRY("mdfld_dsi_dsr: enter dsr\n");

	if (!dsi_config)
		return -EINVAL;

	regs = &dsi_config->regs;
	ctx = &dsi_config->dsi_hw_context;
	dev = dsi_config->dev;
	dev_priv = dev->dev_private;

	sender = mdfld_dsi_get_pkg_sender(dsi_config);
	if (!sender) {
		DRM_ERROR("Failed to get dsi sender\n");
		return -EINVAL;
	}

	if (level < DSR_EXITED) {
		DRM_ERROR("Why to do this?");
		return -EINVAL;
	}

	if (level > DSR_ENTERED_LEVEL0) {
		/**
		 * TODO: require OSPM interfaces to tell OSPM module that
		 * display controller is ready to be power gated.
		 * OSPM module needs to response this request ASAP.
		 * NOTE: it makes no sense to have display controller islands
		 * & pci power gated here directly. OSPM module is the only one
		 * who can power gate/ungate power islands.
		 * FIXME: since there's no ospm interfaces for acquiring
		 * suspending DSI related power islands, we have to call OSPM
		 * interfaces to power gate display islands and pci right now,
		 * which should NOT happen in this way!!!
		 */
		if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
			OSPM_UHB_FORCE_POWER_ON)) {
			DRM_ERROR("Failed power on display island\n");
			return -EINVAL;
		}

		PSB_DEBUG_ENTRY("mdfld_dsi_dsr: entering DSR level 1\n");

		err = mdfld_dsi_wait_for_fifos_empty(sender);
		if (err) {
			DRM_ERROR("mdfld_dsi_dsr: FIFO not empty\n");
			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
			return err;
		}
		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);

		/*
		 * Suspend the whole PCI host and related islands.
		 * If this attempt fails, revive TE for another chance.
		 */
		state.event = 0;
		if (ospm_power_suspend()) {
			/* Only display island is powered off then
			 ** need revive the whole TE
			 */
			if (!ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND))
				exit_dsr_locked(dsi_config);

			return -EINVAL;
		}
		/*
		 *suspend pci
		 *FIXME: should I do it here?
		 *how about decoder/encoder is working??
		 *OSPM should check the refcout of each islands before
		 *actually power off PCI!!!
		 *need invoke this in the same context, we need deal with
		 *DSR lock later for suspend PCI may go to sleep!!!
		 */
		/*ospm_suspend_pci(dev->pdev);*/

		PSB_DEBUG_ENTRY("mdfld_dsi_dsr: entered\n");
		return 0;
	}

	PSB_DEBUG_ENTRY("mdfld_dsi_dsr: entering DSR level 0\n");

	err = mdfld_dsi_wait_for_fifos_empty(sender);
	if (err) {
		DRM_ERROR("mdfld_dsi_dsr: FIFO not empty\n");
		return err;
	}

	/*
	 * To set the vblank_enabled to false with drm_vblank_off(), as
	 * vblank_disable_and_save() would be scheduled late (<= 5s), and it
	 * would cause drm_vblank_get() fail to turn on vsync interrupt
	 * immediately.
	 */
	drm_vblank_off(dev, dsi_config->pipe);

	DC_MRFLD_onPowerOff(dsi_config->pipe);

	/* turn off the DBI interface and put it into ULPS */
	__dbi_power_off(dsi_config);

	PSB_DEBUG_ENTRY("entered\n");
	return 0;
}
Example #22
static int
via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;
	
	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = 0;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) ||
	    (xfer->mem_stride > 2048)) {
		DRM_ERROR("Too large system memory stride.\n");
		return DRM_ERR(EINVAL);
	}

	if (xfer->num_lines > 2048) {
		DRM_ERROR("Too many PCI DMA bitblt lines.\n");
		return DRM_ERR(EINVAL);
	}		

	/* 
	 * we allow a negative fb stride to allow flipping of images in
	 * transfer. 
	 */

	if (xfer->mem_stride < xfer->line_length ||
		abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive, however; VIA has been contacted
	 * about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
	        return DRM_ERR(EINVAL);
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 15))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
	        return DRM_ERR(EINVAL);
	}	
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
	
	return 0;
}
Example #23
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
void radeon_test_moves(struct radeon_device *rdev)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	struct radeon_fence *fence = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned i, n, size;
	int r;

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffer) / test size
	 */
	n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
	     rdev->cp.ring_size) / size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
				&vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_cleanup;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;

		r = radeon_bo_create(rdev, NULL, size, true,
					 RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_cleanup;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_cleanup;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		r = radeon_fence_create(rdev, &fence);
		if (r) {
			DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_cleanup;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT map 0x%p-0x%p)\n",
					  i, *vram_start, gtt_start, gtt_map,
					  gtt_end);
				radeon_bo_kunmap(vram_obj);
				goto out_cleanup;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		r = radeon_fence_create(rdev, &fence);
		if (r) {
			DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_cleanup;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM map 0x%p-0x%p)\n",
					  i, *gtt_start, vram_start, vram_map,
					  vram_end);
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_cleanup;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_location);
	}

out_cleanup:
	if (vram_obj) {
		if (radeon_bo_is_reserved(vram_obj)) {
			radeon_bo_unpin(vram_obj);
			radeon_bo_unreserve(vram_obj);
		}
		radeon_bo_unref(&vram_obj);
	}
	if (gtt_obj) {
		for (i = 0; i < n; i++) {
			if (gtt_obj[i]) {
				if (radeon_bo_is_reserved(gtt_obj[i])) {
					radeon_bo_unpin(gtt_obj[i]);
					radeon_bo_unreserve(gtt_obj[i]);
				}
				radeon_bo_unref(&gtt_obj[i]);
			}
		}
		kfree(gtt_obj);
	}
	if (fence) {
		radeon_fence_unref(&fence);
	}
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}
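A back-of-the-envelope check of the test-count formula above, with purely illustrative numbers; the 512 MiB GTT, RADEON_IB_POOL_SIZE of 16 and 1 MiB ring are assumptions, not values from this example:

	/* all numbers below are assumed for illustration */
	unsigned gtt_size  = 512 * 1024 * 1024;   /* 512 MiB GTT */
	unsigned ib_pool   = 16 * 64 * 1024;      /* RADEON_IB_POOL_SIZE (assumed 16) * 64 KiB */
	unsigned writeback = 4096;                /* one RADEON_GPU_PAGE_SIZE page */
	unsigned ring      = 1024 * 1024;         /* 1 MiB ring buffer (assumed) */
	unsigned size      = 1024 * 1024;
	unsigned n = (gtt_size - ib_pool - writeback - ring) / size;   /* = 509 tests */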
Example #24
/**
 * i965_reset - reset chip after a hang
 * @dev: drm device to reset
 * @flags: reset domains
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i965_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long timeout;
	u8 gdrst;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;

	mutex_lock(&dev->struct_mutex);

	/*
	 * Clear request list
	 */
	i915_gem_retire_requests(dev, &dev_priv->render_ring);

	if (need_display)
		i915_save_display(dev);

	if (IS_I965G(dev) || IS_G4X(dev)) {
		/*
		 * Set the domains we want to reset, then the reset bit (bit 0).
		 * Clear the reset bit after a while and wait for hardware status
		 * bit (bit 1) to be set
		 */
		pci_read_config_byte(dev->pdev, GDRST, &gdrst);
		pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
		udelay(50);
		pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);

		/* ...we don't want to loop forever though, 500ms should be plenty */
		timeout = jiffies + msecs_to_jiffies(500);
		do {
			udelay(100);
			pci_read_config_byte(dev->pdev, GDRST, &gdrst);
		} while ((gdrst & 0x1) && time_after(timeout, jiffies));

		if (gdrst & 0x1) {
			WARN(true, "i915: Failed to reset chip\n");
			mutex_unlock(&dev->struct_mutex);
			return -EIO;
		}
	} else {
		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return -ENODEV;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring = &dev_priv->render_ring;
		dev_priv->mm.suspended = 0;
		ring->init(dev, ring);
		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_irq_install(dev);
		mutex_lock(&dev->struct_mutex);
	}

	/*
	 * Display needs restore too...
	 */
	if (need_display)
		i915_restore_display(dev);

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
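A minimal caller sketch: only i965_reset() and the GDRST_FULL flag referenced above come from the example; the surrounding hang handler is hypothetical.

static void example_handle_gpu_hang(struct drm_device *dev)
{
	/* hypothetical hang path -- illustration only */
	if (i965_reset(dev, GDRST_FULL))
		DRM_ERROR("GPU reset after hang failed\n");
}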
Example #25
void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
								int pipe)
{
	struct drm_device *dev = dsi_config->dev;
	int lane_count = dsi_config->lane_count;
	struct mdfld_dsi_dpi_timing dpi_timing;
	struct drm_display_mode *mode = dsi_config->mode;
	u32 val;

	/*un-ready device*/
	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0);

	/*init dsi adapter before kicking off*/
	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);

	/*enable all interrupts*/
	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);

	/*set up func_prg*/
	val = lane_count;
	val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;

	switch (dsi_config->bpp) {
	case 16:
		val |= DSI_DPI_COLOR_FORMAT_RGB565;
		break;
	case 18:
		val |= DSI_DPI_COLOR_FORMAT_RGB666;
		break;
	case 24:
		val |= DSI_DPI_COLOR_FORMAT_RGB888;
		break;
	default:
		DRM_ERROR("unsupported color format, bpp = %d\n",
							dsi_config->bpp);
	}
	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val);

	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe),
			(mode->vtotal * mode->htotal * dsi_config->bpp /
				(8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe),
				0xffff & DSI_LP_RX_TIMEOUT_MASK);

	/*max value: 20 clock cycles of txclkesc*/
	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe),
				0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);

	/*min 21 txclkesc, max: ffffh*/
	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe),
				0xffff & DSI_RESET_TIMER_MASK);

	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
				mode->vdisplay << 16 | mode->hdisplay);

	/*set DPI timing registers*/
	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
				dsi_config->lane_count, dsi_config->bpp);

	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
			dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
			dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
			dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
			dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
			dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
			dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
			dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);

	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46);

	/*min: 7d0 max: 4e20*/
	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0);

	/*set up video mode*/
	val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val);

	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);

	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);

	/*TODO: figure out how to setup these registers*/
	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
	else
		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408);

	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);

	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */

	/*set device ready*/
	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0);
}
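To make the HS TX timeout write above concrete, a worked example with assumed mode parameters (htotal 1000, vtotal 500, 24 bpp, 2 lanes; none of these come from a real panel):

	/* assumed: vtotal = 500, htotal = 1000, bpp = 24, lane_count = 2 */
	u32 hs_tx_timeout = 500 * 1000 * 24 / (8 * 2);   /* = 750000 byte clocks before masking */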
Example #26
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, a negative error code on failure.
 */
static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value, value_tmp, *value_ptr, value_size;
	uint64_t value64;
	struct drm_crtc *crtc;
	int i, found;

	value_ptr = (uint32_t *)((unsigned long)info->value);
	value = &value_tmp;
	value_size = sizeof(uint32_t);

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		*value = dev->pdev->device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		*value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		*value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			*value = false;
		else
			*value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == *value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				*value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		*value = rdev->accel_working;
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.tile_config;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		if (rdev->asic->get_xclk)
			*value = radeon_get_xclk(rdev) * 10;
		else
			*value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_backends_per_se *
				rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_tile_pipes;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		*value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.backend_map;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_cu_per_sh;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_TIMESTAMP:
		if (rdev->family < CHIP_R600) {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = radeon_get_gpu_clock_counter(rdev);
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.num_ses;
		else
			*value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_sh_per_se;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_FASTFB_WORKING:
		*value = rdev->fastfb_working;
		break;
	case RADEON_INFO_RING_WORKING:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		switch (*value) {
		case RADEON_CS_RING_GFX:
		case RADEON_CS_RING_COMPUTE:
			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
			break;
		case RADEON_CS_RING_DMA:
			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
			break;
		case RADEON_CS_RING_UVD:
			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
			break;
		case RADEON_CS_RING_VCE:
			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
			break;
		default:
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_TILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else if (rdev->family >= CHIP_TAHITI) {
			value = rdev->config.si.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else {
			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.macrotile_mode_array;
			value_size = sizeof(uint32_t)*16;
		} else {
			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_CP_DMA_COMPUTE:
		*value = 1;
		break;
	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
		if (rdev->family >= CHIP_BONAIRE) {
			*value = rdev->config.cik.backend_enable_mask;
		} else if (rdev->family >= CHIP_TAHITI) {
			*value = rdev->config.si.backend_enable_mask;
		} else {
			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
		}
		break;
	case RADEON_INFO_MAX_SCLK:
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled)
			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
		else
			*value = rdev->pm.default_sclk * 10;
		break;
	case RADEON_INFO_VCE_FW_VERSION:
		*value = rdev->vce.fw_version;
		break;
	case RADEON_INFO_VCE_FB_VERSION:
		*value = rdev->vce.fb_version;
		break;
	case RADEON_INFO_NUM_BYTES_MOVED:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->num_bytes_moved);
		break;
	case RADEON_INFO_VRAM_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->vram_usage);
		break;
	case RADEON_INFO_GTT_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->gtt_usage);
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (copy_to_user(value_ptr, (char*)value, value_size)) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}
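For context, userspace reaches this handler through the RADEON_INFO ioctl. The sketch below is an assumption built on the usual libdrm conventions (drmCommandWriteRead() and struct drm_radeon_info), not code taken from this listing:

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Hedged userspace sketch: fetch the value the RADEON_INFO_DEVICE_ID case
 * above fills in. Struct layout follows radeon_drm.h as commonly shipped. */
static int example_query_device_id(int fd, uint32_t *dev_id)
{
	struct drm_radeon_info info;
	uint32_t value = 0;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_DEVICE_ID;
	info.value = (uintptr_t)&value;   /* kernel copy_to_user() target */

	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)))
		return -1;

	*dev_id = value;
	return 0;
}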
int mdfld_panel_generic_dsi_dbi_set_power(struct drm_encoder *encoder, bool on)
{
	int ret = 0;
	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
	struct mdfld_dsi_dbi_output *dbi_output =
		MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
	struct panel_funcs *p_funcs = dbi_output->p_funcs;
	struct mdfld_dsi_connector *dsi_connector =
		mdfld_dsi_encoder_get_connector(dsi_encoder);
	struct mdfld_dsi_config *dsi_config =
		mdfld_dsi_encoder_get_config(dsi_encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 reg_offset = 0;
	int pipe = (dbi_output->channel_num == 0) ? 0 : 2;

	PSB_DEBUG_ENTRY("pipe %d : %s, panel on: %s\n", pipe, on ? "On" : "Off",
			dbi_output->dbi_panel_on ? "True" : "False");

	if (pipe == 2) {
		if (on)
			dev_priv->dual_mipi = true;
		else
			dev_priv->dual_mipi = false;

		reg_offset = MIPIC_REG_OFFSET;
	} else {
		if (!on)
			dev_priv->dual_mipi = false;
	}

	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
				OSPM_UHB_FORCE_POWER_ON)) {
		DRM_ERROR("hw begin failed\n");
		return -EAGAIN;
	}

	mutex_lock(&dsi_config->context_lock);

	if (on) {
		if (dbi_output->dbi_panel_on)
			goto out_err;

		ret = mdfld_panel_generic_dsi_dbi_power_on(encoder);
		if (ret) {
			DRM_ERROR("power on error\n");
			goto out_err;
		}

		dbi_output->dbi_panel_on = true;

		if (pipe == 2)
			dev_priv->dbi_panel_on2 = true;
		else
			dev_priv->dbi_panel_on = true;

		if (dev_priv->platform_rev_id != MDFLD_PNW_A0)
			mdfld_enable_te(dev, pipe);

		/* wake up error detector if ESD enabled */
		if (p_funcs->esd_detection)
			mdfld_dsi_error_detector_wakeup(dsi_connector);
		else
			PSB_DEBUG_ENTRY("ESD detection disabled\n");

		dsi_config->dsi_hw_context.panel_on = 1;
	} else {
		if (!dbi_output->dbi_panel_on && !dbi_output->first_boot)
			goto out_err;

		dbi_output->dbi_panel_on = false;
		dbi_output->first_boot = false;

		if (pipe == 2)
			dev_priv->dbi_panel_on2 = false;
		else
			dev_priv->dbi_panel_on = false;

		if (dev_priv->platform_rev_id != MDFLD_PNW_A0)
			mdfld_disable_te(dev, pipe);

		ret = mdfld_panel_generic_dsi_dbi_power_off(encoder);
		if (ret) {
			DRM_ERROR("power on error\n");
			goto out_err;
		}

		dsi_config->dsi_hw_context.panel_on = 0;
	}

out_err:
	mutex_unlock(&dsi_config->context_lock);
	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);

	if (ret)
		DRM_ERROR("failed\n");
	else
		PSB_DEBUG_ENTRY("successfully\n");

	return ret;
}
Example #28
void dp_link_train(struct drm_encoder *encoder,
		   struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig;
	struct radeon_connector *radeon_connector;
	struct radeon_connector_atom_dig *dig_connector;
	int enc_id = 0;
	bool clock_recovery, channel_eq;
	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
	u8 tries, voltage;
	u8 train_set[4];
	int i;

	if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
	    (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
		return;

	if (!radeon_encoder->enc_priv)
		return;
	dig = radeon_encoder->enc_priv;

	radeon_connector = to_radeon_connector(connector);
	if (!radeon_connector->con_priv)
		return;
	dig_connector = radeon_connector->con_priv;

	if (dig->dig_encoder)
		enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
	else
		enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
	if (dig_connector->linkb)
		enc_id |= ATOM_DP_CONFIG_LINK_B;
	else
		enc_id |= ATOM_DP_CONFIG_LINK_A;

	memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	if (dig_connector->dp_clock == 270000)
		link_configuration[0] = DP_LINK_BW_2_7;
	else
		link_configuration[0] = DP_LINK_BW_1_62;
	link_configuration[1] = dig_connector->dp_lane_count;
	if (dig_connector->dpcd[0] >= 0x11)
		link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	/* power up the sink */
	dp_set_power(radeon_connector, DP_SET_POWER_D0);
	/* disable the training pattern on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
	/* set link bw and lanes on the sink */
	dp_set_link_bw_lanes(radeon_connector, link_configuration);
	/* disable downspread on the sink */
	dp_set_downspread(radeon_connector, 0);
	/* start training on the source */
	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
				  dig_connector->dp_clock, enc_id, 0);
	/* set training pattern 1 on the source */
	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
				  dig_connector->dp_clock, enc_id, 0);

	/* set initial vs/emph */
	memset(train_set, 0, 4);
	udelay(400);
	/* set training pattern 1 on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);

	dp_update_dpvs_emph(radeon_connector, encoder, train_set);

	/* clock recovery loop */
	clock_recovery = false;
	tries = 0;
	voltage = 0xff;
	for (;;) {
		udelay(100);
		if (!atom_dp_get_link_status(radeon_connector, link_status))
			break;

		if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
			clock_recovery = true;
			break;
		}

		for (i = 0; i < dig_connector->dp_lane_count; i++) {
			if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		}
		if (i == dig_connector->dp_lane_count) {
			DRM_ERROR("clock recovery reached max voltage\n");
			break;
		}

		if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++tries;
			if (tries == 5) {
				DRM_ERROR("clock recovery tried 5 times\n");
				break;
			}
		} else
			tries = 0;

		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new train_set as requested by sink */
		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
		dp_update_dpvs_emph(radeon_connector, encoder, train_set);
	}
	if (!clock_recovery)
		DRM_ERROR("clock recovery failed\n");
	else
		DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
			  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			  DP_TRAIN_PRE_EMPHASIS_SHIFT);


	/* set training pattern 2 on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
	/* set training pattern 2 on the source */
	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
				  dig_connector->dp_clock, enc_id, 1);

	/* channel equalization loop */
	tries = 0;
	channel_eq = false;
	for (;;) {
		udelay(400);
		if (!atom_dp_get_link_status(radeon_connector, link_status))
			break;

		if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times */
		if (tries > 5) {
			DRM_ERROR("channel eq failed: 5 tries\n");
			break;
		}

		/* Compute new train_set as requested by sink */
		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
		dp_update_dpvs_emph(radeon_connector, encoder, train_set);

		tries++;
	}

	if (!channel_eq)
		DRM_ERROR("channel eq failed\n");
	else
		DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
			  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* disable the training pattern on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);

	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
				  dig_connector->dp_clock, enc_id, 0);
}
Example #29
static void
gi_renesas_dsi_controller_init(struct mdfld_dsi_config *dsi_config)
{
	struct mdfld_dsi_hw_context *hw_ctx =
		&dsi_config->dsi_hw_context;
	struct drm_device *dev = dsi_config->dev;

	struct csc_setting csc = {	.pipe = 0,
					.type = CSC_REG_SETTING,
					.enable_state = true,
					.data_len = CSC_REG_COUNT,
					.data.csc_reg_data = {
						0xF510486, 0x27, 0x3F10FD0, 0x3E, 0x51000F, 0x39F}
				};
	struct gamma_setting gamma = {	.pipe = 0,
					.type = GAMMA_REG_SETTING,
					.enable_state = true,
					.data_len = GAMMA_10_BIT_TABLE_COUNT,
					.gamma_tableX100 = {
						0x000000, 0x010101, 0x020202, 0x030303,
						0x040404, 0x050505, 0x060606, 0x070807,
						0x080908, 0x0A0B0A, 0x0B0C0B, 0x0D0D0D,
						0x0E0F0E, 0x0F100F, 0x111211, 0x121312,
						0x141514, 0x151715, 0x171817, 0x191A19,
						0x1A1C1A, 0x1C1D1C, 0x1D1F1D, 0x1F211F,
						0x212221, 0x222422, 0x242624, 0x262726,
						0x272928, 0x292B29, 0x2B2D2B, 0x2D2F2D,
						0x2E302F, 0x303230, 0x323432, 0x343634,
						0x363836, 0x383A38, 0x393B39, 0x3B3D3B,
						0x3D3F3D, 0x3F413F, 0x414341, 0x434543,
						0x454745, 0x474947, 0x494B49, 0x4B4D4B,
						0x4C4F4D, 0x4E514F, 0x505351, 0x525552,
						0x545754, 0x565956, 0x585B58, 0x5A5D5A,
						0x5C5F5D, 0x5E615F, 0x606361, 0x636563,
						0x656765, 0x676967, 0x696B69, 0x6B6D6B,
						0x6D6F6D, 0x6F716F, 0x717371, 0x737573,
						0x757775, 0x777A78, 0x7A7C7A, 0x7C7E7C,
						0x7E807E, 0x808280, 0x828482, 0x848684,
						0x868887, 0x898B89, 0x8B8D8B, 0x8D8F8D,
						0x8F918F, 0x919391, 0x949594, 0x969896,
						0x989A98, 0x9A9C9A, 0x9D9E9D, 0x9FA09F,
						0xA1A3A1, 0xA3A5A3, 0xA5A7A6, 0xA8A9A8,
						0xAAACAA, 0xACAEAC, 0xAFB0AF, 0xB1B2B1,
						0xB3B5B3, 0xB5B7B5, 0xB8B9B8, 0xBABBBA,
						0xBCBEBC, 0xBFC0BF, 0xC1C2C1, 0xC3C5C3,
						0xC6C7C6, 0xC8C9C8, 0xCACBCA, 0xCDCECD,
						0xCFD0CF, 0xD1D2D1, 0xD4D5D4, 0xD6D7D6,
						0xD8D9D8, 0xDBDCDB, 0xDDDEDD, 0xE0E0E0,
						0xE2E3E2, 0xE4E5E4, 0xE7E7E7, 0xE9EAE9,
						0xECECEC, 0xEEEEEE, 0xF0F1F0, 0xF3F3F3,
						0xF5F5F5, 0xF8F8F8, 0xFAFAFA, 0xFDFDFD}
					};

	PSB_DEBUG_ENTRY("\n");

	dsi_config->lane_count = 1;
	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_2_2;
	dsi_config->enable_gamma_csc = ENABLE_GAMMA | ENABLE_CSC;
	hw_ctx->pll_bypass_mode = 1;
	hw_ctx->cck_div = 1;
	hw_ctx->mipi_control = 0x00;
	hw_ctx->intr_en = 0xffffffff;
	hw_ctx->hs_tx_timeout = 0xffffff;
	hw_ctx->lp_rx_timeout = 0xffffff;
	hw_ctx->turn_around_timeout = 0x14;
	hw_ctx->device_reset_timer = 0xffff;
	hw_ctx->high_low_switch_count = 0x28;
	hw_ctx->init_count = 0xf0;
	hw_ctx->eot_disable = 0x2;
	hw_ctx->hs_ls_dbi_enable = 0x0;
	hw_ctx->lp_byteclk = 0x0;
	hw_ctx->clk_lane_switch_time_cnt = 0xa0014;
	hw_ctx->dphy_param = 0x150a600f;
	hw_ctx->dbi_bw_ctrl = 0x820;
	hw_ctx->mipi = PASS_FROM_SPHY_TO_AFE | TE_TRIGGER_GPIO_PIN;
	hw_ctx->mipi |= dsi_config->lane_config;
	/*set up func_prg*/
	hw_ctx->dsi_func_prg = (0xa000 | dsi_config->lane_count);

	if (dsi_config->enable_gamma_csc & ENABLE_CSC) {
		/* setting the tuned csc setting */
		drm_psb_enable_color_conversion = 1;
		mdfld_intel_crtc_set_color_conversion(dev, &csc);
	}

	if (dsi_config->enable_gamma_csc & ENABLE_GAMMA) {
		/* setting the tuned gamma setting */
		drm_psb_enable_gamma = 1;
		mdfld_intel_crtc_set_gamma(dev, &gamma);
	}
}

static
struct drm_display_mode *gi_renesas_cmd_get_config_mode(void)
{
	struct drm_display_mode *mode;

	PSB_DEBUG_ENTRY("\n");

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	mode->hdisplay = 320;
	mode->vdisplay = 480;
	/* HFP = 10, HSYNC = 10, HBP = 20 */
	mode->hsync_start = mode->hdisplay + 10;
	mode->hsync_end = mode->hsync_start + 10;
	mode->htotal = mode->hsync_end + 20;
	/* VFP = 10, VSYNC = 2, VBP = 10 */
	mode->vsync_start = mode->vdisplay + 10;
	mode->vsync_end = mode->vsync_start + 2;
	mode->vtotal = mode->vsync_end + 10;
	mode->vrefresh = 60;
	mode->clock = mode->vrefresh * mode->vtotal *
		mode->htotal / 1000;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	mode->type |= DRM_MODE_TYPE_PREFERRED;

	return mode;
}
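Plugging the numbers from this mode into the clock formula above: htotal = 340 + 20 = 360, vtotal = 492 + 10 = 502, so mode->clock = 60 * 502 * 360 / 1000 = 10843 kHz, i.e. roughly a 10.8 MHz pixel clock.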

static
int __gi_renesas_dsi_power_on(struct mdfld_dsi_config *dsi_config)
{
	struct drm_device *dev = dsi_config->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct mdfld_dsi_hw_registers *regs =
		&dsi_config->regs;
	struct mdfld_dsi_pkg_sender *sender =
		mdfld_dsi_get_pkg_sender(dsi_config);
	int err = 0;

	PSB_DEBUG_ENTRY("\n");

	if (!sender) {
		DRM_ERROR("Failed to get DSI packet sender\n");
		return -EINVAL;
	}

	if (drm_psb_enable_cabc) {
		/* enable cabc */
		gi_er61529_backlight_cntr_1[1] = 0x01;
		mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_protect_on, 2, 0);
		mdfld_dsi_send_gen_long_hs(sender, gi_er61529_backlight_cntr_1, 21, 0);
		mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_protect_off, 2, 0);
	}

	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_protect_on, 2, 0);
	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_backlight_cntr, 5, 0);
	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_protect_off, 2, 0);
	mdfld_dsi_send_mcs_long_hs(sender, gi_er61529_exit_sleep_mode, 1, 0);
	mdelay(120);
	mdfld_dsi_send_mcs_long_hs(sender, gi_er61529_set_tear_on, 2, 0);
	mdfld_dsi_send_mcs_long_hs(sender, gi_er61529_dcs_set_display_on, 1, 0);

	return err;
}

static
int __gi_renesas_dsi_power_off(struct mdfld_dsi_config *dsi_config)
{
	struct mdfld_dsi_pkg_sender *sender =
		mdfld_dsi_get_pkg_sender(dsi_config);
	int err = 0;

	PSB_DEBUG_ENTRY("Turn off video mode TMD panel...\n");

	if (!sender) {
		DRM_ERROR("Failed to get DSI packet sender\n");
		return -EINVAL;
	}

	/* turn off display */
	err = mdfld_dsi_send_dcs(sender,
		 set_display_off,
		 NULL,
		 0,
		 CMD_DATA_SRC_SYSTEM_MEM,
		 MDFLD_DSI_SEND_PACKAGE);
	if (err) {
		DRM_ERROR("%s - sent set_display_off faild\n", __func__);
		goto power_err;
	}
	mdelay(70);

	/* set tear off display */
	err = mdfld_dsi_send_dcs(sender,
		 set_tear_off,
		 NULL,
		 0,
		 CMD_DATA_SRC_SYSTEM_MEM,
		 MDFLD_DSI_SEND_PACKAGE);
	if (err) {
		DRM_ERROR("%s - sent set_tear_off faild\n", __func__);
		goto power_err;
	}

	/* disable CABC */
	gi_er61529_backlight_cntr_1[1] = 0x00;
	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_protect_on, 2, 0);
	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_backlight_cntr_1, 21, 0);
	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_protect_off, 2, 0);

	err =
	mdfld_dsi_send_mcs_long_hs(sender, gi_er61529_enter_sleep_mode, 1, 0);
	if (err) {
		DRM_ERROR("Enter sleep mode error\n");
		goto power_err;
	}
	mdelay(120);

	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_protect_on, 2, 0);
	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_lp_mode_cntr, 2, 0);
	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_protect_off, 2, 0);

power_err:
	return err;
}

static
void gi_renesas_cmd_get_panel_info(int pipe, struct panel_info *pi)
{
	if (pipe == 0) {
		pi->width_mm = PANEL_3DOT47_WIDTH;
		pi->height_mm = PANEL_3DOT47_HEIGHT;
	}
}

static
int gi_renesas_dsi_cmd_detect(struct mdfld_dsi_config *dsi_config)
{
	struct drm_device *dev = dsi_config->dev;
	struct mdfld_dsi_hw_registers *regs = &dsi_config->regs;
	int status;
	int pipe = dsi_config->pipe;
	uint32_t dpll_val, device_ready_val;

	PSB_DEBUG_ENTRY("\n");

	if (pipe == 0) {
		if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
					OSPM_UHB_FORCE_POWER_ON)) {
			DRM_ERROR("hw begin failed\n");
			return -EAGAIN;
		}

		dpll_val = REG_READ(regs->dpll_reg);
		device_ready_val = REG_READ(regs->device_ready_reg);
		if ((device_ready_val & DSI_DEVICE_READY) &&
				(dpll_val & DPLL_VCO_ENABLE)) {
			dsi_config->dsi_hw_context.panel_on = true;
			status = MDFLD_DSI_PANEL_CONNECTED;
		} else {
			dsi_config->dsi_hw_context.panel_on = false;
			status = MDFLD_DSI_PANEL_DISCONNECTED;
			DRM_INFO("%s: do NOT support dual panel\n", __func__);
		}

		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
	} else {
		PSB_DEBUG_ENTRY("Only support single panel\n");
		status = MDFLD_DSI_PANEL_DISCONNECTED;
		dsi_config->dsi_hw_context.panel_on = 0;
	}

	return 0;
}

static
int gi_renesas_dsi_cmd_set_brightness(struct mdfld_dsi_config *dsi_config,
		int level)
{
	struct mdfld_dsi_pkg_sender *sender =
		mdfld_dsi_get_pkg_sender(dsi_config);
	u8 backlight_val;

	PSB_DEBUG_ENTRY("Set brightness level %d...\n", level);

	if (!sender) {
		DRM_ERROR("Failed to get DSI packet sender\n");
		return -EINVAL;
	}

	backlight_val = level * 255 / 100;
	gi_er61529_set_backlight[2] = backlight_val;

	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_protect_on, 2, 0);
	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_set_backlight, 5, 0);
	mdfld_dsi_send_gen_long_hs(sender, gi_er61529_mcs_protect_off, 2, 0);

	return 0;
}
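For example, a requested level of 50 maps to 50 * 255 / 100 = 127 (0x7F), which is the byte gi_er61529_set_backlight[2] carries in the long packet sent above.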

static
int gi_renesas_dsi_panel_reset(struct mdfld_dsi_config *dsi_config)
{
	static int mipi_reset_gpio;
	int ret = 0;

	PSB_DEBUG_ENTRY("\n");

	if (mipi_reset_gpio == 0) {
		ret = get_gpio_by_name("mipi-reset");
		if (ret < 0) {
			DRM_ERROR("Faild to get panel reset gpio, " \
				  "use default reset pin\n");
			ret = 128;
		}

		mipi_reset_gpio = ret;

		ret = gpio_request(mipi_reset_gpio, "mipi_display");
		if (ret) {
			DRM_ERROR("Faild to request panel reset gpio\n");
			return -EINVAL;
		}

		gpio_direction_output(mipi_reset_gpio, 0);
	}

	gpio_set_value_cansleep(mipi_reset_gpio, 0);
	mdelay(11);

	gpio_set_value_cansleep(mipi_reset_gpio, 1);
	mdelay(20);

	return 0;
}


void gi_renesas_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
{
	p_funcs->get_config_mode = gi_renesas_cmd_get_config_mode;
	p_funcs->get_panel_info = gi_renesas_cmd_get_panel_info;
	p_funcs->reset = gi_renesas_dsi_panel_reset;
	p_funcs->drv_ic_init = gi_renesas_dbi_ic_init;
	p_funcs->dsi_controller_init = gi_renesas_dsi_controller_init;
	p_funcs->detect = gi_renesas_dsi_cmd_detect;
	p_funcs->set_brightness = gi_renesas_dsi_cmd_set_brightness;
	p_funcs->power_on = __gi_renesas_dsi_power_on;
	p_funcs->power_off = __gi_renesas_dsi_power_off;
}
Example #30
void hsw_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 temp, i, rx_ctl_val;

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 */
	I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
				  FDI_RX_PWRDN_LANE0_VAL(2) |
				  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
	POSTING_READ(_FDI_RXA_CTL);
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);

	/* Configure Port Clock Select */
	I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		I915_WRITE(DP_TP_CTL(PORT_E),
					DP_TP_CTL_FDI_AUTOTRAIN |
					DP_TP_CTL_ENHANCED_FRAME_ENABLE |
					DP_TP_CTL_LINK_TRAIN_PAT1 |
					DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		I915_WRITE(DDI_BUF_CTL(PORT_E),
			   DDI_BUF_CTL_ENABLE |
			   ((intel_crtc->fdi_lanes - 1) << 1) |
			   hsw_ddi_buf_ctl_values[i / 2]);
		POSTING_READ(DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
		POSTING_READ(_FDI_RXA_CTL);

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		temp = I915_READ(_FDI_RXA_MISC);
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		I915_WRITE(_FDI_RXA_MISC, temp);
		POSTING_READ(_FDI_RXA_MISC);

		/* Wait for FDI auto training time */
		udelay(5);

		temp = I915_READ(DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			DRM_DEBUG_KMS("FDI link training done on step %d\n", i);

			/* Enable normal pixel sending for FDI */
			I915_WRITE(DP_TP_CTL(PORT_E),
				   DP_TP_CTL_FDI_AUTOTRAIN |
				   DP_TP_CTL_LINK_TRAIN_NORMAL |
				   DP_TP_CTL_ENHANCED_FRAME_ENABLE |
				   DP_TP_CTL_ENABLE);

			return;
		}

		temp = I915_READ(DDI_BUF_CTL(PORT_E));
		temp &= ~DDI_BUF_CTL_ENABLE;
		I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
		POSTING_READ(DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		temp = I915_READ(DP_TP_CTL(PORT_E));
		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
		I915_WRITE(DP_TP_CTL(PORT_E), temp);
		POSTING_READ(DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		rx_ctl_val &= ~FDI_RX_ENABLE;
		I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
		POSTING_READ(_FDI_RXA_CTL);

		/* Reset FDI_RX_MISC pwrdn lanes */
		temp = I915_READ(_FDI_RXA_MISC);
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
		I915_WRITE(_FDI_RXA_MISC, temp);
		POSTING_READ(_FDI_RXA_MISC);
	}

	DRM_ERROR("FDI link training failed!\n");
}