Example No. 1
/**
 * sti_gdp_get_current_nodes
 * @layer: GDP layer
 *
 * Look for GDP nodes that are currently read by the HW.
 *
 * RETURNS:
 * Pointer to the current GDP node list
 */
static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
{
    int hw_nvn;
    void *virt_nvn;
    struct sti_gdp *gdp = to_sti_gdp(layer);
    unsigned int i;

    hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
    if (!hw_nvn)
        goto end;

    virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);

    for (i = 0; i < GDP_NODE_NB_BANK; i++)
        if ((virt_nvn == gdp->node_list[i].btm_field) ||
                (virt_nvn == gdp->node_list[i].top_field))
            return &gdp->node_list[i];

end:
    DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
                     hw_nvn, sti_layer_to_str(layer));

    return NULL;
}
Example No. 2
/**
 * sti_gdp_get_free_nodes
 * @layer: gdp layer
 *
 * Look for a GDP node list that is not currently read by the HW.
 *
 * RETURNS:
 * Pointer to the free GDP node list
 */
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
{
    int hw_nvn;
    void *virt_nvn;
    struct sti_gdp *gdp = to_sti_gdp(layer);
    unsigned int i;

    hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
    if (!hw_nvn)
        goto end;

    virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);

    for (i = 0; i < GDP_NODE_NB_BANK; i++)
        if ((virt_nvn != gdp->node_list[i].btm_field) &&
                (virt_nvn != gdp->node_list[i].top_field))
            return &gdp->node_list[i];

    /* in hazardous cases, restart with the first node */
    DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
              sti_layer_to_str(layer), hw_nvn);

end:
    return &gdp->node_list[0];
}
Example No. 3
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);
		BUG_ON(buf->page);
		BUG_ON(!buf->ptr);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}
Example No. 4
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);
		BUG_ON(buf->page);
		BUG_ON(!buf->ptr);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * Since we may have written to a page cache page,
			 * we need to ensure that the data will be coherent
			 * with user mappings.
			 */
			__cpuc_flush_dcache_area(ptr, size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}
Example No. 5
static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf,
				size_t count, loff_t *offp)
{
	int amnt_copied = 0;
	int event_ring_index = 0;
	struct mhi_event_ctxt *ev_ctxt;
	uintptr_t v_wp_index;
	uintptr_t v_rp_index;
	uintptr_t device_p_rp_index;

	struct mhi_device_ctxt *mhi_dev_ctxt =
		&mhi_devices.device_list[0].mhi_ctxt;
	if (NULL == mhi_dev_ctxt)
		return -EIO;
	*offp = (u32)(*offp) % mhi_dev_ctxt->mmio_info.nr_event_rings;
	event_ring_index = *offp;
	ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[event_ring_index];
	if (*offp == (mhi_dev_ctxt->mmio_info.nr_event_rings - 1))
		msleep(1000);

	get_element_index(&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index],
			mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].rp,
			&v_rp_index);
	get_element_index(&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index],
			mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].wp,
			&v_wp_index);
	get_element_index(&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index],
			(void *)dma_to_virt(NULL, ev_ctxt->mhi_event_read_ptr),
			&device_p_rp_index);

	amnt_copied =
	scnprintf(mhi_dev_ctxt->chan_info,
		MHI_LOG_SIZE,
		"%s 0x%d %s %02x %s 0x%08x %s 0x%08x %s 0x%llx %s %llx %s %lu %s %p %s %p %s %lu %s %p %s %lu\n",
		"Event Context ",
		(unsigned int)event_ring_index,
		"Intmod_T",
		MHI_GET_EV_CTXT(EVENT_CTXT_INTMODT, ev_ctxt),
		"MSI Vector",
		ev_ctxt->mhi_msi_vector,
		"MSI RX Count",
		mhi_dev_ctxt->counters.msi_counter[*offp],
		"p_base:",
		ev_ctxt->mhi_event_ring_base_addr,
		"p_rp:",
		ev_ctxt->mhi_event_read_ptr,
		"index:",
		device_p_rp_index,
		"v_base:",
		mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].base,
		"v_wp:",
		mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].wp,
		"index:",
		v_wp_index,
		"v_rp:",
		mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].rp,
		"index:",
		v_rp_index);

	*offp += 1;
	if (amnt_copied < count)
		return amnt_copied -
			copy_to_user(buf, mhi_dev_ctxt->chan_info, amnt_copied);
	else
		return -ENOMEM;
}
Example No. 6
static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from original code need to be
		 * commented out b/c some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. Good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. No point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one dir at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * -ds
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__,buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		/*
		 * No need to sync the safe buffer - it was allocated
		 * via the coherent allocators.
		 */
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}
Example No. 7
static void recv_pack(struct qm_queue *queue, u32 phys)
{
	struct ix_sa_ctx *sa_ctx;
	struct npe_crypt_cont *cr_cont;
	struct npe_cont *cont;
	int failed;

	failed = phys & 0x1;
	phys &= ~0x3;

	cr_cont = dma_to_virt(queue->dev, phys);
	cr_cont = cr_cont->virt;
	sa_ctx = cr_cont->ctl.crypt.sa_ctx;

	phys = npe_to_cpu32(cr_cont->ctl.crypt.src_buf);
	if (phys) {
		cont = dma_to_virt(queue->dev, phys);
		cont = cont->virt;
	} else {
		cont = NULL;
	}
	if (cr_cont->ctl.crypt.oper_type == OP_PERFORM) {
		dma_unmap_single(sa_ctx->master->npe_dev,
				cont->eth.phys_addr,
				cont->eth.buf_len,
				DMA_BIDIRECTIONAL);
		if (sa_ctx->perf_cb)
			sa_ctx->perf_cb(sa_ctx, cont->data, failed);
		qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
		ix_sa_return_cont(sa_ctx->master, cr_cont);
		if (atomic_dec_and_test(&sa_ctx->use_cnt))
			ix_sa_ctx_destroy(sa_ctx);
		return;
	}

	/* We are registering */
	switch (cr_cont->ctl.crypt.mode) {
	case NPE_OP_HASH_GEN_ICV:
		/* 1 out of 2 HMAC preparation operations completed */
		dma_unmap_single(sa_ctx->master->npe_dev,
				cont->eth.phys_addr,
				cont->eth.buf_len,
				DMA_TO_DEVICE);
		kfree(cont->data);
		qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
		break;
	case NPE_OP_ENC_GEN_KEY:
		memcpy(sa_ctx->decrypt.npe_ctx + sizeof(u32),
			sa_ctx->rev_aes->ctl.rev_aes_key + sizeof(u32),
			sa_ctx->c_key.len);
		/* REV AES data not needed anymore, free it */
		ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
		sa_ctx->rev_aes = NULL;
		break;
	default:
		printk(KERN_ERR "Unknown crypt-register mode: %x\n",
				cr_cont->ctl.crypt.mode);

	}
	if (cr_cont->ctl.crypt.oper_type == OP_REG_DONE) {
		if (sa_ctx->state == STATE_UNREGISTERED)
			sa_ctx->state = STATE_REGISTERED;
		if (sa_ctx->reg_cb)
			sa_ctx->reg_cb(sa_ctx, failed);
	}
	ix_sa_return_cont(sa_ctx->master, cr_cont);
	if (atomic_dec_and_test(&sa_ctx->use_cnt))
		ix_sa_ctx_destroy(sa_ctx);
}
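
All seven examples follow the same pattern: a bus address is recovered from hardware (a register or a descriptor field) and translated back to a kernel virtual address with dma_to_virt() before the CPU dereferences it. The minimal sketch below distils that pattern under the assumption of an older ARM kernel that still provides dma_to_virt(); the register offset MY_DESC_PTR_OFFSET, the struct my_desc layout and the helper my_current_desc() are hypothetical placeholders for illustration, not code from any of the drivers above.

#include <linux/device.h>
#include <linux/io.h>
#include <linux/types.h>

#include <asm/memory.h>		/* dma_to_virt() on older ARM kernels */

/* Hypothetical register offset and descriptor layout, for illustration only. */
#define MY_DESC_PTR_OFFSET	0x10

struct my_desc {
	u32 status;
	u32 next;	/* bus address of the next descriptor */
};

/*
 * Read the bus address of the descriptor the hardware is currently
 * processing and convert it to a CPU pointer, mirroring the lookups in
 * sti_gdp_get_current_nodes() and recv_pack() above. This only works on
 * platforms where dma_to_virt() is a simple linear translation.
 */
static struct my_desc *my_current_desc(struct device *dev, void __iomem *regs)
{
	dma_addr_t bus = readl(regs + MY_DESC_PTR_OFFSET);

	if (!bus)
		return NULL;

	return dma_to_virt(dev, bus);
}

The bounce-buffer examples (unmap_single() and sync_single()) use the same translation in their fallback path, where no safe buffer was allocated: there the recovered virtual address feeds the cache-maintenance helpers rather than a descriptor lookup.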