static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * No bounce buffer is needed; just perform cache maintenance
		 * on the original buffer for this DMA direction.
		 */
		dma_cache_maint(ptr, size, dir);
	}

	return dma_addr;
}
Example #2
/**
 * sti_gdp_commit_layer
 * @layer: GDP layer
 *
 * Update the NVN field of the 'right' field of the current GDP node (being
 * used by the HW) with the address of the updated ('free') top field GDP node.
 * - In interlaced mode the 'right' field is the bottom field as we update
 *   frames starting from their top field
 * - In progressive mode, we update both bottom and top fields which are
 *   equal nodes.
 * At the next VSYNC, the updated node list will be used by the HW.
 *
 * RETURNS:
 * 0 on success.
 */
static int sti_gdp_commit_layer(struct sti_layer *layer)
{
    struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
    struct sti_gdp_node *updated_top_node = updated_list->top_field;
    struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
    struct sti_gdp *gdp = to_sti_gdp(layer);
    u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node);
    u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node);
    struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);

    dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
            sti_layer_to_str(layer),
            updated_top_node, updated_btm_node);
    dev_dbg(layer->dev, "Current NVN:0x%X\n",
            readl(layer->regs + GAM_GDP_NVN_OFFSET));
    dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
            (unsigned long)layer->paddr,
            readl(layer->regs + GAM_GDP_PML_OFFSET));

    if (curr_list == NULL) {
        /* On the first update, or when the current node is invalid,
         * write directly to the hw register */
        DRM_DEBUG_DRIVER("%s first update (or invalid node)",
                         sti_layer_to_str(layer));

        writel(gdp->is_curr_top == true ?
               dma_updated_btm : dma_updated_top,
               layer->regs + GAM_GDP_NVN_OFFSET);
        return 0;
    }

    if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
        if (gdp->is_curr_top == true) {
            /* Do not update in the middle of the frame; postpone
             * the update until the bottom field has been
             * displayed */
            curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
        } else {
            /* Direct update to avoid one frame delay */
            writel(dma_updated_top,
                   layer->regs + GAM_GDP_NVN_OFFSET);
        }
    } else {
        /* Direct update for progressive to avoid one frame delay */
        writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
    }

    return 0;
}
Example #3
static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 i;
	u64 db_value = 0;
	struct mhi_event_ctxt *event_ctxt = NULL;
	struct mhi_control_seg *mhi_ctrl = NULL;
	spinlock_t *lock = NULL;
	unsigned long flags;
	mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;

	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[i];
		mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
		spin_lock_irqsave(lock, flags);
		event_ctxt = &mhi_ctrl->mhi_ec_list[i];
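		/*
		 * The doorbell takes the bus address of the ring's write
		 * pointer; only ring it if the db_order flag is still clear.
		 */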
		db_value = virt_to_dma(NULL,
				mhi_dev_ctxt->mhi_local_event_ctxt[i].wp);
		if (0 == mhi_dev_ctxt->mhi_ev_db_order[i]) {
			mhi_process_db(mhi_dev_ctxt,
				       mhi_dev_ctxt->mmio_info.event_db_addr,
				       i, db_value);
		}
		mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
		spin_unlock_irqrestore(lock, flags);
	}
}
Example #4
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	}
}
Example #5
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
		       __func__, ptr);
		return ~0;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}
Example #6
static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}
Example #7
static enum MHI_STATUS process_reset_transition(
			struct mhi_device_ctxt *mhi_dev_ctxt,
			enum STATE_TRANSITION cur_work_item)
{
	u32 i = 0;
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	unsigned long flags = 0;

	mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->counters.mhi_reset_cntr++;
	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_PBL;
	ret_val = mhi_test_for_device_reset(mhi_dev_ctxt);
	ret_val = mhi_test_for_device_ready(mhi_dev_ctxt);
	switch (ret_val) {
	case MHI_STATUS_SUCCESS:
		break;
	case MHI_STATUS_LINK_DOWN:
		mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
		break;
	case MHI_STATUS_DEVICE_NOT_READY:
		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
					STATE_TRANSITION_RESET);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to initiate 0x%x state trans\n",
				STATE_TRANSITION_RESET);
		break;
	default:
		mhi_log(MHI_MSG_CRITICAL,
			"Unexpected ret code detected for\n");
		break;
	}
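	/*
	 * Reset each local command ring to its base and publish the new
	 * read pointer to the device context as a bus address.
	 */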
	for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
				mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp =
				mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list[i].
						mhi_cmd_ring_read_ptr =
				virt_to_dma(NULL,
				mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp);
	}
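	/*
	 * Reset every event ring and every valid channel context before
	 * requesting the READY state transition.
	 */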
	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
		mhi_reset_ev_ctxt(mhi_dev_ctxt, i);

	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		if (VALID_CHAN_NR(i))
			mhi_reset_chan_ctxt(mhi_dev_ctxt, i);
	}
	ret_val = mhi_init_state_transition(mhi_dev_ctxt,
				STATE_TRANSITION_READY);
	if (MHI_STATUS_SUCCESS != ret_val)
		mhi_log(MHI_MSG_CRITICAL,
		"Failed to initiate 0x%x state trans\n",
		STATE_TRANSITION_READY);
	return ret_val;
}
Example #8
int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
Example #9
void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
{
	struct mhi_ring *event_ctxt = NULL;
	u64 db_value = 0;
	event_ctxt =
		&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
	db_value = virt_to_dma(NULL, event_ctxt->wp);
	mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.event_db_addr,
					event_ring_index, db_value);
}
Example #10
static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	struct mutex *cmd_mutex = NULL;
	u64 db_value;
	u64 rp = 0;
	struct mhi_ring *local_ctxt = NULL;

	mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
	cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
	mhi_dev_ctxt->cmd_ring_order = 0;
	mutex_lock(cmd_mutex);
	local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
	rp = virt_to_dma(NULL, local_ctxt->rp);
	db_value = virt_to_dma(NULL, mhi_dev_ctxt->mhi_local_cmd_ctxt[0].wp);
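	/*
	 * Ring the command doorbell only when the ring is non-empty (read
	 * and write pointers differ) and the ring order flag is still clear.
	 */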
	if (0 == mhi_dev_ctxt->cmd_ring_order && rp != db_value)
		mhi_process_db(mhi_dev_ctxt,
			       mhi_dev_ctxt->mmio_info.cmd_db_addr,
							0, db_value);
	mhi_dev_ctxt->cmd_ring_order = 0;
	mutex_unlock(cmd_mutex);
}
Example #11
static int mxc_allocate_dma_buf(struct asrc_pair_params *params)
{
	struct dma_block *input_a, *output_a, *last_period;
	enum asrc_pair_index index = params->index;

	input_a = &params->input_dma_total;
	output_a = &params->output_dma_total;
	last_period = &params->output_last_period;

	input_a->dma_vaddr = kzalloc(input_a->length, GFP_KERNEL);
	if (!input_a->dma_vaddr) {
		pair_err("failed to allocate input dma buffer\n");
		goto exit;
	}
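	/*
	 * kzalloc() returns lowmem, so the DMA (bus) address can be derived
	 * directly from the kernel virtual address.
	 */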
	input_a->dma_paddr = virt_to_dma(NULL, input_a->dma_vaddr);

	output_a->dma_vaddr = kzalloc(output_a->length, GFP_KERNEL);
	if (!output_a->dma_vaddr) {
		pair_err("failed to allocate output dma buffer\n");
		goto exit;
	}
	output_a->dma_paddr = virt_to_dma(NULL, output_a->dma_vaddr);

	last_period->dma_vaddr = dma_alloc_coherent(asrc->dev,
			1024 * params->last_period_sample,
			&last_period->dma_paddr, GFP_KERNEL);
	if (!last_period->dma_vaddr) {
		pair_err("failed to allocate last period buffer\n");
		goto exit;
	}

	return 0;

exit:
	mxc_free_dma_buf(params);

	return -ENOBUFS;
}
Example #12
static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_mapping_error(dma_addr)) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(device_info, buf);
	}
}
Example #13
static void conditional_chan_db_write(
				struct mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
	u64 db_value;
	unsigned long flags;

	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
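	/*
	 * With the doorbell lock held, write the channel doorbell only if
	 * no other context has rung it since the order flag was cleared.
	 */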
	if (0 == mhi_dev_ctxt->mhi_chan_db_order[chan]) {
		db_value = virt_to_dma(NULL,
				mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
		mhi_process_db(mhi_dev_ctxt,
			       mhi_dev_ctxt->mmio_info.chan_db_addr,
			       chan, db_value);
	}
	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
}
Example #14
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);
		BUG_ON(buf->page);
		BUG_ON(!buf->ptr);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}
Example #15
int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
	struct device *dev = &pdev->dev;
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	struct resource *res;
	void *slot_mem;
	dma_addr_t slot_phys;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/* Translating virtual address 0 yields the constant virt-to-bus offset */
	g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (FRAGMENTS_T *)(slot_mem + slot_mem_size);
	slot_mem_size += frag_mem_size;

	g_free_fragments = g_fragments_base;
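	/* Chain the fragment blocks into a NULL-terminated free list */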
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(FRAGMENTS_T **)&g_fragments_base[i] =
			&g_fragments_base[i + 1];
	}
	*(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	g_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "failed to get IRQ\n");
		return irq;
	}

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */

	dsb(); /* Ensure all writes have completed */

	err = bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)slot_phys);
	if (err) {
		dev_err(dev, "mailbox write failed\n");
		return err;
	}

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %x, phys %pad)",
		(unsigned int)vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}
Example #16
/**
 * sti_gdp_prepare_layer
 * @layer: GDP layer
 * @first_prepare: true if it is the first time this function is called
 *
 * Update the free GDP node list according to the layer properties.
 *
 * RETURNS:
 * 0 on success.
 */
static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
{
    struct sti_gdp_node_list *list;
    struct sti_gdp_node *top_field, *btm_field;
    struct drm_display_mode *mode = layer->mode;
    struct device *dev = layer->dev;
    struct sti_gdp *gdp = to_sti_gdp(layer);
    struct sti_compositor *compo = dev_get_drvdata(dev);
    int format;
    unsigned int depth, bpp;
    int rate = mode->clock * 1000;
    int res;
    u32 ydo, xdo, yds, xds;

    list = sti_gdp_get_free_nodes(layer);
    top_field = list->top_field;
    btm_field = list->btm_field;

    dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
            sti_layer_to_str(layer), top_field, btm_field);

    /* Build the top field from layer params */
    top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
    top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
    format = sti_gdp_fourcc2format(layer->format);
    if (format == -1) {
        DRM_ERROR("Format not supported by GDP %.4s\n",
                  (char *)&layer->format);
        return 1;
    }
    top_field->gam_gdp_ctl |= format;
    top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
    top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

    /* pixel memory location */
    drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
    top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
    top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
    top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];

    /* input parameters */
    top_field->gam_gdp_pmp = layer->pitches[0];
    top_field->gam_gdp_size =
        clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
        clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);

    /* output parameters */
    ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
    yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
    xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
    xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
    top_field->gam_gdp_vpo = (ydo << 16) | xdo;
    top_field->gam_gdp_vps = (yds << 16) | xds;

    /* Same content and chained together */
    memcpy(btm_field, top_field, sizeof(*btm_field));
    top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
    btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);

    /* Interlaced mode: the bottom field starts one pitch further in memory */
    if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
        btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
                                 layer->pitches[0];

    if (first_prepare) {
        /* Register gdp callback */
        if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
                                    compo->vtg_main : compo->vtg_aux,
                                    &gdp->vtg_field_nb, layer->mixer_id)) {
            DRM_ERROR("Cannot register VTG notifier\n");
            return 1;
        }

        /* Set and enable gdp clock */
        if (gdp->clk_pix) {
            res = clk_set_rate(gdp->clk_pix, rate);
            if (res < 0) {
                DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
                          rate);
                return 1;
            }

            if (clk_prepare_enable(gdp->clk_pix)) {
                DRM_ERROR("Failed to prepare/enable gdp\n");
                return 1;
            }
        }
    }

    return 0;
}
Example #17
static void sti_gdp_init(struct sti_layer *layer)
{
    struct sti_gdp *gdp = to_sti_gdp(layer);
    struct device_node *np = layer->dev->of_node;
    dma_addr_t dma;
    void *base;
    unsigned int i, size;

    /* Allocate all the nodes within a single memory page */
    size = sizeof(struct sti_gdp_node) *
           GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;

    base = dma_alloc_writecombine(layer->dev,
                                  size, &dma, GFP_KERNEL | GFP_DMA);
    if (!base) {
        DRM_ERROR("Failed to allocate memory for GDP node\n");
        return;
    }
    memset(base, 0, size);
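    /*
     * Carve one top-field and one bottom-field node per bank out of the
     * allocation, checking that each node's bus address is 16-byte aligned.
     */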

    for (i = 0; i < GDP_NODE_NB_BANK; i++) {
        if (virt_to_dma(layer->dev, base) & 0xF) {
            DRM_ERROR("Mem alignment failed\n");
            return;
        }
        gdp->node_list[i].top_field = base;
        DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
        base += sizeof(struct sti_gdp_node);

        if (virt_to_dma(layer->dev, base) & 0xF) {
            DRM_ERROR("Mem alignment failed\n");
            return;
        }
        gdp->node_list[i].btm_field = base;
        DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
        base += sizeof(struct sti_gdp_node);
    }

    if (of_device_is_compatible(np, "st,stih407-compositor")) {
        /* Each GDP of the STiH407 chip has its own pixel clock */
        char *clk_name;

        switch (layer->desc) {
        case STI_GDP_0:
            clk_name = "pix_gdp1";
            break;
        case STI_GDP_1:
            clk_name = "pix_gdp2";
            break;
        case STI_GDP_2:
            clk_name = "pix_gdp3";
            break;
        case STI_GDP_3:
            clk_name = "pix_gdp4";
            break;
        default:
            DRM_ERROR("GDP id not recognized\n");
            return;
        }

        gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
        if (IS_ERR(gdp->clk_pix))
            DRM_ERROR("Cannot get %s clock\n", clk_name);
    }
}
Example #18
static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		consistent_sync(ptr, size, dir);
	}

	return dma_addr;
}
Example #19
static inline dma_addr_t map_single_or_page(struct device *dev, void *ptr,
		struct page *page, unsigned long offset,  size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	if (page)
		dma_addr = page_to_dma(dev, page) + offset;
	else
		dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask - 1) | mask;
		limit = (limit + 1) & ~limit;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr & ~mask) ||
			(limit && (dma_addr + size > limit));
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, page, offset, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		if (buf->page)
			dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped "
				"to %p (dma=%#x)\n", __func__,
				page_address(buf->page),
				page_to_dma(dev, buf->page),
				buf->safe, buf->safe_dma_addr);
		else
			dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped "
				"to %p (dma=%#x)\n", __func__,
				buf->ptr, virt_to_dma(dev, buf->ptr),
				buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			if (page)
				ptr = kmap_atomic(page, KM_BOUNCE_READ) + offset;
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
			wmb();
			if (page)
				kunmap_atomic(ptr - offset, KM_BOUNCE_READ);
		}
		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		if (page)
			__dma_page_cpu_to_dev(page, offset, size, dir);
		else
			__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}
Example #20
static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from original code need to be
		 * commented out b/c some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. Good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. No point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one dir at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * -ds
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		/*
		 * No need to sync the safe buffer - it was allocated
		 * via the coherent allocators.
		 */
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}
Example #21
int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
	struct device *dev = &pdev->dev;
	struct rpi_firmware *fw = platform_get_drvdata(pdev);
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	struct resource *res;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/* Translating virtual address 0 yields the constant virt-to-bus offset */
	g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);

	(void)of_property_read_u32(dev->of_node, "cache-line-size",
				   &g_cache_line_size);
	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;
	slot_mem_size += frag_mem_size;

	g_free_fragments = g_fragments_base;
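	/* Chain the fragment blocks into a NULL-terminated free list */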
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i*g_fragments_size] =
			&g_fragments_base[(i + 1)*g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	g_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "failed to get IRQ\n");
		return irq;
	}

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}