Example #1
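/* Retire the bin job at the head of the bin queue: hand it over to
 * the render stage and start binning the next queued job.
 */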
static void
vc4_irq_finish_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);

	if (!exec)
		return;

	vc4_move_job_to_render(dev, exec);
	vc4_submit_next_bin_job(dev);
}
Example #2
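/* Cancel the bin job at the head of the queue: requeue it at the tail
 * of bin_job_list so binning can be retried later, then start the next
 * queued bin job.
 */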
static void
vc4_cancel_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);

	if (!exec)
		return;

	list_move_tail(&exec->head, &vc4->bin_job_list);
	vc4_submit_next_bin_job(dev);
}
Example #3
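/* Worker scheduled when the binner runs out of memory: allocate a
 * fresh 256KB overflow BO, arrange for the previously installed
 * overflow BO to be released once the job using it completes, and
 * point the hardware at the new allocation.
 */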
static void
vc4_overflow_mem_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, overflow_mem_work);
	struct drm_device *dev = vc4->dev;
	struct vc4_bo *bo;

	bo = vc4_bo_create(dev, 256 * 1024, true);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}

	/* If there's a job executing currently, then our previous
	 * overflow allocation is getting used in that job and we need
	 * to queue it to be released when the job is done.  But if no
	 * job is executing at all, then we can free the old overflow
	 * object directly.
	 *
	 * No lock necessary for this pointer since we're the only
	 * ones that update the pointer, and our workqueue won't
	 * reenter.
	 */
	if (vc4->overflow_mem) {
		struct vc4_exec_info *current_exec;
		unsigned long irqflags;

		spin_lock_irqsave(&vc4->job_lock, irqflags);
		current_exec = vc4_first_bin_job(vc4);
		if (current_exec) {
			vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
			list_add_tail(&vc4->overflow_mem->unref_head,
				      &current_exec->unref_list);
			vc4->overflow_mem = NULL;
		}
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}

	if (vc4->overflow_mem)
		drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
	vc4->overflow_mem = bo;

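	/* Point the hardware at the new overflow BO and re-enable the
	 * out-of-memory interrupt (the V3D_INTCTL write acks any
	 * pending OUTOMEM status).
	 */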
	V3D_WRITE(V3D_BPOA, bo->base.paddr);
	V3D_WRITE(V3D_BPOS, bo->base.base.size);
	V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
	V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
}
Example #4
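/* Variant of the overflow worker that carves slots out of the
 * preallocated bin_bo: reserve a free slot via vc4_v3d_get_bin_slot(),
 * defer freeing the previously used overflow slot until the job
 * referencing it retires, and reprogram the binner overflow registers
 * under job_lock.
 */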
static void
vc4_overflow_mem_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, overflow_mem_work);
	struct vc4_bo *bo = vc4->bin_bo;
	int bin_bo_slot;
	struct vc4_exec_info *exec;
	unsigned long irqflags;

	bin_bo_slot = vc4_v3d_get_bin_slot(vc4);
	if (bin_bo_slot < 0) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	if (vc4->bin_alloc_overflow) {
		/* If we had overflow memory allocated previously,
		 * then that chunk will free when the current bin job
		 * is done.  If we don't have a bin job running, then
		 * the chunk will be done whenever the list of render
		 * jobs has drained.
		 */
		exec = vc4_first_bin_job(vc4);
		if (!exec)
			exec = vc4_last_render_job(vc4);
		if (exec) {
			exec->bin_slots |= vc4->bin_alloc_overflow;
		} else {
			/* There's nothing queued in the hardware, so
			 * the old slot is free immediately.
			 */
			vc4->bin_alloc_used &= ~vc4->bin_alloc_overflow;
		}
	}
	vc4->bin_alloc_overflow = BIT(bin_bo_slot);

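	/* Point the binner at the newly reserved slot and re-enable the
	 * out-of-memory interrupt (the V3D_INTCTL write acks any
	 * pending OUTOMEM status).
	 */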
	V3D_WRITE(V3D_BPOA, bo->base.paddr + bin_bo_slot * vc4->bin_alloc_size);
	V3D_WRITE(V3D_BPOS, bo->base.base.size);
	V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
	V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}