Code example #1
static int kbase_fence_wait(kbase_jd_atom *katom)
{
	int ret;

	OSK_ASSERT(NULL != katom);
	OSK_ASSERT(NULL != katom->kctx);

	sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);

	ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);

	if (ret == 1)
	{
		/* Already signalled */
		return 0;
	}
	else if (ret < 0)
	{
		goto cancel_atom;
	}
	return 1;

cancel_atom:
	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
	/* We should cause the dependent jobs in the bag to be failed;
	 * to do this we schedule the work queue to complete this job */
	OSK_ASSERT(0 == object_is_on_stack(&katom->work));
	INIT_WORK(&katom->work, kbase_fence_wait_worker);
	queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
	return 1;
}
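
The OSK_ASSERT(0 == object_is_on_stack(&katom->work)) check guards against queueing a work_struct that lives on the kernel stack, since the work item must outlive this function. For reference, the helper being asserted on amounts to the following (a minimal sketch of the kernel's definition from <linux/sched.h> of this era, shown for illustration rather than quoted verbatim):

/* Sketch: true iff obj points into the current task's kernel stack,
 * i.e. within [task_stack_page(current), task_stack_page(current) + THREAD_SIZE).
 */
static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
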
Code example #2
static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
{
	kbase_jd_atom *katom = container_of(waiter, kbase_jd_atom, sync_waiter);
	kbase_context *kctx;

	OSK_ASSERT(NULL != katom);

	kctx = katom->kctx;

	OSK_ASSERT(NULL != kctx);

	/* Propagate the fence status to the atom.
	 * If negative then cancel this atom and its dependencies.
	 */
	if (fence->status < 0)
	{
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
	}

	/* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue
	 *
	 * The issue is that we may signal the timeline while holding kctx->jctx.lock and
	 * the callbacks are run synchronously from sync_timeline_signal. So we simply defer the work.
	 */

	OSK_ASSERT(0 == object_is_on_stack(&katom->work));
	INIT_WORK(&katom->work, kbase_fence_wait_worker);
	queue_work(kctx->jctx.job_done_wq, &katom->work);
}
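
Both snippets defer completion to kbase_fence_wait_worker(), which is not included in this listing. Purely to illustrate the container_of() pattern such a worker relies on, its shape is presumably close to the following (hypothetical sketch, not the actual kbase implementation; treating jctx.lock as a mutex is our assumption):

static void example_fence_wait_worker(struct work_struct *work)
{
	/* Recover the atom that embeds this work item. */
	kbase_jd_atom *katom = container_of(work, kbase_jd_atom, work);
	kbase_context *kctx = katom->kctx;

	/* Runs in workqueue context, so the job context lock can be taken
	 * without the deadlock risk described in the callback above. */
	mutex_lock(&kctx->jctx.lock);
	/* ... complete the atom, or cancel it and its dependencies ... */
	mutex_unlock(&kctx->jctx.lock);
}
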
Code example #3
File: blk-map.c  Project: 03199618/linux
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	rq->buffer = NULL;
	return 0;
}
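
For context, a caller of blk_rq_map_kern() in this pre-blk-mq era looks roughly like the sketch below. It is hypothetical and heavily trimmed: the passthrough command bytes, sense buffer and timeout setup are omitted, and only the mapping and execution flow is shown.

static int example_issue_pc(struct request_queue *q, struct gendisk *disk,
			    void *kbuf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR_OR_NULL(rq))	/* NULL vs ERR_PTR depends on kernel version */
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/* kbuf should come from kmalloc(); an on-stack buffer would force
	 * the bio_copy_kern() bounce path above. */
	err = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return err;
}
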
Code example #4
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr;
	unsigned int alignment;
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	kaddr = (unsigned long)kbuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	do_copy = ((kaddr & alignment) || (len & alignment) ||
		   object_is_on_stack(kbuf));

	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
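
The two versions differ mainly in that the newer one folds the manual alignment test into blk_rq_aligned(). That helper amounts to the following (a sketch of the block layer's definition, shown for illustration rather than quoted verbatim):

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned long alignment = queue_dma_alignment(q) | q->dma_pad_mask;

	/* Both the buffer address and the transfer length must satisfy the
	 * queue's DMA alignment and padding constraints. */
	return !(addr & alignment) && !(len & alignment);
}
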
Code example #5
void kbase_pm_send_event(kbase_device *kbdev, kbase_pm_event event)
{
	int pending_events;
	int work_active;
	int old_value, new_value;

	OSK_ASSERT(kbdev != NULL);

	if ( (kbdev->pm.current_policy->flags & KBASE_PM_POLICY_FLAG_NO_CORE_TRANSITIONS)
		 && event == KBASE_PM_EVENT_CHANGE_GPU_STATE )
	{
		/* Optimize out event sending when the policy doesn't transition individual cores */
		return;
	}

	KBASE_TRACE_ADD( kbdev, PM_SEND_EVENT, NULL, NULL, 0u, event );

	pending_events = atomic_read(&kbdev->pm.pending_events);

	/* Atomically OR the new event into the pending_events bit mask */
	do
	{
		old_value = pending_events;
		new_value = kbasep_pm_merge_event(pending_events, event);
		if (old_value == new_value)
		{
			/* Event already pending */
			return;
		}
		pending_events = atomic_cmpxchg(&kbdev->pm.pending_events, old_value, new_value);
	} while (old_value != pending_events);

	work_active = atomic_read(&kbdev->pm.work_active);
	do
	{
		old_value = work_active;
		switch(old_value)
		{
			case KBASE_PM_WORK_ACTIVE_STATE_INACTIVE:
				/* Need to enqueue an event */
				new_value = KBASE_PM_WORK_ACTIVE_STATE_ENQUEUED;
				break;
			case KBASE_PM_WORK_ACTIVE_STATE_ENQUEUED:
				/* Event already queued */
				return;
			case KBASE_PM_WORK_ACTIVE_STATE_PROCESSING:
				/* Event being processed, we need to ensure it checks for another event */
				new_value = KBASE_PM_WORK_ACTIVE_STATE_PENDING_EVT;
				break;
			case KBASE_PM_WORK_ACTIVE_STATE_PENDING_EVT:
				/* Event being processed, but another check for events is going to happen */
				return;
			default:
				OSK_ASSERT(0);
		}
		work_active = atomic_cmpxchg(&kbdev->pm.work_active, old_value, new_value);
	} while (old_value != work_active);

	if (old_value == KBASE_PM_WORK_ACTIVE_STATE_INACTIVE)
	{
		KBASE_TRACE_ADD( kbdev, PM_ACTIVATE_WORKER, NULL, NULL, 0u, 0u );
		kbdev->pm.no_outstanding_event = 0;
		OSK_ASSERT(0 == object_is_on_stack(&kbdev->pm.work));
		INIT_WORK(&kbdev->pm.work, kbase_pm_worker);
		queue_work(kbdev->pm.workqueue, &kbdev->pm.work);
	}
}
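
Both do/while loops above are instances of the usual lock-free read-modify-write pattern built on atomic_cmpxchg(): recompute the new value from the observed old one and retry until the exchange succeeds. A minimal generic sketch of the pattern (illustrative only; for a plain bit-OR, current kernels would simply use atomic_or()):

/* Atomically set a flag bit in *v without taking a lock.
 * Returns 1 if this call set the bit, 0 if it was already set. */
static int example_atomic_set_flag(atomic_t *v, int flag)
{
	int old, cur = atomic_read(v);

	do {
		old = cur;
		if (old & flag)
			return 0;	/* already set; nothing to do */
		/* atomic_cmpxchg() returns the value it actually observed. */
		cur = atomic_cmpxchg(v, old, old | flag);
	} while (cur != old);

	return 1;
}
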
Code example #6
/*
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	int is_on_stack;

	is_on_stack = object_is_on_stack(buf);
	if (is_on_stack) {
		/*
		 * dma onto stack is unsafe/nonportable, but callers to this
		 * routine normally provide temporary on-stack buffers ...
		 */
		data_buf = kmalloc(len, GFP_KERNEL);
		if (!data_buf)
			return -ENOMEM;
	} else
		data_buf = buf;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (is_on_stack) {
		memcpy(buf, data_buf, len);
		kfree(data_buf);
	}

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
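
As the header comment notes, callers may hand in an on-stack buffer and rely on the bounce path. A hypothetical caller in the same file (the routine is static) could look like this; the wrapper name is invented, but the flow mirrors how the SPI-mode CSD read uses this helper:

static int example_spi_read_csd(struct mmc_card *card, struct mmc_host *host,
				u32 csd[4])
{
	/* csd[] may live on the caller's stack: mmc_send_cxd_data() detects
	 * that with object_is_on_stack() and bounces the DMA through a
	 * kmalloc'd buffer, copying the result back afterwards. */
	return mmc_send_cxd_data(card, host, MMC_SEND_CSD, csd, 16);
}
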
Code example #7
/**
 * Find the first available slot for a new block of shared memory
 * and map the user buffer.
 * Update the descriptors to L1 descriptors
 * Update the buffer_start_offset and buffer_size fields
 * shmem_desc is updated to the mapped shared memory descriptor
 **/
int tf_map_shmem(
		struct tf_connection *connection,
		u32 buffer,
		/* flags for read-write access rights on the memory */
		u32 flags,
		bool in_user_space,
		u32 descriptors[TF_MAX_COARSE_PAGES],
		u32 *buffer_start_offset,
		u32 buffer_size,
		struct tf_shmem_desc **shmem_desc,
		u32 *descriptor_count)
{
	struct tf_shmem_desc *desc = NULL;
	int error;

	dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n",
					connection,
					(void *) buffer,
					flags);

	/*
	 * Added temporary to avoid kernel stack buffer
	 */
	if (!in_user_space) {
		if (object_is_on_stack((void *)buffer) != 0) {
			dprintk(KERN_ERR
				"tf_map_shmem: "
				"kernel stack buffers "
				"(addr=0x%08X) "
				"are not supported",
				buffer);
			error = -ENOSYS;
			goto error;
		}
	}

	mutex_lock(&(connection->shmem_mutex));

	/*
	 * Check the list of free shared memory
	 * is not empty
	 */
	if (list_empty(&(connection->free_shmem_list))) {
		if (atomic_read(&(connection->shmem_count)) ==
				TF_SHMEM_MAX_COUNT) {
			printk(KERN_ERR "tf_map_shmem(%p):"
				" maximum shared memories already registered\n",
				connection);
			error = -ENOMEM;
			goto error;
		}

		/* no descriptor available, allocate a new one */

		desc = (struct tf_shmem_desc *) internal_kmalloc(
			sizeof(*desc), GFP_KERNEL);
		if (desc == NULL) {
			printk(KERN_ERR "tf_map_shmem(%p):"
				" failed to allocate descriptor\n",
				connection);
			error = -ENOMEM;
			goto error;
		}

		/* Initialize the structure */
		desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM;
		atomic_set(&desc->ref_count, 1);
		INIT_LIST_HEAD(&(desc->list));

		atomic_inc(&(connection->shmem_count));
	} else {
		/* take the first free shared memory descriptor */
		desc = list_first_entry(&(connection->free_shmem_list),
			struct tf_shmem_desc, list);
		list_del(&(desc->list));
	}

	/* Add the descriptor to the used list */
	list_add(&(desc->list), &(connection->used_shmem_list));

	error = tf_fill_descriptor_table(
			&(connection->cpt_alloc_context),
			desc,
			buffer,
			connection->vmas,
			descriptors,
			buffer_size,
			buffer_start_offset,
			in_user_space,
			flags,
			descriptor_count);

	if (error != 0) {
		dprintk(KERN_ERR "tf_map_shmem(%p):"
			" tf_fill_descriptor_table failed with error "
			"code %d!\n",
			connection,
			error);
		goto error;
	}
	desc->client_buffer = (u8 *) buffer;

	/*
	 * Successful completion.
	 */
	*shmem_desc = desc;
	mutex_unlock(&(connection->shmem_mutex));
	dprintk(KERN_DEBUG "tf_map_shmem: success\n");
	return 0;


	/*
	 * Error handling.
	 */
error:
	mutex_unlock(&(connection->shmem_mutex));
	dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n",
		error);

	tf_unmap_shmem(
			connection,
			desc,
			0);

	return error;
}
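
The stack-buffer check above means kernel-side callers must register kmalloc'd or otherwise heap-backed memory. A hypothetical kernel-space caller, with every name other than tf_map_shmem() itself being illustrative:

static int example_register_kernel_buffer(struct tf_connection *connection,
					  void *kbuf, u32 size, u32 flags)
{
	u32 descriptors[TF_MAX_COARSE_PAGES];
	u32 offset = 0;
	u32 descriptor_count = 0;
	struct tf_shmem_desc *shmem_desc = NULL;

	/* kbuf must not live on the kernel stack, or tf_map_shmem()
	 * fails with -ENOSYS as shown above. */
	return tf_map_shmem(connection, (u32)(unsigned long)kbuf, flags,
			    false, descriptors, &offset, size,
			    &shmem_desc, &descriptor_count);
}
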
Code example #8
File: stacktrace.c  Project: 020gzh/linux
/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 *	sub	sp, sp, #0x10
 *	stp	x29, x30, [sp]
 *	mov	x29, sp
 *
 * A simple function epilogue looks like this:
 *	mov	sp, x29
 *	ldp	x29, x30, [sp]
 *	add	sp, sp, #0x10
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long high, low;
	unsigned long fp = frame->fp;
	unsigned long irq_stack_ptr;

	/*
	 * Switching between stacks is valid when tracing current and in
	 * non-preemptible context.
	 */
	if (tsk == current && !preemptible())
		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
	else
		irq_stack_ptr = 0;

	low  = frame->sp;
	/* irq stacks are not THREAD_SIZE aligned */
	if (on_irq_stack(frame->sp, raw_smp_processor_id()))
		high = irq_stack_ptr;
	else
		high = ALIGN(low, THREAD_SIZE) - 0x20;

	if (fp < low || fp > high || fp & 0xf)
		return -EINVAL;

	frame->sp = fp + 0x10;
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk && tsk->ret_stack &&
			(frame->pc == (unsigned long)return_to_handler)) {
		/*
		 * This is a case where function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it to an original value.
		 */
		frame->pc = tsk->ret_stack[frame->graph--].ret;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	/*
	 * Check whether we are going to walk through from interrupt stack
	 * to task stack.
	 * If we reach the end of the stack - and its an interrupt stack,
	 * unpack the dummy frame to find the original elr.
	 *
	 * Check the frame->fp we read from the bottom of the irq_stack,
	 * and the original task stack pointer are both in current->stack.
	 */
	if (frame->sp == irq_stack_ptr) {
		struct pt_regs *irq_args;
		unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);

		if (object_is_on_stack((void *)orig_sp) &&
		   object_is_on_stack((void *)frame->fp)) {
			frame->sp = orig_sp;

			/* orig_sp is the saved pt_regs, find the elr */
			irq_args = (struct pt_regs *)orig_sp;
			frame->pc = irq_args->pc;
		} else {
			/*
			 * This frame has a non-standard format, and we
			 * didn't fix it, because the data looked wrong.
			 * Refuse to output this frame.
			 */
			return -EINVAL;
		}
	}

	return 0;
}
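
unwind_frame() advances one frame per call; callers drive it in a loop until it fails, in the style of arm64's walk_stackframe(). A rough sketch of that driving loop (every name other than unwind_frame() is illustrative):

static void example_walk_stackframe(struct task_struct *tsk,
				    struct stackframe *frame,
				    int (*fn)(struct stackframe *, void *),
				    void *data)
{
	while (1) {
		/* Let the consumer record this frame; a non-zero return
		 * stops the walk early. */
		if (fn(frame, data))
			break;
		/* Step to the caller's frame; stop on malformed frames. */
		if (unwind_frame(tsk, frame) < 0)
			break;
	}
}
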