static void kbase_event_post_worker(osk_workq_work *data)
{
	kbase_jd_atom *atom = CONTAINER_OF(data, kbase_jd_atom, work);
	kbase_context *ctx = atom->kctx;

	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
	{
		kbase_jd_free_external_resources(atom);
	}

	if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE)
	{
		if (atom->event_code == BASE_JD_EVENT_DONE)
		{
			/* Don't report the event */
			kbase_event_process(ctx, atom);
			return;
		}
	}

	mutex_lock(&ctx->event_mutex);
	OSK_DLIST_PUSH_BACK(&ctx->event_list, atom, kbase_jd_atom, dep_item[0]);
	mutex_unlock(&ctx->event_mutex);

	kbase_event_wakeup(ctx);
}
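/*
 * What follows appears to be a later revision of the same worker, ported from
 * the OSK work-queue and list abstractions to plain Linux work_struct/list
 * helpers and extended with a BASEP_JD_REQ_EVENT_NEVER check.
 */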
/**
 * kbase_event_post_worker - Post a completed atom's event to its context
 * @data:  Work structure embedded in the completed &kbase_jd_atom
 *
 * Frees any external resources held by the atom, processes atoms whose
 * events must not be reported to user space, and queues the rest on the
 * context's event list before waking any waiters.
 */
static void kbase_event_post_worker(struct work_struct *data)
{
	struct kbase_jd_atom *atom = container_of(data, struct kbase_jd_atom, work);
	struct kbase_context *ctx = atom->kctx;

	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
		kbase_jd_free_external_resources(atom);

	if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
		if (atom->event_code == BASE_JD_EVENT_DONE) {
			/* Don't report the event */
			kbase_event_process(ctx, atom);
			return;
		}
	}

	if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) {
		/* Don't report the event */
		kbase_event_process(ctx, atom);
		return;
	}

	mutex_lock(&ctx->event_mutex);
	list_add_tail(&atom->dep_item[0], &ctx->event_list);
	mutex_unlock(&ctx->event_mutex);

	kbase_event_wakeup(ctx);
}
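/*
 * For context, the worker above runs off the context's event workqueue. A
 * minimal sketch of the posting side follows; the kbase_event_post() helper
 * and the ctx->event_workq queueing site are assumptions based on the
 * surrounding driver and are not part of the excerpt above.
 */
void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
{
	/* Defer to process context: freeing external resources and walking
	 * the job dispatch state machine may sleep. */
	INIT_WORK(&atom->work, kbase_event_post_worker);
	queue_work(ctx->event_workq, &atom->work);
}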
/**
 * kbase_event_process_noreport_worker - Worker for processing atoms that do not
 *                                       return an event but do have external
 *                                       resources
 * @data:  Work structure
 */
static void kbase_event_process_noreport_worker(struct work_struct *data)
{
	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
			work);
	struct kbase_context *kctx = katom->kctx;

	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
		kbase_jd_free_external_resources(katom);

	mutex_lock(&kctx->jctx.lock);
	kbase_event_process(kctx, katom);
	mutex_unlock(&kctx->jctx.lock);
}
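/*
 * A sketch of how the worker above would typically be dispatched: only atoms
 * holding external resources need process context, everything else can be
 * processed in place. The kbase_event_process_noreport() name and the
 * kctx->event_workq queue are assumptions, not part of this excerpt.
 */
static void kbase_event_process_noreport(struct kbase_context *kctx,
		struct kbase_jd_atom *katom)
{
	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
		/* Freeing external resources may sleep; defer to the worker */
		INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
		queue_work(kctx->event_workq, &katom->work);
	} else {
		kbase_event_process(kctx, katom);
	}
}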
/**
 * kbase_event_dequeue - Remove the oldest pending event from a context
 * @ctx:     Context to dequeue from
 * @uevent:  User-visible event structure to fill in
 *
 * Return: 0 if @uevent was filled in (including the synthetic
 * BASE_JD_EVENT_DRV_TERMINATED event once the queue has been closed), or
 * -1 if no event is currently pending.
 */
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
{
	struct kbase_jd_atom *atom;

	KBASE_DEBUG_ASSERT(ctx);

	mutex_lock(&ctx->event_mutex);

	if (list_empty(&ctx->event_list)) {
		if (!ctx->event_closed) {
			mutex_unlock(&ctx->event_mutex);
			return -1;
		}

		/* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
		mutex_unlock(&ctx->event_mutex);
		uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
		memset(&uevent->udata, 0, sizeof(uevent->udata));
		dev_dbg(ctx->kbdev->dev,
				"event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
				BASE_JD_EVENT_DRV_TERMINATED);
		return 0;
	}

	/* normal event processing */
	atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
	list_del(ctx->event_list.next);

	mutex_unlock(&ctx->event_mutex);

	dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
	uevent->event_code = atom->event_code;
	uevent->atom_number = (atom - ctx->jctx.atoms);

	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
		kbase_jd_free_external_resources(atom);

	mutex_lock(&ctx->jctx.lock);
	uevent->udata = kbase_event_process(ctx, atom);
	mutex_unlock(&ctx->jctx.lock);

	return 0;
}
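/*
 * Usage sketch: user space receives events by read()ing the kbase file
 * descriptor, and the read handler drains the queue one base_jd_event_v2 at a
 * time. The function below is a simplified illustration; its name, buffer
 * handling and error policy are assumptions rather than the driver's actual
 * read implementation.
 */
static ssize_t kbase_event_read_sketch(struct kbase_context *kctx,
		char __user *buf, size_t count)
{
	struct base_jd_event_v2 uevent;
	ssize_t out = 0;

	while (count - out >= sizeof(uevent)) {
		if (kbase_event_dequeue(kctx, &uevent))
			break;	/* -1: nothing pending and queue still open */
		if (copy_to_user(buf + out, &uevent, sizeof(uevent)))
			return -EFAULT;
		out += sizeof(uevent);
		if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED)
			break;	/* context is shutting down */
	}
	return out;
}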