/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Calls kbase_destroy_os_context() to free OS specific structures.
 * Will release all outstanding regions.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;

	/* MALI_SEC_INTEGRATION */
	int profile_count;

	/* MALI_SEC_INTEGRATION */
	if (!kctx) {
		printk("Attempted to destroy an uninitialized or already-destroyed context: kctx is NULL\n");
		return;
	} else if (kctx->ctx_status != CTX_INITIALIZED) {
		printk("Attempted to destroy an uninitialized or already-destroyed context\n");
		printk("kctx: 0x%p, kctx->tgid: %d, kctx->ctx_status: 0x%x\n", kctx, kctx->tgid, kctx->ctx_status);
		return;
	}

	KBASE_DEBUG_ASSERT(NULL != kctx);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	/* MALI_SEC_INTEGRATION */
	for (profile_count = 0; profile_count < 3; profile_count++) {
		if (wait_event_timeout(kctx->mem_profile_wait, atomic_read(&kctx->mem_profile_showing_state) == 0, (unsigned int) msecs_to_jiffies(1000)))
			break;
		else
			printk("[G3D] waiting for memory profile\n");
	}

	/* MALI_SEC_INTEGRATION */
	while (wait_event_timeout(kbdev->pm.suspending_wait, kbdev->pm.suspending == false, (unsigned int) msecs_to_jiffies(1000)) == 0)
		printk("[G3D] Waiting for resuming the device\n");

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process */
	/* A suspend won't happen here, because we're in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	kbase_jd_zap_context(kctx);
	kbase_event_cleanup(kctx);

	kbase_gpu_vm_lock(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
	while (pending_regions_to_clean) {
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);

		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);

		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);

	/* Safe to call this even if it wasn't initialized (assuming kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);

	kbase_jd_exit(kctx);

	kbase_pm_context_idle(kbdev);

	kbase_mmu_term(kctx);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->destroy_context)
		kbdev->vendor_callbacks->destroy_context(kctx);

	if (kctx->ctx_need_qos)
		kctx->ctx_need_qos = false;

	vfree(kctx);
	/* MALI_SEC_INTEGRATION */
	kctx = NULL;
}
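The pending-region cleanup above walks a cookie bitmask: __ffs() picks the lowest set bit, the matching region setup is destroyed, and the bit is cleared until the mask is empty. Below is a minimal user-space sketch of that walk, not driver code; the names are illustrative and the GCC/Clang builtin __builtin_ffsl stands in for the kernel's 0-based __ffs (the builtin is 1-based, hence the -1).

#include <stdio.h>

static void clean_pending_region(unsigned int cookie)
{
	printf("cleaning pending region for cookie %u\n", cookie);
}

int main(void)
{
	/* pretend cookies 1, 4 and 9 still have pending region setups */
	unsigned long pending = (1UL << 1) | (1UL << 4) | (1UL << 9);

	while (pending) {
		/* __builtin_ffsl() is 1-based; the kernel's __ffs() is 0-based */
		unsigned int cookie = (unsigned int)__builtin_ffsl((long)pending) - 1;

		clean_pending_region(cookie);
		pending &= ~(1UL << cookie);
	}
	return 0;
}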
int kbase_instr_hwcnt_enable(struct kbase_context *kctx,
                             struct kbase_uk_hwcnt_setup *setup)
{
    struct kbase_device *kbdev;
    bool access_allowed;
    int err;

    kbdev = kctx->kbdev;

    /* Determine if the calling task has access to this capability */
    access_allowed = kbase_security_has_capability(kctx,
                     KBASE_SEC_INSTR_HW_COUNTERS_COLLECT,
                     KBASE_SEC_FLAG_NOAUDIT);
    if (!access_allowed)
        return -EINVAL;

    /* Mark the context as active so the GPU is kept turned on */
    /* A suspend won't happen here, because we're in a syscall from a
     * userspace thread. */
    kbase_pm_context_active(kbdev);

    /* Schedule the context in */
    kbasep_js_schedule_privileged_ctx(kbdev, kctx);
    err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, setup);
    if (err) {
        /* Release the context. This had its own Power Manager Active
         * reference */
        kbasep_js_release_privileged_ctx(kbdev, kctx);

        /* Also release our Power Manager Active reference */
        kbase_pm_context_idle(kbdev);
    }

    return err;
}
Example #3
void kbase_wait_write_flush(struct kbase_context *kctx)
{
	u32 base_count = 0;

	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread */

	kbase_pm_context_active(kctx->kbdev);
	kbase_pm_request_gpu_cycle_counter(kctx->kbdev);

	while (true) {
		u32 new_count;

		new_count = kbase_reg_read(kctx->kbdev,
					GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
		/* First time around, just store the count. */
		if (base_count == 0) {
			base_count = new_count;
			continue;
		}

		/* No need to handle wrapping, unsigned maths works for this. */
		if ((new_count - base_count) > 1000)
			break;
	}

	kbase_pm_release_gpu_cycle_counter(kctx->kbdev);
	kbase_pm_context_idle(kctx->kbdev);
}
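kbase_wait_write_flush() above relies on unsigned arithmetic so the 32-bit cycle counter can wrap between reads without breaking the 1000-cycle test. A stand-alone sketch of that wrap-safe comparison (the helper name is made up, not part of the driver):

#include <assert.h>
#include <stdint.h>

static int elapsed_at_least(uint32_t base, uint32_t now, uint32_t threshold)
{
	/* (now - base) is the elapsed count modulo 2^32, so wrap-around is harmless */
	return (uint32_t)(now - base) > threshold;
}

int main(void)
{
	/* no wrap: 0x100 -> 0x600 is 0x500 (1280) cycles */
	assert(elapsed_at_least(0x100u, 0x600u, 1000u));
	/* wrap: 0xFFFFFF00 -> 0x00000500 is still 0x600 (1536) cycles */
	assert(elapsed_at_least(0xFFFFFF00u, 0x00000500u, 1000u));
	return 0;
}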
Example #4
void kbase_pm_resume(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	/* MUST happen before any pm_context_active calls occur */
	mutex_lock(&kbdev->pm.lock);
	kbdev->pm.suspending = MALI_FALSE;
	mutex_unlock(&kbdev->pm.lock);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Restore the keep_gpu_powered calls */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
		 nr_keep_gpu_powered_ctxs > 0 ;
		 --nr_keep_gpu_powered_ctxs ) {
		kbase_pm_context_active(kbdev);
	}

	/* Re-enable instrumentation, if it was previously disabled */
	kbase_instr_hwcnt_resume(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);
}
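The resume path above takes one kbase_pm_context_active() reference up front, re-adds one per keep_gpu_powered holder, and finishes with a matching kbase_pm_context_idle(). A toy user-space model of that active/idle reference counting (illustrative types and names only, not the driver's implementation):

#include <assert.h>
#include <stdbool.h>

struct toy_pm {
	int active_count;
	bool powered;
};

static void toy_pm_context_active(struct toy_pm *pm)
{
	if (pm->active_count++ == 0)
		pm->powered = true;	/* first user: power the GPU on */
}

static void toy_pm_context_idle(struct toy_pm *pm)
{
	assert(pm->active_count > 0);
	if (--pm->active_count == 0)
		pm->powered = false;	/* last user gone: allow power off */
}

int main(void)
{
	struct toy_pm pm = { 0, false };

	toy_pm_context_active(&pm);	/* the initial active call */
	toy_pm_context_active(&pm);	/* a restored keep_gpu_powered reference */
	toy_pm_context_idle(&pm);	/* the matching idle call */
	assert(pm.powered);		/* still held by the remaining reference */
	toy_pm_context_idle(&pm);
	assert(!pm.powered);		/* GPU may now power off */
	return 0;
}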
Example #5
void kbase_pm_change_policy(kbase_device *kbdev)
{
	OSK_ASSERT(kbdev != NULL);

	KBASE_TRACE_ADD( kbdev, PM_CHANGE_POLICY, NULL, NULL, 0u,
	                 kbdev->pm.current_policy->id | (kbdev->pm.new_policy->id<<16) );

	KBASE_TRACE_ADD( kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u, kbdev->pm.current_policy->id );
	kbdev->pm.current_policy->term(kbdev);
	kbdev->pm.current_policy = kbdev->pm.new_policy;
	KBASE_TRACE_ADD( kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u, kbdev->pm.current_policy->id );
	kbdev->pm.current_policy->init(kbdev);

	kbase_pm_send_event(kbdev, KBASE_PM_EVENT_POLICY_INIT);

	/* Changing policy might have occurred during an update to the core desired
	 * states, but the KBASE_PM_EVENT_CHANGE_GPU_STATE event could've been
	 * optimized out if the previous policy had
	 * KBASE_PM_POLICY_FLAG_NO_CORE_TRANSITIONS set.
	 *
	 * In any case, we issue a KBASE_PM_EVENT_CHANGE_GPU_STATE just in case. */
	kbase_pm_send_event(kbdev, KBASE_PM_EVENT_CHANGE_GPU_STATE);

	/* Now the policy change is finished, we release our fake context active reference */
	kbase_pm_context_idle(kbdev);

	kbdev->pm.new_policy = NULL;
}
int kbase_instr_hwcnt_enable(struct kbase_context *kctx,
		struct kbase_uk_hwcnt_setup *setup)
{
	struct kbase_device *kbdev;
	int err;

	kbdev = kctx->kbdev;

	/* Mark the context as active so the GPU is kept turned on */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread. */
	kbase_pm_context_active(kbdev);

	/* Schedule the context in */
	kbasep_js_schedule_privileged_ctx(kbdev, kctx);
	err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, setup);
	if (err) {
		/* Release the context. This had its own Power Manager Active
		 * reference */
		kbasep_js_release_privileged_ctx(kbdev, kctx);

		/* Also release our Power Manager Active reference */
		kbase_pm_context_idle(kbdev);
	}

	return err;
}
mali_error kbase_pm_powerup(struct kbase_device *kbdev)
{
	unsigned long flags;
	mali_error ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* MALI_SEC_INTEGRATION */
	/* During GPU initialization, vendor-specific power-on debug output is
	 * controlled by calling set_poweron_dbg(FALSE) */
	if (kbdev->vendor_callbacks->set_poweron_dbg)
		kbdev->vendor_callbacks->set_poweron_dbg(FALSE);

	/* Power up the GPU, don't enable IRQs as we are not ready to receive them. */
	ret = kbase_pm_init_hw(kbdev, MALI_FALSE);
	if (ret != MALI_ERROR_NONE) {
		mutex_unlock(&kbdev->pm.lock);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask = kbdev->shader_present_bitmap;

	/* Pretend the GPU is active to prevent a power policy turning the GPU cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
	/* Ensure cycle counter is off */
	kbdev->pm.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	/* We are ready to receive IRQs now that the power policy is set up, so enable them. */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
	kbdev->pm.driver_ready_for_irqs = MALI_TRUE;
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev, MALI_FALSE);
	mutex_unlock(&kbdev->pm.lock);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->hwcnt_init)
		kbdev->vendor_callbacks->hwcnt_init(kbdev);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return MALI_ERROR_NONE;
}
void kbase_pm_suspend(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	KBASE_DEBUG_ASSERT(kbdev);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->hwcnt_prepare_suspend)
		kbdev->vendor_callbacks->hwcnt_prepare_suspend(kbdev);

	mutex_lock(&kbdev->pm.lock);
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
	kbdev->pm.suspending = MALI_TRUE;
	mutex_unlock(&kbdev->pm.lock);

	/* From now on, the active count will drop towards zero. Sometimes, it'll
	 * go up briefly before going down again. However, once it reaches zero it
	 * will stay there - guaranteeing that we've idled all pm references */

	/* Suspend job scheduler and associated components, so that it releases all
	 * the PM active count references */
	kbasep_js_suspend(kbdev);

#ifndef MALI_SEC_HWCNT
	/* Suspend any counter collection that might be happening */
	kbase_instr_hwcnt_suspend(kbdev);
#endif

	/* Cancel the keep_gpu_powered calls */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
		 nr_keep_gpu_powered_ctxs > 0;
		 --nr_keep_gpu_powered_ctxs) {
		kbase_pm_context_idle(kbdev);
	}

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when this
	 * reaches zero. */
	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);

	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by locking the pm.lock below */

	/* Force power off the GPU and all cores (regardless of policy), only after
	 * the PM active count reaches zero (otherwise, we risk turning it off
	 * prematurely) */
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	kbase_pm_do_poweroff(kbdev, MALI_TRUE);
	mutex_unlock(&kbdev->pm.lock);
}
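The suspend path above blocks with wait_event() until pm.active_count reaches zero, relying on every kbase_pm_context_idle() that drops the count to wake the waiter. A user-space sketch of the same wait-for-zero shape using pthreads (the kernel's wait_event/wake_up pair is replaced by a condition variable; all names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct zero_wait {
	pthread_mutex_t lock;
	pthread_cond_t zero;
	int active_count;
};

static struct zero_wait zw = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.zero = PTHREAD_COND_INITIALIZER,
	.active_count = 2,
};

static void zw_put(struct zero_wait *w)
{
	pthread_mutex_lock(&w->lock);
	if (--w->active_count == 0)
		pthread_cond_broadcast(&w->zero);	/* last reference dropped */
	pthread_mutex_unlock(&w->lock);
}

static void zw_wait_for_zero(struct zero_wait *w)
{
	pthread_mutex_lock(&w->lock);
	while (w->active_count != 0)
		pthread_cond_wait(&w->zero, &w->lock);	/* predicate re-checked after wakeup */
	pthread_mutex_unlock(&w->lock);
}

static void *dropper(void *arg)
{
	(void)arg;
	zw_put(&zw);	/* e.g. the job scheduler releasing its reference */
	zw_put(&zw);	/* e.g. a keep_gpu_powered reference being cancelled */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, dropper, NULL);
	zw_wait_for_zero(&zw);	/* analogous to waiting for active_count == 0 */
	pthread_join(t, NULL);
	printf("active count reached zero: safe to force the power off\n");
	return 0;
}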
void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
                            const struct kbase_pm_ca_policy *new_policy)
{
    const struct kbase_pm_ca_policy *old_policy;
    unsigned long flags;

    KBASE_DEBUG_ASSERT(kbdev != NULL);
    KBASE_DEBUG_ASSERT(new_policy != NULL);

    KBASE_TRACE_ADD(kbdev, PM_CA_SET_POLICY, NULL, NULL, 0u,
                    new_policy->id);

    /* During a policy change we pretend the GPU is active */
    /* A suspend won't happen here, because we're in a syscall from a
     * userspace thread */
    kbase_pm_context_active(kbdev);

    mutex_lock(&kbdev->pm.lock);

    /* Remove the policy to prevent IRQ handlers from working on it */
    spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
    old_policy = kbdev->pm.backend.ca_current_policy;
    kbdev->pm.backend.ca_current_policy = NULL;
    spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

    if (old_policy->term)
        old_policy->term(kbdev);

    if (new_policy->init)
        new_policy->init(kbdev);

    spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
    kbdev->pm.backend.ca_current_policy = new_policy;

    /* If any core power state changes were previously attempted, but
     * couldn't be made because the policy was changing (current_policy was
     * NULL), then re-try them here. */
    kbase_pm_update_cores_state_nolock(kbdev);

    kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
            kbdev->shader_ready_bitmap,
            kbdev->shader_transitioning_bitmap);

    spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

    mutex_unlock(&kbdev->pm.lock);

    /* Now the policy change is finished, we release our fake context active
     * reference */
    kbase_pm_context_idle(kbdev);
}
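kbase_pm_ca_set_policy() above parks ca_current_policy at NULL under power_change_lock so concurrent readers (IRQ handlers in the real driver) skip the policy while the old term() and new init() hooks run outside the spinlock, then publishes the new pointer. A condensed user-space model of that hand-over, with toy types and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct toy_policy { const char *name; };

static pthread_mutex_t power_change_lock = PTHREAD_MUTEX_INITIALIZER;
static const struct toy_policy *current_policy;

/* runs from "IRQ" context in the real driver: must tolerate a NULL policy */
static void update_core_status(void)
{
	pthread_mutex_lock(&power_change_lock);
	if (current_policy)
		printf("core status routed through policy %s\n", current_policy->name);
	else
		printf("policy change in progress: skipped\n");
	pthread_mutex_unlock(&power_change_lock);
}

static void set_policy(const struct toy_policy *new_policy)
{
	pthread_mutex_lock(&power_change_lock);
	current_policy = NULL;		/* hide the old policy from readers */
	pthread_mutex_unlock(&power_change_lock);

	/* old_policy->term() and new_policy->init() would run here, unlocked */

	pthread_mutex_lock(&power_change_lock);
	current_policy = new_policy;	/* publish the new policy */
	pthread_mutex_unlock(&power_change_lock);
}

int main(void)
{
	static const struct toy_policy demand = { "demand" };

	update_core_status();	/* no policy installed yet: skipped */
	set_policy(&demand);
	update_core_status();	/* now routed through the new policy */
	return 0;
}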
Example #10
void kbase_pm_resume(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	/* MUST happen before any pm_context_active calls occur */
	mutex_lock(&kbdev->pm.lock);
	kbdev->pm.suspending = MALI_FALSE;
	kbase_pm_do_poweron(kbdev, MALI_TRUE);
	mutex_unlock(&kbdev->pm.lock);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Restore the keep_gpu_powered calls */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
		 nr_keep_gpu_powered_ctxs > 0 ;
		 --nr_keep_gpu_powered_ctxs) {
		kbase_pm_context_active(kbdev);
	}

#if SLSI_INTEGRATION
	if (kbdev->hwcnt.prev_mm) {
		mutex_lock(&kbdev->hwcnt.mlock);

		if ((kbdev->hwcnt.enable_for_gpr == FALSE) && (kbdev->hwcnt.s_enable_for_utilization))
			kbdev->hwcnt.enable_for_utilization = TRUE;
		else
			kbdev->hwcnt.enable_for_utilization = FALSE;

		kbase_pm_policy_change(kbdev, 2);

		mutex_unlock(&kbdev->hwcnt.mlock);
	} else
#endif
	/* Re-enable instrumentation, if it was previously disabled */
	kbase_instr_hwcnt_resume(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);
}
Example #11
mali_error kbase_pm_powerup(kbase_device *kbdev)
{
	unsigned long flags;
	mali_error ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, don't enable IRQs as we are not ready to receive them. */
	ret = kbase_pm_init_hw(kbdev, MALI_FALSE);
	if (ret != MALI_ERROR_NONE) {
		mutex_unlock(&kbdev->pm.lock);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask = kbdev->shader_present_bitmap;

	/* Pretend the GPU is active to prevent a power policy turning the GPU cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
	/* Ensure cycle counter is off */
	kbdev->pm.gpu_cycle_counter_requests = 0;
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	/* We are ready to receive IRQs now that the power policy is set up, so enable them. */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
	kbdev->pm.driver_ready_for_irqs = MALI_TRUE;
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev);
	mutex_unlock(&kbdev->pm.lock);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return MALI_ERROR_NONE;
}
Example #12
mali_error kbase_pm_powerup(kbase_device *kbdev)
{
	unsigned long flags;
	mali_error ret;

	OSK_ASSERT(kbdev != NULL);

	ret = kbase_pm_init_hw(kbdev);
	if (ret != MALI_ERROR_NONE)
	{
		return ret;
	}

	kbase_pm_power_transitioning(kbdev);

	kbasep_pm_read_present_cores(kbdev);

	/* Pretend the GPU is active to prevent a power policy turning the GPU cores off */
	spin_lock_irqsave(&kbdev->pm.active_count_lock, flags);
	kbdev->pm.active_count = 1;
	spin_unlock_irqrestore(&kbdev->pm.active_count_lock, flags);

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
	/* Ensure cycle counter is off */
	kbdev->pm.gpu_cycle_counter_requests = 0;
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	atomic_set(&kbdev->pm.pending_events, 0);

	atomic_set(&kbdev->pm.work_active, KBASE_PM_WORK_ACTIVE_STATE_INACTIVE);

	kbdev->pm.new_policy = NULL;
	kbdev->pm.current_policy = policy_list[0];
	KBASE_TRACE_ADD( kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u, kbdev->pm.current_policy->id );
	kbdev->pm.current_policy->init(kbdev);

	kbase_pm_send_event(kbdev, KBASE_PM_EVENT_POLICY_INIT);

	/* Idle the GPU */
	kbase_pm_context_idle(kbdev);

	return MALI_ERROR_NONE;
}
int kbase_instr_hwcnt_disable(struct kbase_context *kctx)
{
	int err = -EINVAL;
	struct kbase_device *kbdev = kctx->kbdev;

	err = kbase_instr_hwcnt_disable_internal(kctx);
	if (err)
		goto out;

	/* Release the context. This had its own Power Manager Active reference
	 */
	kbasep_js_release_privileged_ctx(kbdev, kctx);

	/* Also release our Power Manager Active reference */
	kbase_pm_context_idle(kbdev);

	dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p",
									kctx);
out:
	return err;
}
void kbase_pm_resume(struct kbase_device *kbdev)
{
    /* MUST happen before any pm_context_active calls occur */
    kbase_hwaccess_pm_resume(kbdev);

    /* Initial active call, to power on the GPU/cores if needed */
    kbase_pm_context_active(kbdev);

    /* Re-enable instrumentation, if it was previously disabled */
    kbase_instr_hwcnt_resume(kbdev);

    /* Resume any blocked atoms (which may cause contexts to be scheduled in
     * and dependent atoms to run) */
    kbase_resume_suspended_soft_jobs(kbdev);

    /* Resume the Job Scheduler and associated components, and start running
     * atoms */
    kbasep_js_resume(kbdev);

    /* Matching idle call, to power off the GPU/cores if we didn't actually
     * need it and the policy doesn't want it on */
    kbase_pm_context_idle(kbdev);
}
Example #15
void page_fault_worker(struct work_struct *data)
{
	u64 fault_pfn;
	u32 fault_status;
	size_t new_pages;
	size_t fault_rel_pfn;
	struct kbase_as *faulting_as;
	int as_no;
	struct kbase_context *kctx;
	struct kbase_device *kbdev;
	struct kbase_va_region *region;
	int err;
	bool grown = false;

	faulting_as = container_of(data, struct kbase_as, work_pagefault);
	fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
	as_no = faulting_as->number;

	kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);

	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
	 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
	 *
	 * NOTE: NULL can be returned here if we're gracefully handling a spurious interrupt */
	kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);

	if (kctx == NULL) {
		/* Only handle this if not already suspended */
		if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
			/* Address space has no context, terminate the work */

			/* AS transaction begin */
			mutex_lock(&faulting_as->transaction_mutex);

			kbase_mmu_disable_as(kbdev, as_no);

			mutex_unlock(&faulting_as->transaction_mutex);
			/* AS transaction end */

			kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
					KBASE_MMU_FAULT_TYPE_PAGE);
			kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
					KBASE_MMU_FAULT_TYPE_PAGE);
			kbase_pm_context_idle(kbdev);
		}
		atomic_dec(&kbdev->faults_pending);
		return;
	}

	KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);

	fault_status = faulting_as->fault_status;
	switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {

	case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT:
		/* need to check against the region to handle this one */
		break;

	case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT:
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Permission failure");
		goto fault_done;

	case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Tranlation table bus fault");
		goto fault_done;

	case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
		/* nothing to do, but we don't expect this fault currently */
		dev_warn(kbdev->dev, "Access flag unexpectedly set");
		goto fault_done;


	default:
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Unknown fault code");
		goto fault_done;
	}

	/* so we have a translation fault, let's see if it is for growable
	 * memory */
	kbase_gpu_vm_lock(kctx);

	region = kbase_region_tracker_find_region_enclosing_address(kctx,
			faulting_as->fault_addr);
	if (!region || region->flags & KBASE_REG_FREE) {
		kbase_gpu_vm_unlock(kctx);
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Memory is not mapped on the GPU");
		goto fault_done;
	}

	if ((region->flags & GROWABLE_FLAGS_REQUIRED)
			!= GROWABLE_FLAGS_REQUIRED) {
		kbase_gpu_vm_unlock(kctx);
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Memory is not growable");
		goto fault_done;
	}

	/* find the size we need to grow it by */
	/* we know the result fits in a size_t because kbase_region_tracker_find_region_enclosing_address
	 * validated the fault_addr to be within a size_t from the start_pfn */
	fault_rel_pfn = fault_pfn - region->start_pfn;

	if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
		dev_dbg(kbdev->dev, "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
				faulting_as->fault_addr, region->start_pfn,
				region->start_pfn +
				kbase_reg_current_backed_size(region));

		kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		/* [1] in case another page fault occurred while we were
		 * handling the (duplicate) page fault we need to ensure we
		 * don't lose the other page fault as a result of us clearing
		 * the MMU IRQ. Therefore, after we clear the MMU IRQ we send
		 * an UNLOCK command that will retry any stalled memory
		 * transaction (which should cause the other page fault to be
		 * raised again).
		 */
		kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
				AS_COMMAND_UNLOCK, 1);
		kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		kbase_gpu_vm_unlock(kctx);

		goto fault_done;
	}

	new_pages = make_multiple(fault_rel_pfn -
			kbase_reg_current_backed_size(region) + 1,
			region->extent);

	/* cap to max vsize */
	if (new_pages + kbase_reg_current_backed_size(region) >
			region->nr_pages)
		new_pages = region->nr_pages -
				kbase_reg_current_backed_size(region);

	if (0 == new_pages) {
		/* Duplicate of a fault we've already handled, nothing to do */
		kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		/* See comment [1] about UNLOCK usage */
		kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
				AS_COMMAND_UNLOCK, 1);
		kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) {
		if (region->gpu_alloc != region->cpu_alloc) {
			if (kbase_alloc_phy_pages_helper(
					region->cpu_alloc, new_pages) == 0) {
				grown = true;
			} else {
				kbase_free_phy_pages_helper(region->gpu_alloc,
						new_pages);
			}
		} else {
			grown = true;
		}
	}


	if (grown) {
		u32 op;

		/* alloc success */
		KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages);

		/* AS transaction begin */
		mutex_lock(&faulting_as->transaction_mutex);

		/* set up the new pages */
		err = kbase_mmu_insert_pages(kctx, region->start_pfn + kbase_reg_current_backed_size(region) - new_pages, &kbase_get_gpu_phy_pages(region)[kbase_reg_current_backed_size(region) - new_pages], new_pages, region->flags);
		if (err) {
			/* failed to insert pages, handle as a normal PF */
			mutex_unlock(&faulting_as->transaction_mutex);
			kbase_free_phy_pages_helper(region->gpu_alloc, new_pages);
			if (region->gpu_alloc != region->cpu_alloc)
				kbase_free_phy_pages_helper(region->cpu_alloc,
						new_pages);
			kbase_gpu_vm_unlock(kctx);
			/* The locked VA region will be unlocked and the cache invalidated in here */
			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
					"Page table update failure");
			goto fault_done;
		}
#if defined(CONFIG_MALI_GATOR_SUPPORT)
		kbase_trace_mali_page_fault_insert_pages(as_no, new_pages);
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
		kbase_tlstream_aux_pagefault(
				as_no,
				atomic_read(&kctx->used_pages));
#endif

		/* flush L2 and unlock the VA (resumes the MMU) */
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
			op = AS_COMMAND_FLUSH;
		else
			op = AS_COMMAND_FLUSH_PT;

		/* clear MMU interrupt - this needs to be done after updating
		 * the page tables but before issuing a FLUSH command. The
		 * FLUSH cmd has a side effect that it restarts stalled memory
		 * transactions in other address spaces which may cause
		 * another fault to occur. If we didn't clear the interrupt at
		 * this stage a new IRQ might not be raised when the GPU finds
		 * a MMU IRQ is already pending.
		 */
		kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
					 KBASE_MMU_FAULT_TYPE_PAGE);

		kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx,
					  faulting_as->fault_addr >> PAGE_SHIFT,
					  new_pages,
					  op, 1);

		mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		/* reenable this in the mask */
		kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
					 KBASE_MMU_FAULT_TYPE_PAGE);
		kbase_gpu_vm_unlock(kctx);
	} else {
static base_jd_event_code kbase_dump_cpu_gpu_time(kbase_jd_atom *katom)
{
	kbase_va_region *reg;
	osk_phy_addr addr;
	u64 pfn;
	u32 offset;
	char *page;
	struct timespec ts;
	base_dump_cpu_gpu_counters data;
	u64 system_time;
	u64 cycle_counter;
	mali_addr64 jc = katom->jc;
	kbase_context *kctx = katom->kctx;

	u32 hi1, hi2;

	memset(&data, 0, sizeof(data));

	kbase_pm_context_active(kctx->kbdev);

	/* Read hi, lo, hi to ensure that overflow from lo to hi is handled correctly */
	do {
		hi1 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
		cycle_counter = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
		hi2 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
		cycle_counter |= (((u64)hi1) << 32);
	} while (hi1 != hi2);

	/* Read hi, lo, hi to ensure that overflow from lo to hi is handled correctly */
	do {
		hi1 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), NULL);
		system_time = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_LO), NULL);
		hi2 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), NULL);
		system_time |= (((u64)hi1) << 32);
	} while (hi1 != hi2);

	/* Record the CPU's idea of current time */
	getnstimeofday(&ts);

	kbase_pm_context_idle(kctx->kbdev);

	data.sec = ts.tv_sec;
	data.usec = ts.tv_nsec / 1000;
	data.system_time = system_time;
	data.cycle_counter = cycle_counter;

	pfn = jc >> 12;
	offset = jc & 0xFFF;

	if (offset > 0x1000-sizeof(data))
	{
		/* Wouldn't fit in the page */
		return BASE_JD_EVENT_JOB_CANCELLED;
	}

	reg = kbase_region_tracker_find_region_enclosing_address(kctx, jc);
	if (!reg)
	{
		return BASE_JD_EVENT_JOB_CANCELLED;
	}
	
	if (!(reg->flags & KBASE_REG_GPU_WR))
	{
		/* Region is not writable by GPU so we won't write to it either */
		return BASE_JD_EVENT_JOB_CANCELLED;
	}

	if (!reg->phy_pages)
	{
		return BASE_JD_EVENT_JOB_CANCELLED;
	}

	addr = reg->phy_pages[pfn - reg->start_pfn];
	if (!addr)
	{
		return BASE_JD_EVENT_JOB_CANCELLED;
	}

	page = osk_kmap(addr);
	if (!page)
	{
		return BASE_JD_EVENT_JOB_CANCELLED;
	}
	memcpy(page+offset, &data, sizeof(data));
	osk_sync_to_cpu(addr+offset, page+offset, sizeof(data));
	osk_kunmap(addr, page);

	return BASE_JD_EVENT_DONE;
}
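Both register loops above use the hi/lo/hi sequence to read a 64-bit counter that the hardware exposes as two 32-bit registers: if the high word changes while the low word is read, the loop retries, so a carry from LO into HI can never produce a torn value. A stand-alone illustration with a fake in-memory counter (no MMIO; everything below is invented for the demo):

#include <stdint.h>
#include <stdio.h>

/* fake 64-bit hardware counter, read as two 32-bit halves; it keeps ticking
 * between register reads and is deliberately about to carry into HI */
static uint64_t fake_counter = 0xFFFFFFFEull;

static uint32_t read_lo(void) { fake_counter += 3; return (uint32_t)fake_counter; }
static uint32_t read_hi(void) { return (uint32_t)(fake_counter >> 32); }

static uint64_t read_counter64(void)
{
	uint32_t hi1, lo, hi2;

	do {
		hi1 = read_hi();
		lo = read_lo();		/* the counter advances underneath us */
		hi2 = read_hi();
	} while (hi1 != hi2);		/* HI changed during the read: retry */

	return ((uint64_t)hi1 << 32) | lo;
}

int main(void)
{
	printf("counter = 0x%016llx\n", (unsigned long long)read_counter64());
	return 0;
}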
STATIC mali_error kbase_instr_hwcnt_enable_internal(kbase_device *kbdev, kbase_context *kctx, kbase_uk_hwcnt_setup *setup)
{
	unsigned long flags, pm_flags;
	mali_error err = MALI_ERROR_FUNCTION_FAILED;
	kbasep_js_device_data *js_devdata;
	u32 irq_mask;
	int ret;
	u64 shader_cores_needed;

	KBASE_DEBUG_ASSERT(NULL != kctx);
	KBASE_DEBUG_ASSERT(NULL != kbdev);
	KBASE_DEBUG_ASSERT(NULL != setup);
	KBASE_DEBUG_ASSERT(NULL == kbdev->hwcnt.suspended_kctx);

	shader_cores_needed = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);

	js_devdata = &kbdev->js_data;

	/* alignment failure */
	if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1)))
		goto out_err;

	/* Override core availability policy to ensure all cores are available */
	kbase_pm_ca_instr_enable(kbdev);

	/* Mark the context as active so the GPU is kept turned on */
	/* A suspend won't happen here, because we're in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	/* Request the cores early on synchronously - we'll release them on any errors
	 * (e.g. instrumentation already active) */
	kbase_pm_request_cores_sync(kbdev, MALI_TRUE, shader_cores_needed);

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
		/* GPU is being reset */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
		wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	}

	if (kbdev->hwcnt.state != KBASE_INSTR_STATE_DISABLED) {
		/* Instrumentation is already enabled */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
		goto out_unrequest_cores;
	}

	/* Enable interrupt */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask | PRFCNT_SAMPLE_COMPLETED, NULL);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);

	/* In use, this context is the owner */
	kbdev->hwcnt.kctx = kctx;
	/* Remember the dump address so we can reprogram it later */
	kbdev->hwcnt.addr = setup->dump_buffer;
	/* Remember all the settings for suspend/resume */
	if (&kbdev->hwcnt.suspended_state != setup)
		memcpy(&kbdev->hwcnt.suspended_state, setup, sizeof(kbdev->hwcnt.suspended_state));

	/* Request the clean */
	kbdev->hwcnt.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
	kbdev->hwcnt.triggered = 0;
	/* Clean and invalidate the caches so we're sure the MMU tables for the dump buffer are valid */
	ret = queue_work(kbdev->hwcnt.cache_clean_wq, &kbdev->hwcnt.cache_clean_work);
	KBASE_DEBUG_ASSERT(ret);

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	/* Wait for cacheclean to complete */
	wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);

	KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);

	/* Schedule the context in */
	kbasep_js_schedule_privileged_ctx(kbdev, kctx);

	/* Configure */
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_OFF, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),     setup->dump_buffer & 0xFFFFFFFF, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),     setup->dump_buffer >> 32,        kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),       setup->jm_bm,                    kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),   setup->shader_bm,                kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_L3_CACHE_EN), setup->l3_cache_bm,              kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),   setup->mmu_l2_bm,                kctx);
	/* Due to PRLAM-8186 we need to disable the Tiler before we enable the HW counter dump. */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, kctx);
	else
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), setup->tiler_bm, kctx);

	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_MANUAL, kctx);

	/* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), setup->tiler_bm, kctx);
	
	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
		/* GPU is being reset */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
		wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	}

	kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
	kbdev->hwcnt.triggered = 1;
	wake_up(&kbdev->hwcnt.wait);

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	err = MALI_ERROR_NONE;

	dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);
	return err;
 out_unrequest_cores:
	kbase_pm_unrequest_cores(kbdev, MALI_TRUE, shader_cores_needed);
	kbase_pm_context_idle(kbdev);
 out_err:
	return err;
}
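The dump-buffer check near the top of kbase_instr_hwcnt_enable_internal() rejects a zero address and anything not 2048-byte aligned using the usual power-of-two mask trick: (addr & (align - 1)) is non-zero exactly when addr is misaligned. A minimal sketch of that test (the helper name is made up):

#include <assert.h>
#include <stdint.h>

static int dump_buffer_ok(uint64_t addr, uint64_t align /* power of two */)
{
	return addr != 0ull && (addr & (align - 1)) == 0ull;
}

int main(void)
{
	assert(dump_buffer_ok(0x10000800ull, 2048));	/* 2 KiB aligned */
	assert(!dump_buffer_ok(0x10000804ull, 2048));	/* misaligned */
	assert(!dump_buffer_ok(0ull, 2048));		/* zero address rejected */
	return 0;
}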
/**
 * @brief Disable HW counters collection
 *
 * Note: might sleep, waiting for an ongoing dump to complete
 */
mali_error kbase_instr_hwcnt_disable(kbase_context *kctx)
{
	unsigned long flags, pm_flags;
	mali_error err = MALI_ERROR_FUNCTION_FAILED;
	u32 irq_mask;
	kbase_device *kbdev;

	KBASE_DEBUG_ASSERT(NULL != kctx);
	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	while (1) {
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

		if (kbdev->hwcnt.state == KBASE_INSTR_STATE_DISABLED) {
			/* Instrumentation is not enabled */
			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
			goto out;
		}

		if (kbdev->hwcnt.kctx != kctx) {
			/* Instrumentation has been setup for another context */
			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
			goto out;
		}

		if (kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE)
			break;

		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

		/* Ongoing dump/setup - wait for its completion */
		wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);

	}

	kbdev->hwcnt.state = KBASE_INSTR_STATE_DISABLED;
	kbdev->hwcnt.triggered = 0;

	/* Disable interrupt */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask & ~PRFCNT_SAMPLE_COMPLETED, NULL);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);

	/* Disable the counters */
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0, kctx);

	kbdev->hwcnt.kctx = NULL;
	kbdev->hwcnt.addr = 0ULL;

	kbase_pm_ca_instr_disable(kbdev);

	kbase_pm_unrequest_cores(kbdev, MALI_TRUE, kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER));

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	/* Release the context. This had its own Power Manager Active reference */
	kbasep_js_release_privileged_ctx(kbdev, kctx);

	/* Also release our Power Manager Active reference */
	kbase_pm_context_idle(kbdev);

	dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", kctx);

	err = MALI_ERROR_NONE;

 out:
	return err;
}
/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Calls kbase_destroy_os_context() to free OS specific structures.
 * Will release all outstanding regions.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;

	KBASE_DEBUG_ASSERT(NULL != kctx);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process */
	/* A suspend won't happen here, because we're in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	kbase_jd_zap_context(kctx);
	kbase_event_cleanup(kctx);

	kbase_gpu_vm_lock(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
	while (pending_regions_to_clean) {
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);

		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);

		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);

	/* Safe to call this even if it wasn't initialized (assuming kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);

	kbase_jd_exit(kctx);

	kbase_pm_context_idle(kbdev);

	kbase_mmu_term(kctx);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	vfree(kctx);
}
Example #20
void kbasep_js_runpool_release_ctx( kbase_device *kbdev, kbase_context *kctx )
{
	kbasep_js_device_data *js_devdata;
	kbasep_js_kctx_info   *js_kctx_info;
	kbasep_js_policy      *js_policy;
	kbasep_js_per_as_data *js_per_as_data;
	
	mali_bool was_descheduled = MALI_FALSE;
	int saved_as_nr;
	kbase_as *current_as;
	int new_ref_count;
	mali_bool nss_state_changed = MALI_FALSE;

	OSK_ASSERT( kbdev != NULL );
	OSK_ASSERT( kctx != NULL );
	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;

	osk_mutex_lock( &js_kctx_info->ctx.jsctx_mutex );
	osk_mutex_lock( &js_devdata->runpool_mutex );

	/* Ensure context really is scheduled in */
	OSK_ASSERT( js_kctx_info->ctx.is_scheduled != MALI_FALSE );

	/* The saved_as_nr must be accessed under lock, but we also need to take a
	 * sleeping mutex. Since the ctx is known to be busy-refcounted, we can
	 * just take the runpool lock briefly, then taken it again later (the as_nr
	 * won't be reassigned due to being busy).
	 *
	 * We ASSERT on this fact */
	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
	{
		saved_as_nr = kctx->as_nr;
		OSK_ASSERT( saved_as_nr != KBASEP_AS_NR_INVALID );
		js_per_as_data = &js_devdata->runpool_irq.per_as_data[saved_as_nr];
		OSK_ASSERT( js_per_as_data->as_busy_refcount > 0 );
	}
	osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );

	/* 
	 * Transaction begins on AS and runpool_irq
	 *
	 * Doubly-assert that our previous facts are still true
	 */
	current_as = &kbdev->as[saved_as_nr];
	osk_mutex_lock( &current_as->transaction_mutex );
	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
	OSK_ASSERT( saved_as_nr == kctx->as_nr );
	OSK_ASSERT( js_per_as_data->as_busy_refcount > 0 );

	/* Update refcount */
	new_ref_count = --(js_per_as_data->as_busy_refcount);

	/* Make a set of checks to see if the context should be scheduled out */
	if ( new_ref_count == 0
		 && ( kctx->jctx.sched_info.ctx.nr_jobs == 0
			  || kbasep_js_is_submit_allowed( js_devdata, kctx ) == MALI_FALSE ) )
	{
		/* Last reference, and we've been told to remove this context from the Run Pool */
		OSK_PRINT_INFO(OSK_BASE_JM, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d",
					   kctx,
					   new_ref_count,
					   js_kctx_info->ctx.nr_jobs,
					   kbasep_js_is_submit_allowed( js_devdata, kctx ) );

		kbasep_js_policy_runpool_remove_ctx( js_policy, kctx );

		/* Stop any more refcounts occurring on the context */
		js_per_as_data->kctx = NULL;

		/* Ensure we prevent the context from submitting any new jobs
		 * e.g. from kbasep_js_try_run_next_job_on_slot_irq_nolock()  */
		kbasep_js_clear_submit_allowed( js_devdata, kctx );

		/* Disable the MMU on the affected address space, and indicate it's invalid */
		kbase_mmu_disable( kctx );
		kctx->as_nr = KBASEP_AS_NR_INVALID;

		/* NSS handling */
		nss_state_changed = kbasep_js_check_and_deref_nss_running_ctx( js_devdata, kctx );

		/*
		 * Transaction ends on AS and runpool_irq:
		 *
		 * By this point, the AS-related data is now clear and ready for re-use.
		 *
		 * Since releases only occur once for each previous successful retain, and no more
		 * retains are allowed on this context, no other thread will be operating in this
		 * code whilst we are
		 */
		osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
		osk_mutex_unlock( &current_as->transaction_mutex );

		/* Free up the address space */
		js_devdata->as_free |= ((u16)(1u << saved_as_nr));
		/* Note: Don't reuse saved_as_nr now */

		/* update book-keeping info */
		--(js_devdata->nr_contexts_running);
		js_kctx_info->ctx.is_scheduled = MALI_FALSE;
		/* Signal any waiter that the context is not scheduled, so is safe for
		 * termination - once the jsctx_mutex is also dropped, and jobs have
		 * finished. */
		osk_waitq_set( &js_kctx_info->ctx.not_scheduled_waitq );

		/* Handle dying contexts */
		if ( js_kctx_info->ctx.is_dying != MALI_FALSE )
		{
			/* This happens asynchronously */
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: ** Killing Context %p on RunPool Remove **", kctx );
			kbasep_js_policy_kill_all_ctx_jobs( js_policy, kctx );
		}

		/* Queue an action to occur after we've dropped the lock */
		was_descheduled = MALI_TRUE;

	}
	else
	{
		osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
		osk_mutex_unlock( &current_as->transaction_mutex );
	}
	osk_mutex_unlock( &js_devdata->runpool_mutex );

	/* Do we have an action queued whilst the lock was held? */
	if ( was_descheduled != MALI_FALSE )
	{
		/* Determine whether this context should be requeued on the policy queue */
		if ( js_kctx_info->ctx.nr_jobs > 0 && js_kctx_info->ctx.is_dying == MALI_FALSE )
		{
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: Requeue Context %p", kctx );
			osk_mutex_lock( &js_devdata->queue_mutex );
			kbasep_js_policy_enqueue_ctx( js_policy, kctx );
			osk_mutex_unlock( &js_devdata->queue_mutex );
		}
		else
		{
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: Idling Context %p (not requeued)", kctx );
			/* Notify PM that a context has gone idle */
			kbase_pm_context_idle(kctx->kbdev);
		}
	}
	/* We've finished with this context for now, so drop the lock for it. */
	osk_mutex_unlock( &js_kctx_info->ctx.jsctx_mutex );

	if ( was_descheduled != MALI_FALSE )
	{
		/* We've freed up an address space, so let's try to schedule in another
		 * context
		 *
		 * Note: if there's a context to schedule in, then it also tries to run
		 * another job, in case the new context has jobs satisfying requirements
		 * that no other context/job in the runpool does */
		kbasep_js_try_schedule_head_ctx( kbdev );
	}

	if ( nss_state_changed != MALI_FALSE )
	{
		osk_mutex_lock( &js_devdata->runpool_mutex );
		kbasep_js_try_run_next_job( kbdev );
		osk_mutex_unlock( &js_devdata->runpool_mutex );
	}

}
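kbasep_js_runpool_release_ctx() above decides whether the context was descheduled while every lock is held, but defers the follow-up work (requeueing or PM-idling the context) until after the locks are dropped. A toy user-space model of that decide-under-the-lock, act-after-unlocking shape (types and messages are illustrative only):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_ctx {
	pthread_mutex_t lock;
	int busy_refcount;
	int nr_jobs;
};

static void toy_release_ctx(struct toy_ctx *ctx)
{
	bool was_descheduled = false;
	int nr_jobs = 0;

	pthread_mutex_lock(&ctx->lock);
	if (--ctx->busy_refcount == 0) {
		was_descheduled = true;		/* decide under the lock ... */
		nr_jobs = ctx->nr_jobs;
	}
	pthread_mutex_unlock(&ctx->lock);

	/* ... but run the heavyweight follow-up only after dropping it */
	if (was_descheduled) {
		if (nr_jobs > 0)
			printf("requeue context on the policy queue\n");
		else
			printf("context idle: drop the PM active reference\n");
	}
}

int main(void)
{
	struct toy_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

	toy_release_ctx(&ctx);
	return 0;
}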
Example #21
void kbasep_js_try_schedule_head_ctx( kbase_device *kbdev )
{
	kbasep_js_device_data *js_devdata;
	mali_bool has_kctx;
	kbase_context *head_kctx;
	kbasep_js_kctx_info *js_kctx_info;
	mali_bool is_runpool_full;

	OSK_ASSERT( kbdev != NULL );

	js_devdata = &kbdev->js_data;

	/* Make a speculative check on the Run Pool - this MUST be repeated once
	 * we've obtained a context from the queue and reobtained the Run Pool
	 * lock */
	osk_mutex_lock( &js_devdata->runpool_mutex );
	is_runpool_full = (mali_bool)( js_devdata->nr_contexts_running >= kbdev->nr_address_spaces );
	osk_mutex_unlock( &js_devdata->runpool_mutex );

	if ( is_runpool_full != MALI_FALSE )
	{
		/* No free address spaces - nothing to do */
		return;
	}

	/* Grab the context off head of queue - if there is one */
	osk_mutex_lock( &js_devdata->queue_mutex );
	has_kctx = kbasep_js_policy_dequeue_head_ctx( &js_devdata->policy, &head_kctx );
	osk_mutex_unlock( &js_devdata->queue_mutex );

	if ( has_kctx == MALI_FALSE )
	{
		/* No ctxs to run - nothing to do */
		return;
	}
	js_kctx_info = &head_kctx->jctx.sched_info;

	OSK_PRINT_INFO(OSK_BASE_JM, "JS: Dequeue Context %p", head_kctx );

	/*
	 * Atomic transaction on the Context and Run Pool begins
	 */
	osk_mutex_lock( &js_kctx_info->ctx.jsctx_mutex );
	osk_mutex_lock( &js_devdata->runpool_mutex );

	/* Re-check to see if the Run Pool is full */
	is_runpool_full = (mali_bool)( js_devdata->nr_contexts_running >= kbdev->nr_address_spaces );
	if ( is_runpool_full != MALI_FALSE )
	{
		/* No free address spaces - roll back the transaction so far and return */
		osk_mutex_unlock( &js_devdata->runpool_mutex );

		/* Only requeue if not dying - which might occur through zapping-whilst-scheduling */
		if ( js_kctx_info->ctx.is_dying == MALI_FALSE )
		{
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: Transaction rollback: Requeue Context %p", head_kctx );

			osk_mutex_lock( &js_devdata->queue_mutex );
			kbasep_js_policy_enqueue_ctx( &js_devdata->policy, head_kctx );
			osk_mutex_unlock( &js_devdata->queue_mutex );
		}
		else
		{
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: Transaction rollback: Context %p is dying. Kill remaining jobs and pm-idle ctx", head_kctx );
			OSK_ASSERT( js_kctx_info->ctx.nr_jobs > 0 );
			/* Notify PM that a context has gone idle */
			kbase_pm_context_idle(kbdev);

			/* Kill all the jobs present (call kbase_jd_cancel on all jobs) */
			kbasep_js_policy_kill_all_ctx_jobs( &js_devdata->policy, head_kctx );

			/* Nothing more to be done to kill the context here, kbase_jd_zap_context
			 * waits for all jobs to be cancelled */
		}

		osk_mutex_unlock( &js_kctx_info->ctx.jsctx_mutex );
		return;
	}

	OSK_PRINT_INFO(OSK_BASE_JM, "JS: RunPool Add Context %p", head_kctx );

	/* update book-keeping info */
	js_kctx_info->ctx.is_scheduled = MALI_TRUE;
	++(js_devdata->nr_contexts_running);
	/* Cause any future waiter-on-termination to wait until the context is
	 * descheduled */
	osk_waitq_clear( &js_kctx_info->ctx.not_scheduled_waitq );


	/* Do everything necessary to pick the address space (including updating book-keeping info).
	 * Add the context to the Run Pool, and allow it to run jobs */
	assign_and_activate_kctx_addr_space( kbdev, head_kctx );

	/* Check and setup HW counters dumping */
	osk_spinlock_lock(&kbdev->hwcnt_lock);
	osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);
	if (head_kctx == kbdev->hwcnt_context &&
		kbdev->hwcnt_is_setup == MALI_FALSE)
	{
		/* Setup the base address */
#if BASE_HW_ISSUE_8186
		u32 val;
		/* Save and clear PRFCNT_TILER_EN */
		val = kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), head_kctx);
		if(0 != val)
		{
			kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, head_kctx);
		}
		/* Update PRFCNT_CONFIG with TILER_EN = 0 */
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (head_kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_MANUAL, head_kctx);
		/* Restore PRFCNT_TILER_EN */
		if(0 != val)
		{
			kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),  val, head_kctx);
		}
#else
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (head_kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_MANUAL, head_kctx);
#endif
		/* Prevent the context from being scheduled out */
		kbasep_js_runpool_retain_ctx_nolock(kbdev, head_kctx);

		kbdev->hwcnt_is_setup = MALI_TRUE;
	}
	osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);
	osk_spinlock_unlock(&kbdev->hwcnt_lock);

	/* Try to run the next job, in case this context has jobs that match the
	 * job slot requirements, but none of the other currently running contexts
	 * do */
	kbasep_js_try_run_next_job( kbdev );

	/* Transaction complete */
	osk_mutex_unlock( &js_devdata->runpool_mutex );
	osk_mutex_unlock( &js_kctx_info->ctx.jsctx_mutex );
	/* Note: after this point, the context could potentially get scheduled out immediately */
}
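kbasep_js_try_schedule_head_ctx() above first makes a speculative run-pool-full check, dequeues a context, and then repeats the check under the locks that actually guard the decision, rolling the dequeue back if the pool filled up in the meantime. A compact stand-alone sketch of that check / recheck / rollback shape, with toy counters and pthread mutexes in place of the kernel mutexes:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t runpool_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t queue_mutex = PTHREAD_MUTEX_INITIALIZER;

static int nr_contexts_running;
static int nr_address_spaces = 2;
static int nr_queued_contexts = 1;

static bool runpool_full(void)
{
	return nr_contexts_running >= nr_address_spaces;
}

static void try_schedule_head_ctx(void)
{
	bool full;

	/* speculative check: cheap early-out without touching the queue */
	pthread_mutex_lock(&runpool_mutex);
	full = runpool_full();
	pthread_mutex_unlock(&runpool_mutex);
	if (full)
		return;

	/* grab a context off the head of the queue, if there is one */
	pthread_mutex_lock(&queue_mutex);
	if (nr_queued_contexts == 0) {
		pthread_mutex_unlock(&queue_mutex);
		return;
	}
	nr_queued_contexts--;
	pthread_mutex_unlock(&queue_mutex);

	/* re-check under the lock that guards the real decision */
	pthread_mutex_lock(&runpool_mutex);
	if (runpool_full()) {
		pthread_mutex_unlock(&runpool_mutex);
		/* roll back the transaction: requeue the context */
		pthread_mutex_lock(&queue_mutex);
		nr_queued_contexts++;
		pthread_mutex_unlock(&queue_mutex);
		printf("rolled back: run pool filled up meanwhile\n");
		return;
	}
	nr_contexts_running++;
	pthread_mutex_unlock(&runpool_mutex);
	printf("context scheduled into the run pool\n");
}

int main(void)
{
	try_schedule_head_ctx();
	return 0;
}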