Example #1
void kbase_pm_report_vsync(kbase_device *kbdev, int buffer_updated)
{
	OSK_ASSERT(kbdev != NULL);

	osk_spinlock_irq_lock(&kbdev->pm.metrics.lock);
	kbdev->pm.metrics.vsync_hit = buffer_updated;
	osk_spinlock_irq_unlock(&kbdev->pm.metrics.lock);
}
Example #2
static void mmu_mask_reenable(kbase_device * kbdev, kbase_context *kctx, kbase_as * as)
{
	u32 mask;
	osk_spinlock_irq_lock(&kbdev->mmu_mask_change);
	mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx);
	mask |= ((1UL << as->number) | (1UL << (MMU_REGS_BUS_ERROR_FLAG(as->number))));
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), mask, kctx);
	osk_spinlock_irq_unlock(&kbdev->mmu_mask_change);
}
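
The body of mmu_mask_reenable is a read-modify-write of the MMU interrupt mask register under a spinlock. Below is a minimal standalone sketch of the same shape; the register, the elided lock, and the bit layout behind BUS_ERROR_BIT are mock stand-ins (the driver's real MMU_REGS_BUS_ERROR_FLAG layout is not shown here), not the kbase API.

#include <stdint.h>
#include <stdio.h>

/* Mock register standing in for MMU_IRQ_MASK; a lock would guard it. */
static uint32_t mmu_irq_mask;

/* Hypothetical layout: page-fault IRQs in bits 0..15, bus-error IRQs in
 * bits 16..31 (stand-in for MMU_REGS_BUS_ERROR_FLAG). */
#define BUS_ERROR_BIT(as) ((as) + 16)

static void mmu_mask_reenable_sketch(int as_number)
{
	/* lock(&mmu_mask_change); -- elided in this single-threaded sketch */
	uint32_t mask = mmu_irq_mask;               /* read   */
	mask |= (1u << as_number)                   /* modify: page-fault IRQ */
	      | (1u << BUS_ERROR_BIT(as_number));   /*         bus-error IRQ  */
	mmu_irq_mask = mask;                        /* write  */
	/* unlock(&mmu_mask_change); */
}

int main(void)
{
	mmu_mask_reenable_sketch(3);
	printf("mask = 0x%08x\n", (unsigned)mmu_irq_mask);  /* 0x00080008 */
	return 0;
}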
Example #3
void kbasep_js_remove_job( kbase_context *kctx, kbase_jd_atom *atom )
{
	kbasep_js_policy_cfs_ctx *ctx_info;
	kbasep_js_kctx_info *js_kctx_info;
	kbase_device *kbdev;
	kbasep_js_device_data *js_devdata;
	kbasep_js_policy    *js_policy;
	mali_bool nss_state_changed;

	OSK_ASSERT( kctx != NULL );
	OSK_ASSERT( atom != NULL );

	kbdev = kctx->kbdev;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	/* De-refcount ctx.nr_jobs */
	OSK_ASSERT( js_kctx_info->ctx.nr_jobs > 0 );
	--(js_kctx_info->ctx.nr_jobs);

	ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;

	/* Adjust context priority to no longer include removed job */
	OSK_ASSERT(ctx_info->bag_total_nr_atoms > 0);
	ctx_info->bag_total_nr_atoms--;
	ctx_info->bag_total_priority -= atom->nice_prio;
	OSK_ASSERT(ctx_info->bag_total_priority >= 0);

	/* Get average priority and convert to NICE range -20..19 */
	if(ctx_info->bag_total_nr_atoms)
	{
		ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
	}

	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
	nss_state_changed = kbasep_js_check_and_deref_nss_job( js_devdata, kctx, atom );
	osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );

	/* De-register the job from the system */
	kbasep_js_policy_term_job( js_policy, atom );

	/* A change in NSS state means that slots which were previously empty
	 * might now be able to run jobs */
	if ( nss_state_changed != MALI_FALSE )
	{
		osk_mutex_lock( &js_devdata->runpool_mutex );
		kbasep_js_try_run_next_job( kbdev );
		osk_mutex_unlock( &js_devdata->runpool_mutex );
	}

}
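
The bag bookkeeping above keeps a running atom count and priority total so the average can be recomputed in O(1) on each removal. Here is a standalone sketch of just that arithmetic, with illustrative names rather than the kbase structures, assuming per-atom priorities are stored biased into the 0..39 range as the -20 conversion suggests.

#include <assert.h>
#include <stdio.h>

/* Illustrative bag: per-atom priorities stored biased by +20, i.e. 0..39. */
struct bag { int total_nr_atoms; int total_priority; int priority; };

static void bag_remove_atom(struct bag *b, int atom_nice_prio)
{
	assert(b->total_nr_atoms > 0);
	b->total_nr_atoms--;
	b->total_priority -= atom_nice_prio;
	assert(b->total_priority >= 0);

	/* Average of the remaining atoms, converted back to NICE -20..19.
	 * When the bag empties, the previous value is kept, as in the driver. */
	if (b->total_nr_atoms)
		b->priority = (b->total_priority / b->total_nr_atoms) - 20;
}

int main(void)
{
	struct bag b = { 3, 75, 5 };   /* three atoms, biased priorities sum to 75 */
	bag_remove_atom(&b, 15);       /* remove an atom of biased priority 15 */
	printf("nice = %d\n", b.priority);  /* (60 / 2) - 20 = 10 */
	return 0;
}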
Example #4
mali_bool kbasep_js_runpool_retain_ctx( kbase_device *kbdev, kbase_context *kctx )
{
	kbasep_js_device_data *js_devdata;
	mali_bool result;
	OSK_ASSERT( kbdev != NULL );
	js_devdata = &kbdev->js_data;

	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
	result = kbasep_js_runpool_retain_ctx_nolock( kbdev, kctx );
	osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );

	return result;
}
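
Example #4 is the common "locked wrapper around a _nolock worker" idiom: callers that already hold the lock call the worker directly, everyone else goes through the wrapper. A generic pthread-based sketch of the shape, not the osk API (a mutex stands in for the IRQ spinlock):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t runpool_lock = PTHREAD_MUTEX_INITIALIZER;
static int as_busy_refcount;

/* Worker: the caller must already hold runpool_lock. */
static bool retain_ctx_nolock(void)
{
	++as_busy_refcount;
	return true;
}

/* Wrapper: take the lock, delegate, release. */
static bool retain_ctx(void)
{
	bool result;
	pthread_mutex_lock(&runpool_lock);
	result = retain_ctx_nolock();
	pthread_mutex_unlock(&runpool_lock);
	return result;
}

int main(void)
{
	return (retain_ctx() && as_busy_refcount == 1) ? 0 : 1;
}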
Example #5
/**
 * Picks a free address space and adds the context to the Policy, then performs
 * a transaction on this AS and the RunPool IRQ to:
 * - set up the runpool_irq structure and the context on that AS
 * - activate the MMU on that AS
 * - allow jobs to be submitted on that AS
 *
 * Locking conditions:
 * - Caller must hold the kbasep_js_kctx_info::jsctx_mutex
 * - Caller must hold the kbase_js_device_data::runpool_mutex
 * - AS transaction mutex will be obtained
 * - Runpool IRQ lock will be obtained
 */
STATIC void assign_and_activate_kctx_addr_space( kbase_device *kbdev, kbase_context *kctx )
{
	kbasep_js_device_data *js_devdata;
	kbase_as *current_as;
	kbasep_js_per_as_data *js_per_as_data;
	long ffs_result;

	OSK_ASSERT( kbdev != NULL );
	OSK_ASSERT( kctx != NULL );

	js_devdata = &kbdev->js_data;

	/* Find the free address space */
	ffs_result = osk_find_first_set_bit( js_devdata->as_free );
	/* ASSERT that we should've found a free one */
	OSK_ASSERT( 0 <= ffs_result && ffs_result < kbdev->nr_address_spaces );
	js_devdata->as_free &= ~((u16)(1u << ffs_result));

	/*
	 * Transaction on the AS and runpool_irq
	 */
	current_as = &kbdev->as[ffs_result];
	js_per_as_data = &js_devdata->runpool_irq.per_as_data[ffs_result];
	osk_mutex_lock( &current_as->transaction_mutex );
	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );

	/* NSS Handling */
	kbasep_js_check_and_ref_nss_running_ctx( js_devdata, kctx );

	/* Assign addr space */
	kctx->as_nr = (int)ffs_result;

	/* Activate this address space on the MMU */
	kbase_mmu_update( kctx );

	/* Allow it to run jobs */
	kbasep_js_set_submit_allowed( js_devdata, kctx );

	/* Book-keeping */
	js_per_as_data->kctx = kctx;
	js_per_as_data->as_busy_refcount = 0;

	/* Lastly, add the context to the policy's runpool - this really allows it to run jobs */
	kbasep_js_policy_runpool_add_ctx( &js_devdata->policy, kctx );
	/*
	 * Transaction complete
	 */
	osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
	osk_mutex_unlock( &current_as->transaction_mutex );

}
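
The address-space allocator above is a find-first-set over a free-AS bitmask followed by clearing the chosen bit, with the reverse OR freeing it again (see Examples #13 and #16). A standalone sketch using the GCC/Clang __builtin_ffs as a stand-in for osk_find_first_set_bit:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t as_free = 0x000D;    /* AS 0, 2 and 3 free; AS 1 in use */

static int alloc_addr_space(void)
{
	int nr = __builtin_ffs(as_free) - 1;  /* index of lowest set bit; -1 if none */
	assert(nr >= 0);                      /* caller guarantees a free AS exists */
	as_free &= (uint16_t)~(1u << nr);     /* mark it allocated */
	return nr;
}

static void free_addr_space(int nr)
{
	as_free |= (uint16_t)(1u << nr);      /* mirrors js_devdata->as_free |= ... */
}

int main(void)
{
	int nr = alloc_addr_space();
	printf("allocated AS %d, as_free now 0x%04x\n", nr, as_free);  /* AS 0, 0x000c */
	free_addr_space(nr);
	return 0;
}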
Example #6
void kbasep_js_job_done_slot_irq( kbase_device *kbdev, int s, kbase_jd_atom *katom, kbasep_js_tick *end_timestamp )
{
	kbasep_js_policy *js_policy;
	kbasep_js_device_data *js_devdata;
	mali_bool submit_retry_needed;
	kbasep_js_tick tick_diff;
	u32 microseconds_spent;
	kbase_context *parent_ctx;

	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	parent_ctx = katom->kctx;

	/* Calculate the job's time used */
	tick_diff = *end_timestamp - katom->start_timestamp;
	microseconds_spent = kbasep_js_convert_js_ticks_to_us( tick_diff );
	/* Round up time spent to the minimum timer resolution */
	if (microseconds_spent < KBASEP_JS_TICK_RESOLUTION_US)
	{
		microseconds_spent = KBASEP_JS_TICK_RESOLUTION_US;
	}

	/* Lock the runpool_irq for modifying the runpool_irq data */
	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );

	/* Check if submitted jobs no longer require the cycle counter to be enabled */
	kbasep_js_deref_permon_check_and_disable_cycle_counter( kbdev, katom );

	/* Log the result of the job (completion status, and time spent). */
	kbasep_js_policy_log_job_result( js_policy, katom, microseconds_spent );
	/* Determine whether the parent context's timeslice is up */
	if ( kbasep_js_policy_should_remove_ctx( js_policy, parent_ctx ) != MALI_FALSE )
	{
		kbasep_js_clear_submit_allowed( js_devdata, parent_ctx );
	}

	/* Submit a new job (if there is one) to help keep the GPU's HEAD and NEXT registers full */
	submit_retry_needed = kbasep_js_try_run_next_job_on_slot_irq_nolock(
		kbdev,
		s,
		&kbdev->slot_submit_count_irq[s] );

	osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
	/* We've finished modifying runpool_irq data, so the lock is dropped */

	if ( submit_retry_needed != MALI_FALSE )
	{
		kbasep_js_set_job_retry_submit_slot( katom, s );
	}

}
Example #7
void kbasep_pm_metrics_term(kbase_device *kbdev)
{
	OSK_ASSERT(kbdev != NULL);

	osk_spinlock_irq_lock(&kbdev->pm.metrics.lock);
	kbdev->pm.metrics.timer_active = MALI_FALSE;
	osk_spinlock_irq_unlock(&kbdev->pm.metrics.lock);

	osk_timer_stop(&kbdev->pm.metrics.timer);
	osk_timer_term(&kbdev->pm.metrics.timer);

	kbase_pm_unregister_vsync_callback(kbdev);

	osk_spinlock_irq_term(&kbdev->pm.metrics.lock);
}
Example #8
void kbasep_pm_record_gpu_active(kbase_device *kbdev)
{
	osk_ticks now = osk_time_now();

	OSK_ASSERT(kbdev != NULL);

	osk_spinlock_irq_lock(&kbdev->pm.metrics.lock);

	OSK_ASSERT(kbdev->pm.metrics.gpu_active == MALI_FALSE);

	kbdev->pm.metrics.gpu_active = MALI_TRUE;

	kbdev->pm.metrics.time_idle += osk_time_elapsed(kbdev->pm.metrics.time_period_start, now);
	kbdev->pm.metrics.time_period_start = now;

	osk_spinlock_irq_unlock(&kbdev->pm.metrics.lock);
}
Example #9
static void dvfs_callback(void *data)
{
	kbase_device *kbdev;
	kbase_pm_dvfs_action action;
	osk_error ret;

	OSK_ASSERT(data != NULL);

	kbdev = (kbase_device*)data;
#ifdef CONFIG_VITHAR_DVFS
	CSTD_UNUSED(action);
	kbase_platform_dvfs_event(kbdev, kbase_pm_get_dvfs_utilisation(kbdev));
#else
	action = kbase_pm_get_dvfs_action(kbdev);

	switch(action) {
		case KBASE_PM_DVFS_NOP:
			break;
		case KBASE_PM_DVFS_CLOCK_UP:
			/* Do whatever is required to increase the clock frequency */
			break;
		case KBASE_PM_DVFS_CLOCK_DOWN:
			/* Do whatever is required to decrease the clock frequency */
			break;
	}
#endif

	osk_spinlock_irq_lock(&kbdev->pm.metrics.lock);
	if (kbdev->pm.metrics.timer_active)
	{
		ret = osk_timer_start(&kbdev->pm.metrics.timer, KBASE_PM_DVFS_FREQUENCY);
		if (ret != OSK_ERR_NONE)
		{
			/* Handle the situation where the timer cannot be restarted */
		}
	}
	osk_spinlock_irq_unlock(&kbdev->pm.metrics.lock);
}
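
The tail of dvfs_callback is a self-rearming timer: the timer_active flag is tested and the restart issued under the same lock that the termination path (Example #7) holds while clearing the flag, so shutdown cannot race with a rearm. A POSIX-threads sketch of that protocol, not the osk timer API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t metrics_lock = PTHREAD_MUTEX_INITIALIZER;
static bool timer_active = true;

static void rearm_timer(void) { puts("timer rearmed"); }  /* osk_timer_start() stand-in */

/* Timer-callback tail: rearm only while the owner still wants us running. */
static void timer_callback_tail(void)
{
	pthread_mutex_lock(&metrics_lock);
	if (timer_active)
		rearm_timer();             /* restart only under the lock */
	pthread_mutex_unlock(&metrics_lock);
}

/* Shutdown path, mirroring kbasep_pm_metrics_term(): clear the flag under
 * the lock first; any in-flight callback then sees it and declines to rearm. */
static void metrics_term(void)
{
	pthread_mutex_lock(&metrics_lock);
	timer_active = false;
	pthread_mutex_unlock(&metrics_lock);
	/* the osk_timer_stop()/osk_timer_term() equivalents would follow here */
}

int main(void)
{
	timer_callback_tail();         /* rearms */
	metrics_term();
	timer_callback_tail();         /* does not rearm */
	return 0;
}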
Example #10
int kbase_pm_get_dvfs_utilisation(kbase_device *kbdev)
{
	int utilisation=0;
	osk_ticks now = osk_time_now();

	OSK_ASSERT(kbdev != NULL);

	osk_spinlock_irq_lock(&kbdev->pm.metrics.lock);

	if (kbdev->pm.metrics.gpu_active)
	{
		kbdev->pm.metrics.time_busy += osk_time_elapsed(kbdev->pm.metrics.time_period_start, now);
		kbdev->pm.metrics.time_period_start = now;
	}
	else
	{
		kbdev->pm.metrics.time_idle += osk_time_elapsed(kbdev->pm.metrics.time_period_start, now);
		kbdev->pm.metrics.time_period_start = now;
	}

	if (kbdev->pm.metrics.time_idle + kbdev->pm.metrics.time_busy == 0)
	{
		/* No data in this window - report zero utilisation */
		goto out;
	}

	utilisation = (100*kbdev->pm.metrics.time_busy) / (kbdev->pm.metrics.time_idle + kbdev->pm.metrics.time_busy);

out:

	kbdev->pm.metrics.time_idle = 0;
	kbdev->pm.metrics.time_busy = 0;

	osk_spinlock_irq_unlock(&kbdev->pm.metrics.lock);

	return utilisation;
}
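
The utilisation figure is a windowed ratio: busy time over total time since the last reading, as a percentage, with both accumulators zeroed on every call. A worked standalone sketch of just that arithmetic:

#include <stdio.h>

/* Windowed utilisation as in kbase_pm_get_dvfs_utilisation: both
 * accumulators are reset after each reading, so every call reports the
 * percentage for the interval since the previous call. */
static unsigned time_busy, time_idle;

static int read_utilisation(void)
{
	int utilisation = 0;
	if (time_busy + time_idle != 0)
		utilisation = (int)((100u * time_busy) / (time_busy + time_idle));
	time_busy = time_idle = 0;            /* start a fresh window */
	return utilisation;
}

int main(void)
{
	time_busy = 750; time_idle = 250;     /* elapsed ticks in this window */
	printf("%d%%\n", read_utilisation()); /* 75% */
	printf("%d%%\n", read_utilisation()); /* 0%: empty window reports no data */
	return 0;
}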
Example #11
kbase_context* kbasep_js_runpool_lookup_ctx( kbase_device *kbdev, int as_nr )
{
	kbasep_js_device_data *js_devdata;
	kbase_context *found_kctx = NULL;
	kbasep_js_per_as_data *js_per_as_data;

	OSK_ASSERT( kbdev != NULL );
	OSK_ASSERT( 0 <= as_nr && as_nr < BASE_MAX_NR_AS );
	js_devdata = &kbdev->js_data;
	js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );

	found_kctx = js_per_as_data->kctx;

	if ( found_kctx != NULL )
	{
		++(js_per_as_data->as_busy_refcount);
	}

	osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );

	return found_kctx;
}
Example #12
mali_bool kbasep_js_add_job( kbase_context *kctx, kbase_jd_atom *atom )
{
	kbasep_js_kctx_info *js_kctx_info;
	kbase_device *kbdev;
	kbasep_js_device_data *js_devdata;
	kbasep_js_policy    *js_policy;

	mali_bool policy_queue_updated = MALI_FALSE;

	OSK_ASSERT( kctx != NULL );
	OSK_ASSERT( atom != NULL );

	kbdev = kctx->kbdev;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	osk_mutex_lock( &js_devdata->runpool_mutex );
	OSK_PRINT_INFO( OSK_BASE_JM, "JS: job enqueue %p", (void *)atom);

	/* Refcount ctx.nr_jobs */
	OSK_ASSERT( js_kctx_info->ctx.nr_jobs < U32_MAX );
	++(js_kctx_info->ctx.nr_jobs);

	/* Setup any scheduling information */
	kbasep_js_clear_job_retry_submit( atom );

	/*
	 * Begin Runpool_irq transaction
	 */
	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
	{
		/* NSS refcounting */
		kbasep_js_check_and_ref_nss_job( js_devdata, kctx, atom );

		/* Enqueue the job in the policy, causing it to be scheduled if the
		 * parent context gets scheduled */
		kbasep_js_policy_enqueue_job( js_policy, atom );
	}
	osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
	/* End runpool_irq transaction */

	if ( js_kctx_info->ctx.is_scheduled != MALI_FALSE )
	{
		/* Handle an already running context - try to run the new job, in case it
		 * matches requirements that aren't matched by any other job in the Run
		 * Pool */
		kbasep_js_try_run_next_job( kbdev );
	}
	osk_mutex_unlock( &js_devdata->runpool_mutex );

	if ( js_kctx_info->ctx.is_scheduled == MALI_FALSE && js_kctx_info->ctx.nr_jobs == 1 )
	{
		/* Handle Refcount going from 0 to 1: schedule the context on the Policy Queue */
		OSK_ASSERT( js_kctx_info->ctx.is_scheduled == MALI_FALSE );

		OSK_PRINT_INFO(OSK_BASE_JM, "JS: Enqueue Context %p", kctx );

		osk_mutex_lock( &js_devdata->queue_mutex );
		kbasep_js_policy_enqueue_ctx( js_policy, kctx );
		osk_mutex_unlock( &js_devdata->queue_mutex );
		/* If the runpool is full and this context has a higher priority than a
		 * non-running context in the runpool, evict that context so this
		 * higher-priority job starts faster */
		kbasep_js_runpool_attempt_fast_start_ctx( kbdev, kctx );

		/* This context is becoming active */
		kbase_pm_context_active(kctx->kbdev);

		/* NOTE: Potentially, we can make the scheduling of the head context
		 * happen in a work-queue if we need to wait for the PM to power
		 * up. Also need logic to submit nothing until PM really has completed
		 * powering up. */

		/* Policy Queue was updated - caller must try to schedule the head context */
		policy_queue_updated = MALI_TRUE;
	}

	return policy_queue_updated;
}
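
Note that the slow path keys off the 0-to-1 transition of ctx.nr_jobs: only the first job of an idle, unscheduled context enqueues the context on the policy queue and takes a PM active reference. A minimal sketch of that transition, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct ctx { unsigned nr_jobs; bool is_scheduled; };

static void enqueue_ctx(struct ctx *c)  { (void)c; puts("ctx enqueued"); }
static void pm_context_active(void)     { puts("PM ref taken"); }

/* Returns true when the policy queue was updated, so the caller should try
 * to schedule the head context -- mirroring kbasep_js_add_job's contract. */
static bool add_job(struct ctx *c)
{
	++c->nr_jobs;
	if (!c->is_scheduled && c->nr_jobs == 1)
	{
		enqueue_ctx(c);        /* first job: make the context schedulable */
		pm_context_active();   /* the context is becoming active */
		return true;
	}
	return false;              /* already queued or already running */
}

int main(void)
{
	struct ctx c = { 0, false };
	bool first = add_job(&c);
	bool second = add_job(&c);
	printf("%d %d\n", first, second);   /* 1 0 */
	return 0;
}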
Example #13
/**
 * Fast-start a higher priority context when the runpool is full and contains
 * a non-running, lower priority context.
 * The evicted context will be returned to the policy queue.
 *
 * The following locking conditions are made on the caller:
 * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
 */
STATIC void kbasep_js_runpool_attempt_fast_start_ctx( kbase_device *kbdev, kbase_context *kctx )
{
	kbasep_js_device_data *js_devdata;
	kbasep_js_policy      *js_policy;
	kbasep_js_per_as_data *js_per_as_data;
	int evict_as_nr;
	kbase_as *current_as;
	mali_bool nss_state_changed = MALI_FALSE;
	mali_bool is_runpool_full;

	OSK_ASSERT(kbdev != NULL);
	OSK_ASSERT(kctx != NULL);

	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	osk_mutex_lock(&js_devdata->runpool_mutex);

	/* If the runpool is full, attempt to fast start our context */
	is_runpool_full = (mali_bool)(js_devdata->nr_contexts_running >= kbdev->nr_address_spaces);

	if(is_runpool_full != MALI_FALSE)
	{
		/* No free address spaces - attempt to evict non-running lower priority context */
		osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);
		for(evict_as_nr = 0; evict_as_nr < kbdev->nr_address_spaces; evict_as_nr++)
		{
			current_as = &kbdev->as[evict_as_nr];
			js_per_as_data = &js_devdata->runpool_irq.per_as_data[evict_as_nr];

			/* Look for the AS which is not currently running */
			if(0 == js_per_as_data->as_busy_refcount)
			{
				kbase_context *kctx_evict = js_per_as_data->kctx;

				osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);

				/* Compare the priority of the context we are considering
				 * evicting with the new context's priority, taking into
				 * account whether either is scheduled under a realtime policy.
				 * Note that the lower the number, the higher the priority.
				 */
				if(((kctx_evict->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy == MALI_FALSE) &&
				           kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy) ||
				   ((kctx_evict->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy ==
				           kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy) &&
				   (kctx_evict->jctx.sched_info.runpool.policy_ctx.cfs.bag_priority >
				          kctx->jctx.sched_info.runpool.policy_ctx.cfs.bag_priority)))
				{
					/* Evict idle job in the runpool as priority is lower than new job */
					osk_mutex_lock(&current_as->transaction_mutex);
					osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);
					/* Remove the context from the runpool policy list (policy_info->scheduled_ctxs_head) */
					kbasep_js_policy_runpool_remove_ctx(js_policy, kctx_evict);
					/* Stop any more refcounts occurring on the context */
					js_per_as_data->kctx = NULL;

					/* Prevent the evicted context from submitting any more jobs */
					kbasep_js_clear_submit_allowed(js_devdata, kctx_evict);

					/* Disable the MMU on the affected address space, and indicate it's invalid */
					kbase_mmu_disable(kctx_evict);
					kctx_evict->as_nr = KBASEP_AS_NR_INVALID;

					/* NSS handling */
					nss_state_changed = kbasep_js_check_and_deref_nss_running_ctx(js_devdata, kctx_evict);
					CSTD_UNUSED(nss_state_changed);

					osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);
					osk_mutex_unlock(&current_as->transaction_mutex);

					/* Free up the address space */
					js_devdata->as_free |= ((u16)(1u << evict_as_nr));

					/* update book-keeping info */
					--(js_devdata->nr_contexts_running);
					kctx_evict->jctx.sched_info.ctx.is_scheduled = MALI_FALSE;
					/* Signal any waiter that the context is not scheduled */
					osk_waitq_set(&kctx_evict->jctx.sched_info.ctx.not_scheduled_waitq);

					osk_mutex_unlock(&js_devdata->runpool_mutex);

					/* Requeue onto the policy queue */
					OSK_PRINT_INFO(OSK_BASE_JM, "JS: Requeue Context %p", kctx_evict);
					osk_mutex_lock(&js_devdata->queue_mutex);
					kbasep_js_policy_enqueue_ctx(js_policy, kctx_evict);
					osk_mutex_unlock(&js_devdata->queue_mutex);
					/* ctx fast start has taken place */
					return;
				}
				osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);
			}
		}
		osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);
	}

	/* ctx fast start has not taken place */
	osk_mutex_unlock(&js_devdata->runpool_mutex);
}
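
The eviction test in the middle of that loop is easier to read as a standalone predicate: a realtime context outranks a non-realtime one, and within the same class the numerically lower bag_priority wins. A sketch with illustrative types:

#include <stdbool.h>
#include <stdio.h>

struct ctx_prio
{
	bool rt_policy;        /* scheduled under a realtime policy? */
	int  bag_priority;     /* lower number means higher priority */
};

/* True if `incoming` should displace the idle `evictee` from the runpool. */
static bool should_evict(const struct ctx_prio *evictee,
                         const struct ctx_prio *incoming)
{
	if (!evictee->rt_policy && incoming->rt_policy)
		return true;       /* realtime beats non-realtime outright */
	return evictee->rt_policy == incoming->rt_policy &&
	       evictee->bag_priority > incoming->bag_priority;
}

int main(void)
{
	struct ctx_prio idle = { false, 10 }, incoming = { false, 0 };
	printf("%d\n", should_evict(&idle, &incoming));   /* 1: evict */
	return 0;
}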
Example #14
void kbasep_js_try_run_next_job_on_slot( kbase_device *kbdev, int js )
{
	kbasep_js_device_data *js_devdata;
	mali_bool has_job;
	mali_bool cores_ready;

	OSK_ASSERT( kbdev != NULL );

	js_devdata = &kbdev->js_data;

#if BASE_HW_ISSUE_7347
	for(js = 0; js < kbdev->nr_job_slots; js++)
	{
#endif

	kbase_job_slot_lock(kbdev, js);

	/* Keep submitting while there's space to run a job on this job-slot,
	 * and there are jobs available that match its requirements (see the
	 * 'break' statement below) */
	if (  kbasep_jm_is_submit_slots_free( kbdev, js, NULL ) != MALI_FALSE )
	{
		/* Only lock the Run Pool whilst there's work worth doing */
		osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );

		/* The caller of this function may not be aware of NSS status changes,
		 * so we must recheck that the given slot is still valid; otherwise, do
		 * not try to run.
		 */
		if (kbase_js_can_run_job_on_slot_no_lock( js_devdata, js))
		{
			do {
				kbase_jd_atom *dequeued_atom;

				/* Dequeue a job that matches the requirements */
				has_job = kbasep_js_policy_dequeue_job( kbdev, js, &dequeued_atom );

				if ( has_job != MALI_FALSE )
				{
					/* NOTE: since the runpool_irq lock is currently held and acts across
					 * all address spaces, any context whose busy refcount has reached
					 * zero won't yet be scheduled out whilst we're trying to run jobs
					 * from it */
					kbase_context *parent_ctx = dequeued_atom->kctx;
					mali_bool retain_success;

					/* Retain/power up the cores it needs, check if cores are ready */
					cores_ready = kbasep_js_job_check_ref_cores( kbdev, js, dequeued_atom );

					if ( cores_ready != MALI_TRUE )
					{
						/* The job can't be submitted until the cores are ready */
						break;
					}
					/* ASSERT that the Policy picked a job from an allowed context */
					OSK_ASSERT( kbasep_js_is_submit_allowed( js_devdata, parent_ctx) );

					/* Retain the context to stop it from being scheduled out
					 * This is released when the job finishes */
					retain_success = kbasep_js_runpool_retain_ctx_nolock( kbdev, parent_ctx );
					OSK_ASSERT( retain_success != MALI_FALSE );
					CSTD_UNUSED( retain_success );

					/* Check if this job needs the cycle counter enabled before submission */
					kbasep_js_ref_permon_check_and_enable_cycle_counter( kbdev, dequeued_atom );

					/* Submit the job */
					kbase_job_submit_nolock( kbdev, dequeued_atom, js );
				}

			} while ( kbasep_jm_is_submit_slots_free( kbdev, js, NULL ) != MALI_FALSE
				      && has_job != MALI_FALSE );
		}

		osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
	}
	kbase_job_slot_unlock(kbdev, js);
#if BASE_HW_ISSUE_7347
	}
#endif
}
Example #15
kbase_pm_dvfs_action kbase_pm_get_dvfs_action(kbase_device *kbdev)
{
	int utilisation;
	kbase_pm_dvfs_action action;
	osk_ticks now = osk_time_now();

	OSK_ASSERT(kbdev != NULL);

	osk_spinlock_irq_lock(&kbdev->pm.metrics.lock);

	if (kbdev->pm.metrics.gpu_active)
	{
		kbdev->pm.metrics.time_busy += osk_time_elapsed(kbdev->pm.metrics.time_period_start, now);
		kbdev->pm.metrics.time_period_start = now;
	}
	else
	{
		kbdev->pm.metrics.time_idle += osk_time_elapsed(kbdev->pm.metrics.time_period_start, now);
		kbdev->pm.metrics.time_period_start = now;
	}

	if (kbdev->pm.metrics.time_idle + kbdev->pm.metrics.time_busy == 0)
	{
		/* No data - so we return NOP */
		action = KBASE_PM_DVFS_NOP;
		goto out;
	}

	utilisation = (100*kbdev->pm.metrics.time_busy) / (kbdev->pm.metrics.time_idle + kbdev->pm.metrics.time_busy);

	if (kbdev->pm.metrics.vsync_hit)
	{
		/* VSync is being met */
		if (utilisation < KBASE_PM_VSYNC_MIN_UTILISATION)
		{
			action = KBASE_PM_DVFS_CLOCK_DOWN;
		}
		else if (utilisation > KBASE_PM_VSYNC_MAX_UTILISATION)
		{
			action = KBASE_PM_DVFS_CLOCK_UP;
		}
		else
		{
			action = KBASE_PM_DVFS_NOP;
		}
	}
	else
	{
		/* VSync is being missed */
		if (utilisation < KBASE_PM_NO_VSYNC_MIN_UTILISATION)
		{
			action = KBASE_PM_DVFS_CLOCK_DOWN;
		}
		else if (utilisation > KBASE_PM_NO_VSYNC_MAX_UTILISATION)
		{
			action = KBASE_PM_DVFS_CLOCK_UP;
		}
		else
		{
			action = KBASE_PM_DVFS_NOP;
		}
	}

out:

	kbdev->pm.metrics.time_idle = 0;
	kbdev->pm.metrics.time_busy = 0;

	osk_spinlock_irq_unlock(&kbdev->pm.metrics.lock);

	return action;
}
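
The decision logic reduces to a pair of (min, max) utilisation bands selected by whether vsync is currently being met. A sketch with hypothetical threshold values; the driver's actual KBASE_PM_(NO_)VSYNC_{MIN,MAX}_UTILISATION constants are configuration-dependent and not reproduced here.

#include <stdio.h>

typedef enum { DVFS_NOP, DVFS_CLOCK_UP, DVFS_CLOCK_DOWN } dvfs_action;

/* Placeholder thresholds; the real values are configuration constants. */
static int min_util(int vsync_hit) { return vsync_hit ? 10 : 30; }
static int max_util(int vsync_hit) { return vsync_hit ? 70 : 90; }

static dvfs_action pick_action(int utilisation, int vsync_hit)
{
	if (utilisation < min_util(vsync_hit))
		return DVFS_CLOCK_DOWN;    /* plenty of headroom: slow down */
	if (utilisation > max_util(vsync_hit))
		return DVFS_CLOCK_UP;      /* saturated: speed up */
	return DVFS_NOP;               /* inside the band: leave as-is */
}

int main(void)
{
	printf("%d\n", pick_action(95, 0));    /* 1: clock up */
	return 0;
}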
Example #16
void kbasep_js_runpool_release_ctx( kbase_device *kbdev, kbase_context *kctx )
{
	kbasep_js_device_data *js_devdata;
	kbasep_js_kctx_info   *js_kctx_info;
	kbasep_js_policy      *js_policy;
	kbasep_js_per_as_data *js_per_as_data;
	
	mali_bool was_descheduled = MALI_FALSE;
	int saved_as_nr;
	kbase_as *current_as;
	int new_ref_count;
	mali_bool nss_state_changed = MALI_FALSE;

	OSK_ASSERT( kbdev != NULL );
	OSK_ASSERT( kctx != NULL );
	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;

	osk_mutex_lock( &js_kctx_info->ctx.jsctx_mutex );
	osk_mutex_lock( &js_devdata->runpool_mutex );

	/* Ensure context really is scheduled in */
	OSK_ASSERT( js_kctx_info->ctx.is_scheduled != MALI_FALSE );

	/* The saved_as_nr must be accessed under lock, but we also need to take a
	 * sleeping mutex. Since the ctx is known to be busy-refcounted, we can
	 * just take the runpool lock briefly, then take it again later (the as_nr
	 * won't be reassigned whilst the ctx is busy).
	 *
	 * We ASSERT on this fact */
	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
	{
		saved_as_nr = kctx->as_nr;
		OSK_ASSERT( saved_as_nr != KBASEP_AS_NR_INVALID );
		js_per_as_data = &js_devdata->runpool_irq.per_as_data[saved_as_nr];
		OSK_ASSERT( js_per_as_data->as_busy_refcount > 0 );
	}
	osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );

	/* 
	 * Transaction begins on AS and runpool_irq
	 *
	 * Doubly-assert that our previous facts are still true
	 */
	current_as = &kbdev->as[saved_as_nr];
	osk_mutex_lock( &current_as->transaction_mutex );
	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
	OSK_ASSERT( saved_as_nr == kctx->as_nr );
	OSK_ASSERT( js_per_as_data->as_busy_refcount > 0 );

	/* Update refcount */
	new_ref_count = --(js_per_as_data->as_busy_refcount);

	/* Make a set of checks to see if the context should be scheduled out */
	if ( new_ref_count == 0
		 && ( kctx->jctx.sched_info.ctx.nr_jobs == 0
			  || kbasep_js_is_submit_allowed( js_devdata, kctx ) == MALI_FALSE ) )
	{
		/* Last reference, and we've been told to remove this context from the Run Pool */
		OSK_PRINT_INFO(OSK_BASE_JM, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d",
					   kctx,
					   new_ref_count,
					   js_kctx_info->ctx.nr_jobs,
					   kbasep_js_is_submit_allowed( js_devdata, kctx ) );

		kbasep_js_policy_runpool_remove_ctx( js_policy, kctx );

		/* Stop any more refcounts occurring on the context */
		js_per_as_data->kctx = NULL;

		/* Ensure we prevent the context from submitting any new jobs
		 * e.g. from kbasep_js_try_run_next_job_on_slot_irq_nolock()  */
		kbasep_js_clear_submit_allowed( js_devdata, kctx );

		/* Disable the MMU on the affected address space, and indicate it's invalid */
		kbase_mmu_disable( kctx );
		kctx->as_nr = KBASEP_AS_NR_INVALID;

		/* NSS handling */
		nss_state_changed = kbasep_js_check_and_deref_nss_running_ctx( js_devdata, kctx );

		/*
		 * Transaction ends on AS and runpool_irq:
		 *
		 * By this point, the AS-related data is now clear and ready for re-use.
		 *
		 * Since releases only occur once for each previous successful retain, and no more
		 * retains are allowed on this context, no other thread will be operating in this
		 * code whilst we are
		 */
		osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
		osk_mutex_unlock( &current_as->transaction_mutex );

		/* Free up the address space */
		js_devdata->as_free |= ((u16)(1u << saved_as_nr));
		/* Note: Don't reuse saved_as_nr now */

		/* update book-keeping info */
		--(js_devdata->nr_contexts_running);
		js_kctx_info->ctx.is_scheduled = MALI_FALSE;
		/* Signal any waiter that the context is not scheduled, so is safe for
		 * termination - once the jsctx_mutex is also dropped, and jobs have
		 * finished. */
		osk_waitq_set( &js_kctx_info->ctx.not_scheduled_waitq );

		/* Handle dying contexts */
		if ( js_kctx_info->ctx.is_dying != MALI_FALSE )
		{
			/* This happens asynchronously */
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: ** Killing Context %p on RunPool Remove **", kctx );
			kbasep_js_policy_kill_all_ctx_jobs( js_policy, kctx );
		}

		/* Queue an action to occur after we've dropped the lock */
		was_descheduled = MALI_TRUE;

	}
	else
	{
		osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
		osk_mutex_unlock( &current_as->transaction_mutex );
	}
	osk_mutex_unlock( &js_devdata->runpool_mutex );

	/* Do we have an action queued whilst the lock was held? */
	if ( was_descheduled != MALI_FALSE )
	{
		/* Determine whether this context should be requeued on the policy queue */
		if ( js_kctx_info->ctx.nr_jobs > 0 && js_kctx_info->ctx.is_dying == MALI_FALSE )
		{
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: Requeue Context %p", kctx );
			osk_mutex_lock( &js_devdata->queue_mutex );
			kbasep_js_policy_enqueue_ctx( js_policy, kctx );
			osk_mutex_unlock( &js_devdata->queue_mutex );
		}
		else
		{
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: Idling Context %p (not requeued)", kctx );
			/* Notify PM that a context has gone idle */
			kbase_pm_context_idle(kctx->kbdev);
		}
	}
	/* We've finished with this context for now, so drop the lock for it. */
	osk_mutex_unlock( &js_kctx_info->ctx.jsctx_mutex );

	if ( was_descheduled != MALI_FALSE )
	{
		/* We've freed up an address space, so let's try to schedule in another
		 * context
		 *
		 * Note: if there's a context to schedule in, then it also tries to run
		 * another job, in case the new context has jobs satisfying requirements
		 * that no other context/job in the runpool does */
		kbasep_js_try_schedule_head_ctx( kbdev );
	}

	if ( nss_state_changed != MALI_FALSE )
	{
		osk_mutex_lock( &js_devdata->runpool_mutex );
		kbasep_js_try_run_next_job( kbdev );
		osk_mutex_unlock( &js_devdata->runpool_mutex );
	}

}
Example #17
void kbasep_js_try_schedule_head_ctx( kbase_device *kbdev )
{
	kbasep_js_device_data *js_devdata;
	mali_bool has_kctx;
	kbase_context *head_kctx;
	kbasep_js_kctx_info *js_kctx_info;
	mali_bool is_runpool_full;

	OSK_ASSERT( kbdev != NULL );

	js_devdata = &kbdev->js_data;

	/* Make a speculative check on the Run Pool - this MUST be repeated once
	 * we've obtained a context from the queue and reobtained the Run Pool
	 * lock */
	osk_mutex_lock( &js_devdata->runpool_mutex );
	is_runpool_full = (mali_bool)( js_devdata->nr_contexts_running >= kbdev->nr_address_spaces );
	osk_mutex_unlock( &js_devdata->runpool_mutex );

	if ( is_runpool_full != MALI_FALSE )
	{
		/* No free address spaces - nothing to do */
		return;
	}

	/* Grab the context off head of queue - if there is one */
	osk_mutex_lock( &js_devdata->queue_mutex );
	has_kctx = kbasep_js_policy_dequeue_head_ctx( &js_devdata->policy, &head_kctx );
	osk_mutex_unlock( &js_devdata->queue_mutex );

	if ( has_kctx == MALI_FALSE )
	{
		/* No ctxs to run - nothing to do */
		return;
	}
	js_kctx_info = &head_kctx->jctx.sched_info;

	OSK_PRINT_INFO(OSK_BASE_JM, "JS: Dequeue Context %p", head_kctx );

	/*
	 * Atomic transaction on the Context and Run Pool begins
	 */
	osk_mutex_lock( &js_kctx_info->ctx.jsctx_mutex );
	osk_mutex_lock( &js_devdata->runpool_mutex );

	/* Re-check to see if the Run Pool is full */
	is_runpool_full = (mali_bool)( js_devdata->nr_contexts_running >= kbdev->nr_address_spaces );
	if ( is_runpool_full != MALI_FALSE )
	{
		/* No free address spaces - roll back the transaction so far and return */
		osk_mutex_unlock( &js_devdata->runpool_mutex );

		/* Only requeue if not dying - which might occur through zapping-whilst-scheduling */
		if ( js_kctx_info->ctx.is_dying == MALI_FALSE )
		{
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: Transaction rollback: Requeue Context %p", head_kctx );

			osk_mutex_lock( &js_devdata->queue_mutex );
			kbasep_js_policy_enqueue_ctx( &js_devdata->policy, head_kctx );
			osk_mutex_unlock( &js_devdata->queue_mutex );
		}
		else
		{
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: Transaction rollback: Context %p is dying. Kill remaining jobs and pm-idle ctx", head_kctx );
			OSK_ASSERT( js_kctx_info->ctx.nr_jobs > 0 );
			/* Notify PM that a context has gone idle */
			kbase_pm_context_idle(kbdev);

			/* Kill all the jobs present (call kbase_jd_cancel on all jobs) */
			kbasep_js_policy_kill_all_ctx_jobs( &js_devdata->policy, head_kctx );

			/* Nothing more to be done to kill the context here, kbase_jd_zap_context
			 * waits for all jobs to be cancelled */
		}

		osk_mutex_unlock( &js_kctx_info->ctx.jsctx_mutex );
		return;
	}

	OSK_PRINT_INFO(OSK_BASE_JM, "JS: RunPool Add Context %p", head_kctx );

	/* update book-keeping info */
	js_kctx_info->ctx.is_scheduled = MALI_TRUE;
	++(js_devdata->nr_contexts_running);
	/* Cause any future waiter-on-termination to wait until the context is
	 * descheduled */
	osk_waitq_clear( &js_kctx_info->ctx.not_scheduled_waitq );


	/* Do everything necessary to pick the address space (including updating
	 * book-keeping info), add the context to the Run Pool, and allow it to run jobs */
	assign_and_activate_kctx_addr_space( kbdev, head_kctx );

	/* Check and setup HW counters dumping */
	osk_spinlock_lock(&kbdev->hwcnt_lock);
	osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);
	if (head_kctx == kbdev->hwcnt_context &&
		kbdev->hwcnt_is_setup == MALI_FALSE)
	{
		/* Setup the base address */
#if BASE_HW_ISSUE_8186
		u32 val;
		/* Save and clear PRFCNT_TILER_EN */
		val = kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), head_kctx);
		if(0 != val)
		{
			kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, head_kctx);
		}
		/* Update PRFCNT_CONFIG with TILER_EN = 0 */
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (head_kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_MANUAL, head_kctx);
		/* Restore PRFCNT_TILER_EN */
		if(0 != val)
		{
			kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),  val, head_kctx);
		}
#else
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (head_kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_MANUAL, head_kctx);
#endif
		/* Prevent the context from being scheduled out */
		kbasep_js_runpool_retain_ctx_nolock(kbdev, head_kctx);

		kbdev->hwcnt_is_setup = MALI_TRUE;
	}
	osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);
	osk_spinlock_unlock(&kbdev->hwcnt_lock);

	/* Try to run the next job, in case this context has jobs that match the
	 * job slot requirements, but none of the other currently running contexts
	 * do */
	kbasep_js_try_run_next_job( kbdev );

	/* Transaction complete */
	osk_mutex_unlock( &js_devdata->runpool_mutex );
	osk_mutex_unlock( &js_kctx_info->ctx.jsctx_mutex );
	/* Note: after this point, the context could potentially get scheduled out immediately */
}
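
Example #17 opens with a speculative full-runpool check outside the critical section and repeats it after dequeueing a context, rolling back (requeueing) if the answer changed in between. A compact sketch of that check/recheck shape using a pthread mutex, with the dequeue and requeue steps elided:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t runpool_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nr_contexts_running;
static const int nr_address_spaces = 4;

static bool runpool_full(void)
{
	return nr_contexts_running >= nr_address_spaces;
}

static void try_schedule_head_ctx_sketch(void)
{
	bool full;

	/* Speculative check: a cheap early-out whose answer may go stale the
	 * moment the lock is dropped. */
	pthread_mutex_lock(&runpool_mutex);
	full = runpool_full();
	pthread_mutex_unlock(&runpool_mutex);
	if (full)
		return;

	/* ... dequeue a context from the policy queue here ... */

	/* Re-check under the lock before committing; roll back (requeue the
	 * context) if another thread filled the runpool in the meantime. */
	pthread_mutex_lock(&runpool_mutex);
	if (runpool_full())
	{
		pthread_mutex_unlock(&runpool_mutex);
		/* ... requeue the dequeued context ... */
		return;
	}
	++nr_contexts_running;    /* commit */
	pthread_mutex_unlock(&runpool_mutex);
}

int main(void)
{
	try_schedule_head_ctx_sketch();
	return nr_contexts_running == 1 ? 0 : 1;
}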