Example #1
0
void kbase_pm_context_idle(kbase_device *kbdev)
{
	unsigned long flags;
	int c;

	OSK_ASSERT(kbdev != NULL);

	spin_lock_irqsave(&kbdev->pm.active_count_lock, flags);

	c = --kbdev->pm.active_count;

	KBASE_TRACE_ADD_REFCOUNT( kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c );

	OSK_ASSERT(c >= 0);
	
	if (c == 0)
	{
		/* Last context has gone idle */
		kbase_pm_send_event(kbdev, KBASE_PM_EVENT_GPU_IDLE);

		kbasep_pm_record_gpu_idle(kbdev);
	}

	/* We must wait for the above functions to finish (in the case c==0) before releasing the lock; otherwise there is
	 * a race with another thread calling kbase_pm_context_active - the IDLE message could then be sent
	 * *after* the ACTIVE message, causing the policy and metrics systems to become confused.
	 */
	spin_unlock_irqrestore(&kbdev->pm.active_count_lock, flags);
}
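A minimal usage sketch, assuming a caller that needs the GPU powered around some work (do_gpu_register_access() is a hypothetical helper, not part of the driver): every kbase_pm_context_active() call must be balanced by exactly one kbase_pm_context_idle(), so the GPU_IDLE event is raised only when the last user goes idle.

/* Hedged usage sketch; do_gpu_register_access() is hypothetical. */
static void example_gpu_access(kbase_device *kbdev)
{
	kbase_pm_context_active(kbdev); /* returns once the GPU is powered */
	do_gpu_register_access(kbdev);  /* ... the actual work ... */
	kbase_pm_context_idle(kbdev);   /* may raise KBASE_PM_EVENT_GPU_IDLE when the count hits 0 */
}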
Example #2
0
/**
 * When the context is scheduled, the caller must hold the runpool_irq lock (a spinlock).
 */
STATIC mali_bool kbasep_js_check_and_deref_nss_job( kbasep_js_device_data *js_devdata,
											   kbase_context *kctx,
											   kbase_jd_atom *atom )
{
	kbasep_js_kctx_info *js_kctx_info;
	mali_bool nss_state_changed = MALI_FALSE;

	OSK_ASSERT( kctx != NULL );
	js_kctx_info = &kctx->jctx.sched_info;

	if ( atom->atom->core_req & BASE_JD_REQ_NSS )
	{
		OSK_ASSERT( js_kctx_info->ctx.nr_nss_jobs > 0 );

		if ( js_kctx_info->ctx.is_scheduled != MALI_FALSE
			 && js_kctx_info->ctx.nr_nss_jobs == 1 )
		{
			/* Only NSS deref-count a running ctx on the last nss job */
			nss_state_changed = kbasep_js_check_and_deref_nss_running_ctx( js_devdata, kctx );
		}

		--(js_kctx_info->ctx.nr_nss_jobs);
	}

	return nss_state_changed;
}
static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
{
	kbase_jd_atom *katom = container_of(waiter, kbase_jd_atom, sync_waiter);
	kbase_context *kctx;

	OSK_ASSERT(NULL != katom);

	kctx = katom->kctx;

	OSK_ASSERT(NULL != kctx);

	/* Propagate the fence status to the atom.
	 * If negative then cancel this atom and its dependencies.
	 */
	if (fence->status < 0)
	{
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
	}

	/* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue
	 *
	 * The issue is that we may signal the timeline while holding kctx->jctx.lock and
	 * the callbacks are run synchronously from sync_timeline_signal. So we simply defer the work.
	 */

	OSK_ASSERT(0 == object_is_on_stack(&katom->work));
	INIT_WORK(&katom->work, kbase_fence_wait_worker);
	queue_work(kctx->jctx.job_done_wq, &katom->work);
}
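A hedged sketch of the deferred side of this pattern; kbase_fence_wait_worker itself is not shown in these examples, so the body below is only an assumed outline (take kctx->jctx.lock once we are safely outside sync_timeline_signal, then complete or cancel the atom).

/* Assumed outline only; the real kbase_fence_wait_worker may differ, and
 * kctx->jctx.lock is assumed to be the mutex the deadlock comment refers to. */
static void example_fence_wait_worker(struct work_struct *data)
{
	kbase_jd_atom *katom = container_of(data, kbase_jd_atom, work);
	kbase_context *kctx = katom->kctx;

	mutex_lock(&kctx->jctx.lock);   /* safe here: not inside sync_timeline_signal */
	/* ... complete the atom, or cancel it if the fence callback set
	 *     katom->event_code to BASE_JD_EVENT_JOB_CANCELLED ... */
	mutex_unlock(&kctx->jctx.lock);
}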
Example #4
0
void kbasep_get_memory_performance(const kbase_memory_resource *resource, kbase_memory_performance *cpu_performance,
		kbase_memory_performance *gpu_performance)
{
	kbase_attribute *attributes;

	OSK_ASSERT(resource != NULL);
	OSK_ASSERT(cpu_performance != NULL);
	OSK_ASSERT(gpu_performance != NULL);

	attributes = resource->attributes;
	*cpu_performance = *gpu_performance = KBASE_MEM_PERF_NORMAL; /* default performance */

	if (attributes == NULL)
	{
		return;
	}

	while (attributes->id != KBASE_CONFIG_ATTR_END)
	{
		if (attributes->id == KBASE_MEM_ATTR_PERF_GPU)
		{
			*gpu_performance = (kbase_memory_performance) attributes->data;
		}
		else if (attributes->id == KBASE_MEM_ATTR_PERF_CPU)
		{
			*cpu_performance = (kbase_memory_performance) attributes->data;
		}
		attributes++;
	}
}
Example #5
0
void ukk_call_prepare(ukk_call_context * const ukk_ctx, ukk_session * const session)
{
	OSK_ASSERT(NULL != ukk_ctx);
	OSK_ASSERT(NULL != session);

	ukk_ctx->ukk_session = session;
}
Example #6
0
void kbasep_js_devdata_term( kbase_device *kbdev )
{
	kbasep_js_device_data *js_devdata;

	OSK_ASSERT( kbdev != NULL );

	js_devdata = &kbdev->js_data;

	if ( (js_devdata->init_status & JS_DEVDATA_INIT_CONSTANTS) )
	{
		/* The caller must de-register all contexts before calling this */
		OSK_ASSERT( js_devdata->nr_contexts_running == 0 );
		OSK_ASSERT( js_devdata->runpool_irq.nr_nss_ctxs_running == 0 );
	}
	if ( (js_devdata->init_status & JS_DEVDATA_INIT_POLICY) )
	{
		kbasep_js_policy_term( &js_devdata->policy );
	}
	if ( (js_devdata->init_status & JS_DEVDATA_INIT_RUNPOOL_IRQ_LOCK) )
	{
		osk_spinlock_irq_term( &js_devdata->runpool_irq.lock );
	}
	if ( (js_devdata->init_status & JS_DEVDATA_INIT_QUEUE_MUTEX) )
	{
		osk_mutex_term( &js_devdata->queue_mutex );
	}
	if ( (js_devdata->init_status & JS_DEVDATA_INIT_RUNPOOL_MUTEX) )
	{
		osk_mutex_term( &js_devdata->runpool_mutex );
	}

	js_devdata->init_status = JS_DEVDATA_INIT_NONE;
}
static int kbase_fence_wait(kbase_jd_atom *katom)
{
	int ret;

	OSK_ASSERT(NULL != katom);
	OSK_ASSERT(NULL != katom->kctx);

	sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);

	ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);

	if (ret == 1)
	{
		/* Already signalled */
		return 0;
	}
	else if (ret < 0)
	{
		goto cancel_atom;
	}
	return 1;

cancel_atom:
	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
	/* We should cause the dependent jobs in the bag to be failed;
	 * to do this we schedule the work queue to complete this job */
	OSK_ASSERT(0 == object_is_on_stack(&katom->work));
	INIT_WORK(&katom->work, kbase_fence_wait_worker);
	queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
	return 1;
}
Example #8
0
struct kbase_va_region *kbase_pmem_alloc(struct kbase_context *kctx, u32 size,
					 u32 flags, u16 *pmem_cookie)
{
	struct kbase_va_region *reg;
	u16 cookie;

	OSK_ASSERT(kctx != NULL);
	OSK_ASSERT(pmem_cookie != NULL);

	if ( 0 == size )
	{
		goto out1;
	}

	if (!kbase_check_alloc_flags(flags))
	{
		goto out1;
	}

	reg = kbase_alloc_free_region(kctx, 0, size, KBASE_REG_ZONE_PMEM);
	if (!reg)
		goto out1;

	reg->flags &= ~KBASE_REG_FREE;

	kbase_update_region_flags(reg, flags, MALI_FALSE);

	if (kbase_alloc_phy_pages(reg, size, size))
		goto out2;

	reg->nr_alloc_pages = size;
	reg->extent = 0;

	kbase_gpu_vm_lock(kctx);
	if (!kctx->osctx.cookies)
		goto out3;
	
	cookie = __ffs(kctx->osctx.cookies);
	kctx->osctx.cookies &= ~(1UL << cookie);
	reg->flags &= ~KBASE_REG_COOKIE_MASK;
	reg->flags |= KBASE_REG_COOKIE(cookie);
	
	OSK_DLIST_PUSH_FRONT(&kctx->osctx.reg_pending, reg,
				struct kbase_va_region, link);

	*pmem_cookie = cookie;
	kbase_gpu_vm_unlock(kctx);

	return reg;

out3:
	kbase_gpu_vm_unlock(kctx);
	kbase_free_phy_pages(reg);
out2:
	osk_free(reg);
out1:
	return NULL;
}
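The cookie handling above is a simple bitmask allocator. For example (illustrative values only): if kctx->osctx.cookies == 0x6, i.e. cookies 1 and 2 are free, __ffs(0x6) returns 1, cookie 1 is handed out, and the mask becomes 0x6 & ~(1UL << 1) == 0x4; once the mask reaches 0, the allocation fails via the out3 path.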
Example #9
0
void kbase_event_post(kbase_context *ctx, kbase_jd_atom *atom)
{
	OSK_ASSERT(ctx);
	OSK_ASSERT(atom);

	osk_workq_work_init(&atom->work, kbase_event_post_worker);
	osk_workq_submit(&ctx->event_workq, &atom->work);
}
/**
 * Submit the 8401 workaround job.
 *
 * Important for BASE_HW_ISSUE_8987: This job always uses 16 RMUs
 * - Therefore, on slot[1] it will always use the same number of RMUs as another
 * GLES job.
 * - On slot[2], no other job (GLES or otherwise) will be running on the
 * cores, by virtue of it being slot[2]. Therefore, any value of RMUs is
 * acceptable.
 */
void kbasep_8401_submit_dummy_job(kbase_device *kbdev, int js)
{
	u32 cfg;
	mali_addr64 jc;

	/* While this workaround is active we reserve the last address space just for submitting the dummy jobs */
	int as = kbdev->nr_hw_address_spaces;

	/* Don't issue compute jobs on job slot 0 */
	OSK_ASSERT(js != 0);
	OSK_ASSERT(js < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT);

	/* Job chain GPU address */
	jc = (js+WORKAROUND_PAGE_OFFSET)*OSK_PAGE_SIZE; /* GPU phys address (see kbase_mmu_insert_pages call in kbasep_8401_workaround_init) */

	/* Clear the job status words which may contain values from a previous job completion */
	memset(kbdev->workaround_compute_job_va[js], 0,  4*sizeof(u32));

	/* Get the affinity of the previous job */
	dummy_job_atom[js].affinity = ((u64)kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_LO), NULL)) |
	                              (((u64)kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_HI), NULL)) << 32);

	/* Don't submit a compute job if the affinity was previously zero (i.e. no jobs have run yet on this slot) */
	if(!dummy_job_atom[js].affinity)
	{
		return;
	}

	/* Ensure that our page tables are programmed into the MMU */
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_LO),
	                       (kbdev->workaround_kctx->pgd & ASn_TRANSTAB_ADDR_SPACE_MASK) | ASn_TRANSTAB_READ_INNER
	                       | ASn_TRANSTAB_ADRMODE_TABLE, NULL);

	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_HI), (kbdev->workaround_kctx->pgd >> 32), NULL);

	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_LO), ASn_MEMATTR_IMPL_DEF_CACHE_POLICY, NULL);
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_HI), ASn_MEMATTR_IMPL_DEF_CACHE_POLICY, NULL);
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);

	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), jc & 0xFFFFFFFF, NULL);
	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), jc >> 32, NULL);

	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_LO), dummy_job_atom[js].affinity & 0xFFFFFFFF, NULL);
	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_HI), dummy_job_atom[js].affinity >> 32, NULL);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on start */
	cfg = as | JSn_CONFIG_END_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_START_MMU
	         | JSn_CONFIG_START_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_THREAD_PRI(8);
	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_CONFIG_NEXT), cfg, NULL);

	KBASE_TRACE_ADD_SLOT( kbdev, JM_SUBMIT, NULL, 0, jc, js );

	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_START, NULL);
	/* Report that the job has been submitted */
	kbasep_jm_enqueue_submit_slot(&kbdev->jm_slots[js], &dummy_job_atom[js]);
}
Example #11
0
/**
 * @brief Get the GPU configuration
 *
 * Fill the base_gpu_props structure with values from the GPU configuration registers
 *
 * @param gpu_props  The base_gpu_props structure
 * @param kbdev      The kbase_device structure for the device
 */
static void kbase_gpuprops_get_props(base_gpu_props * gpu_props, kbase_device * kbdev)
{
	kbase_gpuprops_regdump regdump;
	int i;

	OSK_ASSERT(NULL != kbdev);
	OSK_ASSERT(NULL != gpu_props);

	/* Dump relevant registers */
	kbase_gpuprops_dump_registers(kbdev, &regdump);

	/* Populate the base_gpu_props structure */
	gpu_props->core_props.version_status = KBASE_UBFX32(regdump.gpu_id, 0U, 4);
	gpu_props->core_props.minor_revision = KBASE_UBFX32(regdump.gpu_id, 4U, 8);
	gpu_props->core_props.major_revision = KBASE_UBFX32(regdump.gpu_id, 12U, 4);
	gpu_props->core_props.product_id = KBASE_UBFX32(regdump.gpu_id, 16U, 16);
	gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2;
	gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT;

	for(i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
	{
		gpu_props->core_props.texture_features[i] = regdump.texture_features[i];
	}

	gpu_props->l2_props.log2_line_size = KBASE_UBFX32(regdump.l2_features, 0U, 8);
	gpu_props->l2_props.log2_cache_size = KBASE_UBFX32(regdump.l2_features, 16U, 8);

	gpu_props->l3_props.log2_line_size = KBASE_UBFX32(regdump.l3_features, 0U, 8);
	gpu_props->l3_props.log2_cache_size = KBASE_UBFX32(regdump.l3_features, 16U, 8);

	gpu_props->tiler_props.bin_size_bytes = 1 << KBASE_UBFX32(regdump.tiler_features, 0U, 6);
	gpu_props->tiler_props.max_active_levels = KBASE_UBFX32(regdump.tiler_features, 8U, 4);

	gpu_props->raw_props.gpu_id = regdump.gpu_id;
	gpu_props->raw_props.tiler_features = regdump.tiler_features;
	gpu_props->raw_props.mem_features = regdump.mem_features;
	gpu_props->raw_props.mmu_features = regdump.mmu_features;
	gpu_props->raw_props.l2_features = regdump.l2_features;
	gpu_props->raw_props.l3_features = regdump.l3_features;

	gpu_props->raw_props.as_present = regdump.as_present;
	gpu_props->raw_props.js_present = regdump.js_present;
	gpu_props->raw_props.shader_present = ((u64)regdump.shader_present_hi << 32) + regdump.shader_present_lo;
	gpu_props->raw_props.tiler_present = ((u64)regdump.tiler_present_hi << 32) + regdump.tiler_present_lo;
	gpu_props->raw_props.l2_present = ((u64)regdump.l2_present_hi << 32) + regdump.l2_present_lo;
	gpu_props->raw_props.l3_present = ((u64)regdump.l3_present_hi << 32) + regdump.l3_present_lo;

	for(i = 0; i < MIDG_MAX_JOB_SLOTS; i++)
	{
		gpu_props->raw_props.js_features[i] = regdump.js_features[i];
	}

	/* Initialize the coherent_group structure for each group */
	kbase_gpuprops_construct_coherent_groups(gpu_props);
}
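A hedged sketch of the bitfield-extract helper this function relies on, assuming KBASE_UBFX32(value, lsb, width) extracts width bits starting at bit lsb; the real macro lives in the driver headers and may differ in detail.

/* Assumed semantics only; EXAMPLE_UBFX32 is illustrative, not the driver macro. */
#define EXAMPLE_UBFX32(value, lsb, width) \
	(((u32)(value) >> (u32)(lsb)) & (u32)((1u << (width)) - 1u))

/* e.g. product_id above corresponds to EXAMPLE_UBFX32(regdump.gpu_id, 16U, 16),
 * i.e. bits 31:16 of GPU_ID. */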
Example #12
0
mali_error kbasep_js_kctx_init( kbase_context *kctx )
{
	kbase_device *kbdev;
	kbasep_js_kctx_info *js_kctx_info;
	mali_error err;
	osk_error osk_err;

	OSK_ASSERT( kctx != NULL );

	kbdev = kctx->kbdev;
	OSK_ASSERT( kbdev != NULL );

	js_kctx_info = &kctx->jctx.sched_info;
	OSK_ASSERT( js_kctx_info->init_status == JS_KCTX_INIT_NONE );

	js_kctx_info->ctx.nr_jobs = 0;
	js_kctx_info->ctx.nr_nss_jobs = 0;

	js_kctx_info->ctx.is_scheduled = MALI_FALSE;
	js_kctx_info->ctx.is_dying = MALI_FALSE;

	js_kctx_info->init_status |= JS_KCTX_INIT_CONSTANTS;

	/* On error, we can carry on, provided that none of the resources below
	 * rely on the ones above */
	osk_err = osk_mutex_init( &js_kctx_info->ctx.jsctx_mutex, OSK_LOCK_ORDER_JS_CTX );
	if ( osk_err == OSK_ERR_NONE )
	{
		js_kctx_info->init_status |= JS_KCTX_INIT_JSCTX_MUTEX;
	}

	osk_err = osk_waitq_init( &js_kctx_info->ctx.not_scheduled_waitq );
	if ( osk_err == OSK_ERR_NONE )
	{
		js_kctx_info->init_status |= JS_KCTX_INIT_JSCTX_WAITQ;
	}

	err = kbasep_js_policy_init_ctx( kbdev, kctx );
	if ( err == MALI_ERROR_NONE )
	{
		js_kctx_info->init_status |= JS_KCTX_INIT_POLICY;
	}

	/* On error, do no cleanup; this will be handled by the caller(s), since
	 * we've designed this resource to be safe to terminate on init-fail */
	if ( js_kctx_info->init_status != JS_KCTX_INIT_ALL)
	{
		return MALI_ERROR_FUNCTION_FAILED;
	}

	/* Initially, the context is not scheduled */
	osk_waitq_set( &js_kctx_info->ctx.not_scheduled_waitq );

	return MALI_ERROR_NONE;
}
static void mali_dvfs_event_proc(struct work_struct *w)
{
	unsigned long flags;
	mali_dvfs_status *dvfs_status;
	struct exynos_context *platform;

	mutex_lock(&mali_enable_clock_lock);
	dvfs_status = &mali_dvfs_status_current;

	if (!kbase_platform_dvfs_get_enable_status()) {
		mutex_unlock(&mali_enable_clock_lock);
		return;
	}

	platform = (struct exynos_context *)dvfs_status->kbdev->platform_context;
#ifdef MALI_DVFS_ASV_ENABLE
	if (dvfs_status->asv_status == ASV_STATUS_DISABLE_REQ) {
		dvfs_status->asv_status = mali_dvfs_update_asv(ASV_CMD_DISABLE);
	} else if (dvfs_status->asv_status == ASV_STATUS_NOT_INIT) {
		dvfs_status->asv_status = mali_dvfs_update_asv(ASV_CMD_ENABLE);
	}
#endif
	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
	if (dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) {
		if (dvfs_status->step == kbase_platform_dvfs_get_level(450)) {
			if (platform->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold)
				dvfs_status->step++;
			OSK_ASSERT(dvfs_status->step < MALI_DVFS_STEP);
		} else {
			dvfs_status->step++;
			OSK_ASSERT(dvfs_status->step < MALI_DVFS_STEP);
		}
	} else if ((dvfs_status->step > 0) &&
			(platform->time_tick == MALI_DVFS_TIME_INTERVAL) &&
			(platform->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) {
		OSK_ASSERT(dvfs_status->step > 0);
		dvfs_status->step--;
	}
#ifdef CONFIG_MALI_T6XX_FREQ_LOCK
	if ((dvfs_status->upper_lock >= 0) && (dvfs_status->step > dvfs_status->upper_lock)) {
		dvfs_status->step = dvfs_status->upper_lock;
	}
	if (dvfs_status->under_lock > 0) {
		if (dvfs_status->step < dvfs_status->under_lock)
			dvfs_status->step = dvfs_status->under_lock;
	}
#endif
	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

	kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);

	mutex_unlock(&mali_enable_clock_lock);
}
Example #14
0
void kbasep_js_remove_job( kbase_context *kctx, kbase_jd_atom *atom )
{
	kbasep_js_policy_cfs_ctx *ctx_info;
	kbasep_js_kctx_info *js_kctx_info;
	kbase_device *kbdev;
	kbasep_js_device_data *js_devdata;
	kbasep_js_policy    *js_policy;
	mali_bool nss_state_changed;

	OSK_ASSERT( kctx != NULL );
	OSK_ASSERT( atom != NULL );

	kbdev = kctx->kbdev;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	/* De-refcount ctx.nr_jobs */
	OSK_ASSERT( js_kctx_info->ctx.nr_jobs > 0 );
	--(js_kctx_info->ctx.nr_jobs);

	ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;

	/* Adjust context priority to no longer include removed job */
	OSK_ASSERT(ctx_info->bag_total_nr_atoms > 0);
	ctx_info->bag_total_nr_atoms--;
	ctx_info->bag_total_priority -= atom->nice_prio;
	OSK_ASSERT(ctx_info->bag_total_priority >= 0);

	/* Get average priority and convert to NICE range -20..19 */
	if(ctx_info->bag_total_nr_atoms)
	{
		ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
	}

	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
	nss_state_changed = kbasep_js_check_and_deref_nss_job( js_devdata, kctx, atom );
	osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );

	/* De-register the job from the system */
	kbasep_js_policy_term_job( js_policy, atom );

	/* A change in NSS state means that jobs might now be runnable on slots
	 * that were previously left empty */
	if ( nss_state_changed != MALI_FALSE )
	{
		osk_mutex_lock( &js_devdata->runpool_mutex );
		kbasep_js_try_run_next_job( kbdev );
		osk_mutex_unlock( &js_devdata->runpool_mutex );
	}
}
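As a worked example of the averaging above (illustrative numbers only): with bag_total_priority == 75 spread over bag_total_nr_atoms == 3 remaining atoms, bag_priority becomes 75 / 3 - 20 == 5; with a single remaining atom whose nice_prio is 0, the result is 0 / 1 - 20 == -20, the most favourable value in the -20..19 NICE range.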
Example #15
0
/**
 * Picks a free address space and adds the context to the Policy. Then performs
 * a transaction on this AS and the RunPool IRQ lock to:
 * - set up the runpool_irq structure and the context on that AS
 * - activate the MMU on that AS
 * - allow jobs to be submitted on that AS
 *
 * Locking conditions:
 * - Caller must hold the kbasep_js_kctx_info::jsctx_mutex
 * - Caller must hold the kbase_js_device_data::runpool_mutex
 * - AS transaction mutex will be obtained
 * - Runpool IRQ lock will be obtained
 */
STATIC void assign_and_activate_kctx_addr_space( kbase_device *kbdev, kbase_context *kctx )
{
	kbasep_js_device_data *js_devdata;
	kbase_as *current_as;
	kbasep_js_per_as_data *js_per_as_data;
	long ffs_result;

	OSK_ASSERT( kbdev != NULL );
	OSK_ASSERT( kctx != NULL );

	js_devdata = &kbdev->js_data;

	/* Find the free address space */
	ffs_result = osk_find_first_set_bit( js_devdata->as_free );
	/* ASSERT that we should've found a free one */
	OSK_ASSERT( 0 <= ffs_result && ffs_result < kbdev->nr_address_spaces );
	js_devdata->as_free &= ~((u16)(1u << ffs_result));

	/*
	 * Transaction on the AS and runpool_irq
	 */
	current_as = &kbdev->as[ffs_result];
	js_per_as_data = &js_devdata->runpool_irq.per_as_data[ffs_result];
	osk_mutex_lock( &current_as->transaction_mutex );
	osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );

	/* NSS Handling */
	kbasep_js_check_and_ref_nss_running_ctx( js_devdata, kctx );

	/* Assign addr space */
	kctx->as_nr = (int)ffs_result;

	/* Activate this address space on the MMU */
	kbase_mmu_update( kctx );

	/* Allow it to run jobs */
	kbasep_js_set_submit_allowed( js_devdata, kctx );

	/* Book-keeping */
	js_per_as_data->kctx = kctx;
	js_per_as_data->as_busy_refcount = 0;

	/* Lastly, add the context to the policy's runpool - this really allows it to run jobs */
	kbasep_js_policy_runpool_add_ctx( &js_devdata->policy, kctx );
	/*
	 * Transaction complete
	 */
	osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
	osk_mutex_unlock( &current_as->transaction_mutex );
}
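A hedged caller sketch for the locking conditions listed above (the surrounding scheduling logic is elided; only the lock nesting is shown):

	/* Hedged sketch: both mutexes named in the locking conditions are held by
	 * the caller; the AS transaction mutex and runpool IRQ lock are taken and
	 * released inside assign_and_activate_kctx_addr_space() itself. */
	osk_mutex_lock( &js_kctx_info->ctx.jsctx_mutex );
	osk_mutex_lock( &js_devdata->runpool_mutex );
	assign_and_activate_kctx_addr_space( kbdev, kctx );
	osk_mutex_unlock( &js_devdata->runpool_mutex );
	osk_mutex_unlock( &js_kctx_info->ctx.jsctx_mutex );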
Example #16
0
mali_error kbase_gpuprops_uk_get_props(kbase_context *kctx, kbase_uk_gpuprops * kbase_props)
{
	kbase_gpuprops_clock_speed_function get_gpu_speed_mhz;
	u32 gpu_speed_mhz;
	int rc = 1;

	OSK_ASSERT(NULL != kctx);
	OSK_ASSERT(NULL != kbase_props);

	if (OSK_SIMULATE_FAILURE(OSK_BASE_CORE))
	{
		return MALI_ERROR_FUNCTION_FAILED;
	}

	/* Current GPU speed is requested from the system integrator via the KBASE_CONFIG_ATTR_GPU_SPEED_FUNC function.
	 * If that function fails, or the function is not provided by the system integrator, we report the maximum
	 * GPU speed as specified by KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX.
	 */
	get_gpu_speed_mhz = (kbase_gpuprops_clock_speed_function)kbasep_get_config_value(kctx->kbdev, kctx->kbdev->config_attributes, KBASE_CONFIG_ATTR_GPU_SPEED_FUNC);
	if (get_gpu_speed_mhz != NULL)
	{
		rc = get_gpu_speed_mhz(&gpu_speed_mhz);
#ifdef CONFIG_MALI_DEBUG
		/* Issue a warning message when the reported GPU speed falls outside the min/max range */
		if (rc == 0)
		{
			u32 gpu_speed_khz = gpu_speed_mhz * 1000;
			if (gpu_speed_khz < kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min ||
			    gpu_speed_khz > kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max)
			{
				OSK_PRINT_WARN(OSK_BASE_CORE, "GPU speed is outside of min/max range (got %u kHz, min %u kHz, max %u kHz)\n",
				                   gpu_speed_khz,
				                   kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min,
				                   kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max);
			}
		}
#endif /* CONFIG_MALI_DEBUG */
	}
	if (rc != 0)
	{
		gpu_speed_mhz = kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max / 1000;
	}

	kctx->kbdev->gpu_props.props.core_props.gpu_speed_mhz = gpu_speed_mhz;

	memcpy(&kbase_props->props, &kctx->kbdev->gpu_props.props, sizeof(kbase_props->props));

	return MALI_ERROR_NONE;
}
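A hedged sketch of how a system integrator might supply KBASE_CONFIG_ATTR_GPU_SPEED_FUNC in the platform attribute list; the example_* names are hypothetical, and the callback is assumed to return 0 on success, matching the rc check above.

/* Hypothetical integrator-side snippet, not taken from the driver sources. */
static int example_get_gpu_speed_mhz(u32 *clock_speed_mhz)
{
	*clock_speed_mhz = 533; /* e.g. queried from the SoC clock framework */
	return 0;               /* 0 == success, as assumed by the rc check above */
}

static kbase_attribute example_config_attributes[] = {
	{ KBASE_CONFIG_ATTR_GPU_SPEED_FUNC, (uintptr_t)&example_get_gpu_speed_mhz },
	{ KBASE_CONFIG_ATTR_END,            0 }
};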
Example #17
0
STATIC base_jd_udata kbase_event_process(kbase_context *kctx, kbase_jd_atom *katom)
{
	base_jd_udata data;

	OSK_ASSERT(kctx != NULL);
	OSK_ASSERT(katom != NULL);
	OSK_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);

	data = katom->udata;

	katom->status = KBASE_JD_ATOM_STATE_UNUSED;

	wake_up(&katom->completed);

	return data;
}
void kbase_pm_register_vsync_callback(kbase_device *kbdev)
{
	OSK_ASSERT(kbdev != NULL);

	/* no VSync metrics will be available */
	kbdev->pm.metrics.platform_data = NULL;
}
Example #19
0
unsigned long osk_bitarray_find_first_zero_bit(const unsigned long *addr, unsigned long maxbit)
{
	unsigned long total;

	OSK_ASSERT(NULL != addr);

	for ( total = 0; total < maxbit; total += OSK_BITS_PER_LONG, ++addr )
	{
		if (OSK_ULONG_MAX != *addr)
		{
			int result;
			result = oskp_find_first_zero_bit( *addr );
			/* non-negative signifies the bit was found */
			if ( result >= 0 )
			{
				total += (unsigned long)result;
				break;
			}
		}
	}

	/* Now check if we reached maxbit or above */
	if ( total >= maxbit )
	{
		total = maxbit;
	}

	return total; /* either the found bit nr, or maxbit if not found */
}
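For example (illustrative, assuming a 64-bit build where OSK_BITS_PER_LONG == 64): with maxbit == 128 and a bit array of { ~0UL, 0xFFFFFFFFFFFFFFF7UL }, the first word is skipped because it equals OSK_ULONG_MAX, bit 3 of the second word is found clear, and the function returns 64 + 3 == 67; if every bit in range were set, it would return maxbit (128).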
Example #20
0
void kbase_gpuprops_set(kbase_device *kbdev)
{
	kbase_gpu_props *gpu_props;
	struct midg_raw_gpu_props *raw;

	OSK_ASSERT(NULL != kbdev);
	gpu_props = &kbdev->gpu_props;
	raw = &gpu_props->props.raw_props;

	/* Initialize the base_gpu_props structure */
	kbase_gpuprops_get_props(&gpu_props->props, kbdev);

	/* Populate kbase-only fields */
	gpu_props->l2_props.associativity = KBASE_UBFX32(raw->l2_features, 8U, 8);
	gpu_props->l2_props.external_bus_width = KBASE_UBFX32(raw->l2_features, 24U, 8);

	gpu_props->l3_props.associativity = KBASE_UBFX32(raw->l3_features, 8U, 8);
	gpu_props->l3_props.external_bus_width = KBASE_UBFX32(raw->l3_features, 24U, 8);

	gpu_props->mem.core_group = KBASE_UBFX32(raw->mem_features, 0U, 1);
	gpu_props->mem.supergroup = KBASE_UBFX32(raw->mem_features, 1U, 1);

	gpu_props->mmu.va_bits = KBASE_UBFX32(raw->mmu_features, 0U, 8);
	gpu_props->mmu.pa_bits = KBASE_UBFX32(raw->mmu_features, 8U, 8);

	gpu_props->num_cores = osk_count_set_bits64(raw->shader_present);
	gpu_props->num_core_groups = osk_count_set_bits64(raw->l2_present);
	gpu_props->num_supergroups = osk_count_set_bits64(raw->l3_present);
	gpu_props->num_address_spaces = osk_count_set_bits(raw->as_present);
	gpu_props->num_job_slots = osk_count_set_bits(raw->js_present);
}
Example #21
0
void kbase_pm_change_policy(kbase_device *kbdev)
{
	OSK_ASSERT(kbdev != NULL);

	KBASE_TRACE_ADD( kbdev, PM_CHANGE_POLICY, NULL, NULL, 0u,
	                 kbdev->pm.current_policy->id | (kbdev->pm.new_policy->id<<16) );

	KBASE_TRACE_ADD( kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u, kbdev->pm.current_policy->id );
	kbdev->pm.current_policy->term(kbdev);
	kbdev->pm.current_policy = kbdev->pm.new_policy;
	KBASE_TRACE_ADD( kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u, kbdev->pm.current_policy->id );
	kbdev->pm.current_policy->init(kbdev);

	kbase_pm_send_event(kbdev, KBASE_PM_EVENT_POLICY_INIT);

	/* The policy change might have happened while the desired core states were
	 * being updated, and the KBASE_PM_EVENT_CHANGE_GPU_STATE event could have
	 * been optimized out if the previous policy had
	 * KBASE_PM_POLICY_FLAG_NO_CORE_TRANSITIONS set.
	 *
	 * So, to be safe, we always issue a KBASE_PM_EVENT_CHANGE_GPU_STATE here. */
	kbase_pm_send_event(kbdev, KBASE_PM_EVENT_CHANGE_GPU_STATE);

	/* Now the policy change is finished, we release our fake context active reference */
	kbase_pm_context_idle(kbdev);

	kbdev->pm.new_policy = NULL;
}
Example #22
0
void kbase_pm_context_active(kbase_device *kbdev)
{
	unsigned long flags;
	int c;

	OSK_ASSERT(kbdev != NULL);

	spin_lock_irqsave(&kbdev->pm.active_count_lock, flags);
	c = ++kbdev->pm.active_count;
	spin_unlock_irqrestore(&kbdev->pm.active_count_lock, flags);

	KBASE_TRACE_ADD_REFCOUNT( kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c );

	if (c == 1)
	{
		/* First context active */
		kbase_pm_send_event(kbdev, KBASE_PM_EVENT_GPU_ACTIVE);

		kbasep_pm_record_gpu_active(kbdev);
	}
	/* Synchronise with the power policy to ensure that the event has been noticed */
	kbase_pm_wait_for_no_outstanding_events(kbdev);

	kbase_pm_wait_for_power_up(kbdev);
}
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation)
{
	unsigned long flags;
	struct exynos_context *platform;

	OSK_ASSERT(kbdev != NULL);
	platform = (struct exynos_context *) kbdev->platform_context;

	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
	if (platform->time_tick < MALI_DVFS_TIME_INTERVAL) {
		platform->time_tick++;
		platform->time_busy += kbdev->pm.metrics.time_busy;
		platform->time_idle += kbdev->pm.metrics.time_idle;
	} else {
		platform->time_busy = kbdev->pm.metrics.time_busy;
		platform->time_idle = kbdev->pm.metrics.time_idle;
		platform->time_tick = 0;
	}

	if ((platform->time_tick == MALI_DVFS_TIME_INTERVAL) &&
	    (platform->time_idle + platform->time_busy > 0))
		platform->utilisation = (100 * platform->time_busy) / (platform->time_idle + platform->time_busy);

	mali_dvfs_status_current.utilisation = utilisation;
	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

	queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);
	/* TODO: add error handling here */
	return MALI_TRUE;
}
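As a worked example of the windowed average above (illustrative numbers only): once platform->time_tick reaches MALI_DVFS_TIME_INTERVAL with, say, time_busy == 300 and time_idle == 100 accumulated, platform->utilisation = (100 * 300) / (100 + 300) == 75, which mali_dvfs_event_proc then compares against the max/min thresholds of the current DVFS step.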
Example #24
0
void kbasep_pm_record_gpu_active(kbase_device *kbdev)
{
	osk_ticks now = osk_time_now();

	OSK_ASSERT(kbdev != NULL);

	osk_spinlock_irq_lock(&kbdev->pm.metrics.lock);

	OSK_ASSERT(kbdev->pm.metrics.gpu_active == MALI_FALSE);

	kbdev->pm.metrics.gpu_active = MALI_TRUE;

	kbdev->pm.metrics.time_idle += osk_time_elapsed(kbdev->pm.metrics.time_period_start, now);
	kbdev->pm.metrics.time_period_start = now;

	osk_spinlock_irq_unlock(&kbdev->pm.metrics.lock);
}
Example #25
0
void kbase_pm_set_policy(kbase_device *kbdev, const kbase_pm_policy *new_policy)
{
	OSK_ASSERT(kbdev != NULL);
	OSK_ASSERT(new_policy != NULL);

	if (kbdev->pm.new_policy) {
		/* A policy change is already outstanding */
		KBASE_TRACE_ADD( kbdev, PM_SET_POLICY, NULL, NULL, 0u, -1 );
		return;
	}
	KBASE_TRACE_ADD( kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id );
	/* During a policy change we pretend the GPU is active */
	kbase_pm_context_active(kbdev);

	kbdev->pm.new_policy = new_policy;
	kbase_pm_send_event(kbdev, KBASE_PM_EVENT_POLICY_CHANGE);
}
Example #26
0
void kbase_pm_report_vsync(kbase_device *kbdev, int buffer_updated)
{
	OSK_ASSERT(kbdev != NULL);

	osk_spinlock_irq_lock(&kbdev->pm.metrics.lock);
	kbdev->pm.metrics.vsync_hit = buffer_updated;
	osk_spinlock_irq_unlock(&kbdev->pm.metrics.lock);
}
Example #27
0
/**
 * Caller must hold the runpool_irq lock (a spinlock)
 */
STATIC INLINE void kbasep_js_check_and_ref_nss_running_ctx( kbasep_js_device_data *js_devdata, kbase_context *kctx )
{
	kbasep_js_kctx_info *js_kctx_info;

	OSK_ASSERT( kctx != NULL );
	js_kctx_info = &kctx->jctx.sched_info;

	OSK_ASSERT( js_kctx_info->ctx.is_scheduled != MALI_FALSE );

	if ( js_kctx_info->ctx.nr_nss_jobs > 0 )
	{
		OSK_ASSERT( js_devdata->runpool_irq.nr_nss_ctxs_running < S8_MAX );
		++(js_devdata->runpool_irq.nr_nss_ctxs_running);

		if ( js_devdata->runpool_irq.nr_nss_ctxs_running == 1 )
		{
			OSK_PRINT_INFO(OSK_BASE_JM, "JS: First NSS Context %p scheduled (switched to NSS state)", kctx );
		}
	}
}
Example #28
0
void kbasep_js_try_run_next_job( kbase_device *kbdev )
{
	int js;

	OSK_ASSERT( kbdev != NULL );

	for ( js = 0; js < kbdev->nr_job_slots; ++js )
	{
		kbasep_js_try_run_next_job_on_slot( kbdev, js );
	}
}
void kbasep_config_parse_io_resources(const kbase_io_resources *io_resources, struct resource *linux_resources)
{
	OSK_ASSERT(io_resources != NULL);
	OSK_ASSERT(linux_resources != NULL);

	OSK_MEMSET(linux_resources, 0, PLATFORM_CONFIG_RESOURCE_COUNT * sizeof(struct resource));

	linux_resources[0].start = io_resources->io_memory_region.start;
	linux_resources[0].end   = io_resources->io_memory_region.end;
	linux_resources[0].flags = IORESOURCE_MEM;

	linux_resources[1].start = linux_resources[1].end = io_resources->job_irq_number;
	linux_resources[1].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;

	linux_resources[2].start = linux_resources[2].end = io_resources->mmu_irq_number;
	linux_resources[2].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;

	linux_resources[3].start = linux_resources[3].end = io_resources->gpu_irq_number;
	linux_resources[3].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
}
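A hedged sketch of the integrator-side structure this function consumes; the register base and IRQ numbers below are placeholders, not values from any real platform.

/* Hypothetical platform definition; all numbers are placeholders. */
static kbase_io_resources example_io_resources = {
	.job_irq_number   = 68,
	.mmu_irq_number   = 69,
	.gpu_irq_number   = 70,
	.io_memory_region = {
		.start = 0xFC010000,
		.end   = 0xFC010000 + (4096 * 4) - 1
	}
};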
Example #30
0
uintptr_t kbasep_get_config_value(const kbase_attribute *attributes, int attribute_id)
{
	const kbase_attribute *attr;

	OSK_ASSERT(attributes != NULL);

	attr = kbasep_get_next_attribute(attributes, attribute_id);
	if (attr != NULL)
	{
		return attr->data;
	}

	/* default values */
	switch (attribute_id)
	{
		case KBASE_CONFIG_ATTR_MEMORY_PER_PROCESS_LIMIT:
			return (uintptr_t)-1;
		case KBASE_CONFIG_ATTR_UMP_DEVICE:
			return UMP_DEVICE_W_SHIFT;
		case KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_MAX:
			return (uintptr_t)-1;
		case KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_PERF_GPU:
			return KBASE_MEM_PERF_NORMAL;
		case KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US:
			return DEFAULT_IRQ_THROTTLE_TIME_US;
		/* Begin scheduling defaults */
		case KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS:
			return     DEFAULT_JS_SCHEDULING_TICK_NS;
		case KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS:
			return     DEFAULT_JS_SOFT_STOP_TICKS;
		case KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS:
			return     DEFAULT_JS_HARD_STOP_TICKS_SS;
		case KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS:
			return     DEFAULT_JS_HARD_STOP_TICKS_NSS;
		case KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS:
			return     DEFAULT_JS_CTX_TIMESLICE_NS;
		case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES:
			return     DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES;
		case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES:
			return     DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES;
		case KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS:
			return     DEFAULT_JS_RESET_TICKS_SS;
		case KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS:
			return     DEFAULT_JS_RESET_TICKS_NSS;
		case KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS:
			return     DEFAULT_JS_RESET_TIMEOUT_MS;
		/* End scheduling defaults */
		default:
			OSK_PRINT_ERROR(OSK_BASE_CORE,
			    "kbasep_get_config_value. Cannot get value of attribute with id=%i and no default value defined",
			    attribute_id);
			return 0;
	}
}