void oskp_debug_test_timer_stats( void )
{
    oskp_time_test time_tester;
    osk_ticks start_timestamp;
    osk_ticks end_timestamp;
    u32 msec_elapsed;
    osk_error err;

    if ( oskp_timer_has_been_checked != MALI_FALSE )
    {
        return;
    }
    oskp_timer_has_been_checked = MALI_TRUE;

    OSK_MEMSET( &time_tester, 0, sizeof(time_tester) );

    err = osk_timer_on_stack_init( &time_tester.timer );
    if ( err != OSK_ERR_NONE )
    {
        goto fail_init;
    }

    osk_timer_callback_set( &time_tester.timer, &oskp_check_timer_callback, &time_tester );

    start_timestamp = osk_time_now();
    err = osk_timer_start_ns( &time_tester.timer, TIMER_PERIOD_NS );
    if ( err != OSK_ERR_NONE )
    {
        goto fail_start;
    }

    msleep( TIMER_TEST_TIME_MS );

    time_tester.should_stop = MALI_TRUE;
    osk_timer_stop( &time_tester.timer );
    end_timestamp = osk_time_now();

    msec_elapsed = osk_time_elapsed( start_timestamp, end_timestamp );
    OSK_PRINT( OSK_BASE_CORE, "OSK Timer did %d iterations in %dms", time_tester.val, msec_elapsed );

    osk_timer_on_stack_term( &time_tester.timer );
    return;

fail_start:
    osk_timer_on_stack_term( &time_tester.timer );
fail_init:
    OSK_PRINT_WARN( OSK_BASE_CORE, "OSK Timer couldn't init/start for testing stats" );
    return;
}
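/*
 * For reference, a minimal sketch of what the timer callback wired up above
 * might look like. This is an illustration only: the real
 * oskp_check_timer_callback is defined elsewhere; the callback signature, the
 * oskp_time_test field usage and the re-arm via osk_timer_start_ns() are
 * assumptions inferred from how oskp_debug_test_timer_stats() uses them.
 */
static void oskp_check_timer_callback_sketch( void *data )
{
    /* assumed: the callback data is the oskp_time_test passed to osk_timer_callback_set() */
    oskp_time_test *time_tester = (oskp_time_test *)data;

    /* assumed: 'val' counts how many times the timer has fired */
    ++time_tester->val;

    if ( time_tester->should_stop == MALI_FALSE )
    {
        /* assumed: the timer is one-shot and must be re-armed for the next period */
        osk_timer_start_ns( &time_tester->timer, TIMER_PERIOD_NS );
    }
}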
void kbasep_config_parse_io_resources(const kbase_io_resources *io_resources, struct resource *linux_resources)
{
    OSK_ASSERT(io_resources != NULL);
    OSK_ASSERT(linux_resources != NULL);

    OSK_MEMSET(linux_resources, 0, PLATFORM_CONFIG_RESOURCE_COUNT * sizeof(struct resource));

    linux_resources[0].start = io_resources->io_memory_region.start;
    linux_resources[0].end   = io_resources->io_memory_region.end;
    linux_resources[0].flags = IORESOURCE_MEM;

    linux_resources[1].start = linux_resources[1].end = io_resources->job_irq_number;
    linux_resources[1].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;

    linux_resources[2].start = linux_resources[2].end = io_resources->mmu_irq_number;
    linux_resources[2].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;

    linux_resources[3].start = linux_resources[3].end = io_resources->gpu_irq_number;
    linux_resources[3].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
}
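/*
 * For illustration, a sketch of how the parsed resource table might be handed
 * to a Linux platform device (requires <linux/platform_device.h>).
 * platform_device_alloc(), platform_device_add_resources(), platform_device_add()
 * and platform_device_put() are standard Linux APIs; the device name "mali",
 * this helper's name and its error handling are assumptions, not the driver's
 * actual registration path.
 */
static int kbase_register_platform_device_sketch(const kbase_io_resources *io_resources)
{
    struct resource linux_resources[PLATFORM_CONFIG_RESOURCE_COUNT];
    struct platform_device *pdev;
    int err;

    /* Translate the driver's I/O description into Linux resource entries */
    kbasep_config_parse_io_resources(io_resources, linux_resources);

    pdev = platform_device_alloc("mali", 0); /* hypothetical device name/id */
    if (!pdev)
        return -ENOMEM;

    err = platform_device_add_resources(pdev, linux_resources, PLATFORM_CONFIG_RESOURCE_COUNT);
    if (!err)
        err = platform_device_add(pdev);

    if (err)
        platform_device_put(pdev);

    return err;
}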
mali_error kbasep_js_devdata_init( kbase_device *kbdev )
{
    kbasep_js_device_data *js_devdata;
    mali_error err;
    int i;
    u16 as_present;
    osk_error osk_err;

    OSK_ASSERT( kbdev != NULL );

    js_devdata = &kbdev->js_data;
    as_present = (1U << kbdev->nr_address_spaces) - 1;

    OSK_ASSERT( js_devdata->init_status == JS_DEVDATA_INIT_NONE );

    js_devdata->nr_contexts_running = 0;
    js_devdata->as_free = as_present; /* All ASs initially free */
    js_devdata->runpool_irq.nr_nss_ctxs_running = 0;
    js_devdata->runpool_irq.nr_permon_jobs_submitted = 0;
    js_devdata->runpool_irq.submit_allowed = 0u; /* No ctx allowed to submit */

    /* Config attributes */
    js_devdata->scheduling_tick_ns = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS );
    js_devdata->soft_stop_ticks = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS );
    js_devdata->hard_stop_ticks_ss = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS );
    js_devdata->hard_stop_ticks_nss = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS );
    js_devdata->gpu_reset_ticks_ss = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS );
    js_devdata->gpu_reset_ticks_nss = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS );
    js_devdata->ctx_timeslice_ns = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS );
    js_devdata->cfs_ctx_runtime_init_slices = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES );
    js_devdata->cfs_ctx_runtime_min_slices = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES );

    OSK_PRINT_INFO( OSK_BASE_JM, "JS Config Attribs: " );
    OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->scheduling_tick_ns:%d", js_devdata->scheduling_tick_ns );
    OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->soft_stop_ticks:%d", js_devdata->soft_stop_ticks );
    OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->hard_stop_ticks_ss:%d", js_devdata->hard_stop_ticks_ss );
    OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->hard_stop_ticks_nss:%d", js_devdata->hard_stop_ticks_nss );
    OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->gpu_reset_ticks_ss:%d", js_devdata->gpu_reset_ticks_ss );
    OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->gpu_reset_ticks_nss:%d", js_devdata->gpu_reset_ticks_nss );
    OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->ctx_timeslice_ns:%d", js_devdata->ctx_timeslice_ns );
    OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->cfs_ctx_runtime_init_slices:%d", js_devdata->cfs_ctx_runtime_init_slices );
    OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->cfs_ctx_runtime_min_slices:%d", js_devdata->cfs_ctx_runtime_min_slices );

#if MALI_BACKEND_KERNEL
    /* Only output on real kernel modules, otherwise it fills up multictx testing output */
#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0
    OSK_PRINT( OSK_BASE_JM,
               "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%d at %dns per tick. Other soft-stops may still occur.",
               js_devdata->soft_stop_ticks,
               js_devdata->scheduling_tick_ns );
#endif
#if KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
    OSK_PRINT( OSK_BASE_JM,
               "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_nss==%d at %dns per tick. Other hard-stops may still occur.",
               js_devdata->hard_stop_ticks_ss,
               js_devdata->hard_stop_ticks_nss,
               js_devdata->scheduling_tick_ns );
#endif
#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0 && KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
    OSK_PRINT( OSK_BASE_JM, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing." );
#endif
#endif /* MALI_BACKEND_KERNEL */

    /* Set up the number of IRQ throttle cycles based on the given time */
    {
        u32 irq_throttle_time_us = kbdev->gpu_props.irq_throttle_time_us;
        u32 irq_throttle_cycles = kbasep_js_convert_us_to_gpu_ticks_max_freq( kbdev, irq_throttle_time_us );
        osk_atomic_set( &kbdev->irq_throttle_cycles, irq_throttle_cycles );
    }

    /* Clear the AS data, including setting NULL pointers */
    OSK_MEMSET( &js_devdata->runpool_irq.per_as_data[0], 0, sizeof(js_devdata->runpool_irq.per_as_data) );

    for ( i = 0; i < kbdev->nr_job_slots; ++i )
    {
        js_devdata->js_reqs[i] = core_reqs_from_jsn_features( kbdev->job_slot_features[i] );
    }
    js_devdata->init_status |= JS_DEVDATA_INIT_CONSTANTS;

    /* On error, we could continue on: providing none of the below resources
     * rely on the ones above */
    osk_err = osk_mutex_init( &js_devdata->runpool_mutex, OSK_LOCK_ORDER_JS_RUNPOOL );
    if ( osk_err == OSK_ERR_NONE )
    {
        js_devdata->init_status |= JS_DEVDATA_INIT_RUNPOOL_MUTEX;
    }

    osk_err = osk_mutex_init( &js_devdata->queue_mutex, OSK_LOCK_ORDER_JS_QUEUE );
    if ( osk_err == OSK_ERR_NONE )
    {
        js_devdata->init_status |= JS_DEVDATA_INIT_QUEUE_MUTEX;
    }

    osk_err = osk_spinlock_irq_init( &js_devdata->runpool_irq.lock, OSK_LOCK_ORDER_JS_RUNPOOL_IRQ );
    if ( osk_err == OSK_ERR_NONE )
    {
        js_devdata->init_status |= JS_DEVDATA_INIT_RUNPOOL_IRQ_LOCK;
    }

    err = kbasep_js_policy_init( kbdev );
    if ( err == MALI_ERROR_NONE )
    {
        js_devdata->init_status |= JS_DEVDATA_INIT_POLICY;
    }

    /* On error, do no cleanup; this will be handled by the caller(s), since
     * we've designed this resource to be safe to terminate on init-fail */
    if ( js_devdata->init_status != JS_DEVDATA_INIT_ALL )
    {
        return MALI_ERROR_FUNCTION_FAILED;
    }

    return MALI_ERROR_NONE;
}
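/*
 * For clarity, a sketch of the microsecond-to-GPU-cycle conversion used for the
 * IRQ throttle setup above. The real kbasep_js_convert_us_to_gpu_ticks_max_freq()
 * is defined elsewhere; the gpu_props field path and the rounding behaviour shown
 * here are assumptions. The underlying idea is simply:
 * cycles = time_us * max_GPU_frequency_in_MHz.
 */
static u32 kbasep_js_us_to_cycles_sketch( kbase_device *kbdev, u32 time_us )
{
    /* assumed field path for the maximum GPU clock in kHz */
    u32 freq_khz = kbdev->gpu_props.props.core_props.gpu_freq_khz_max;

    /* kHz * us = (10^3 cycles/s) * (10^-6 s) = 10^-3 cycles, hence the /1000 */
    return (u32)( ((u64)time_us * freq_khz) / 1000 );
}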
static base_jd_event_code kbase_dump_cpu_gpu_time(kbase_context *kctx, mali_addr64 jc)
{
    kbase_va_region *reg;
    osk_phy_addr addr;
    u64 pfn;
    u32 offset;
    char *page;
    osk_timeval tv;
    base_dump_cpu_gpu_counters data;
    u64 system_time;
    u64 cycle_counter;
    u32 hi1, hi2;

    OSK_MEMSET(&data, 0, sizeof(data));

    /* Read hi, lo, hi to ensure that overflow from lo to hi is handled correctly */
    do
    {
        hi1 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
        cycle_counter = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
        hi2 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
        cycle_counter |= (((u64)hi1) << 32);
    } while (hi1 != hi2);

    /* Read hi, lo, hi to ensure that overflow from lo to hi is handled correctly */
    do
    {
        hi1 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), NULL);
        system_time = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_LO), NULL);
        hi2 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), NULL);
        system_time |= (((u64)hi1) << 32);
    } while (hi1 != hi2);

    /* Record the CPU's idea of current time */
    osk_gettimeofday(&tv);
    data.sec = tv.tv_sec;
    data.usec = tv.tv_usec;
    data.system_time = system_time;
    data.cycle_counter = cycle_counter;

    pfn = jc >> 12;
    offset = jc & 0xFFF;

    if (offset > 0x1000 - sizeof(data))
    {
        /* Wouldn't fit in the page */
        return BASE_JD_EVENT_JOB_CANCELLED;
    }

    reg = kbase_region_lookup(kctx, jc);
    if (!reg)
    {
        return BASE_JD_EVENT_JOB_CANCELLED;
    }

    if (!(reg->flags & KBASE_REG_GPU_RW))
    {
        /* Region is not writable by GPU so we won't write to it either */
        return BASE_JD_EVENT_JOB_CANCELLED;
    }

    if (!reg->phy_pages)
    {
        return BASE_JD_EVENT_JOB_CANCELLED;
    }

    addr = reg->phy_pages[pfn - reg->start_pfn];
    if (!addr)
    {
        return BASE_JD_EVENT_JOB_CANCELLED;
    }

    page = osk_kmap(addr);
    if (!page)
    {
        return BASE_JD_EVENT_JOB_CANCELLED;
    }

    memcpy(page + offset, &data, sizeof(data));
    osk_kunmap(addr, page);

    return BASE_JD_EVENT_DONE;
}