_mali_osk_errcode_t mali_scheduler_initialize(void) { if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_id_autonumber, 0)) { MALI_DEBUG_PRINT(1, ("Initialization of atomic job id counter failed.\n")); return _MALI_OSK_ERR_FAULT; } if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0)) { MALI_DEBUG_PRINT(1, ("Initialization of atomic job cache order counter failed.\n")); _mali_osk_atomic_term(&mali_job_id_autonumber); return _MALI_OSK_ERR_FAULT; } pp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_pp, NULL); if (NULL == pp_scheduler_wq_high_pri) { _mali_osk_atomic_term(&mali_job_cache_order_autonumber); _mali_osk_atomic_term(&mali_job_id_autonumber); return _MALI_OSK_ERR_NOMEM; } gp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_gp, NULL); if (NULL == gp_scheduler_wq_high_pri) { _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri); _mali_osk_atomic_term(&mali_job_cache_order_autonumber); _mali_osk_atomic_term(&mali_job_id_autonumber); return _MALI_OSK_ERR_NOMEM; } return _MALI_OSK_ERR_OK; }
/**
 * Initialize the profiling module.
 *
 * Resets the entry buffer bookkeeping, creates the spinlock protecting
 * the profiling state, and - when @auto_start is set - immediately starts
 * profiling with the maximum buffer size.
 *
 * @param auto_start MALI_TRUE to start profiling right away.
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the lock
 *         could not be created or auto-start failed.
 */
_mali_osk_errcode_t _mali_profiling_init(mali_bool auto_start)
{
	profile_entries = NULL;
	profile_entry_count = 0;

	_mali_osk_atomic_init(&profile_insert_index, 0);
	_mali_osk_atomic_init(&profile_entries_written, 0);

	lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0);
	if (NULL == lock) {
		return _MALI_OSK_ERR_FAULT;
	}

	prof_state = MALI_PROFILING_STATE_IDLE;

	if (MALI_TRUE == auto_start) {
		u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* use the maximum buffer size */

		/* Remember the auto-start so user space can query it at startup. */
		mali_profiling_default_enable = MALI_TRUE;

		if (_MALI_OSK_ERR_OK != _mali_profiling_start(&limit)) {
			return _MALI_OSK_ERR_FAULT;
		}
	}

	return _MALI_OSK_ERR_OK;
}
/**
 * Initialize the OSK profiling layer.
 *
 * Clears the entry buffer bookkeeping, creates the ordered spinlock used
 * to serialize profiling state changes, and optionally auto-starts
 * profiling (also enabling the software-events user setting).
 *
 * @param auto_start MALI_TRUE to start profiling immediately.
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise.
 */
_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
{
	profile_entries = NULL;
	profile_entry_count = 0;

	_mali_osk_atomic_init(&profile_insert_index, 0);
	_mali_osk_atomic_init(&profile_entries_written, 0);

	lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE,
	                           0, _MALI_OSK_LOCK_ORDER_PROFILING);
	if (NULL == lock) {
		return _MALI_OSK_ERR_FAULT;
	}

	prof_state = MALI_PROFILING_STATE_IDLE;

	if (MALI_TRUE == auto_start) {
		u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* use the maximum buffer size */

		mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
		if (_MALI_OSK_ERR_OK != _mali_osk_profiling_start(&limit)) {
			return _MALI_OSK_ERR_FAULT;
		}
	}

	return _MALI_OSK_ERR_OK;
}
void _mali_osk_pm_dev_enable(void) { _mali_osk_atomic_init(&mali_pm_ref_count, 0); _mali_osk_atomic_init(&mali_suspend_called, 0); if (NULL == pm_timer) { pm_timer = _mali_osk_timer_init(); } if (NULL != pm_timer) { _mali_osk_timer_setcallback(pm_timer, _mali_pm_callback, NULL); } pm_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, 0); #if MALI_LICENSE_IS_GPL #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36) mali_pm_wq = alloc_workqueue("mali_pm", WQ_UNBOUND, 0); #else mali_pm_wq = create_workqueue("mali_pm"); #endif if(NULL == mali_pm_wq) { MALI_PRINT_ERROR(("Unable to create Mali pm workqueue\n")); } #endif INIT_WORK( &mali_pm_wq_work_handle, mali_bottom_half_pm ); }
_mali_osk_errcode_t mali_scheduler_initialize(void) { _mali_osk_atomic_init(&mali_job_id_autonumber, 0); _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0); _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri); _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri); job_queue_gp.depth = 0; _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri); _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri); job_queue_pp.depth = 0; mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init( _MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER); if (NULL == mali_scheduler_lock_obj) { mali_scheduler_terminate(); } #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) scheduler_wq_pp_job_delete = _mali_osk_wq_create_work( mali_scheduler_do_pp_job_delete, NULL); if (NULL == scheduler_wq_pp_job_delete) { mali_scheduler_terminate(); return _MALI_OSK_ERR_FAULT; } scheduler_pp_job_delete_lock = _mali_osk_spinlock_irq_init( _MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED); if (NULL == scheduler_pp_job_delete_lock) { mali_scheduler_terminate(); return _MALI_OSK_ERR_FAULT; } #endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */ #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) scheduler_wq_pp_job_queue = _mali_osk_wq_create_work( mali_scheduler_do_pp_job_queue, NULL); if (NULL == scheduler_wq_pp_job_queue) { mali_scheduler_terminate(); return _MALI_OSK_ERR_FAULT; } scheduler_pp_job_queue_lock = _mali_osk_spinlock_irq_init( _MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED); if (NULL == scheduler_pp_job_queue_lock) { mali_scheduler_terminate(); return _MALI_OSK_ERR_FAULT; } #endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ return _MALI_OSK_ERR_OK; }
/* Prepare memory descriptor */ static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_session_data *session) { mali_mem_allocation *mali_allocation; /* Allocate memory */ mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL); if (NULL == mali_allocation) { MALI_DEBUG_PRINT(1, ("mali_mem_allocation_struct_create: descriptor was NULL\n")); return NULL; } MALI_DEBUG_CODE(mali_allocation->magic = MALI_MEM_ALLOCATION_VALID_MAGIC); /* do init */ mali_allocation->flags = 0; mali_allocation->session = session; INIT_LIST_HEAD(&mali_allocation->list); _mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1); /** *add to session list */ mutex_lock(&session->allocation_mgr.list_mutex); list_add_tail(&mali_allocation->list, &session->allocation_mgr.head); session->allocation_mgr.mali_allocation_num++; mutex_unlock(&session->allocation_mgr.list_mutex); return mali_allocation; }
void MTK_mali_osk_pm_dev_enable(void) { _mali_osk_atomic_init(&mtk_mali_pm_ref_count, 0); _mali_osk_atomic_init(&mtk_mali_suspend_called, 0); mtk_pm_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, 0); mtk_mali_pm_wq = alloc_workqueue("mtk_mali_pm", WQ_UNBOUND, 0); mtk_mali_pm_wq2 = alloc_workqueue("mtk_mali_pm2", WQ_UNBOUND, 0); if(NULL == mtk_mali_pm_wq || NULL == mtk_mali_pm_wq2) { MALI_PRINT_ERROR(("Unable to create Mali pm workqueue\n")); } INIT_WORK(&mtk_mali_pm_wq_work_handle, MTK_mali_bottom_half_pm_suspend ); INIT_WORK(&mtk_mali_pm_wq_work_handle2, MTK_mali_bottom_half_pm_resume); }
/**
 * Initialize the scheduler's atomic job id counter.
 *
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the atomic
 *         counter could not be initialized.
 */
_mali_osk_errcode_t mali_scheduler_initialize(void)
{
	_mali_osk_errcode_t err;

	err = _mali_osk_atomic_init(&mali_job_autonumber, 0);
	if (_MALI_OSK_ERR_OK != err) {
		MALI_DEBUG_PRINT(1, ("Initialization of atomic job id counter failed.\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	return _MALI_OSK_ERR_OK;
}
void _mali_osk_pm_dev_enable(void) /* @@@@ todo: change to init of some kind.. or change the way or where atomics are initialized? */ { _mali_osk_atomic_init(&mali_pm_ref_count, 0); _mali_osk_atomic_init(&mali_suspend_called, 0); pm_timer = _mali_osk_timer_init(); _mali_osk_timer_setcallback(pm_timer, _mali_pm_callback, NULL); pm_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, 0); #if MALI_LICENSE_IS_GPL #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36) mali_pm_wq = alloc_workqueue("mali_pm", WQ_UNBOUND, 0); #else mali_pm_wq = create_workqueue("mali_pm"); #endif if(NULL == mali_pm_wq) { MALI_PRINT_ERROR(("Unable to create Mali pm workqueue\n")); } #endif INIT_WORK( &mali_pm_wq_work_handle, mali_bottom_half_pm ); }
_mali_osk_errcode_t mali_platform_init() { MALI_CHECK(init_mali_clock(), _MALI_OSK_ERR_FAULT); #if MALI_VOLTAGE_LOCK _mali_osk_atomic_init(&voltage_lock_status, 0); #endif #if MALI_DVFS_ENABLED if (!clk_register_map) clk_register_map = _mali_osk_mem_mapioregion( CLK_DIV_STAT_G3D, 0x20, CLK_DESC ); if(!init_mali_dvfs_status(MALI_DVFS_DEFAULT_STEP)) MALI_DEBUG_PRINT(1, ("mali_platform_init failed\n")); #endif MALI_SUCCESS; }
inline _mali_osk_errcode_t _mali_osk_profiling_clear(void) { _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW); if (prof_state != MALI_PROFILING_STATE_RETURN) { _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */ } prof_state = MALI_PROFILING_STATE_IDLE; profile_entry_count = 0; _mali_osk_atomic_init(&profile_insert_index, 0); _mali_osk_atomic_init(&profile_entries_written, 0); if (NULL != profile_entries) { _mali_osk_vfree(profile_entries); profile_entries = NULL; } _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return _MALI_OSK_ERR_OK; }
mali_bool init_mali_dvfs_status(int step) { /*default status add here with the right function to get initilization value. */ if (!mali_dvfs_wq) mali_dvfs_wq = create_singlethread_workqueue("mali_dvfs"); _mali_osk_atomic_init(&bottomlock_status, 0); /*add a error handling here*/ set_mali_dvfs_current_step(step); return MALI_TRUE; }
mali_bool init_mali_dvfs_status(int step) { /*default status add here with the right function to get initilization value. */ if (!mali_dvfs_wq) mali_dvfs_wq = create_singlethread_workqueue("mali_dvfs"); #if MALI_GPU_BOTTOM_LOCK _mali_osk_atomic_init(&bottomlock_status, 0); #endif /*add a error handling here*/ maliDvfsStatus.currentStep = step; return MALI_TRUE; }
/**
 * Create a soft job of the given @type for @system.
 *
 * Allocates the activation notification up front, then the job itself,
 * and registers the job (with refcount 1) on the system's used list
 * under the system lock.
 *
 * Fix: when the job allocation failed, the already-created notification
 * was leaked; it is now deleted before returning NULL.
 *
 * @param system   Soft job system the job belongs to.
 * @param type     MALI_SOFT_JOB_TYPE_USER_SIGNALED or
 *                 MALI_SOFT_JOB_TYPE_SELF_SIGNALED.
 * @param user_job Opaque user-space job cookie stored on the job.
 * @return The new job, or NULL on allocation failure.
 */
struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job)
{
	struct mali_soft_job *job;
	_mali_osk_notification_t *notification = NULL;

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) ||
			  (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type));

	/* Create the activation notification first so job setup cannot fail
	 * after the job is visible on the system list. */
	notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED,
			sizeof(_mali_uk_soft_job_activated_s));
	if (unlikely(NULL == notification)) {
		MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
		return NULL;
	}

	job = _mali_osk_malloc(sizeof(struct mali_soft_job));
	if (unlikely(NULL == job)) {
		MALI_DEBUG_PRINT(2, ("Mali Soft Job: system alloc job failed. \n"));
		/* Bug fix: release the notification created above instead of
		 * leaking it. */
		_mali_osk_notification_delete(notification);
		return NULL;
	}

	mali_soft_job_system_lock(system);

	job->system = system;
	job->id = system->last_job_id++;
	job->state = MALI_SOFT_JOB_STATE_ALLOCATED;

	_mali_osk_list_add(&(job->system_list), &(system->jobs_used));

	job->type = type;
	job->user_job = user_job;
	job->activated = MALI_FALSE;

	job->activated_notification = notification;

	/* One reference, owned by the caller. */
	_mali_osk_atomic_init(&job->refcount, 1);

	MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
	MALI_DEBUG_ASSERT(system == job->system);
	MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);

	mali_soft_job_system_unlock(system);

	return job;
}
_mali_osk_errcode_t mali_pegasus_platform_init() { mali_gpu_clk = 266; mali_gpu_vol = 900000; MALI_CHECK(init_mali_pegasus_clock(), _MALI_OSK_ERR_FAULT); #ifdef CONFIG_EXYNOS_TMU_TC _mali_osk_atomic_init(&voltage_lock_status, 0); #endif #if MALI_DVFS_ENABLED if (!clk_pegasus_register_map) clk_pegasus_register_map = _mali_osk_mem_mapioregion( CLK_DIV_STAT_G3D, 0x20, CLK_DESC ); if(!init_mali_pegasus_dvfs_status(MALI_DVFS_DEFAULT_STEP)) MALI_DEBUG_PRINT(1, ("mali_platform_init failed\n")); #endif MALI_SUCCESS; }
_mali_osk_errcode_t _mali_internal_profiling_clear(void) { _mali_osk_mutex_wait(lock); if (MALI_PROFILING_STATE_RETURN != prof_state) { _mali_osk_mutex_signal(lock); return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */ } prof_state = MALI_PROFILING_STATE_IDLE; profile_mask = 0; _mali_osk_atomic_init(&profile_insert_index, 0); if (NULL != profile_entries) { _mali_osk_vfree(profile_entries); profile_entries = NULL; } _mali_osk_mutex_signal(lock); return _MALI_OSK_ERR_OK; }
/**
 * Create the PMM (power management module) subsystem.
 *
 * Allocates and zero-initializes the global pmm_state, selects the PM
 * policy (compile-time option), then acquires its resources in order:
 * platform/PMU init, two notification queues, the PMM IRQ handler (used
 * for its worker thread), the SMP lock (CONFIG_SMP only) and the state
 * lock, and finally the queued-messages counter.  Any failure jumps to
 * pmm_fail_cleanup, which tears down exactly what was acquired (the
 * zero-fill above guarantees untouched fields are NULL/MALI_FALSE).
 *
 * @param resource Platform resource handed to mali_platform_init()
 *                 (unused when MALI_PMM_NO_PMU is set).
 * @return MALI_SUCCESS on success, _MALI_OSK_ERR_NOMEM if the state
 *         allocation failed, _MALI_OSK_ERR_FAULT for any later failure.
 */
_mali_osk_errcode_t malipmm_create(_mali_osk_resource_t *resource)
{
	/* Create PMM state memory */
	MALI_DEBUG_ASSERT( pmm_state == NULL );
	pmm_state = (_mali_pmm_internal_state_t *) _mali_osk_malloc(sizeof(*pmm_state));
	MALI_CHECK_NON_NULL( pmm_state, _MALI_OSK_ERR_NOMEM );

	/* All values get 0 as default */
	_mali_osk_memset(pmm_state, 0, sizeof(*pmm_state));

	/* Set up the initial PMM state */
	pmm_state->waiting = 0;
	pmm_state->status = MALI_PMM_STATUS_IDLE;
	pmm_state->state = MALI_PMM_STATE_UNAVAILABLE; /* Until a core registers */

	/* Set up policy via compile time option for the moment */
#if MALI_PMM_ALWAYS_ON
	pmm_state->policy = MALI_PMM_POLICY_ALWAYS_ON;
#else
	pmm_state->policy = MALI_PMM_POLICY_JOB_CONTROL;
#endif

#if MALI_PMM_TRACE
	_mali_pmm_trace_policy_change( MALI_PMM_POLICY_NONE, pmm_state->policy );
#endif

	/* Set up assumes all values are initialized to NULL or MALI_FALSE, so
	 * we can exit halfway through set up and perform clean up */

#if !MALI_PMM_NO_PMU
	if( mali_platform_init(resource) != _MALI_OSK_ERR_OK ) goto pmm_fail_cleanup;
	pmm_state->pmu_initialized = MALI_TRUE;
#endif

	/* External and internal notification queues. */
	pmm_state->queue = _mali_osk_notification_queue_init();
	if( !pmm_state->queue ) goto pmm_fail_cleanup;

	pmm_state->iqueue = _mali_osk_notification_queue_init();
	if( !pmm_state->iqueue ) goto pmm_fail_cleanup;

	/* We are creating an IRQ handler just for the worker thread it gives us */
	pmm_state->irq = _mali_osk_irq_init( _MALI_OSK_IRQ_NUMBER_PMM,
		malipmm_irq_uhandler,
		malipmm_irq_bhandler,
		NULL,
		NULL,
		(void *)pmm_state, /* PMM state is passed to IRQ */
		"PMM handler" );
	if( !pmm_state->irq ) goto pmm_fail_cleanup;

#ifdef CONFIG_SMP
	mali_pmm_lock  = _mali_osk_lock_init((_mali_osk_lock_flags_t)( _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 0);
	if( !mali_pmm_lock ) goto pmm_fail_cleanup;
#endif /* CONFIG_SMP */

	/* NOTE(review): lock order 75 - presumably a driver-wide lock-order
	 * constant; confirm against the lock order list. */
	pmm_state->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 75);
	if( !pmm_state->lock ) goto pmm_fail_cleanup;

	if( _mali_osk_atomic_init( &(pmm_state->messages_queued), 0 ) != _MALI_OSK_ERR_OK )
	{
		goto pmm_fail_cleanup;
	}

	MALIPMM_DEBUG_PRINT( ("PMM: subsystem created, policy=%d\n", pmm_state->policy) );

	MALI_SUCCESS;

pmm_fail_cleanup:
	MALI_PRINT_ERROR( ("PMM: subsystem failed to be created\n") );
	if( pmm_state )
	{
		_mali_osk_resource_type_t t = PMU;
		/* Tear down only what was acquired; untouched fields are NULL. */
		if( pmm_state->lock ) _mali_osk_lock_term( pmm_state->lock );
		if( pmm_state->irq ) _mali_osk_irq_term( pmm_state->irq );
		if( pmm_state->queue ) _mali_osk_notification_queue_term( pmm_state->queue );
		if( pmm_state->iqueue ) _mali_osk_notification_queue_term( pmm_state->iqueue );
		if( pmm_state->pmu_initialized ) ( mali_platform_deinit(&t) );
		_mali_osk_free(pmm_state);
		pmm_state = NULL;
	}
	MALI_ERROR( _MALI_OSK_ERR_FAULT );
}
/**
 * Create a UMP handle wrapping caller-supplied physical memory blocks.
 *
 * Verifies each block's address and size are UMP-aligned, allocates the
 * ump_dd_mem descriptor, reserves a secure ID under the device's
 * secure-ID map lock, copies the block array, and initializes the
 * descriptor with refcount 1.  Handles created this way are forced to
 * be uncached.
 *
 * @param blocks     Array of physical blocks (copied; caller keeps
 *                   ownership of the array).
 * @param num_blocks Number of entries in @blocks.
 * @return A new handle, or UMP_DD_HANDLE_INVALID on bad alignment or
 *         allocation/ID failure.
 */
UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
{
	ump_dd_mem * mem;
	unsigned long size_total = 0;
	int map_id;
	u32 i;

	/* Go through the input blocks and verify that they are sane */
	for (i=0; i < num_blocks; i++)
	{
		unsigned long addr = blocks[i].addr;
		unsigned long size = blocks[i].size;

		DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
		size_total += blocks[i].size;

		if (0 != UMP_ADDR_ALIGN_OFFSET(addr))
		{
			MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
			return UMP_DD_HANDLE_INVALID;
		}

		if (0 != UMP_ADDR_ALIGN_OFFSET(size))
		{
			MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
			return UMP_DD_HANDLE_INVALID;
		}
	}

	/* Allocate the ump_dd_mem struct for this allocation */
	mem = _mali_osk_malloc(sizeof(*mem));
	if (NULL == mem)
	{
		DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	/* Find a secure ID for this allocation.  The map lock is held until
	 * the descriptor is fully initialized, so the ID cannot be looked up
	 * while half-built. */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);
	if (map_id < 0)
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(mem);
		DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	/* Now, make a copy of the block information supplied by the user */
	/* NOTE(review): sizeof(ump_dd_physical_block) * num_blocks is not
	 * checked for overflow - num_blocks is caller-controlled; verify
	 * callers bound it. */
	mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
	if (NULL == mem->block_array)
	{
		ump_descriptor_mapping_free(device.secure_id_map, map_id);
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(mem);
		DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	_mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);

	/* And setup the rest of the ump_dd_mem struct */
	_mali_osk_atomic_init(&mem->ref_count, 1);
	mem->secure_id = (ump_secure_id)map_id;
	mem->size_bytes = size_total;
	mem->nr_blocks = num_blocks;
	mem->backend_info = NULL;
	mem->ctx = NULL;
	mem->release_func = phys_blocks_release;
	/* For now UMP handles created by ump_dd_handle_create_from_phys_blocks() is forced to be Uncached */
	mem->is_cached = 0;
	mem->hw_device = _UMP_UK_USED_BY_CPU;
	mem->lock_usage = UMP_NOT_LOCKED;

	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));

	return (ump_dd_handle)mem;
}
/**
 * User-kernel allocate: create a new UMP allocation for a session.
 *
 * Allocates the session list element and the ump_dd_mem descriptor,
 * reserves a secure ID under the device map lock, asks the active memory
 * backend for the actual memory, and finally links the allocation into
 * the session's memory list.  On success user_interaction is updated
 * with the secure ID and the (page-aligned) size.
 *
 * Fix: the secure-ID failure path returned "- _MALI_OSK_ERR_INVALID_FUNC",
 * negating an error code that is already negative and thus returning a
 * positive, success-looking value.  It now returns the error code
 * directly, consistent with the backend-allocation failure path below.
 *
 * @param user_interaction In/out request; ctx must be the session.
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM or
 *         _MALI_OSK_ERR_INVALID_FUNC on failure.
 */
_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
{
	ump_session_data * session_data = NULL;
	ump_dd_mem *new_allocation = NULL;
	ump_session_memory_list_element * session_memory_element = NULL;
	int map_id;

	DEBUG_ASSERT_POINTER( user_interaction );
	DEBUG_ASSERT_POINTER( user_interaction->ctx );

	session_data = (ump_session_data *) user_interaction->ctx;

	session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
	if (NULL == session_memory_element)
	{
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
	if (NULL==new_allocation)
	{
		_mali_osk_free(session_memory_element);
		DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	/* Create a secure ID for this allocation */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);
	if (map_id < 0)
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(session_memory_element);
		_mali_osk_free(new_allocation);
		DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
		/* Bug fix: was "return - _MALI_OSK_ERR_INVALID_FUNC;" which
		 * negated the (negative) error code into a positive value. */
		return _MALI_OSK_ERR_INVALID_FUNC;
	}

	/* Initialize the part of the new_allocation that we know so far */
	new_allocation->secure_id = (ump_secure_id)map_id;
	_mali_osk_atomic_init(&new_allocation->ref_count, 1);
	if ( 0==(UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints) )
		new_allocation->is_cached = 0;
	else new_allocation->is_cached = 1;

	/* Special case a size of 0: emulate what malloc does in this case,
	 * which is to return a valid pointer that must be freed, but can't
	 * be dereferenced - allocate the minimum block size instead. */
	if (0 == user_interaction->size)
	{
		user_interaction->size = 1;
	}
	new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
	new_allocation->lock_usage = UMP_NOT_LOCKED;

	/* Now, ask the active memory backend to do the actual memory allocation */
	if (!device.backend->allocate( device.backend->ctx, new_allocation ) )
	{
		DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
		ump_descriptor_mapping_free(device.secure_id_map, map_id);
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(new_allocation);
		_mali_osk_free(session_memory_element);
		return _MALI_OSK_ERR_INVALID_FUNC;
	}
	new_allocation->hw_device = _UMP_UK_USED_BY_CPU;
	new_allocation->ctx = device.backend->ctx;
	new_allocation->release_func = device.backend->release;

	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element->mem = new_allocation;
	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	user_interaction->secure_id = new_allocation->secure_id;
	user_interaction->size = new_allocation->size_bytes;

	DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));

	return _MALI_OSK_ERR_OK;
}
/* Enable the Mali PM device layer (minimal variant): reset the PM
 * reference counter to zero.  No timer, lock, or workqueue is set up
 * here, unlike the fuller variants of this function elsewhere. */
void _mali_osk_pm_dev_enable(void) { _mali_osk_atomic_init(&mali_pm_ref_count, 0); }
/**
 * Check if fence has been signaled.
 *
 * @param system Timeline system.
 * @param fence Timeline fence.
 * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
 */
static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
{
	int i;
	u32 tid = _mali_osk_get_tid();
	mali_bool ret = MALI_TRUE;
#if defined(CONFIG_SYNC)
	struct sync_fence *sync_fence = NULL;
#endif

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT_POINTER(fence);

	/* Reentrant spinlock keyed on the calling thread id. */
	mali_spinlock_reentrant_wait(system->spinlock, tid);

	/* The fence is signaled only if every referenced point on every
	 * timeline has been released. */
	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
		struct mali_timeline *timeline;
		mali_timeline_point point;

		point = fence->points[i];

		if (likely(MALI_TIMELINE_NO_POINT == point)) {
			/* Fence contains no point on this timeline. */
			continue;
		}

		timeline = system->timelines[i];
		MALI_DEBUG_ASSERT_POINTER(timeline);

		if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
			MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
		}

		if (!mali_timeline_is_point_released(timeline, point)) {
			ret = MALI_FALSE;
			goto exit;
		}
	}

#if defined(CONFIG_SYNC)
	/* An attached Android sync fd must also have signaled. */
	if (-1 != fence->sync_fd) {
		sync_fence = sync_fence_fdget(fence->sync_fd);
		if (likely(NULL != sync_fence)) {
			/* sync_fence->status became atomic in kernel 3.17. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
			if (0 == sync_fence->status) {
#else
			if (0 == atomic_read(&sync_fence->status)) {
#endif
				ret = MALI_FALSE;
			}
		} else {
			MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
		}
	}
#endif /* defined(CONFIG_SYNC) */

exit:
	mali_spinlock_reentrant_signal(system->spinlock, tid);

#if defined(CONFIG_SYNC)
	/* Drop the fd reference taken above (outside the spinlock). */
	if (NULL != sync_fence) {
		sync_fence_put(sync_fence);
	}
#endif /* defined(CONFIG_SYNC) */

	return ret;
}

/**
 * Wait for @fence to signal, or until @timeout.
 *
 * A timeout of MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY only polls
 * the current status; MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER waits
 * without a time limit.  Returns MALI_TRUE if the fence signaled.
 */
mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
{
	struct mali_timeline_fence_wait_tracker *wait;
	mali_timeline_point point;
	mali_bool ret;

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT_POINTER(fence);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));

	if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
		return mali_timeline_fence_wait_check_status(system, fence);
	}

	wait = mali_timeline_fence_wait_tracker_alloc();
	if (unlikely(NULL == wait)) {
		MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
		return MALI_FALSE;
	}

	wait->activated = MALI_FALSE;
	wait->system = system;

	/* Initialize refcount to two references.  The reference first will be
	 * released by this function after the wait is over.  The second
	 * reference will be released when the tracker is activated. */
	_mali_osk_atomic_init(&wait->refcount, 2);

	/* Add tracker to timeline system, but not to a timeline. */
	mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
	point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
	MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
	MALI_IGNORE(point);

	/* Wait for the tracker to be activated or time out. */
	if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
		_mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
	} else {
		_mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
	}

	ret = wait->activated;

	/* Drop this function's reference; free the tracker if it was the last
	 * (i.e. activation already dropped its reference). */
	if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
		mali_timeline_fence_wait_tracker_free(wait);
	}

	return ret;
}

/**
 * Activation callback for a fence wait tracker: mark it activated, wake
 * any waiters, release the tracker, and drop the activation reference.
 */
void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
{
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;

	MALI_DEBUG_ASSERT_POINTER(wait);
	MALI_DEBUG_ASSERT_POINTER(wait->system);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));

	MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
	wait->activated = MALI_TRUE;

	_mali_osk_wait_queue_wake_up(wait->system->wait_queue);

	/* Nothing can wait on this tracker, so nothing to schedule after release. */
	schedule_mask = mali_timeline_tracker_release(&wait->tracker);
	MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
	MALI_IGNORE(schedule_mask);

	/* Drop the activation reference; free if the waiter already left. */
	if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
		mali_timeline_fence_wait_tracker_free(wait);
	}
}
/* Initialize the PP job module: reset the per-sub-job performance
 * counter usage count to zero. */
void mali_pp_job_initialize(void) { _mali_osk_atomic_init(&pp_counter_per_sub_job_count, 0); }