/*
 * Allocate and initialize a GP (vertex processor) job object from the
 * user-supplied start-job arguments.
 *
 * @param session  owning session; stored by reference
 * @param args     user job descriptor (already copied to kernel space)
 * @param id       driver-assigned job id
 * @return the new job, or NULL if allocation fails
 */
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *args, u32 id)
{
	struct mali_gp_job *new_job;

	new_job = _mali_osk_malloc(sizeof(struct mali_gp_job));
	if (NULL == new_job) {
		return NULL;
	}

	_mali_osk_list_init(&new_job->list);

	/* Identity of the job and its submitter */
	new_job->session = session;
	new_job->id = id;
	new_job->user_id = args->user_job_ptr;
	new_job->pid = _mali_osk_get_pid();
	new_job->tid = _mali_osk_get_tid();

	/* Hardware register snapshot supplied by user space */
	_mali_osk_memcpy(new_job->frame_registers, args->frame_registers, sizeof(new_job->frame_registers));
	/* NOTE(review): frame register 4 appears to hold the PLBU heap start — confirm against register layout */
	new_job->heap_current_addr = args->frame_registers[4];

	/* Performance counter setup; result values are filled in on completion */
	new_job->perf_counter_flag = args->perf_counter_flag;
	new_job->perf_counter_src0 = args->perf_counter_src0;
	new_job->perf_counter_src1 = args->perf_counter_src1;
	new_job->perf_counter_value0 = 0;
	new_job->perf_counter_value1 = 0;

	new_job->frame_builder_id = args->frame_builder_id;
	new_job->flush_id = args->flush_id;

	return new_job;
}
/*
 * Create the (single) Mali APB DMA unit object.
 *
 * Acquires, in order: the object itself, its spinlock, a command-buffer
 * pool, and the mapped HW core. Uses a goto ladder so each failure point
 * unwinds exactly the resources acquired before it.
 *
 * @param resource  platform resource describing the DMA register region
 * @return the new object (also published in mali_global_dma_core),
 *         or NULL on any failure
 */
struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource)
{
	struct mali_dma_core* dma;
	_mali_osk_errcode_t err;

	/* Only one DMA unit instance may exist */
	MALI_DEBUG_ASSERT(NULL == mali_global_dma_core);

	dma = _mali_osk_malloc(sizeof(struct mali_dma_core));
	if (dma == NULL) goto alloc_failed;

	dma->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_DMA_COMMAND);
	if (NULL == dma->lock) goto lock_init_failed;

	/* Pool of DMA command buffers, 4-byte aligned */
	dma->pool = mali_dma_pool_create(MALI_DMA_CMD_BUF_SIZE, 4, 0);
	if (NULL == dma->pool) goto dma_pool_failed;

	err = mali_hw_core_create(&dma->hw_core, resource, MALI450_DMA_REG_SIZE);
	if (_MALI_OSK_ERR_OK != err) goto hw_core_failed;

	mali_global_dma_core = dma;
	MALI_DEBUG_PRINT(2, ("Mali DMA: Created Mali APB DMA unit\n"));
	return dma;

	/* Error handling: unwind in reverse acquisition order */
hw_core_failed:
	mali_dma_pool_destroy(dma->pool);
dma_pool_failed:
	_mali_osk_spinlock_term(dma->lock);
lock_init_failed:
	_mali_osk_free(dma);
alloc_failed:
	MALI_DEBUG_PRINT(2, ("Mali DMA: Failed to create APB DMA unit\n"));
	return NULL;
}
struct mali_cluster *mali_cluster_create(struct mali_l2_cache_core *l2_cache) { struct mali_cluster *cluster = NULL; if (mali_global_num_clusters >= MALI_MAX_NUMBER_OF_CLUSTERS) { MALI_PRINT_ERROR(("Mali cluster: Too many cluster objects created\n")); return NULL; } cluster = _mali_osk_malloc(sizeof(struct mali_cluster)); if (NULL != cluster) { _mali_osk_memset(cluster, 0, sizeof(struct mali_cluster)); cluster->l2 = l2_cache; /* This cluster now owns this L2 cache object */ cluster->last_invalidated_id = 0; mali_global_clusters[mali_global_num_clusters] = cluster; mali_global_num_clusters++; return cluster; } return NULL; }
/*
 * Create the (single) Mali PMU core object.
 *
 * Acquires, in order: the object, its spinlock, the mapped HW core, and
 * the platform device data (for the power-switch delay). On full success
 * the object is published in mali_global_pmu_core and returned; any
 * failure unwinds what was acquired so far and returns NULL.
 */
struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
{
	struct mali_pmu_core* pmu;

	/* Only one PMU instance may exist */
	MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
	MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));

	pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
	if (NULL != pmu) {
		pmu->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PMU);
		if (NULL != pmu->lock) {
			/* Probe which cores the PMU controls; all start as active */
			pmu->registered_cores_mask = mali_pmu_detect_mask();
			pmu->active_cores_mask = pmu->registered_cores_mask;
			if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
				_mali_osk_errcode_t err;
				struct _mali_osk_device_data data = { 0, };
				err = _mali_osk_device_data_get(&data);
				if (_MALI_OSK_ERR_OK == err) {
					/* Platform-specific delay applied when switching power domains */
					pmu->switch_delay = data.pmu_switch_delay;
					mali_global_pmu_core = pmu;
					return pmu;
				}
				mali_hw_core_delete(&pmu->hw_core);
			}
			_mali_osk_spinlock_term(pmu->lock);
		}
		_mali_osk_free(pmu);
	}
	return NULL;
}
/*
 * Construct a memory allocation engine binding a Mali-side address
 * manager to a process-side address manager.
 *
 * Both managers are validated in debug builds only; a thin engine struct
 * is then allocated and returned as an opaque handle.
 *
 * @return opaque engine handle, or NULL if allocation fails
 */
mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager)
{
	memory_engine * eng;

	/* Mali Address Manager need not support unmap_physical */
	MALI_DEBUG_ASSERT_POINTER(mali_address_manager);
	MALI_DEBUG_ASSERT_POINTER(mali_address_manager->allocate);
	MALI_DEBUG_ASSERT_POINTER(mali_address_manager->release);
	MALI_DEBUG_ASSERT_POINTER(mali_address_manager->map_physical);

	/* Process Address Manager must support unmap_physical for OS allocation
	 * error path handling */
	MALI_DEBUG_ASSERT_POINTER(process_address_manager);
	MALI_DEBUG_ASSERT_POINTER(process_address_manager->allocate);
	MALI_DEBUG_ASSERT_POINTER(process_address_manager->release);
	MALI_DEBUG_ASSERT_POINTER(process_address_manager->map_physical);
	MALI_DEBUG_ASSERT_POINTER(process_address_manager->unmap_physical);

	eng = (memory_engine*)_mali_osk_malloc(sizeof(memory_engine));
	if (NULL == eng) {
		return NULL;
	}

	eng->mali_address = mali_address_manager;
	eng->process_address = process_address_manager;

	return (mali_allocation_engine)eng;
}
struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource) { struct mali_dlbu_core *core = NULL; MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description)); core = _mali_osk_malloc(sizeof(struct mali_dlbu_core)); if (NULL != core) { if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE)) { if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core)) { mali_hw_core_register_write(&core->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR, MALI_DLB_VIRT_ADDR); return core; } MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description)); mali_hw_core_delete(&core->hw_core); } _mali_osk_free(core); } else { MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n")); } return NULL; }
struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource) { struct mali_bcast_unit *bcast_unit = NULL; MALI_DEBUG_ASSERT_POINTER(resource); MALI_DEBUG_PRINT(2, ("Mali Broadcast unit: Creating Mali Broadcast unit: %s\n", resource->description)); bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit)); if (NULL == bcast_unit) { MALI_PRINT_ERROR(("Mali Broadcast unit: Failed to allocate memory for Broadcast unit\n")); return NULL; } if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, resource, bcast_unit_reg_size)) { bcast_unit->current_mask = 0; mali_bcast_reset(bcast_unit); return bcast_unit; } else { MALI_PRINT_ERROR(("Mali Broadcast unit: Failed map broadcast unit\n")); } _mali_osk_free(bcast_unit); return NULL; }
/*
 * Create the (single) Mali PMU core object (variant without a lock or
 * pre-computed core mask; the registered cores mask is filled in later).
 *
 * @param resource  platform resource describing the PMU register region
 * @return the new object (also published in mali_global_pmu_core),
 *         or NULL on failure
 */
struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
{
	struct mali_pmu_core *pmu_core;

	/* Only one PMU instance may exist */
	MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
	MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));

	pmu_core = _mali_osk_malloc(sizeof(struct mali_pmu_core));
	if (NULL == pmu_core) {
		return NULL;
	}

	pmu_core->registered_cores_mask = 0; /* to be set later */

	if (_MALI_OSK_ERR_OK != mali_hw_core_create(&pmu_core->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
		_mali_osk_free(pmu_core);
		return NULL;
	}

	pmu_core->switch_delay = _mali_osk_get_pmu_switch_delay();
	mali_global_pmu_core = pmu_core;

	return pmu_core;
}
/*
 * Create the (single) Mali PMU core object for a configuration with the
 * given number of PP cores and L2 caches (used to compute the power
 * domain mask). Returns the object (also published in
 * mali_global_pmu_core) or NULL on any failure.
 */
struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource, u32 number_of_pp_cores, u32 number_of_l2_caches)
{
	struct mali_pmu_core* pmu;

	/* Only one PMU instance may exist */
	MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
	MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));

	pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
	if (NULL != pmu) {
		/* Bit mask of the power domains this PMU controls */
		pmu->mali_registered_cores_power_mask = mali_pmu_detect_mask(number_of_pp_cores, number_of_l2_caches);
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
			if (_MALI_OSK_ERR_OK == mali_pmu_reset(pmu)) {
				mali_global_pmu_core = pmu;
				return pmu;
			}
			mali_hw_core_delete(&pmu->hw_core);
		}
		_mali_osk_free(pmu);
	}
	return NULL;
}
/*
 * Create a GP job from a user-space job descriptor (variant that copies
 * the whole argument struct from user space and pre-allocates both the
 * "finished" and "out of memory" notifications).
 *
 * Each failure path releases exactly the resources acquired before it.
 *
 * @param session  owning session
 * @param uargs    user-space pointer to the start-job arguments
 * @param id       driver-assigned job id
 * @return the new job, or NULL on any failure
 */
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id)
{
	struct mali_gp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_malloc(sizeof(struct mali_gp_job));
	if (NULL != job) {
		/* Notification delivered to user space when the job completes */
		job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
		if (NULL == job->finished_notification) {
			_mali_osk_free(job);
			return NULL;
		}

		/* Notification delivered if the job stalls on heap exhaustion */
		job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
		if (NULL == job->oom_notification) {
			_mali_osk_notification_delete(job->finished_notification);
			_mali_osk_free(job);
			return NULL;
		}

		/* Snapshot the entire user argument struct into the job */
		if (0 != copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
			_mali_osk_notification_delete(job->finished_notification);
			_mali_osk_notification_delete(job->oom_notification);
			_mali_osk_free(job);
			return NULL;
		}

		perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);

		/* case when no counters came from user space
		 * so pass the debugfs / DS-5 provided global ones to the job object */
		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) || (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
			mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
			mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
		}

		_mali_osk_list_init(&job->list);
		job->session = session;
		job->id = id;
		/* NOTE(review): frame register 4 appears to hold the PLBU heap start — confirm */
		job->heap_current_addr = job->uargs.frame_registers[4];
		job->perf_counter_value0 = 0;
		job->perf_counter_value1 = 0;
		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();

		return job;
	}

	return NULL;
}
/*
 * Create a block-based physical memory allocator over the region
 * [base_address, base_address + size). The region is carved into
 * MALI_BLOCK_SIZE blocks; any tail smaller than one block is unused.
 *
 * @param base_address      physical start of the managed region
 * @param cpu_usage_adjust  offset applied to translate Mali physical
 *                          addresses to CPU physical addresses
 * @param size              region size in bytes (rounded down to blocks)
 * @return opaque allocator handle, or NULL on failure
 */
static mali_mem_allocator *mali_mem_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size)
{
	block_allocator *info;
	u32 usable_size;
	u32 num_blocks;

	/* Round size down to a whole number of blocks (MALI_BLOCK_SIZE is a power of two) */
	usable_size = size & ~(MALI_BLOCK_SIZE - 1);
	MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
	MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
	num_blocks = usable_size / MALI_BLOCK_SIZE;
	MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));

	if (usable_size == 0) {
		MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
		return NULL;
	}

	info = _mali_osk_malloc(sizeof(block_allocator));
	if (NULL != info) {
		mutex_init(&info->mutex);
		info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
		if (NULL != info->all_blocks) {
			u32 i;
			info->first_free = NULL;
			info->num_blocks = num_blocks;
			info->free_blocks = num_blocks;
			info->base = base_address;
			info->cpu_usage_adjust = cpu_usage_adjust;
			/* Thread every block onto the singly-linked free list */
			for (i = 0; i < num_blocks; i++) {
				info->all_blocks[i].next = info->first_free;
				info->first_free = &info->all_blocks[i];
			}
			return (mali_mem_allocator *)info;
		}
		_mali_osk_free(info);
	}
	return NULL;
}
/*
 * Open a new driver session: allocate the session object, create its
 * ioctl response queue, and call session_begin on every subsystem.
 *
 * If a subsystem's session_begin fails, all previously-begun subsystem
 * sessions are ended in reverse order and the failing error is returned.
 *
 * @param context  out: opaque session handle on success
 * @return _MALI_OSK_ERR_OK on success, error code otherwise
 */
_mali_osk_errcode_t _mali_ukk_open(void **context)
{
	int i;
	_mali_osk_errcode_t err;
	struct mali_session_data * session_data;

	/* allocated struct to track this session */
	session_data = (struct mali_session_data *)_mali_osk_malloc(sizeof(struct mali_session_data));
	MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_NOMEM);

	/* Per-subsystem private slots start out zeroed */
	_mali_osk_memset(session_data->subsystem_data, 0, sizeof(session_data->subsystem_data));

	/* create a response queue for this session */
	session_data->ioctl_queue = _mali_osk_notification_queue_init();
	if (NULL == session_data->ioctl_queue) {
		_mali_osk_free(session_data);
		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

	MALI_DEBUG_PRINT(3, ("Session starting\n"));

	/* call session_begin on all subsystems */
	for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i) {
		if (NULL != subsystems[i]->session_begin) {
			/* subsystem has a session_begin */
			err = subsystems[i]->session_begin(session_data, &session_data->subsystem_data[i], session_data->ioctl_queue);
			MALI_CHECK_GOTO(err == _MALI_OSK_ERR_OK, cleanup);
		}
	}

	*context = (void*)session_data;

	MALI_DEBUG_PRINT(3, ("Session started\n"));
	MALI_SUCCESS;

cleanup:
	MALI_DEBUG_PRINT(2, ("Session startup failed\n"));
	/* i is index of subsystem which failed session begin, all indices before that has to be ended */
	/* end subsystem sessions in the reverse order they where started in */
	for (i = i - 1; i >= 0; --i) {
		if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
	}

	_mali_osk_notification_queue_term(session_data->ioctl_queue);
	_mali_osk_free(session_data);

	/* return what the subsystem which failed session start returned */
	MALI_ERROR(err);
}
/*
 * Create a physical memory allocator backed by OS page allocation,
 * capped at max_allocation bytes (rounded up to whole CPU pages).
 *
 * @param max_allocation    upper bound on total bytes this allocator hands out
 * @param cpu_usage_adjust  offset for Mali-to-CPU physical address translation
 * @param name              debug name; stored by reference, not copied
 * @return allocator object with its vtable filled in, or NULL on failure
 */
mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name)
{
	mali_physical_memory_allocator * allocator;
	os_allocator * info;

	/* Round the cap up to a whole number of CPU pages */
	max_allocation = (max_allocation + _MALI_OSK_CPU_PAGE_SIZE-1) & ~(_MALI_OSK_CPU_PAGE_SIZE-1);

	MALI_DEBUG_PRINT(2, ("Mali OS memory allocator created with max allocation size of 0x%X bytes, cpu_usage_adjust 0x%08X\n", max_allocation, cpu_usage_adjust));

	allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
	if (NULL != allocator) {
		info = _mali_osk_malloc(sizeof(os_allocator));
		if (NULL != info) {
			/* Track usage in pages, not bytes */
			info->num_pages_max = max_allocation / _MALI_OSK_CPU_PAGE_SIZE;
			info->num_pages_allocated = 0;
			info->cpu_usage_adjust = cpu_usage_adjust;

			info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_MEM_INFO);
			if (NULL != info->mutex) {
				/* Fill in the allocator vtable */
				allocator->allocate = os_allocator_allocate;
				allocator->allocate_page_table_block = os_allocator_allocate_page_table_block;
				allocator->destroy = os_allocator_destroy;
				allocator->stat = os_allocator_stat;
				allocator->ctx = info;
				allocator->name = name;
				return allocator;
			}
			_mali_osk_free(info);
		}
		_mali_osk_free(allocator);
	}
	return NULL;
}
/*
 * ioctl wrapper: dump the session's MMU page tables into a user-supplied
 * buffer, then patch the user-visible register_writes / page_table_dump
 * pointers so they point into that user buffer.
 *
 * BUGFIX: the copy-back path previously dereferenced uargs->buffer
 * directly. That both bypasses the user-access API (uargs is a __user
 * pointer, so reading its fields requires get_user) and re-reads a field
 * user space can change after the access_ok() check (TOCTOU). All uses
 * now go through the value fetched once into the local 'buffer'.
 *
 * @return 0 on success, -EINVAL/-EFAULT/-ENOMEM or mapped error otherwise
 */
int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs)
{
	_mali_uk_dump_mmu_page_table_s kargs;
	_mali_osk_errcode_t err;
	void *buffer; /* user-space destination, fetched once */
	int rc = -EFAULT;

	/* validate input */
	MALI_CHECK_NON_NULL(uargs, -EINVAL); /* the session_data pointer was validated by caller */

	kargs.buffer = NULL;

	/* get location of user buffer */
	if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;

	/* get size of mmu page table info buffer from user space */
	if (0 != get_user(kargs.size, &uargs->size)) goto err_exit;

	/* verify we can access the whole of the user buffer */
	if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;

	/* allocate temporary buffer (kernel side) to store mmu page table info */
	kargs.buffer = _mali_osk_malloc(kargs.size);
	if (NULL == kargs.buffer) {
		rc = -ENOMEM;
		goto err_exit;
	}

	kargs.ctx = session_data;
	err = _mali_ukk_dump_mmu_page_table(&kargs);
	if (_MALI_OSK_ERR_OK != err) {
		rc = map_errcode(err);
		goto err_exit;
	}

	/* copy mmu page table info back to user space and update pointers;
	 * the pointer fix-ups translate kernel-buffer offsets into offsets
	 * within the user buffer */
	if (0 != copy_to_user(buffer, kargs.buffer, kargs.size)) goto err_exit;
	if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->register_writes)) goto err_exit;
	if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->page_table_dump)) goto err_exit;
	if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
	if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;

	rc = 0;

err_exit:
	if (kargs.buffer) _mali_osk_free(kargs.buffer);
	return rc;
}
/*
 * Create the (single) Mali GP core: map its registers, reset it, add it
 * to the given group, and install its IRQ handlers. Each failure level
 * unwinds exactly the steps completed before it.
 *
 * @param resource  platform resource (registers + IRQ line)
 * @param group     group that will own the GP core
 * @return the new core (also published in mali_global_gp_core), or NULL
 */
struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
{
	struct mali_gp_core *core = NULL;

	/* Only one GP core instance may exist */
	MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
	MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));

	core = _mali_osk_malloc(sizeof(struct mali_gp_core));
	if (NULL != core) {
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE)) {
			_mali_osk_errcode_t ret;

			ret = mali_gp_reset(core);

			if (_MALI_OSK_ERR_OK == ret) {
				ret = mali_group_add_gp_core(group, core);
				if (_MALI_OSK_ERR_OK == ret) {
					/* Setup IRQ handlers (which will do IRQ probing if needed) */
					core->irq = _mali_osk_irq_init(resource->irq, mali_group_upper_half_gp, group, mali_gp_irq_probe_trigger, mali_gp_irq_probe_ack, core, resource->description);
					if (NULL != core->irq) {
						MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
						mali_global_gp_core = core;
						return core;
					} else {
						MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
					}
					mali_group_remove_gp_core(group);
				} else {
					MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description));
				}
			}
			mali_hw_core_delete(&core->hw_core);
		}
		_mali_osk_free(core);
	} else {
		MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
	}

	return NULL;
}
/*
 * Create a soft job (a CPU-side job tracked on the timeline), registered
 * in the system's jobs_used list with an initial refcount of 1.
 *
 * BUGFIX: if the job allocation failed, the already-created activation
 * notification was leaked. It is now deleted on that path.
 *
 * @param system    owning soft job system (locked while the job is linked in)
 * @param type      MALI_SOFT_JOB_TYPE_USER_SIGNALED or ..._SELF_SIGNALED
 * @param user_job  opaque user-space cookie carried by the job
 * @return the new job, or NULL on allocation failure
 */
struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job)
{
	struct mali_soft_job *job;
	_mali_osk_notification_t *notification = NULL;

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) || (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type));

	/* Pre-allocate the notification delivered when the job is activated */
	notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
	if (unlikely(NULL == notification)) {
		MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
		return NULL;
	}

	job = _mali_osk_malloc(sizeof(struct mali_soft_job));
	if (unlikely(NULL == job)) {
		MALI_DEBUG_PRINT(2, ("Mali Soft Job: system alloc job failed. \n"));
		/* BUGFIX: release the notification created above; it was leaked here */
		_mali_osk_notification_delete(notification);
		return NULL;
	}

	mali_soft_job_system_lock(system);

	job->system = system;
	job->id = system->last_job_id++;
	job->state = MALI_SOFT_JOB_STATE_ALLOCATED;

	_mali_osk_list_add(&(job->system_list), &(system->jobs_used));

	job->type = type;
	job->user_job = user_job;
	job->activated = MALI_FALSE;

	job->activated_notification = notification;

	_mali_osk_atomic_init(&job->refcount, 1);

	MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
	MALI_DEBUG_ASSERT(system == job->system);
	MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);

	mali_soft_job_system_unlock(system);

	return job;
}
/*
 * Create a Mali MMU object: map its registers, reset the hardware, and
 * install its IRQ handlers. Each failure level unwinds the steps
 * completed before it.
 *
 * @param resource  platform resource (registers + IRQ line)
 * @return the new MMU object, or NULL on any failure
 */
struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource)
{
	struct mali_mmu_core* mmu = NULL;

	MALI_DEBUG_ASSERT_POINTER(resource);

	MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));

	mmu = _mali_osk_malloc(sizeof(struct mali_mmu_core));
	if (NULL != mmu) {
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
			if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) {
				/* Setup IRQ handlers (which will do IRQ probing if needed) */
				mmu->irq = _mali_osk_irq_init(resource->irq, mali_mmu_upper_half, mali_mmu_bottom_half, mali_mmu_probe_trigger, mali_mmu_probe_ack, mmu, "mali_mmu_irq_handlers");
				if (NULL != mmu->irq) {
					return mmu;
				} else {
					MALI_PRINT_ERROR(("Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
				}
			}
			mali_hw_core_delete(&mmu->hw_core);
		}
		_mali_osk_free(mmu);
	} else {
		MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
	}

	return NULL;
}
/*
 * Create (or look up) the power management domain for the given PMU mask.
 * If a domain already exists for the mask it is returned as-is, so the
 * function is idempotent per mask.
 *
 * The mask must have exactly one bit set; its bit position is used as the
 * index into the global mali_pm_domains table.
 *
 * @param pmu_mask  single-bit PMU power domain mask
 * @return the (new or existing) domain, or NULL on allocation failure
 */
struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask)
{
	struct mali_pm_domain* domain = NULL;
	u32 domain_id = 0;

	/* Return the existing domain if this mask is already registered */
	domain = mali_pm_domain_get_from_mask(pmu_mask);
	if (NULL != domain) return domain;

	MALI_DEBUG_PRINT(2, ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n", pmu_mask));

	domain = (struct mali_pm_domain *)_mali_osk_malloc(sizeof(struct mali_pm_domain));
	if (NULL != domain) {
		domain->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PM_DOMAIN);
		if (NULL == domain->lock) {
			_mali_osk_free(domain);
			return NULL;
		}

		/* New domains start powered on with no users or attached cores */
		domain->state = MALI_PM_DOMAIN_ON;
		domain->pmu_mask = pmu_mask;
		domain->use_count = 0;
		domain->group_list = NULL;
		domain->group_count = 0;
		domain->l2 = NULL;

		/* Domain id is the bit position of the (single) mask bit */
		domain_id = _mali_osk_fls(pmu_mask) - 1;
		/* Verify the domain_id */
		MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > domain_id);
		/* Verify that pmu_mask only one bit is set */
		MALI_DEBUG_ASSERT((1 << domain_id) == pmu_mask);
		mali_pm_domains[domain_id] = domain;

		return domain;
	} else {
		MALI_DEBUG_PRINT_ERROR(("Unable to create PM domain\n"));
	}

	return NULL;
}
/*
 * Create a group object owning the given MMU, attach it to a cluster,
 * and register it in the global group table.
 *
 * @param cluster  cluster the group belongs to (stored by reference)
 * @param mmu      MMU core; ownership passes to the new group
 * @return the new group, or NULL if the table is full or setup fails
 */
struct mali_group *mali_group_create(struct mali_cluster *cluster, struct mali_mmu_core *mmu)
{
	struct mali_group *group = NULL;

	if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
		MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
		return NULL;
	}

	group = _mali_osk_malloc(sizeof(struct mali_group));
	if (NULL != group) {
		_mali_osk_memset(group, 0, sizeof(struct mali_group));
		/* IRQ-safe spinlock: the group lock is taken from interrupt context */
		group->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_GROUP);
		if (NULL != group->lock) {
			group->cluster = cluster;
			group->mmu = mmu; /* This group object now owns the MMU object */
			group->session = NULL;
			group->page_dir_ref_count = 0;
			group->power_is_on = MALI_TRUE;

			/* Both processors start out idle */
			group->gp_state = MALI_GROUP_CORE_STATE_IDLE;
			group->pp_state = MALI_GROUP_CORE_STATE_IDLE;
#if defined(USING_MALI200)
			group->pagedir_activation_failed = MALI_FALSE;
#endif
			/* Publish in the global group table */
			mali_global_groups[mali_global_num_groups] = group;
			mali_global_num_groups++;

			return group;
		}
		_mali_osk_free(group);
	}

	return NULL;
}
/*
 * Begin a Mali200 (PP) subsystem session: allocate and zero a core
 * session object, publish it in the caller's slot, wire it to the
 * subsystem and the session's notification queue, then register it.
 *
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM otherwise
 */
static _mali_osk_errcode_t mali200_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
{
	mali_core_session * core_session;

	MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_session_begin\n"));

	core_session = _mali_osk_malloc(sizeof(mali_core_session));
	MALI_CHECK_NON_NULL(core_session, _MALI_OSK_ERR_NOMEM);

	_mali_osk_memset(core_session, 0, sizeof(*core_session));
	*slot = (mali_kernel_subsystem_session_slot)core_session;

	core_session->subsystem = &subsystem_mali200;
	core_session->notification_queue = queue;
#if USING_MMU
	core_session->mmu_session = mali_session_data;
#endif

	mali_core_session_begin(core_session);

	MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_session_begin\n"));
	MALI_SUCCESS;
}
/*
 * Create a GP job from a user-space descriptor (timeline-aware variant).
 * Pre-allocates the "finished" and "out of memory" notifications, copies
 * the whole user argument struct, hooks the job into the timeline, and
 * optionally takes a reference on a dependent PP job's tracker.
 *
 * Each failure path releases exactly the resources acquired before it.
 *
 * @param session     owning session
 * @param uargs       user-space pointer to the start-job arguments
 * @param id          driver-assigned job id
 * @param pp_tracker  optional PP job tracker to reference until GP completes
 * @return the new job, or NULL on any failure
 */
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
{
	struct mali_gp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_malloc(sizeof(struct mali_gp_job));
	if (NULL != job) {
		/* Notification delivered to user space when the job completes */
		job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
		if (NULL == job->finished_notification) {
			_mali_osk_free(job);
			return NULL;
		}

		/* Notification delivered if the job stalls on heap exhaustion */
		job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
		if (NULL == job->oom_notification) {
			_mali_osk_notification_delete(job->finished_notification);
			_mali_osk_free(job);
			return NULL;
		}

		/* Snapshot the entire user argument struct into the job */
		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
			_mali_osk_notification_delete(job->finished_notification);
			_mali_osk_notification_delete(job->oom_notification);
			_mali_osk_free(job);
			return NULL;
		}

		perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);

		/* case when no counters came from user space
		 * so pass the debugfs / DS-5 provided global ones to the job object */
		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) || (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
			mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
			mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
		}

		_mali_osk_list_init(&job->list);
		job->session = session;
		job->id = id;
		/* NOTE(review): frame register 4 appears to hold the PLBU heap start — confirm */
		job->heap_current_addr = job->uargs.frame_registers[4];
		job->perf_counter_value0 = 0;
		job->perf_counter_value1 = 0;
		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();

		job->pp_tracker = pp_tracker;
		if (NULL != job->pp_tracker) {
			/* Take a reference on PP job's tracker that will be released when the GP
			 * job is done. */
			mali_timeline_system_tracker_get(session->timeline_system, pp_tracker);
		}

		mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job);
		mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));

		return job;
	}

	return NULL;
}
/*
 * IOCTL operation; Import fd to UMP memory.
 *
 * Translates an ION buffer fd into a UMP handle: maps the ION buffer's
 * scatterlist, copies its physical segments into a temporary block array
 * (bounded at 1024 entries), wraps the blocks in a UMP handle, and links
 * the handle into the session's memory list. The secure id and size are
 * returned to user space.
 *
 * BUGFIX: ump_handle was declared as 'ump_dd_handle *' (pointer to
 * handle) while every use treats it as a plain ump_dd_handle — fixed.
 * BUGFIX: ion_hnd and the mapped scatterlist are now checked before use;
 * previously a bad fd led to dereferencing an error pointer or NULL.
 *
 * @return 0 on success, negative errno otherwise
 */
int ump_ion_import_wrapper(u32 __user * argument, struct ump_session_data * session_data)
{
	_ump_uk_ion_import_s user_interaction;
	ump_dd_handle ump_handle;
	ump_dd_physical_block * blocks;
	unsigned long num_blocks;
	struct ion_handle *ion_hnd;
	struct scatterlist *sg;
	struct scatterlist *sg_ion;
	unsigned long i = 0;
	ump_session_memory_list_element * session_memory_element = NULL;

	/* Lazily create the shared ION client on first use */
	if (ion_client_ump==NULL)
		ion_client_ump = ion_client_create(ion_exynos, -1, "ump");

	/* Sanity check input parameters */
	if (NULL == argument || NULL == session_data) {
		MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
		return -ENOTTY;
	}

	/* Copy the user space memory to kernel space (so we safely can read it) */
	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) {
		MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	user_interaction.ctx = (void *) session_data;

	/* translate fd to secure ID*/
	ion_hnd = ion_import_fd(ion_client_ump, user_interaction.ion_fd);
	if (IS_ERR_OR_NULL(ion_hnd)) {
		MSG_ERR(("ion_import_fd() failed in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	sg_ion = ion_map_dma(ion_client_ump, ion_hnd);
	if (IS_ERR_OR_NULL(sg_ion)) {
		ion_free(ion_client_ump, ion_hnd);
		MSG_ERR(("ion_map_dma() failed in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	blocks = (ump_dd_physical_block*)_mali_osk_malloc(sizeof(ump_dd_physical_block)*1024);
	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks in ump_ioctl_allocate()\n"));
		ion_unmap_dma(ion_client_ump, ion_hnd);
		ion_free(ion_client_ump, ion_hnd);
		return -ENOMEM;
	}

	/* Walk the scatterlist, one physical segment per block */
	sg = sg_ion;
	do {
		blocks[i].addr = sg_phys(sg);
		blocks[i].size = sg_dma_len(sg);
		i++;
		if (i>=1024) {
			_mali_osk_free(blocks);
			ion_unmap_dma(ion_client_ump, ion_hnd);
			ion_free(ion_client_ump, ion_hnd);
			MSG_ERR(("ion_import fail() in ump_ioctl_allocate()\n"));
			return -EFAULT;
		}
		sg = sg_next(sg);
	} while(sg);

	num_blocks = i;

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element = _mali_osk_calloc(1, sizeof(ump_session_memory_list_element));
	if (NULL == session_memory_element) {
		_mali_osk_free(blocks);
		ion_unmap_dma(ion_client_ump, ion_hnd);
		ion_free(ion_client_ump, ion_hnd);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
	if (UMP_DD_HANDLE_INVALID == ump_handle) {
		_mali_osk_free(session_memory_element);
		_mali_osk_free(blocks);
		ion_unmap_dma(ion_client_ump, ion_hnd);
		ion_free(ion_client_ump, ion_hnd);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	session_memory_element->mem = (ump_dd_mem*)ump_handle;
	_mali_osk_mutex_wait(session_data->lock);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_mutex_signal(session_data->lock);

	/* UMP made its own copy of the block data; the ION mapping and the
	 * temporary block array are no longer needed */
	ion_unmap_dma(ion_client_ump, ion_hnd);
	ion_free(ion_client_ump, ion_hnd);
	_mali_osk_free(blocks);

	user_interaction.secure_id = ump_dd_secure_id_get(ump_handle);
	user_interaction.size = ump_dd_size_get(ump_handle);
	user_interaction.ctx = NULL;

	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction))) {
		/* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this
		 * NOTE(review): the handle is still linked into the session list here; it is reclaimed at session
		 * teardown, but an explicit release on this path would be tidier — confirm intended behavior. */
		MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	return 0; /* success */
}
/*
 * Create an L2 cache core object: map its registers, create its command
 * and counter locks, reset the cache, and register it in the global L2
 * table. Each failure level unwinds the steps completed before it.
 *
 * @param resource  platform resource describing the L2 register region
 * @return the new core, or NULL if the table is full or setup fails
 */
struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
{
	struct mali_l2_cache_core *cache = NULL;
	_mali_osk_lock_flags_t lock_flags;

	/* With upper-half scheduling the locks are taken from IRQ context */
#if defined(MALI_UPPER_HALF_SCHEDULING)
	lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
#else
	lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
#endif

	MALI_DEBUG_PRINT(2, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));

	if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
		MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
		return NULL;
	}

	cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
	if (NULL != cache) {
		cache->core_id =  mali_global_num_l2_cache_cores;
		cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
		cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
		cache->pm_domain = NULL;
		cache->mali_l2_status = MALI_L2_NORMAL;
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
			cache->command_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
			if (NULL != cache->command_lock) {
				cache->counter_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_L2_COUNTER);
				if (NULL != cache->counter_lock) {
					mali_l2_cache_reset(cache);

					cache->last_invalidated_id = 0;

					/* Publish in the global L2 cache table */
					mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
					mali_global_num_l2_cache_cores++;

					return cache;
				} else {
					MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
				}
				_mali_osk_lock_term(cache->command_lock);
			} else {
				MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
			}
			mali_hw_core_delete(&cache->hw_core);
		}
		_mali_osk_free(cache);
	} else {
		MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
	}

	return NULL;
}
/*
 * Rebuild the cached system info structure by asking every subsystem to
 * fill in its core/memory descriptions, then atomically swap it in for
 * the old one (under system_info_lock) and compute its serialized size.
 *
 * The cleanup path is shared between error and success: on error the new
 * (partial) struct is freed; on success the previously published struct
 * is freed instead.
 *
 * @return _MALI_OSK_ERR_OK on success, subsystem error otherwise
 */
static _mali_osk_errcode_t build_system_info(void)
{
	unsigned int i;
	int err = _MALI_OSK_ERR_FAULT;
	_mali_system_info * new_info, * cleanup;
	_mali_core_info * current_core;
	_mali_mem_info * current_mem;
	u32 new_size = 0;

	/* create a new system info struct */
	MALI_CHECK_NON_NULL(new_info = (_mali_system_info *)_mali_osk_malloc(sizeof(_mali_system_info)), _MALI_OSK_ERR_NOMEM);

	_mali_osk_memset(new_info, 0, sizeof(_mali_system_info));

	/* if an error happens during any of the system_info_fill calls cleanup the new info structs */
	cleanup = new_info;

	/* ask each subsystems to fill in their info */
	for (i = 0; i < SUBSYSTEMS_COUNT; ++i) {
		if (NULL != subsystems[i]->system_info_fill) {
			err = subsystems[i]->system_info_fill(new_info);
			if (_MALI_OSK_ERR_OK != err) goto error_exit;
		}
	}

	/* building succeeded, calculate the size */

	/* size needed of the system info struct itself */
	new_size = sizeof(_mali_system_info);

	/* size needed for the cores */
	for (current_core = new_info->core_info; NULL != current_core; current_core = current_core->next) {
		new_size += sizeof(_mali_core_info);
	}

	/* size needed for the memory banks */
	for (current_mem = new_info->mem_info; NULL != current_mem; current_mem = current_mem->next) {
		new_size += sizeof(_mali_mem_info);
	}

	/* lock system info access so a user wont't get a corrupted version */
	_mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );

	/* cleanup the old one */
	cleanup = system_info;
	/* set new info */
	system_info = new_info;
	system_info_size = new_size;

	/* we're safe */
	_mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );

	/* ok result */
	err = _MALI_OSK_ERR_OK;

	/* we share the cleanup routine with the error case */
error_exit:
	if (NULL == cleanup) MALI_ERROR((_mali_osk_errcode_t)err); /* no cleanup needed, return what err contains */

	/* cleanup */
	cleanup_system_info(cleanup);

	/* return whatever err is, we could end up here in both the error and success cases */
	MALI_ERROR((_mali_osk_errcode_t)err);
}
/*
 * Create a UMP memory handle that wraps an externally-provided list of
 * physical memory blocks. The block list is validated for alignment,
 * assigned a secure id (under the secure-id map lock), and copied so the
 * caller keeps ownership of its own array.
 *
 * Handles created this way are always uncached and start with a
 * reference count of 1; release goes through phys_blocks_release.
 *
 * @param blocks      array of physical blocks (addr/size, UMP-aligned)
 * @param num_blocks  number of entries in the array
 * @return a new handle, or UMP_DD_HANDLE_INVALID on validation/alloc failure
 */
UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
{
	ump_dd_mem * mem;
	unsigned long size_total = 0;
	int map_id;
	u32 i;

	/* Go through the input blocks and verify that they are sane */
	for (i=0; i < num_blocks; i++) {
		unsigned long addr = blocks[i].addr;
		unsigned long size = blocks[i].size;
		DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
		size_total += blocks[i].size;

		if (0 != UMP_ADDR_ALIGN_OFFSET(addr)) {
			MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
			return UMP_DD_HANDLE_INVALID;
		}

		if (0 != UMP_ADDR_ALIGN_OFFSET(size)) {
			MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
			return UMP_DD_HANDLE_INVALID;
		}
	}

	/* Allocate the ump_dd_mem struct for this allocation */
	mem = _mali_osk_malloc(sizeof(*mem));
	if (NULL == mem) {
		DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	/* Find a secure ID for this allocation */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);

	if (map_id < 0) {
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(mem);
		DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	/* Now, make a copy of the block information supplied by the user */
	mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
	if (NULL == mem->block_array) {
		/* Failure: undo the secure-id allocation before bailing out */
		ump_descriptor_mapping_free(device.secure_id_map, map_id);
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(mem);
		DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	_mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);

	/* And setup the rest of the ump_dd_mem struct */
	_mali_osk_atomic_init(&mem->ref_count, 1);
	mem->secure_id = (ump_secure_id)map_id;
	mem->size_bytes = size_total;
	mem->nr_blocks = num_blocks;
	mem->backend_info = NULL;
	mem->ctx = NULL;
	mem->release_func = phys_blocks_release;
	/* For now UMP handles created by ump_dd_handle_create_from_phys_blocks() is forced to be Uncached */
	mem->is_cached = 0;
	mem->hw_device = _UMP_UK_USED_BY_CPU;
	mem->lock_usage = UMP_NOT_LOCKED;

	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));

	return (ump_dd_handle)mem;
}
/* Map the physical blocks of a UMP allocation into the session's Mali
 * virtual address space described by @descriptor.
 *
 * Returns 0 on success, or a negative errno value:
 *   -EINVAL if the UMP handle reports zero blocks,
 *   -ENOMEM on allocation or map-prepare failure,
 *   -EFAULT if the block list cannot be fetched from UMP.
 */
static int mali_ump_map(struct mali_session_data *session, mali_mem_allocation *descriptor)
{
	ump_dd_handle handle;
	u32 block_count;
	u32 idx;
	ump_dd_physical_block *block_list;
	struct mali_page_directory *dir;
	u32 map_offset = 0;
	u32 mem_props;
	_mali_osk_errcode_t prep_err;

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT(MALI_MEM_UMP == descriptor->type);

	handle = descriptor->ump_mem.handle;
	MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != handle);

	block_count = ump_dd_phys_block_count_get(handle);
	if (0 == block_count)
	{
		MALI_DEBUG_PRINT(1, ("No block count\n"));
		return -EINVAL;
	}

	block_list = _mali_osk_malloc(sizeof(*block_list) * block_count);
	if (NULL == block_list)
	{
		return -ENOMEM;
	}

	if (UMP_DD_INVALID == ump_dd_phys_blocks_get(handle, block_list, block_count))
	{
		_mali_osk_free(block_list);
		return -EFAULT;
	}

	dir = session->page_directory;
	mem_props = descriptor->mali_mapping.properties;

	prep_err = mali_mem_mali_map_prepare(descriptor);
	if (_MALI_OSK_ERR_OK != prep_err)
	{
		MALI_DEBUG_PRINT(1, ("Mapping of UMP memory failed\n"));
		_mali_osk_free(block_list);
		return -ENOMEM;
	}

	/* Map each physical block back-to-back into Mali virtual space */
	for (idx = 0; idx < block_count; ++idx)
	{
		u32 mali_virt = descriptor->mali_mapping.addr + map_offset;

		MALI_DEBUG_PRINT(7, ("Mapping in 0x%08x size %d\n", block_list[idx].addr , block_list[idx].size));
		mali_mmu_pagedir_update(dir, mali_virt, block_list[idx].addr, block_list[idx].size, mem_props);
		map_offset += block_list[idx].size;
	}

	if (descriptor->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
	{
		u32 mali_virt = descriptor->mali_mapping.addr + map_offset;

		/* Map in an extra virtual guard page at the end of the VMA */
		MALI_DEBUG_PRINT(6, ("Mapping in extra guard page\n"));
		mali_mmu_pagedir_update(dir, mali_virt, block_list[0].addr, _MALI_OSK_MALI_PAGE_SIZE, mem_props);
		map_offset += _MALI_OSK_MALI_PAGE_SIZE;
	}

	_mali_osk_free(block_list);
	return 0;
}
/* Create a physical memory allocator managing MALI_BLOCK_SIZE-sized blocks
 * over the region [base_address, base_address + size).
 *
 * @param base_address     Physical start of the managed region.
 * @param cpu_usage_adjust Offset applied to derive CPU-visible addresses.
 * @param size             Region length in bytes; rounded down to a whole
 *                         number of blocks.
 * @param name             Allocator name (not copied; must outlive allocator).
 * @return New allocator, or NULL if the region is too small or on OOM.
 *
 * Error handling uses the same goto-unwind pattern as the other *_create
 * functions in this driver.
 */
mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name)
{
	mali_physical_memory_allocator * allocator;
	block_allocator * info;
	u32 usable_size;
	u32 num_blocks;
	u32 i;

	/* Round the region down to a whole number of blocks */
	usable_size = size & ~(MALI_BLOCK_SIZE - 1);
	MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
	MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
	num_blocks = usable_size / MALI_BLOCK_SIZE;
	MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));

	if (usable_size == 0)
	{
		MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
		return NULL;
	}

	allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
	if (NULL == allocator) goto allocator_alloc_failed;

	info = _mali_osk_malloc(sizeof(block_allocator));
	if (NULL == info) goto info_alloc_failed;

	info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED, 0, 105);
	if (NULL == info->mutex) goto mutex_init_failed;

	info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
	if (NULL == info->all_blocks) goto blocks_alloc_failed;

	info->first_free = NULL;
	info->num_blocks = num_blocks;
	info->base = base_address;
	info->cpu_usage_adjust = cpu_usage_adjust;

	/* Thread every block onto the free list */
	for (i = 0; i < num_blocks; i++)
	{
		info->all_blocks[i].next = info->first_free;
		info->first_free = &info->all_blocks[i];
	}

	allocator->allocate = block_allocator_allocate;
	allocator->allocate_page_table_block = block_allocator_allocate_page_table_block;
	allocator->destroy = block_allocator_destroy;
	allocator->stat = block_allocator_stat;
	allocator->ctx = info;
	allocator->name = name;

	return allocator;

	/* Error handling */
blocks_alloc_failed:
	_mali_osk_lock_term(info->mutex);
mutex_init_failed:
	_mali_osk_free(info);
info_alloc_failed:
	_mali_osk_free(allocator);
allocator_alloc_failed:
	return NULL;
}
/* Satisfy (part of) a memory allocation request from this allocator's
 * free block list.
 *
 * @param ctx        block_allocator instance (allocator->ctx).
 * @param engine     Allocation engine used to map/unmap physical memory.
 * @param descriptor Describes the overall allocation being filled in.
 * @param offset     In/out: progress into the allocation; advanced by the
 *                   number of bytes successfully mapped here.
 * @param alloc_info Out: on success, receives ctx/handle/release so the
 *                   caller can later undo this allocation.
 * @return MALI_MEM_ALLOC_FINISHED if the remainder was fully satisfied,
 *         MALI_MEM_ALLOC_PARTIAL if some blocks were mapped but the pool
 *         ran dry, MALI_MEM_ALLOC_NONE if nothing could be done (caller
 *         should try another allocator), or
 *         MALI_MEM_ALLOC_INTERNAL_FAILURE on lock/mapping failure.
 *
 * Blocks taken from the free list are chained through block->next into
 * last_allocated, newest first; on mapping failure that chain is walked to
 * push every block back onto the free list before bailing out.
 */
static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
{
	block_allocator * info;
	u32 left;
	block_info * last_allocated = NULL;
	mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
	block_allocator_allocation *ret_allocation;

	MALI_DEBUG_ASSERT_POINTER(ctx);
	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT_POINTER(offset);
	MALI_DEBUG_ASSERT_POINTER(alloc_info);

	info = (block_allocator*)ctx;
	/* Bytes still needed to complete the overall allocation */
	left = descriptor->size - *offset;
	MALI_DEBUG_ASSERT(0 != left);

	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;

	/* Bookkeeping record handed back via alloc_info->handle on success */
	ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );
	if ( NULL == ret_allocation )
	{
		/* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
		_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
		return result;
	}

	ret_allocation->start_offset = *offset;
	ret_allocation->mapping_length = 0;

	/* Pull blocks off the free list until the request is satisfied or the
	 * pool is exhausted (the latter yields a PARTIAL result below). */
	while ((left > 0) && (info->first_free))
	{
		block_info * block;
		u32 phys_addr;
		u32 padding;
		u32 current_mapping_size;

		/* Unlink the head of the free list and push it onto the
		 * last_allocated chain (needed for rollback and release). */
		block = info->first_free;
		info->first_free = info->first_free->next;
		block->next = last_allocated;
		last_allocated = block;

		phys_addr = get_phys(info, block);
		/* Offset may not be block-aligned; only the tail of this block
		 * from the padding point onwards is usable for this mapping. */
		padding = *offset & (MALI_BLOCK_SIZE-1);

		if (MALI_BLOCK_SIZE - padding < left)
		{
			current_mapping_size = MALI_BLOCK_SIZE - padding;
		}
		else
		{
			current_mapping_size = left;
		}

		if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
		{
			MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
			result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
			/* Undo every mapping made in earlier iterations of this loop */
			mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);

			/* release all memory back to the pool */
			while (last_allocated)
			{
				/* This relinks every block we've just allocated back into the free-list */
				block = last_allocated->next;
				last_allocated->next = info->first_free;
				info->first_free = last_allocated;
				last_allocated = block;
			}

			/* last_allocated is now NULL, so the post-loop branch below
			 * frees ret_allocation and returns INTERNAL_FAILURE. */
			break;
		}

		*offset += current_mapping_size;
		left -= current_mapping_size;
		ret_allocation->mapping_length += current_mapping_size;
	}

	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);

	if (last_allocated)
	{
		if (left) result = MALI_MEM_ALLOC_PARTIAL;
		else result = MALI_MEM_ALLOC_FINISHED;

		/* Record all the information about this allocation */
		ret_allocation->last_allocated = last_allocated;
		ret_allocation->engine = engine;
		ret_allocation->descriptor = descriptor;

		alloc_info->ctx = info;
		alloc_info->handle = ret_allocation;
		alloc_info->release = block_allocator_release;
	}
	else
	{
		/* Free the allocation information - nothing to be passed back */
		_mali_osk_free( ret_allocation );
	}

	return result;
}
/* Create and initialize the global PMM (Power Management Module) state.
 *
 * @param resource Platform resource description, forwarded to
 *                 mali_platform_init() (unless MALI_PMM_NO_PMU).
 * @return MALI_SUCCESS on success; _MALI_OSK_ERR_NOMEM if the state struct
 *         cannot be allocated; _MALI_OSK_ERR_FAULT if any later init step
 *         fails (all partially-created resources are torn down first).
 *
 * Relies on the memset-to-zero below: the shared pmm_fail_cleanup path only
 * tears down resources whose pointers/flags are non-zero, so it is safe to
 * jump there from any point during set up.
 */
_mali_osk_errcode_t malipmm_create(_mali_osk_resource_t *resource)
{
	/* Create PMM state memory */
	MALI_DEBUG_ASSERT( pmm_state == NULL );
	pmm_state = (_mali_pmm_internal_state_t *) _mali_osk_malloc(sizeof(*pmm_state));
	MALI_CHECK_NON_NULL( pmm_state, _MALI_OSK_ERR_NOMEM );

	/* All values get 0 as default */
	_mali_osk_memset(pmm_state, 0, sizeof(*pmm_state));

	/* Set up the initial PMM state */
	pmm_state->waiting = 0;
	pmm_state->status = MALI_PMM_STATUS_IDLE;
	pmm_state->state = MALI_PMM_STATE_UNAVAILABLE; /* Until a core registers */

	/* Set up policy via compile time option for the moment */
#if MALI_PMM_ALWAYS_ON
	pmm_state->policy = MALI_PMM_POLICY_ALWAYS_ON;
#else
	pmm_state->policy = MALI_PMM_POLICY_JOB_CONTROL;
#endif

#if MALI_PMM_TRACE
	_mali_pmm_trace_policy_change( MALI_PMM_POLICY_NONE, pmm_state->policy );
#endif

	/* Set up assumes all values are initialized to NULL or MALI_FALSE, so
	 * we can exit halfway through set up and perform clean up */

#if !MALI_PMM_NO_PMU
	/* Bring up the platform/PMU first; pmu_initialized gates its teardown */
	if( mali_platform_init(resource) != _MALI_OSK_ERR_OK ) goto pmm_fail_cleanup;
	pmm_state->pmu_initialized = MALI_TRUE;
#endif

	pmm_state->queue = _mali_osk_notification_queue_init();
	if( !pmm_state->queue ) goto pmm_fail_cleanup;

	pmm_state->iqueue = _mali_osk_notification_queue_init();
	if( !pmm_state->iqueue ) goto pmm_fail_cleanup;

	/* We are creating an IRQ handler just for the worker thread it gives us */
	pmm_state->irq = _mali_osk_irq_init( _MALI_OSK_IRQ_NUMBER_PMM,
		malipmm_irq_uhandler,
		malipmm_irq_bhandler,
		NULL,
		NULL,
		(void *)pmm_state, /* PMM state is passed to IRQ */
		"PMM handler" );

	if( !pmm_state->irq ) goto pmm_fail_cleanup;

#ifdef CONFIG_SMP
	/* NOTE(review): mali_pmm_lock is a file/global lock; it is NOT termed in
	 * pmm_fail_cleanup below, so a failure after this point appears to leak
	 * it — confirm whether a later retry of malipmm_create handles this. */
	mali_pmm_lock  = _mali_osk_lock_init((_mali_osk_lock_flags_t)( _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 0);
	if( !mali_pmm_lock ) goto pmm_fail_cleanup;
#endif /* CONFIG_SMP */

	pmm_state->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 75);
	if( !pmm_state->lock ) goto pmm_fail_cleanup;

	if( _mali_osk_atomic_init( &(pmm_state->messages_queued), 0 ) != _MALI_OSK_ERR_OK )
	{
		goto pmm_fail_cleanup;
	}

	MALIPMM_DEBUG_PRINT( ("PMM: subsystem created, policy=%d\n", pmm_state->policy) );

	MALI_SUCCESS;

pmm_fail_cleanup:
	MALI_PRINT_ERROR( ("PMM: subsystem failed to be created\n") );
	if( pmm_state )
	{
		_mali_osk_resource_type_t t = PMU;
		/* Tear down only what was actually created (zeroed fields skip) */
		if( pmm_state->lock ) _mali_osk_lock_term( pmm_state->lock );

		if( pmm_state->irq ) _mali_osk_irq_term( pmm_state->irq );

		if( pmm_state->queue ) _mali_osk_notification_queue_term( pmm_state->queue );

		if( pmm_state->iqueue ) _mali_osk_notification_queue_term( pmm_state->iqueue );

		if( pmm_state->pmu_initialized ) ( mali_platform_deinit(&t) );

		_mali_osk_free(pmm_state);
		pmm_state = NULL;
	}
	MALI_ERROR( _MALI_OSK_ERR_FAULT );
}