void exynos_hwcnt_remove(struct kbase_device *kbdev) { struct exynos_context *platform; if (!kbdev->hwcnt.is_init) return; if (kbdev->hwcnt.kctx && kbdev->hwcnt.suspended_state.dump_buffer) kbase_mem_free(kbdev->hwcnt.kctx, kbdev->hwcnt.suspended_state.dump_buffer); if (kbdev->hwcnt.acc_buffer) kfree(kbdev->hwcnt.acc_buffer); platform = (struct exynos_context *) kbdev->platform_context; kbdev->hwcnt.enable_for_gpr = FALSE; kbdev->hwcnt.enable_for_utilization = FALSE; kbdev->hwcnt.kctx_gpr = NULL; kbdev->hwcnt.kctx = NULL; kbdev->hwcnt.is_init = FALSE; platform->hwcnt_bt_clk = 0; if (kbdev->hwcnt.kspace_addr) { kbase_kunmap_from_physical_address(kbdev); kbdev->hwcnt.kspace_addr = 0; } mutex_destroy(&kbdev->hwcnt.mlock); }
void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles) { if (in_out_info) kfree(in_out_info->hwc_layout); if (opaque_handles) { kbase_instr_hwcnt_disable(opaque_handles->kctx); kbase_vunmap(opaque_handles->kctx, &opaque_handles->hwcnt_map); kbase_mem_free(opaque_handles->kctx, opaque_handles->hwcnt_gpu_va); kbase_destroy_context(opaque_handles->kctx); kbase_release_device(opaque_handles->kbdev); kfree(opaque_handles); } }
void exynos_hwcnt_init(struct kbase_device *kbdev) { struct kbase_uk_hwcnt_setup setup_arg; struct kbase_context *kctx; struct kbase_uk_mem_alloc mem; struct kbase_va_region *reg; struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context; if (platform->hwcnt_gathering_status == false) goto out; kctx = kbase_create_context(kbdev, false); if (kctx) { kbdev->hwcnt.kctx = kctx; } else { GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "hwcnt error!, hwcnt_init is failed\n"); goto out; } mem.va_pages = mem.commit_pages = mem.extent = 1; mem.flags = BASE_MEM_PROT_GPU_WR | BASE_MEM_PROT_CPU_RD | BASE_MEM_HINT_CPU_RD; reg = kbase_mem_alloc(kctx, mem.va_pages, mem.commit_pages, mem.extent, &mem.flags, &mem.gpu_va, &mem.va_alignment); #if defined(CONFIG_64BIT) kbase_gpu_vm_lock(kctx); if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg, 0, 1, 1)) { kbase_gpu_vm_unlock(kctx); platform->hwcnt_gathering_status = false; GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "exynos_hwcnt_init error!mmap fail\n"); kbase_mem_free(kbdev->hwcnt.kctx, kbdev->hwcnt.suspended_state.dump_buffer); goto out; } kbase_gpu_vm_unlock(kctx); #endif kctx->kbdev->hwcnt.phy_addr = reg->alloc->pages[0]; kctx->kbdev->hwcnt.enable_for_utilization = FALSE; kctx->kbdev->hwcnt.enable_for_gpr = FALSE; kctx->kbdev->hwcnt.suspended_kctx = NULL; kctx->kbdev->hwcnt.timeout = msecs_to_jiffies(100); kctx->kbdev->hwcnt.is_powered = FALSE; mutex_init(&kbdev->hwcnt.mlock); #if defined(CONFIG_64BIT) setup_arg.dump_buffer = reg->start_pfn << PAGE_SHIFT; #else setup_arg.dump_buffer = mem.gpu_va; #endif setup_arg.jm_bm = platform->hwcnt_choose_jm; setup_arg.shader_bm = platform->hwcnt_choose_shader; setup_arg.tiler_bm = platform->hwcnt_choose_tiler; setup_arg.l3_cache_bm = platform->hwcnt_choose_l3_cache; setup_arg.mmu_l2_bm = platform->hwcnt_choose_mmu_l2; setup_arg.padding = HWC_MODE_UTILIZATION; kctx->kbdev->hwcnt.kspace_addr = kbase_kmap_from_physical_address(kbdev); if (MALI_ERROR_NONE != hwcnt_setup(kctx, &setup_arg)) { 
GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "hwcnt_setup is failed\n"); goto out; } kctx->kbdev->hwcnt.acc_buffer = kmalloc(HWC_ACC_BUFFER_SIZE, GFP_KERNEL); if (kctx->kbdev->hwcnt.acc_buffer) memset(kctx->kbdev->hwcnt.acc_buffer, 0, HWC_ACC_BUFFER_SIZE); else goto out; kbdev->hwcnt.is_init = TRUE; if(kbdev->pm.pm_current_policy->id == KBASE_PM_POLICY_ID_ALWAYS_ON) { mutex_lock(&kbdev->hwcnt.mlock); if (!kbdev->hwcnt.kctx) hwcnt_start(kbdev); mutex_unlock(&kbdev->hwcnt.mlock); } return; out: kbdev->hwcnt.is_init = FALSE; return; }
/*
 * kbase_gator_hwcnt_init - set up a hardware-counter dump session for gator.
 *
 * @in_out_info: in: counter bitmasks (bitmask[0..3]); out: core counts,
 *               gpu_id, per-block layout array (hwc_layout), dump size and
 *               the CPU-visible dump buffer pointer.
 *
 * Acquires the first kbase device, creates a context on it, builds the
 * counter-block layout (v4 devices use a fixed per-core-group layout;
 * later devices use JM + tiler + one block per L2 slice + one per shader
 * bit), allocates and maps a GPU dump buffer, and enables instrumentation.
 *
 * Returns an opaque handle to pass to kbase_gator_hwcnt_term(), or NULL
 * on any failure (all partially acquired resources are released via the
 * goto-cleanup chain at the bottom).
 */
struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info)
{
	struct kbase_gator_hwcnt_handles *hand;
	struct kbase_uk_hwcnt_setup setup;
	int err;
	uint32_t dump_size = 0, i = 0;
	struct kbase_va_region *reg;
	u64 flags;
	u64 nr_pages;
	u16 va_alignment = 0;

	if (!in_out_info)
		return NULL;

	hand = kzalloc(sizeof(*hand), GFP_KERNEL);
	if (!hand)
		return NULL;

	/* Get the first device */
	hand->kbdev = kbase_find_device(-1);
	if (!hand->kbdev)
		goto free_hand;

	/* Create a kbase_context */
	hand->kctx = kbase_create_context(hand->kbdev, true);
	if (!hand->kctx)
		goto release_device;

	in_out_info->nr_cores = hand->kbdev->gpu_props.num_cores;
	in_out_info->nr_core_groups = hand->kbdev->gpu_props.num_core_groups;
	in_out_info->gpu_id = hand->kbdev->gpu_props.props.core_props.product_id;

	/* If we are using a v4 device (Mali-T6xx or Mali-T72x) */
	if (kbase_hw_has_feature(hand->kbdev, BASE_HW_FEATURE_V4)) {
		uint32_t cg, j;
		uint64_t core_mask;

		/* There are 8 hardware counters blocks per core group */
		in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * MALI_MAX_NUM_BLOCKS_PER_GROUP * in_out_info->nr_core_groups, GFP_KERNEL);
		if (!in_out_info->hwc_layout)
			goto destroy_context;

		dump_size = in_out_info->nr_core_groups * MALI_MAX_NUM_BLOCKS_PER_GROUP * MALI_COUNTERS_PER_BLOCK * MALI_BYTES_PER_COUNTER;

		for (cg = 0; cg < in_out_info->nr_core_groups; cg++) {
			core_mask = hand->kbdev->gpu_props.props.coherency_info.group[cg].core_mask;

			/* One layout slot per possible core; absent cores
			 * become RESERVED_BLOCK placeholders. */
			for (j = 0; j < MALI_MAX_CORES_PER_GROUP; j++) {
				if (core_mask & (1u << j))
					in_out_info->hwc_layout[i++] = SHADER_BLOCK;
				else
					in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
			}

			in_out_info->hwc_layout[i++] = TILER_BLOCK;
			in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
			in_out_info->hwc_layout[i++] = RESERVED_BLOCK;

			/* Only the first core group carries the JM block. */
			if (0 == cg)
				in_out_info->hwc_layout[i++] = JM_BLOCK;
			else
				in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
		}
	/* If we are using any other device */
	} else {
		uint32_t nr_l2, nr_sc_bits, j;
		uint64_t core_mask;

		nr_l2 = hand->kbdev->gpu_props.props.l2_props.num_l2_slices;

		core_mask = hand->kbdev->gpu_props.props.coherency_info.group[0].core_mask;

		/* Highest set bit bounds the shader-block slots, so gaps in
		 * the core mask still get (reserved) layout entries. */
		nr_sc_bits = fls64(core_mask);

		/* The job manager and tiler sets of counters
		 * are always present */
		in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * (2 + nr_sc_bits + nr_l2), GFP_KERNEL);

		if (!in_out_info->hwc_layout)
			goto destroy_context;

		dump_size = (2 + nr_sc_bits + nr_l2) * MALI_COUNTERS_PER_BLOCK * MALI_BYTES_PER_COUNTER;

		in_out_info->hwc_layout[i++] = JM_BLOCK;
		in_out_info->hwc_layout[i++] = TILER_BLOCK;

		for (j = 0; j < nr_l2; j++)
			in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;

		while (core_mask != 0ull) {
			if ((core_mask & 1ull) != 0ull)
				in_out_info->hwc_layout[i++] = SHADER_BLOCK;
			else
				in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
			core_mask >>= 1;
		}
	}

	in_out_info->nr_hwc_blocks = i;
	in_out_info->size = dump_size;

	/* GPU-writable, CPU-readable/writable dump buffer, rounded up to
	 * whole pages. */
	flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR;
	nr_pages = PFN_UP(dump_size);
	reg = kbase_mem_alloc(hand->kctx, nr_pages, nr_pages, 0, &flags, &hand->hwcnt_gpu_va, &va_alignment);
	if (!reg)
		goto free_layout;

	hand->hwcnt_cpu_va = kbase_vmap(hand->kctx, hand->hwcnt_gpu_va, dump_size, &hand->hwcnt_map);

	if (!hand->hwcnt_cpu_va)
		goto free_buffer;

	in_out_info->kernel_dump_buffer = hand->hwcnt_cpu_va;
	memset(in_out_info->kernel_dump_buffer, 0, nr_pages * PAGE_SIZE);

	/* The hardware dumps to the GPU VA, not the CPU mapping. */
	/*setup.dump_buffer = (uintptr_t)in_out_info->kernel_dump_buffer;*/
	setup.dump_buffer = hand->hwcnt_gpu_va;
	setup.jm_bm = in_out_info->bitmask[0];
	setup.tiler_bm = in_out_info->bitmask[1];
	setup.shader_bm = in_out_info->bitmask[2];
	setup.mmu_l2_bm = in_out_info->bitmask[3];

	err = kbase_instr_hwcnt_enable(hand->kctx, &setup);
	if (err)
		goto free_unmap;

	kbase_instr_hwcnt_clear(hand->kctx);

	return hand;

	/* Error unwinding: each label releases exactly what was acquired
	 * after the previous one, in reverse order. */
free_unmap:
	kbase_vunmap(hand->kctx, &hand->hwcnt_map);
free_buffer:
	kbase_mem_free(hand->kctx, hand->hwcnt_gpu_va);
free_layout:
	kfree(in_out_info->hwc_layout);
destroy_context:
	kbase_destroy_context(hand->kctx);
release_device:
	kbase_release_device(hand->kbdev);
free_hand:
	kfree(hand);
	return NULL;
}