/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Calls kbase_destroy_os_context() to free OS specific structures.
 * Will release all outstanding regions.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;

	/* MALI_SEC_INTEGRATION */
	int profile_count;

	/* MALI_SEC_INTEGRATION */
	if (!kctx) {
		printk("Attempted to destroy an uninitialized or already destroyed context: kctx is NULL\n");
		return;
	} else if (kctx->ctx_status != CTX_INITIALIZED) {
		printk("Attempted to destroy an uninitialized or already destroyed context\n");
		printk("kctx: 0x%p, kctx->tgid: %d, kctx->ctx_status: 0x%x\n", kctx, kctx->tgid, kctx->ctx_status);
		return;
	}

	KBASE_DEBUG_ASSERT(NULL != kctx);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	/* MALI_SEC_INTEGRATION */
	for (profile_count = 0; profile_count < 3; profile_count++) {
		if (wait_event_timeout(kctx->mem_profile_wait, atomic_read(&kctx->mem_profile_showing_state) == 0, (unsigned int) msecs_to_jiffies(1000)))
			break;
		else
			printk("[G3D] waiting for memory profile\n");
	}

	/* MALI_SEC_INTEGRATION */
	while (wait_event_timeout(kbdev->pm.suspending_wait, kbdev->pm.suspending == false, (unsigned int) msecs_to_jiffies(1000)) == 0)
		printk("[G3D] Waiting for the device to resume\n");

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process */
	/* A suspend won't happen here, because we're in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	kbase_jd_zap_context(kctx);
	kbase_event_cleanup(kctx);

	kbase_gpu_vm_lock(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
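	/* kctx->cookies starts with every bit set (see kbase_create_context)
	 * and a bit is cleared while its cookie is in use, so the inverted,
	 * masked value below selects exactly the cookies that still have a
	 * pending region attached. */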
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
	while (pending_regions_to_clean) {
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);

		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);

		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);

	/* Safe to call even if it was never initialized (assuming kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);

	kbase_jd_exit(kctx);

	/* Drop the PM reference taken at the start of the destroy sequence */
	kbase_pm_context_idle(kbdev);

	kbase_mmu_term(kctx);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->destroy_context)
		kbdev->vendor_callbacks->destroy_context(kctx);

	if (kctx->ctx_need_qos)
		kctx->ctx_need_qos = false;

	vfree(kctx);
	/* MALI_SEC_INTEGRATION */
	kctx = NULL;
}
/**
 * kbase_create_context() - Create a kernel base context.
 * @kbdev: Kbase device
 * @is_compat: Force creation of a 32-bit context
 *
 * Allocate and init a kernel base context.
 *
 * Return: new kbase context
 */
struct kbase_context *
kbase_create_context(struct kbase_device *kbdev, bool is_compat)
{
	struct kbase_context *kctx;
	int err;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* zero-initialized, as a lot of code assumes it is zeroed out on create */
	kctx = vzalloc(sizeof(*kctx));

	if (!kctx)
		goto out;

	/* creating a context is considered a disjoint event */
	kbase_disjoint_event(kbdev);

	kctx->kbdev = kbdev;
	kctx->as_nr = KBASEP_AS_NR_INVALID;
	kctx->is_compat = is_compat;
#ifdef CONFIG_MALI_TRACE_TIMELINE
	kctx->timeline.owner_tgid = task_tgid_nr(current);
#endif
	atomic_set(&kctx->setup_complete, 0);
	atomic_set(&kctx->setup_in_progress, 0);
	kctx->infinite_cache_active = 0;
	spin_lock_init(&kctx->mm_update_lock);
	kctx->process_mm = NULL;
	atomic_set(&kctx->nonmapped_pages, 0);
	kctx->slots_pullable = 0;

	err = kbase_mem_pool_init(&kctx->mem_pool,
			kbdev->mem_pool_max_size_default,
			kctx->kbdev, &kbdev->mem_pool);
	if (err)
		goto free_kctx;

	atomic_set(&kctx->used_pages, 0);

	err = kbase_jd_init(kctx);
	if (err)
		goto free_pool;

	err = kbasep_js_kctx_init(kctx);
	if (err)
		goto free_jd;	/* safe to call kbasep_js_kctx_term in this case */

	err = kbase_event_init(kctx);
	if (err)
		goto free_jd;

	mutex_init(&kctx->reg_lock);

	INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
#ifdef CONFIG_KDS
	INIT_LIST_HEAD(&kctx->waiting_kds_resource);
#endif

	err = kbase_mmu_init(kctx);
	if (err)
		goto free_event;

	kctx->pgd = kbase_mmu_alloc_pgd(kctx);
	if (!kctx->pgd)
		goto free_mmu;

	kctx->aliasing_sink_page = kbase_mem_pool_alloc(&kctx->mem_pool);
	if (!kctx->aliasing_sink_page)
		goto no_sink_page;

	kctx->tgid = current->tgid;
	kctx->pid = current->pid;
	init_waitqueue_head(&kctx->event_queue);

	kctx->cookies = KBASE_COOKIE_MASK;

	/* Make sure page 0 is not used... */
	err = kbase_region_tracker_init(kctx);
	if (err)
		goto no_region_tracker;
#ifdef CONFIG_GPU_TRACEPOINTS
	atomic_set(&kctx->jctx.work_id, 0);
#endif
#ifdef CONFIG_MALI_TRACE_TIMELINE
	atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
#endif

	kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;

	mutex_init(&kctx->vinstr_cli_lock);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->create_context)
		kbdev->vendor_callbacks->create_context(kctx);

	/* MALI_SEC_INTEGRATION */
	atomic_set(&kctx->mem_profile_showing_state, 0);
	init_waitqueue_head(&kctx->mem_profile_wait);

	return kctx;

no_region_tracker:
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
no_sink_page:
	/* VM lock needed for the call to kbase_mmu_free_pgd */
	kbase_gpu_vm_lock(kctx);
	kbase_mmu_free_pgd(kctx);
	kbase_gpu_vm_unlock(kctx);
free_mmu:
	kbase_mmu_term(kctx);
free_event:
	kbase_event_cleanup(kctx);
free_jd:
	/* Safe to call even if it was never initialized (assuming kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);
	kbase_jd_exit(kctx);
free_pool:
	kbase_mem_pool_term(&kctx->mem_pool);
free_kctx:
	vfree(kctx);
out:
	return NULL;
}
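
/*
 * Minimal usage sketch for the two functions above (hypothetical caller, not
 * part of this driver): create a context for the current process and destroy
 * it again once it is no longer needed.
 *
 *	struct kbase_context *kctx;
 *
 *	kctx = kbase_create_context(kbdev, false);
 *	if (!kctx)
 *		return -ENOMEM;
 *	...
 *	kbase_destroy_context(kctx);
 */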
mali_error kbasep_8401_workaround_init(kbase_device * const kbdev)
{
	kbasep_js_device_data *js_devdata;
	kbase_context *workaround_kctx;
	int i;
	u16 as_present_mask;

	KBASE_DEBUG_ASSERT(kbdev);
	KBASE_DEBUG_ASSERT(kbdev->workaround_kctx == NULL);

	js_devdata = &kbdev->js_data;

	/* For this workaround we reserve one address space to allow us to
	 * submit a special job independent of other contexts */
	--(kbdev->nr_hw_address_spaces);

	/* Only update nr_user_address_spaces if it was unchanged - to ensure
	 * HW workarounds that have modified this will still work */
	if (kbdev->nr_user_address_spaces == (kbdev->nr_hw_address_spaces + 1))
		--(kbdev->nr_user_address_spaces);

	KBASE_DEBUG_ASSERT(kbdev->nr_user_address_spaces <= kbdev->nr_hw_address_spaces);

	/* Recalculate the free address spaces bit-pattern */
	as_present_mask = (1U << kbdev->nr_hw_address_spaces) - 1;
	js_devdata->as_free &= as_present_mask;
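	/* e.g. with 8 hardware address spaces remaining after the reservation
	 * above, the mask is 0xFF: one bit per usable AS. */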

	workaround_kctx = kbase_create_context(kbdev);
	if (!workaround_kctx)
		return MALI_ERROR_FUNCTION_FAILED;

	/* Allocate the pages required to contain the job */
	if (MALI_ERROR_NONE != kbase_mem_allocator_alloc(&workaround_kctx->osalloc, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, kbdev->workaround_compute_job_pa, 0))
		goto no_pages;

	/* Get virtual address of mapped memory and write a compute job for each page */
	for (i = 0; i < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT; i++) {
		kbdev->workaround_compute_job_va[i] = kmap(pfn_to_page(PFN_DOWN(kbdev->workaround_compute_job_pa[i])));
		if (NULL == kbdev->workaround_compute_job_va[i])
			goto page_free;

		/* Generate the compute job data */
		kbasep_8401_workaround_update_job_pointers((u32 *) kbdev->workaround_compute_job_va[i], i);
	}

	/* Insert pages to the gpu mmu. */
	kbase_gpu_vm_lock(workaround_kctx);

	kbase_mmu_insert_pages(workaround_kctx,
			       /* vpfn = page number */
			       (u64) WORKAROUND_PAGE_OFFSET,
			       /* physical address */
			       kbdev->workaround_compute_job_pa,
			       /* number of pages */
			       KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT,
			       /* flags */
			       KBASE_REG_GPU_RD | KBASE_REG_CPU_RD | KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);

	kbase_gpu_vm_unlock(workaround_kctx);

	kbdev->workaround_kctx = workaround_kctx;
	return MALI_ERROR_NONE;
 page_free:
	while (i--)
		kunmap(pfn_to_page(PFN_DOWN(kbdev->workaround_compute_job_pa[i])));

	kbase_mem_allocator_free(&workaround_kctx->osalloc, KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT, kbdev->workaround_compute_job_pa, MALI_TRUE);
 no_pages:
	kbase_destroy_context(workaround_kctx);

	return MALI_ERROR_FUNCTION_FAILED;
}
static void page_fault_worker(struct work_struct *data)
{
	u64 fault_pfn;
	u32 new_pages;
	u32 fault_rel_pfn;
	kbase_as *faulting_as;
	int as_no;
	kbase_context *kctx;
	kbase_device *kbdev;
	kbase_va_region *region;
	mali_error err;

	u32 fault_status;

	faulting_as = container_of(data, kbase_as, work_pagefault);
	fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
	as_no = faulting_as->number;

	kbdev = container_of(faulting_as, kbase_device, as[as_no]);

	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
	 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
	 *
	 * NOTE: NULL can be returned here if we're gracefully handling a spurious interrupt */
	kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);

	if (kctx == NULL)
	{
		/* Address space has no context, terminate the work */
		u32 reg;
		/* AS transaction begin */
		mutex_lock(&faulting_as->transaction_mutex);
		reg = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), NULL);
		reg = (reg & (~(u32)MMU_TRANSTAB_ADRMODE_MASK)) | ASn_TRANSTAB_ADRMODE_UNMAPPED;
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), reg, NULL);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		mmu_mask_reenable(kbdev, NULL, faulting_as);
		return;
	}

	fault_status = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_FAULTSTATUS), NULL);

	OSK_ASSERT(kctx->kbdev == kbdev);

	kbase_gpu_vm_lock(kctx);

	/* find the region object for this VA */
	region = kbase_region_tracker_find_region_enclosing_address(kctx, faulting_as->fault_addr);
	if (NULL == region || (GROWABLE_FLAGS_REQUIRED != (region->flags & GROWABLE_FLAGS_MASK)))
	{
		kbase_gpu_vm_unlock(kctx);
		/* failed to find the region or mismatch of the flags */
		kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
		goto fault_done;
	}

	if ((((fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_READ) &&
	        !(region->flags & KBASE_REG_GPU_RD)) ||
	    (((fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_WRITE) &&
	        !(region->flags & KBASE_REG_GPU_WR)) ||
	    (((fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_EX) &&
	        (region->flags & KBASE_REG_GPU_NX)))
	{
		OSK_PRINT_WARN(OSK_BASE_MMU, "Access permissions don't match: region->flags=0x%x", region->flags);
		kbase_gpu_vm_unlock(kctx);
		kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
		goto fault_done;
	}

	/* find the size we need to grow it by */
	/* we know the result fits in a u32 due to kbase_region_tracker_find_region_enclosing_address
	 * validating the fault_address to be within a u32 from the start_pfn */
	fault_rel_pfn = fault_pfn - region->start_pfn;
	
	if (fault_rel_pfn < region->nr_alloc_pages)
	{
		OSK_PRINT_WARN(OSK_BASE_MMU, "Fault in allocated region of growable TMEM: Ignoring");
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	new_pages = make_multiple(fault_rel_pfn - region->nr_alloc_pages + 1, region->extent);
	if (new_pages + region->nr_alloc_pages > region->nr_pages)
	{
		/* cap to max vsize */
		new_pages = region->nr_pages - region->nr_alloc_pages;
	}
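	/* Worked example (assuming make_multiple() rounds its first argument
	 * up to a multiple of its second): with region->extent == 64 and a
	 * fault 3 pages past the currently allocated end, new_pages is
	 * rounded up to 64, then capped so the region never grows beyond
	 * nr_pages. */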

	if (0 == new_pages)
	{
		/* Duplicate of a fault we've already handled, nothing to do */
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	if (MALI_ERROR_NONE == kbase_alloc_phy_pages_helper(region, new_pages))
	{
		/* alloc success */
		mali_addr64 lock_addr;
		OSK_ASSERT(region->nr_alloc_pages <= region->nr_pages);

		/* AS transaction begin */
		mutex_lock(&faulting_as->transaction_mutex);

		/* Lock the VA region we're about to update */
		lock_addr = lock_region(kbdev, faulting_as->fault_addr >> PAGE_SHIFT, new_pages);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_LO), lock_addr & 0xFFFFFFFFUL, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_HI), lock_addr >> 32, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_LOCK, kctx);

		/* set up the new pages */
		err = kbase_mmu_insert_pages(kctx, region->start_pfn + region->nr_alloc_pages - new_pages,
		                             &region->phy_pages[region->nr_alloc_pages - new_pages],
		                             new_pages, region->flags);
		if(MALI_ERROR_NONE != err)
		{
			/* failed to insert pages, handle as a normal PF */
			mutex_unlock(&faulting_as->transaction_mutex);
			kbase_gpu_vm_unlock(kctx);
			/* The locked VA region will be unlocked and the cache invalidated in here */
			kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
			goto fault_done;
		}

#ifdef CONFIG_MALI_GATOR_SUPPORT
		kbase_trace_mali_page_fault_insert_pages(as_no, new_pages);
#endif /* CONFIG_MALI_GATOR_SUPPORT */
		/* clear the irq */
		/* MUST BE BEFORE THE FLUSH/UNLOCK */
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);

		/* flush L2 and unlock the VA (resumes the MMU) */
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
		{
			kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_FLUSH, kctx);
		}
		else
		{
			kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_FLUSH_PT, kctx);
		}

		/* wait for the flush to complete */
		while (kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_STATUS), kctx) & 1);

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630))
		{
			/* Issue an UNLOCK command to ensure that valid page
			 * tables are re-read by the GPU after an update. The
			 * FLUSH command should perform all the necessary
			 * actions; however, bus logs show that if multiple
			 * page faults occur within an 8-page region the MMU
			 * does not always re-read the updated page table
			 * entries for later faults, or only partially reads
			 * them, and subsequently raises the page fault IRQ
			 * for the same addresses. The UNLOCK ensures that the
			 * MMU cache is flushed so the updates can be re-read.
			 * As the region is now unlocked, two UNLOCK commands
			 * are needed to flush the MMU/uTLB; see PRLAM-8812.
			 */
			kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UNLOCK, kctx);
			kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UNLOCK, kctx);
		}

		mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		/* reenable this in the mask */
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
	}
/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Calls kbase_destroy_os_context() to free OS specific structures.
 * Will release all outstanding regions.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;

	KBASE_DEBUG_ASSERT(NULL != kctx);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process */
	/* A suspend won't happen here, because we're in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	kbase_jd_zap_context(kctx);
	kbase_event_cleanup(kctx);

	kbase_gpu_vm_lock(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
	while (pending_regions_to_clean) {
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);

		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);

		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);

	/* Safe to call even if it was never initialized (assuming kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);

	kbase_jd_exit(kctx);

	kbase_pm_context_idle(kbdev);

	kbase_mmu_term(kctx);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	vfree(kctx);
}
static void page_fault_worker(osk_workq_work *data)
{
	u64 fault_pfn;
	u32 new_pages;
	u32 fault_rel_pfn;
	kbase_as *faulting_as;
	int as_no;
	kbase_context *kctx;
	kbase_device *kbdev;
	kbase_va_region *region;
	mali_error err;

	faulting_as = CONTAINER_OF(data, kbase_as, work_pagefault);
	fault_pfn = faulting_as->fault_addr >> OSK_PAGE_SHIFT;
	as_no = faulting_as->number;

	kbdev = CONTAINER_OF(faulting_as, kbase_device, as[as_no]);

	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
	 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
	 *
	 * NOTE: NULL can be returned here if we're gracefully handling a spurious interrupt */
	kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);

	if (kctx == NULL)
	{
		/* Address space has no context, terminate the work */
		u32 reg;
		/* AS transaction begin */
		osk_mutex_lock(&faulting_as->transaction_mutex);
		reg = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), NULL);
		reg = (reg & (~(u32)MMU_TRANSTAB_ADRMODE_MASK)) | ASn_TRANSTAB_ADRMODE_UNMAPPED;
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), reg, NULL);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		osk_mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		mmu_mask_reenable(kbdev, NULL, faulting_as);
		return;
	}


	OSK_ASSERT(kctx->kbdev == kbdev);

	kbase_gpu_vm_lock(kctx);

	/* find the region object for this VA */
	region = kbase_region_lookup(kctx, faulting_as->fault_addr);
	if (NULL == region || (GROWABLE_FLAGS_REQUIRED != (region->flags & GROWABLE_FLAGS_MASK)))
	{
		kbase_gpu_vm_unlock(kctx);
		/* failed to find the region or mismatch of the flags */
		kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
		goto fault_done;
	}

	/* find the size we need to grow it by */
	/* we know the result fits in a u32 due to kbase_region_lookup
	 * validating the fault_address to be within a u32 from the start_pfn */
	fault_rel_pfn = fault_pfn - region->start_pfn;
	new_pages = make_multiple(fault_rel_pfn - region->nr_alloc_pages + 1, region->extent);
	if (new_pages + region->nr_alloc_pages > region->nr_pages)
	{
		/* cap to max vsize */
		new_pages = region->nr_pages - region->nr_alloc_pages;
	}

	if (0 == new_pages)
	{
		/* Duplicate of a fault we've already handled, nothing to do */
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	if (MALI_ERROR_NONE == kbase_alloc_phy_pages_helper(region, new_pages))
	{
		/* alloc success */
		mali_addr64 lock_addr;
		OSK_ASSERT(region->nr_alloc_pages <= region->nr_pages);

		/* AS transaction begin */
		osk_mutex_lock(&faulting_as->transaction_mutex);

		/* Lock the VA region we're about to update */
		lock_addr = lock_region(faulting_as->fault_addr >> OSK_PAGE_SHIFT, new_pages);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_LO), lock_addr & 0xFFFFFFFFUL, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_HI), lock_addr >> 32, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_LOCK, kctx);

		/* set up the new pages */
		err = kbase_mmu_insert_pages(kctx, region->start_pfn + region->nr_alloc_pages - new_pages,
		                             &region->phy_pages[region->nr_alloc_pages - new_pages],
		                             new_pages, region->flags);
		if(MALI_ERROR_NONE != err)
		{
			/* failed to insert pages, handle as a normal PF */
			osk_mutex_unlock(&faulting_as->transaction_mutex);
			kbase_gpu_vm_unlock(kctx);
			/* The locked VA region will be unlocked and the cache invalidated in here */
			kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
			goto fault_done;
		}

		/* clear the irq */
		/* MUST BE BEFORE THE FLUSH/UNLOCK */
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);

		/* flush L2 and unlock the VA (resumes the MMU) */
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_FLUSH, kctx);

		/* wait for the flush to complete */
		while (kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_STATUS), kctx) & 1);

		osk_mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		/* reenable this in the mask */
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
	}
void page_fault_worker(struct work_struct *data)
{
	u64 fault_pfn;
	u32 fault_status;
	size_t new_pages;
	size_t fault_rel_pfn;
	struct kbase_as *faulting_as;
	int as_no;
	struct kbase_context *kctx;
	struct kbase_device *kbdev;
	struct kbase_va_region *region;
	int err;
	bool grown = false;

	faulting_as = container_of(data, struct kbase_as, work_pagefault);
	fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
	as_no = faulting_as->number;

	kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);/* [false alarm]: no problem - fortify check */

	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
	 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
	 */
	kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
	if (WARN_ON(!kctx)) {
		atomic_dec(&kbdev->faults_pending);
		return;
	}

	KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);

	fault_status = faulting_as->fault_status;
	switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {

	case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT:
		/* need to check against the region to handle this one */
		break;

	case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT:
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Permission failure");
		goto fault_done;

	case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Translation table bus fault");
		goto fault_done;

	case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
		/* nothing to do, but we don't expect this fault currently */
		dev_warn(kbdev->dev, "Access flag unexpectedly set");
		goto fault_done;


	default:
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Unknown fault code");
		goto fault_done;
	}

	/* so we have a translation fault, let's see if it is for growable
	 * memory */
	kbase_gpu_vm_lock(kctx);

	region = kbase_region_tracker_find_region_enclosing_address(kctx,
			faulting_as->fault_addr);
	if (!region || region->flags & KBASE_REG_FREE) {
		kbase_gpu_vm_unlock(kctx);
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Memory is not mapped on the GPU");
		goto fault_done;
	}

	if ((region->flags & GROWABLE_FLAGS_REQUIRED)
			!= GROWABLE_FLAGS_REQUIRED) {
		kbase_gpu_vm_unlock(kctx);
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Memory is not growable");
		goto fault_done;
	}

	/* find the size we need to grow it by */
	/* we know the result fits in a size_t due to kbase_region_tracker_find_region_enclosing_address
	 * validating the fault_address to be within a size_t from the start_pfn */
	fault_rel_pfn = fault_pfn - region->start_pfn;

	if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
		dev_dbg(kbdev->dev, "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
				faulting_as->fault_addr, region->start_pfn,
				region->start_pfn +
				kbase_reg_current_backed_size(region));

		kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		/* [1] in case another page fault occurred while we were
		 * handling the (duplicate) page fault we need to ensure we
	 * don't lose the other page fault as a result of us clearing
		 * the MMU IRQ. Therefore, after we clear the MMU IRQ we send
		 * an UNLOCK command that will retry any stalled memory
		 * transaction (which should cause the other page fault to be
		 * raised again).
		 */
		kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
				AS_COMMAND_UNLOCK, 1);
		kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		kbase_gpu_vm_unlock(kctx);

		goto fault_done;
	}

	new_pages = make_multiple(fault_rel_pfn -
			kbase_reg_current_backed_size(region) + 1,
			region->extent);

	/* cap to max vsize */
	if (new_pages + kbase_reg_current_backed_size(region) >
			region->nr_pages)
		new_pages = region->nr_pages -
				kbase_reg_current_backed_size(region);

	if (0 == new_pages) {
		/* Duplicate of a fault we've already handled, nothing to do */
		kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		/* See comment [1] about UNLOCK usage */
		kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
				AS_COMMAND_UNLOCK, 1);
		kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

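	/* Grow the GPU-side backing first; if the CPU and GPU allocations are
	 * separate, the CPU side must be grown as well, and the GPU pages are
	 * freed again should that second allocation fail. */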
	if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) {
		if (region->gpu_alloc != region->cpu_alloc) {
			if (kbase_alloc_phy_pages_helper(
					region->cpu_alloc, new_pages) == 0) {
				grown = true;
			} else {
				kbase_free_phy_pages_helper(region->gpu_alloc,
						new_pages);
			}
		} else {
			grown = true;
		}
	}


	if (grown) {
		u32 op;

		/* alloc success */
		KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages);

		/* AS transaction begin */
		mutex_lock(&faulting_as->transaction_mutex);

		/* set up the new pages */
		err = kbase_mmu_insert_pages(kctx, region->start_pfn + kbase_reg_current_backed_size(region) - new_pages, &kbase_get_gpu_phy_pages(region)[kbase_reg_current_backed_size(region) - new_pages], new_pages, region->flags);
		if (err) {
			/* failed to insert pages, handle as a normal PF */
			mutex_unlock(&faulting_as->transaction_mutex);
			kbase_free_phy_pages_helper(region->gpu_alloc, new_pages);
			if (region->gpu_alloc != region->cpu_alloc)
				kbase_free_phy_pages_helper(region->cpu_alloc,
						new_pages);
			kbase_gpu_vm_unlock(kctx);
			/* The locked VA region will be unlocked and the cache invalidated in here */
			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
					"Page table update failure");
			goto fault_done;
		}
#if defined(CONFIG_MALI_GATOR_SUPPORT)
		kbase_trace_mali_page_fault_insert_pages(as_no, new_pages);
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
		kbase_tlstream_aux_pagefault(
				kctx->id,
				atomic_read(&kctx->used_pages));
#endif

		/* flush L2 and unlock the VA (resumes the MMU) */
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
			op = AS_COMMAND_FLUSH;
		else
			op = AS_COMMAND_FLUSH_PT;

		/* clear MMU interrupt - this needs to be done after updating
		 * the page tables but before issuing a FLUSH command. The
		 * FLUSH cmd has a side effect that it restarts stalled memory
		 * transactions in other address spaces which may cause
		 * another fault to occur. If we didn't clear the interrupt at
		 * this stage a new IRQ might not be raised when the GPU finds
		 * a MMU IRQ is already pending.
		 */
		kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
					 KBASE_MMU_FAULT_TYPE_PAGE);

		kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx,
					  faulting_as->fault_addr >> PAGE_SHIFT,
					  new_pages,
					  op, 1);

		mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		/* reenable this in the mask */
		kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
					 KBASE_MMU_FAULT_TYPE_PAGE);
		kbase_gpu_vm_unlock(kctx);
	} else {