Code Example #1
/**
 * Submit the 8401 workaround job.
 *
 * Important for BASE_HW_ISSUE_8987: This job always uses 16 RMUs
 * - Therefore, on slot[1] it will always use the same number of RMUs as another
 * GLES job.
 * - On slot[2], no other job (GLES or otherwise) will be running on the
 * cores, by virtue of it being slot[2]. Therefore, any value of RMUs is
 * acceptable.
 */
void kbasep_8401_submit_dummy_job(kbase_device *kbdev, int js)
{
	u32 cfg;
	mali_addr64 jc;
	u32 pgd_high;

	/* While this workaround is active we reserve the last address space just for submitting the dummy jobs */
	int as = kbdev->nr_hw_address_spaces;

	/* Don't issue compute jobs on job slot 0 */
	KBASE_DEBUG_ASSERT(js != 0);
	KBASE_DEBUG_ASSERT(js < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT);

	/* Job chain GPU address */
	jc = (js + WORKAROUND_PAGE_OFFSET) * PAGE_SIZE;	/* GPU phys address (see kbase_mmu_insert_pages call in kbasep_8401_workaround_init) */

	/* Clear the job status words which may contain values from a previous job completion */
	memset(kbdev->workaround_compute_job_va[js], 0, 4 * sizeof(u32));

	/* Get the affinity of the previous job */
	dummy_job_atom[js].affinity = ((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_LO), NULL)) | (((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_HI), NULL)) << 32);

	/* Don't submit a compute job if the affinity was previously zero (i.e. no jobs have run yet on this slot) */
	if (!dummy_job_atom[js].affinity)
		return;

	/* Ensure that our page tables are programmed into the MMU */
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_LO), (kbdev->workaround_kctx->pgd & ASn_TRANSTAB_ADDR_SPACE_MASK) | ASn_TRANSTAB_READ_INNER | ASn_TRANSTAB_ADRMODE_TABLE, NULL);

	/* Need to use a conditional expression to avoid a "right shift count >= width of type"
	 * error when using an if statement: although the sizeof condition is evaluated at compile
	 * time, the unused branch is not removed until after it has been type-checked, so the
	 * error would still be produced.
	 */
	pgd_high = sizeof(kbdev->workaround_kctx->pgd) > 4 ? (kbdev->workaround_kctx->pgd >> 32) : 0;
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_HI), pgd_high, NULL);

	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_LO), ASn_MEMATTR_IMPL_DEF_CACHE_POLICY, NULL);
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_HI), ASn_MEMATTR_IMPL_DEF_CACHE_POLICY, NULL);
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);

	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), jc & 0xFFFFFFFF, NULL);
	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), jc >> 32, NULL);

	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_LO), dummy_job_atom[js].affinity & 0xFFFFFFFF, NULL);
	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_HI), dummy_job_atom[js].affinity >> 32, NULL);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on start */
	cfg = as | JSn_CONFIG_END_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_START_MMU | JSn_CONFIG_START_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_THREAD_PRI(8);
	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_CONFIG_NEXT), cfg, NULL);

	KBASE_TRACE_ADD_SLOT(kbdev, JM_SUBMIT, NULL, 0, jc, js);

	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_START, NULL);
	/* Report that the job has been submitted */
	kbasep_jm_enqueue_submit_slot(&kbdev->jm_slots[js], &dummy_job_atom[js]);
}
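
For orientation, here is a minimal, hypothetical caller that submits the dummy job on every slot the assertions above allow (slot 0 excluded). Only kbasep_8401_submit_dummy_job and KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT come from the snippet; kbasep_8401_resubmit_on_slots is an invented name used purely for illustration.

/* Hypothetical helper: submit the 8401 dummy job on each affected slot.
 * Slot 0 is skipped, matching the assertions in the function above. */
static void kbasep_8401_resubmit_on_slots(kbase_device *kbdev)
{
	int js;

	for (js = 1; js < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT; js++)
		kbasep_8401_submit_dummy_job(kbdev, js);
}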
Code Example #2
void kbasep_8401_submit_dummy_job(kbase_device *kbdev, int js)
{
	u32 cfg;
	mali_addr64 jc;

	/* While this workaround is active we reserve the last address space just for submitting the dummy jobs */
	int as = kbdev->nr_address_spaces;

	/* Don't issue compute jobs on job slot 0 */
	OSK_ASSERT(js != 0);
	OSK_ASSERT(js < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT);

	/* Job chain GPU address */
	jc = (js+WORKAROUND_PAGE_OFFSET)*OSK_PAGE_SIZE; /* GPU phys address (see kbase_mmu_insert_pages call in kbasep_8401_workaround_init) */

	/* Clear the job status words which may contain values from a previous job completion */
	memset(kbdev->workaround_compute_job_va[js], 0,  4*sizeof(u32));

	/* Get the affinity of the previous job */
	dummy_job_atom[js].affinity = ((u64)kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_LO), NULL)) |
	                          (((u64)kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_HI), NULL)) << 32);

	/* Don't submit a compute job if the affinity was previously zero (i.e. no jobs have run yet on this slot) */
	if(!dummy_job_atom[js].affinity)
	{
		return;
	}

	/* Ensure that our page tables are programmed into the MMU */
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_LO),
	                (kbdev->workaround_kctx->pgd & 0xFFFFF000) | (1ul << 2) | 3, NULL);
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_HI),
	                (kbdev->workaround_kctx->pgd >> 32), NULL);

	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_LO), 0x48484848, NULL);
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_HI), 0x48484848, NULL);
	kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);

	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), jc & 0xFFFFFFFF, NULL);
	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), jc >> 32, NULL);

	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_LO), dummy_job_atom[js].affinity & 0xFFFFFFFF, NULL);
	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_HI), dummy_job_atom[js].affinity >> 32, NULL);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on start */
	cfg = as | (3 << 12) | (1 << 10) | (8 << 16) | (3 << 8);
	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_CONFIG_NEXT), cfg, NULL);

	KBASE_TRACE_ADD_SLOT( kbdev, JM_SUBMIT, NULL, 0, jc, js );

	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_START, NULL);
	/* Report that the job has been submitted */
	kbasep_jm_enqueue_submit_slot(&kbdev->jm_slots[js], &dummy_job_atom[js]);
}
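
Code Example #2 is an earlier revision of the same function and uses raw constants where Example #1 uses named macros. The correspondence below is inferred by comparing the two snippets (in particular, THREAD_PRI(8) presumably maps to (8 << 16)); it is not taken from the driver headers.

/*
 * Presumed correspondence between the raw constants above and the named
 * macros used in Code Example #1 (inferred from the two revisions, not
 * verified against the driver headers):
 *
 *   0xFFFFF000      ~ ASn_TRANSTAB_ADDR_SPACE_MASK
 *   (1ul << 2) | 3  ~ ASn_TRANSTAB_READ_INNER | ASn_TRANSTAB_ADRMODE_TABLE
 *   0x48484848      ~ ASn_MEMATTR_IMPL_DEF_CACHE_POLICY
 *   (3 << 8)        ~ JSn_CONFIG_START_FLUSH_CLEAN_INVALIDATE
 *   (1 << 10)       ~ JSn_CONFIG_START_MMU
 *   (3 << 12)       ~ JSn_CONFIG_END_FLUSH_CLEAN_INVALIDATE
 *   (8 << 16)       ~ JSn_CONFIG_THREAD_PRI(8)
 */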
Code Example #3
File: mali_kbase_mmu.c  Project: 253627764/GT-I9500
static void page_fault_worker(struct work_struct *data)
{
	u64 fault_pfn;
	u32 new_pages;
	u32 fault_rel_pfn;
	kbase_as * faulting_as;
	int as_no;
	kbase_context * kctx;
	kbase_device * kbdev;
	kbase_va_region *region;
	mali_error err;

	u32 fault_status;

	faulting_as = container_of(data, kbase_as, work_pagefault);
	fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
	as_no = faulting_as->number;

	kbdev = container_of( faulting_as, kbase_device, as[as_no] );

	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
	 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
	 *
	 * NOTE: NULL can be returned here if we're gracefully handling a spurious interrupt */
	kctx = kbasep_js_runpool_lookup_ctx_noretain( kbdev, as_no );

	if ( kctx == NULL )
	{
		/* Address space has no context, terminate the work */
		u32 reg;
		/* AS transaction begin */
		mutex_lock(&faulting_as->transaction_mutex);
		reg = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), NULL);
		reg = (reg & (~(u32)MMU_TRANSTAB_ADRMODE_MASK)) | ASn_TRANSTAB_ADRMODE_UNMAPPED;
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), reg, NULL);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		mmu_mask_reenable(kbdev, NULL, faulting_as);
		return;
	}

	fault_status = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_FAULTSTATUS), NULL);

	OSK_ASSERT( kctx->kbdev == kbdev );

	kbase_gpu_vm_lock(kctx);

	/* find the region object for this VA */
	region = kbase_region_tracker_find_region_enclosing_address(kctx, faulting_as->fault_addr);
	if (NULL == region || (GROWABLE_FLAGS_REQUIRED != (region->flags & GROWABLE_FLAGS_MASK)))
	{
		kbase_gpu_vm_unlock(kctx);
		/* failed to find the region or mismatch of the flags */
		kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
		goto fault_done;
	}

	if ((((fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_READ) &&
	        !(region->flags & KBASE_REG_GPU_RD)) ||
	    (((fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_WRITE) &&
	        !(region->flags & KBASE_REG_GPU_WR)) ||
	    (((fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_EX) &&
	        (region->flags & KBASE_REG_GPU_NX)))
	{
		OSK_PRINT_WARN(OSK_BASE_MMU, "Access permissions don't match: region->flags=0x%x", region->flags);
		kbase_gpu_vm_unlock(kctx);
		kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
		goto fault_done;
	}

	/* find the size we need to grow it by */
	/* we know the result fits in a u32 because kbase_region_tracker_find_region_enclosing_address
	 * validates that the fault_address is within a u32 of the start_pfn */
	fault_rel_pfn = fault_pfn - region->start_pfn;
	
	if (fault_rel_pfn < region->nr_alloc_pages)
	{
		OSK_PRINT_WARN(OSK_BASE_MMU, "Fault in allocated region of growable TMEM: Ignoring");
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	new_pages = make_multiple(fault_rel_pfn - region->nr_alloc_pages + 1, region->extent);
	if (new_pages + region->nr_alloc_pages > region->nr_pages)
	{
		/* cap to max vsize */
		new_pages = region->nr_pages - region->nr_alloc_pages;
	}

	if (0 == new_pages)
	{
		/* Duplicate of a fault we've already handled, nothing to do */
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	if (MALI_ERROR_NONE == kbase_alloc_phy_pages_helper(region, new_pages))
	{
		/* alloc success */
		mali_addr64 lock_addr;
		OSK_ASSERT(region->nr_alloc_pages <= region->nr_pages);

		/* AS transaction begin */
		mutex_lock(&faulting_as->transaction_mutex);

		/* Lock the VA region we're about to update */
		lock_addr = lock_region(kbdev, faulting_as->fault_addr >> PAGE_SHIFT, new_pages);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_LO), lock_addr & 0xFFFFFFFFUL, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_HI), lock_addr >> 32, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_LOCK, kctx);

		/* set up the new pages */
		err = kbase_mmu_insert_pages(kctx, region->start_pfn + region->nr_alloc_pages - new_pages,
		                             &region->phy_pages[region->nr_alloc_pages - new_pages],
		                             new_pages, region->flags);
		if(MALI_ERROR_NONE != err)
		{
			/* failed to insert pages, handle as a normal PF */
			mutex_unlock(&faulting_as->transaction_mutex);
			kbase_gpu_vm_unlock(kctx);
			/* The locked VA region will be unlocked and the cache invalidated in here */
			kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
			goto fault_done;
		}

#ifdef CONFIG_MALI_GATOR_SUPPORT
		kbase_trace_mali_page_fault_insert_pages(as_no, new_pages);
#endif /* CONFIG_MALI_GATOR_SUPPORT */
		/* clear the irq */
		/* MUST BE BEFORE THE FLUSH/UNLOCK */
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);

		/* flush L2 and unlock the VA (resumes the MMU) */
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
		{
			kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_FLUSH, kctx);
		}
		else
		{
			kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_FLUSH_PT, kctx);
		}

		/* wait for the flush to complete */
		while (kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_STATUS), kctx) & 1);

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630))
		{
			/* Issue an UNLOCK command to ensure that valid page tables are re-read by the GPU after an update.
			 * The FLUSH command should perform all the actions necessary; however, bus logs show that if
			 * multiple page faults occur within an 8-page region the MMU does not always re-read the updated
			 * page table entries for later faults (or reads them only partially) and subsequently raises the
			 * page fault IRQ for the same addresses. The UNLOCK ensures that the MMU cache is flushed so the
			 * updates can be re-read. As the region is now unlocked, we need to issue 2 UNLOCK commands in
			 * order to flush the MMU/uTLB, see PRLAM-8812.
			 */
			kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UNLOCK, kctx);
			kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UNLOCK, kctx);
		}

		mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		/* reenable this in the mask */
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
	}
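
make_multiple() is not shown in this snippet; from its use above it presumably rounds its first argument up to a multiple of region->extent. Below is a self-contained sketch of that presumed behaviour together with the capping logic from the code above; the names are illustrative, not the driver's.

#include <stdint.h>

/* Presumed behaviour of make_multiple(): round 'minimum' up to the next
 * multiple of 'multiple' (illustrative reimplementation). */
static uint32_t make_multiple(uint32_t minimum, uint32_t multiple)
{
	uint32_t remainder = minimum % multiple;

	return remainder ? minimum + multiple - remainder : minimum;
}

/* Example: a fault one page past the allocated area of a region with
 * extent = 8 grows the region by 8 pages, capped at nr_pages. */
static uint32_t pages_to_grow(uint32_t fault_rel_pfn, uint32_t nr_alloc_pages,
			      uint32_t nr_pages, uint32_t extent)
{
	uint32_t new_pages = make_multiple(fault_rel_pfn - nr_alloc_pages + 1, extent);

	if (new_pages + nr_alloc_pages > nr_pages)
		new_pages = nr_pages - nr_alloc_pages; /* cap to max vsize */
	return new_pages;
}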
Code Example #4
File: mali_kbase_mmu.c  Project: AndreiLux/Perseus-S3
static void page_fault_worker(osk_workq_work *data)
{
	u64 fault_pfn;
	u32 new_pages;
	u32 fault_rel_pfn;
	kbase_as * faulting_as;
	int as_no;
	kbase_context * kctx;
	kbase_device * kbdev;
	kbase_va_region *region;
	mali_error err;

	faulting_as = CONTAINER_OF(data, kbase_as, work_pagefault);
	fault_pfn = faulting_as->fault_addr >> OSK_PAGE_SHIFT;
	as_no = faulting_as->number;

	kbdev = CONTAINER_OF( faulting_as, kbase_device, as[as_no] );

	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
	 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
	 *
	 * NOTE: NULL can be returned here if we're gracefully handling a spurious interrupt */
	kctx = kbasep_js_runpool_lookup_ctx_noretain( kbdev, as_no );

	if ( kctx == NULL )
	{
		/* Address space has no context, terminate the work */
		u32 reg;
		/* AS transaction begin */
		osk_mutex_lock(&faulting_as->transaction_mutex);
		reg = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), NULL);
		reg = (reg & (~(u32)MMU_TRANSTAB_ADRMODE_MASK)) | ASn_TRANSTAB_ADRMODE_UNMAPPED;
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), reg, NULL);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		osk_mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		mmu_mask_reenable(kbdev, NULL, faulting_as);
		return;
	}


	OSK_ASSERT( kctx->kbdev == kbdev );

	kbase_gpu_vm_lock(kctx);

	/* find the region object for this VA */
	region = kbase_region_lookup(kctx, faulting_as->fault_addr);
	if (NULL == region || (GROWABLE_FLAGS_REQUIRED != (region->flags & GROWABLE_FLAGS_MASK)))
	{
		kbase_gpu_vm_unlock(kctx);
		/* failed to find the region or mismatch of the flags */
		kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
		goto fault_done;
	}

	/* find the size we need to grow it by */
	/* we know the result fits in a u32 because kbase_region_lookup
	 * validates that the fault_address is within a u32 of the start_pfn */
	fault_rel_pfn = fault_pfn - region->start_pfn;
	new_pages = make_multiple(fault_rel_pfn - region->nr_alloc_pages + 1, region->extent);
	if (new_pages + region->nr_alloc_pages > region->nr_pages)
	{
		/* cap to max vsize */
		new_pages = region->nr_pages - region->nr_alloc_pages;
	}

	if (0 == new_pages)
	{
		/* Duplicate of a fault we've already handled, nothing to do */
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	if (MALI_ERROR_NONE == kbase_alloc_phy_pages_helper(region, new_pages))
	{
		/* alloc success */
		mali_addr64 lock_addr;
		OSK_ASSERT(region->nr_alloc_pages <= region->nr_pages);

		/* AS transaction begin */
		osk_mutex_lock(&faulting_as->transaction_mutex);

		/* Lock the VA region we're about to update */
		lock_addr = lock_region(faulting_as->fault_addr >> OSK_PAGE_SHIFT, new_pages);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_LO), lock_addr & 0xFFFFFFFFUL, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_HI), lock_addr >> 32, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_LOCK, kctx);

		/* set up the new pages */
		err = kbase_mmu_insert_pages(kctx, region->start_pfn + region->nr_alloc_pages - new_pages,
		                             &region->phy_pages[region->nr_alloc_pages - new_pages],
		                             new_pages, region->flags);
		if(MALI_ERROR_NONE != err)
		{
			/* failed to insert pages, handle as a normal PF */
			osk_mutex_unlock(&faulting_as->transaction_mutex);
			kbase_gpu_vm_unlock(kctx);
			/* The locked VA region will be unlocked and the cache invalidated in here */
			kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
			goto fault_done;
		}

		/* clear the irq */
		/* MUST BE BEFORE THE FLUSH/UNLOCK */
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);

		/* flush L2 and unlock the VA (resumes the MMU) */
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_FLUSH, kctx);

		/* wait for the flush to complete */
		while (kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_STATUS), kctx) & 1);

		osk_mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		/* reenable this in the mask */
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
	}
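
Both revisions wait for the flush by busy-polling bit 0 of ASn_STATUS with no upper bound. A bounded-poll variant is sketched below; MAX_FLUSH_POLLS and the int return convention are invented for illustration, while the register accessors are the ones used in the snippets.

/* Sketch: bounded poll of the AS status register instead of an unbounded
 * busy-wait. Assumes bit 0 of ASn_STATUS means "command still active". */
#define MAX_FLUSH_POLLS 10000	/* hypothetical bound, not from the driver */

static int wait_for_flush(kbase_device *kbdev, int as_no, kbase_context *kctx)
{
	int i;

	for (i = 0; i < MAX_FLUSH_POLLS; i++) {
		if (!(kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_STATUS), kctx) & 1))
			return 0;	/* flush completed */
	}
	return -1;	/* flush did not complete within the poll budget */
}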
Code Example #5
bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
		int reg_range)
{
	int i, j;
	int offset = 0;
	int slot_number;
	int as_number;

	if (kctx->reg_dump == NULL)
		return false;

	slot_number = kctx->kbdev->gpu_props.num_job_slots;
	as_number = kctx->kbdev->gpu_props.num_address_spaces;

	/* get the GPU control registers*/
	for (i = 0; i < sizeof(gpu_control_reg_snapshot)/4; i++) {
		kctx->reg_dump[offset] =
				GPU_CONTROL_REG(gpu_control_reg_snapshot[i]);
		offset += 2;
	}

	/* get the Job control registers*/
	for (i = 0; i < sizeof(job_control_reg_snapshot)/4; i++) {
		kctx->reg_dump[offset] =
				JOB_CONTROL_REG(job_control_reg_snapshot[i]);
		offset += 2;
	}

	/* get the Job Slot registers*/
	for (j = 0; j < slot_number; j++)	{
		for (i = 0; i < sizeof(job_slot_reg_snapshot)/4; i++) {
			kctx->reg_dump[offset] =
			JOB_SLOT_REG(j, job_slot_reg_snapshot[i]);
			offset += 2;
		}
	}

	/* get the MMU registers*/
	for (i = 0; i < sizeof(mmu_reg_snapshot)/4; i++) {
		kctx->reg_dump[offset] = MMU_REG(mmu_reg_snapshot[i]);
		offset += 2;
	}

	/* get the Address space registers*/
	for (j = 0; j < as_number; j++) {
		for (i = 0; i < sizeof(as_reg_snapshot)/4; i++) {
			kctx->reg_dump[offset] =
					MMU_AS_REG(j, as_reg_snapshot[i]);
			offset += 2;
		}
	}

	WARN_ON(offset >= (reg_range*2/4));

	/* set the termination flag*/
	kctx->reg_dump[offset] = REGISTER_DUMP_TERMINATION_FLAG;
	kctx->reg_dump[offset + 1] = REGISTER_DUMP_TERMINATION_FLAG;

	dev_dbg(kctx->kbdev->dev, "kbase_job_fault_reg_snapshot_init:%d\n",
			offset);

	return true;
}
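
The offset += 2 stride and the pair of termination words suggest that reg_dump holds {register address, value} pairs, with the value slots filled in later when a snapshot is actually taken. Below is a hypothetical reader for that presumed layout; only REGISTER_DUMP_TERMINATION_FLAG, reg_dump and dev_dbg come from the snippet, the function itself is illustrative.

/* Sketch: walk a reg_dump buffer laid out as {address, value} pairs and
 * terminated by two REGISTER_DUMP_TERMINATION_FLAG words. */
static void kbasep_dump_register_snapshot(struct kbase_context *kctx)
{
	int offset = 0;

	while (kctx->reg_dump[offset] != REGISTER_DUMP_TERMINATION_FLAG) {
		dev_dbg(kctx->kbdev->dev, "reg 0x%08x = 0x%08x\n",
			kctx->reg_dump[offset], kctx->reg_dump[offset + 1]);
		offset += 2;
	}
}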