Example #1
void oskp_debug_test_timer_stats( void )
{
	oskp_time_test time_tester;
	osk_ticks start_timestamp;
	osk_ticks end_timestamp;
	u32 msec_elapsed;
	osk_error err;

	if ( oskp_timer_has_been_checked != MALI_FALSE )
	{
		return;
	}
	oskp_timer_has_been_checked = MALI_TRUE;

	OSK_MEMSET( &time_tester, 0, sizeof(time_tester) );

	err = osk_timer_on_stack_init( &time_tester.timer );
	if ( err != OSK_ERR_NONE )
	{
		goto fail_init;
	}

	osk_timer_callback_set( &time_tester.timer, &oskp_check_timer_callback, &time_tester );

	start_timestamp = osk_time_now();
	err = osk_timer_start_ns( &time_tester.timer, TIMER_PERIOD_NS );
	if ( err != OSK_ERR_NONE )
	{
		goto fail_start;
	}

	msleep( TIMER_TEST_TIME_MS );

	time_tester.should_stop = MALI_TRUE;

	osk_timer_stop( &time_tester.timer );
	end_timestamp = osk_time_now();

	msec_elapsed = osk_time_elapsed( start_timestamp, end_timestamp );

	OSK_PRINT( OSK_BASE_CORE, "OSK Timer did %d iterations in %dms", time_tester.val, msec_elapsed );

	osk_timer_on_stack_term( &time_tester.timer );
	return;

 fail_start:
	osk_timer_on_stack_term( &time_tester.timer );
 fail_init:
	OSK_PRINT_WARN( OSK_BASE_CORE, "OSK Timer couldn't init/start for testing stats" );
	return;
}
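The printed statistic can be sanity-checked against the test parameters: the callback in Example #3 re-arms the timer until should_stop is set, so the iteration count should be roughly TIMER_TEST_TIME_MS (converted to nanoseconds) divided by TIMER_PERIOD_NS. A minimal standalone sketch of that arithmetic, with hypothetical values for the two constants (they are defined elsewhere in the driver and not shown in this listing):

#include <stdio.h>

/* Hypothetical values; the real constants are defined elsewhere in the driver. */
#define TIMER_PERIOD_NS    1000000ULL   /* 1 ms per timer shot */
#define TIMER_TEST_TIME_MS 1000ULL      /* test runs for one second */

int main(void)
{
	/* Expected iterations = test duration / timer period */
	unsigned long long expected =
		(TIMER_TEST_TIME_MS * 1000000ULL) / TIMER_PERIOD_NS;

	printf("expected roughly %llu callback iterations\n", expected); /* ~1000 */
	return 0;
}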
Example #2
mali_error kbase_gpuprops_uk_get_props(kbase_context *kctx, kbase_uk_gpuprops * kbase_props)
{
	kbase_gpuprops_clock_speed_function get_gpu_speed_mhz;
	u32 gpu_speed_mhz;
	int rc = 1;

	OSK_ASSERT(NULL != kctx);
	OSK_ASSERT(NULL != kbase_props);

	if (OSK_SIMULATE_FAILURE(OSK_BASE_CORE))
	{
		return MALI_ERROR_FUNCTION_FAILED;
	}

	/* Current GPU speed is requested from the system integrator via the KBASE_CONFIG_ATTR_GPU_SPEED_FUNC function.
	 * If that function fails, or the function is not provided by the system integrator, we report the maximum
	 * GPU speed as specified by KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX.
	 */
	get_gpu_speed_mhz = (kbase_gpuprops_clock_speed_function)kbasep_get_config_value(kctx->kbdev, kctx->kbdev->config_attributes, KBASE_CONFIG_ATTR_GPU_SPEED_FUNC);
	if (get_gpu_speed_mhz != NULL)
	{
		rc = get_gpu_speed_mhz(&gpu_speed_mhz);
#ifdef CONFIG_MALI_DEBUG
		/* Issue a warning message when the reported GPU speed falls outside the min/max range */
		if (rc == 0)
		{
			u32 gpu_speed_khz = gpu_speed_mhz * 1000;
			if (gpu_speed_khz < kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min ||
			    gpu_speed_khz > kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max)
			{
				OSK_PRINT_WARN(OSK_BASE_CORE, "GPU Speed is outside of min/max range (got %u kHz, min %u kHz, max %u kHz)\n",
				                   gpu_speed_khz,
				                   kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min,
				                   kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max);
			}
		}
#endif /* CONFIG_MALI_DEBUG */
	}
	if (rc != 0)
	{
		gpu_speed_mhz = kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max / 1000;
	}

	kctx->kbdev->gpu_props.props.core_props.gpu_speed_mhz = gpu_speed_mhz;

	memcpy(&kbase_props->props, &kctx->kbdev->gpu_props.props, sizeof(kbase_props->props));

	return MALI_ERROR_NONE;
}
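KBASE_CONFIG_ATTR_GPU_SPEED_FUNC is expected to resolve to an integrator-supplied callback that writes the current GPU clock in MHz and returns 0 on success; any other return value (or a missing attribute) makes the code above fall back to gpu_freq_khz_max / 1000. A minimal sketch of such a callback, where the function name and the 500 MHz value are placeholders:

#include <stdint.h>
#include <stddef.h>

typedef uint32_t u32; /* stand-in for the driver's u32 typedef */

/* Hypothetical integrator callback: writes the current GPU clock in MHz and
 * returns 0 on success, so the driver skips the gpu_freq_khz_max fallback. */
static int my_platform_gpu_speed_mhz(u32 *clock_speed_mhz)
{
	if (clock_speed_mhz == NULL)
		return 1; /* non-zero: caller falls back to the maximum frequency */

	*clock_speed_mhz = 500; /* placeholder: a real integrator reads the clock here */
	return 0;
}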
Example #3
static void oskp_check_timer_callback( void *data )
{
	oskp_time_test *time_tester = (oskp_time_test*)data;

	(time_tester->val)++;

	if ( time_tester->should_stop == MALI_FALSE )
	{
		osk_error err;
		err = osk_timer_start_ns( &time_tester->timer, TIMER_PERIOD_NS );
		if ( err != OSK_ERR_NONE )
		{
			OSK_PRINT_WARN( OSK_BASE_CORE, "OSK Timer couldn't restart - testing stats will be inaccurate" );
		}
	}
}
Example #4
static mali_bool kbasep_validate_gpu_clock_freq(const kbase_attribute *attributes)
{
	uintptr_t freq_min = kbasep_get_config_value(attributes, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN);
	uintptr_t freq_max = kbasep_get_config_value(attributes, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX);

	if ((freq_min > MAX_GPU_ALLOWED_FREQ_KHZ) ||
		(freq_min < MIN_GPU_ALLOWED_FREQ_KHZ) ||
		(freq_max > MAX_GPU_ALLOWED_FREQ_KHZ) ||
		(freq_max < MIN_GPU_ALLOWED_FREQ_KHZ) ||
		(freq_min > freq_max))
	{
		OSK_PRINT_WARN(OSK_BASE_CORE, "Invalid GPU frequencies found in configuration: min=%ldkHz, max=%ldkHz.", freq_min, freq_max);
		return MALI_FALSE;
	}
	
	return MALI_TRUE;
}
Example #5
void oskp_validate_format_string(const char *format, ...)
{
#if MALI_DEBUG
	char c;
	static const char *supported[] = 
	{
		"d", "ld", "lld",
		"x", "lx", "llx",
		"X", "lX", "llX",
		"u", "lu", "llu",
		"p",
		"c",
		"s",
	};
	static const unsigned char sizes[] = { 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 1, 1 };

	unsigned int i;

	/* %[flags][width][.precision][length]specifier  */

	while ( (c = *format++) )
	{
		if (c == '%')
		{
			c = *format;

			if (c == '\0')
			{
				/* Unsupported format */
				OSK_PRINT_WARN(OSK_OSK, "OSK Format specification not complete (%% not followed by anything)\n");
				return;
			}
			else if (c != '%')
			{
				/* Skip to the [length]specifier part assuming it starts with
				 * an alphabetic character and flags, width, precision do not
				 * contain alphabetic characters.
				 */
				do 
				{
					if ((c >= 'a' && c <= 'z') || c == 'X')
					{
						/* Match supported formats with current position in format string */
						for (i = 0; i < NELEMS(supported); i++)
						{
							if (strncmp(format, supported[i], sizes[i]) == 0)
							{
								/* Supported format */
								break;
							}
						}

						if (i == NELEMS(supported))
						{
							/* Unsupported format */
							OSK_PRINT_WARN(OSK_OSK, "OSK Format string specifier not supported (starting at '%s')\n", format);
							return;
						}

						/* Start looking for next '%' */
						break;
					}
				} while ( (c = *++format) );
			}
		}
	}
#else
	CSTD_UNUSED(format);
#endif
}
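A few illustrative calls (hypothetical, not taken from the driver) show how the checker behaves when MALI_DEBUG is enabled:

/* Illustrative calls (hypothetical), with MALI_DEBUG enabled: */
void format_check_examples(void)
{
	oskp_validate_format_string("did %u iterations in %dms", 3u, 10);  /* %u and %d are in the table: silent */
	oskp_validate_format_string("ptr=%p name=%s", (void *)0, "core");  /* %p and %s are supported: silent    */
	oskp_validate_format_string("value=%f", 1.0);                      /* %f is not in the table: warning    */
	oskp_validate_format_string("truncated %");                        /* '%' ends the string: warning       */
}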
Example #6
STATIC void kbase_gpuprops_construct_coherent_groups(base_gpu_props * const props)
{
	struct mali_base_gpu_coherent_group *current_group;
	u64 group_present;
	u64 group_mask;
	u64 first_set, first_set_prev;
	u32 num_groups = 0;

	OSK_ASSERT(NULL != props);

	props->coherency_info.coherency = props->raw_props.mem_features;
	props->coherency_info.num_core_groups = osk_count_set_bits64(props->raw_props.l2_present);

	if (props->coherency_info.coherency & GROUPS_L3_COHERENT)
	{
		/* Group is l3 coherent */
		group_present = props->raw_props.l3_present;
	}
	else if (props->coherency_info.coherency & GROUPS_L2_COHERENT)
	{
		/* Group is l2 coherent */
		group_present = props->raw_props.l2_present;
	}
	else
	{
		/* Group is l1 coherent */
		group_present = props->raw_props.shader_present;
	}

	/*
	 * The coherent group mask can be computed from the l2/l3 present
	 * register.
	 *
	 * For the coherent group n:
	 * group_mask[n] = (first_set[n] - 1) & ~(first_set[n-1] - 1)
	 * where first_set is group_present with only its nth set-bit kept
	 * (i.e. the position from where a new group starts).
	 *
	 * For instance if the groups are l2 coherent and l2_present=0x0..01111:
	 * The first mask is:
	 * group_mask[1] = (first_set[1] - 1) & ~(first_set[0] - 1)
	 *               = (0x0..010     - 1) & ~(0x0..01      - 1)
	 *               =  0x0..00f
	 * The second mask is:
	 * group_mask[2] = (first_set[2] - 1) & ~(first_set[1] - 1)
	 *               = (0x0..100     - 1) & ~(0x0..010     - 1)
	 *               =  0x0..0f0
	 * And so on until all the bits from group_present have been cleared
	 * (i.e. there is no group left).
	 */

	current_group = props->coherency_info.group;
	first_set = group_present & ~(group_present - 1);

	while (group_present != 0 && num_groups < BASE_MAX_COHERENT_GROUPS)
	{
		group_present -= first_set; /* Clear the current group bit */
		first_set_prev = first_set;

		first_set = group_present & ~(group_present - 1);
		group_mask = (first_set - 1) & ~(first_set_prev - 1);

		/* Populate the coherent_group structure for each group */
		current_group->core_mask = group_mask & props->raw_props.shader_present;
		current_group->num_cores = osk_count_set_bits64(current_group->core_mask);

		num_groups++;
		current_group++;
	}

	if (group_present != 0)
	{
		OSK_PRINT_WARN(OSK_BASE_CORE, "Too many coherent groups (keeping only %d groups).\n", BASE_MAX_COHERENT_GROUPS);	
	}

	props->coherency_info.num_groups = num_groups;
}
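The mask arithmetic from the comment block can be replayed in isolation: x & ~(x - 1) keeps only the lowest set bit of x, and combining two of the resulting subtract-one masks selects exactly the bits belonging to one group. A standalone sketch, reusing the l2_present = 0x0..01111 example from the comment:

#include <stdio.h>
#include <stdint.h>

/* Standalone replay of the group-mask arithmetic, using the
 * l2_present = 0x0..01111 example from the comment above. */
int main(void)
{
	uint64_t group_present = 0x1111ULL; /* four groups, one start bit per nibble */
	uint64_t first_set = group_present & ~(group_present - 1); /* lowest set bit */

	while (group_present != 0)
	{
		uint64_t first_set_prev;
		uint64_t group_mask;

		group_present -= first_set;  /* clear the current group's start bit */
		first_set_prev = first_set;
		first_set = group_present & ~(group_present - 1);
		group_mask = (first_set - 1) & ~(first_set_prev - 1);

		/* prints 0xf, 0xf0, 0xf00, then a mask covering all remaining upper bits */
		printf("group mask: 0x%llx\n", (unsigned long long)group_mask);
	}
	return 0;
}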
Example #7
mali_bool kbasep_validate_configuration_attributes(const kbase_attribute *attributes)
{
	int i;
	mali_bool had_gpu_freq_min = MALI_FALSE, had_gpu_freq_max = MALI_FALSE;

	OSK_ASSERT(attributes);

	for (i = 0; attributes[i].id != KBASE_CONFIG_ATTR_END; i++)
	{
		if (i >= ATTRIBUTE_COUNT_MAX)
		{
			OSK_PRINT_WARN(OSK_BASE_CORE, "More than ATTRIBUTE_COUNT_MAX=%i configuration attributes defined. Is attribute list properly terminated?",
					ATTRIBUTE_COUNT_MAX);
			return MALI_FALSE;
		}

		switch (attributes[i].id)
		{
			case KBASE_CONFIG_ATTR_MEMORY_RESOURCE:
				if (MALI_FALSE == kbasep_validate_memory_resource((kbase_memory_resource *)attributes[i].data))
				{
					OSK_PRINT_WARN(OSK_BASE_CORE, "Invalid memory region found in configuration");
					return MALI_FALSE;
				}
				break;
			case KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_MAX:
				/* any value is allowed */
				break;

			case KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_PERF_GPU:
				if (MALI_FALSE == kbasep_validate_memory_performance((kbase_memory_performance)attributes[i].data))
				{
					OSK_PRINT_WARN(OSK_BASE_CORE, "Shared OS memory GPU performance attribute has invalid value: %i",
							(kbase_memory_performance)attributes[i].data);
					return MALI_FALSE;
				}
				break;

			case KBASE_CONFIG_ATTR_MEMORY_PER_PROCESS_LIMIT:
				/* any value is allowed */
				break;

			case KBASE_CONFIG_ATTR_UMP_DEVICE:
				if (MALI_FALSE == kbasep_validate_ump_device(attributes[i].data))
				{
					OSK_PRINT_WARN(OSK_BASE_CORE, "Unknown UMP device found in configuration: %i",
							(int)attributes[i].data);
					return MALI_FALSE;
				}
				break;

			case KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN:
				had_gpu_freq_min = MALI_TRUE;
				if (MALI_FALSE == kbasep_validate_gpu_clock_freq(attributes))
				{
					/* Warning message handled by kbasep_validate_gpu_clock_freq() */
					return MALI_FALSE;
				}
				break;

			case KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX:
				had_gpu_freq_max = MALI_TRUE;
				if (MALI_FALSE == kbasep_validate_gpu_clock_freq(attributes))
				{
					/* Warning message handled by kbasep_validate_gpu_clock_freq() */
					return MALI_FALSE;
				}
				break;

				/* Only non-zero unsigned 32-bit values accepted */
			case KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS:
				#if CSTD_CPU_64BIT
						if ( attributes[i].data == 0u || (u64)attributes[i].data > (u64)U32_MAX )
				#else
						if ( attributes[i].data == 0u )
				#endif
						{
							OSK_PRINT_WARN(OSK_BASE_CORE, "Invalid Job Scheduling Configuration attribute for "
										   "KBASE_CONFIG_ATTR_JS_SCHEDULING_TICKS_NS: %i",
										   (int)attributes[i].data);
							return MALI_FALSE;
						}
				break;

				/* All these Job Scheduling attributes are FALLTHROUGH: only unsigned 32-bit values accepted */
			case KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS:
			case KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS:
			case KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS:
			case KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS:
			case KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS:
			case KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS:
			case KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS:
			case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES:
			case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES:
				#if	CSTD_CPU_64BIT
					if ( (u64)attributes[i].data > (u64)U32_MAX )
					{
						OSK_PRINT_WARN(OSK_BASE_CORE, "Job Scheduling Configuration attribute exceeds 32-bits: "
									   "id==%d val==%i",
									   attributes[i].id, (int)attributes[i].data);
						return MALI_FALSE;
					}
				#endif
				break;

			default:
				OSK_PRINT_WARN(OSK_BASE_CORE, "Invalid attribute found in configuration: %i", attributes[i].id);
				return MALI_FALSE;
		}
	}

	if(!had_gpu_freq_min)
	{
		OSK_PRINT_WARN(OSK_BASE_CORE, "Configuration does not include mandatory attribute KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN");
		return MALI_FALSE;
	}

	if(!had_gpu_freq_max)
	{
		OSK_PRINT_WARN(OSK_BASE_CORE, "Configuration does not include mandatory attribute KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX");
		return MALI_FALSE;
	}

	return MALI_TRUE;
}
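Both validators walk a plain array of {id, data} attributes terminated with KBASE_CONFIG_ATTR_END. A hypothetical minimal configuration that satisfies the mandatory-attribute checks above could look as follows (the frequency values are placeholders, assumed to lie inside the MIN/MAX_GPU_ALLOWED_FREQ_KHZ bounds checked by kbasep_validate_gpu_clock_freq()):

/* Hypothetical minimal platform configuration: both mandatory frequency
 * attributes are present, min <= max, and the list is terminated with
 * KBASE_CONFIG_ATTR_END, so kbasep_validate_configuration_attributes()
 * would return MALI_TRUE for it. */
static const kbase_attribute config_attributes[] = {
	{ KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN, 100000 }, /* placeholder: 100 MHz */
	{ KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX, 500000 }, /* placeholder: 500 MHz */
	{ KBASE_CONFIG_ATTR_END,               0      }
};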
Example #8
static mali_bool kbasep_validate_memory_resource(const kbase_memory_resource *memory_resource)
{
	OSK_ASSERT(memory_resource != NULL);

	if (memory_resource->name == NULL)
	{
		OSK_PRINT_WARN(OSK_BASE_CORE, "Unnamed memory region found");
		return MALI_FALSE;
	}

	if (memory_resource->base & ((1 << OSK_PAGE_SHIFT) - 1))
	{
		OSK_PRINT_WARN(OSK_BASE_CORE, "Base address of \"%s\" memory region is not page aligned", memory_resource->name);
		return MALI_FALSE;
	}

	if (memory_resource->size & ((1 << OSK_PAGE_SHIFT) - 1))
	{
		OSK_PRINT_WARN(OSK_BASE_CORE, "Size of \"%s\" memory region is not a multiple of page size", memory_resource->name);
		return MALI_FALSE;
	}

	if (memory_resource->attributes != NULL) /* we allow NULL attribute list */
	{
		int i;

		for (i = 0; memory_resource->attributes[i].id != KBASE_MEM_ATTR_END; i++)
		{
			if (i >= MEMORY_ATTRIBUTE_COUNT_MAX)
			{
				OSK_PRINT_WARN(OSK_BASE_CORE, "More than MEMORY_ATTRIBUTE_COUNT_MAX=%i configuration attributes defined. Is memory attribute list properly terminated?",
						MEMORY_ATTRIBUTE_COUNT_MAX);
				return MALI_FALSE;
			}
			switch(memory_resource->attributes[i].id)
			{
				case KBASE_MEM_ATTR_PERF_CPU:
					if (MALI_TRUE != kbasep_validate_memory_performance(
							(kbase_memory_performance)memory_resource->attributes[i].data))
					{
						OSK_PRINT_WARN(OSK_BASE_CORE, "CPU performance of \"%s\" region is invalid: %i",
								memory_resource->name, (kbase_memory_performance)memory_resource->attributes[i].data);
						return MALI_FALSE;
					}
					break;

				case KBASE_MEM_ATTR_PERF_GPU:
					if (MALI_TRUE != kbasep_validate_memory_performance(
											(kbase_memory_performance)memory_resource->attributes[i].data))
					{
						OSK_PRINT_WARN(OSK_BASE_CORE, "GPU performance of \"%s\" region is invalid: %i",
								memory_resource->name, (kbase_memory_performance)memory_resource->attributes[i].data);
						return MALI_FALSE;
					}
					break;
				default:
					OSK_PRINT_WARN(OSK_BASE_CORE, "Invalid memory attribute found in \"%s\" memory region: %i",
							memory_resource->name, memory_resource->attributes[i].id);
					return MALI_FALSE;
			}
		}
	}

	return MALI_TRUE;
}
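The two alignment checks above use the standard power-of-two mask trick: for a page size of 1 << OSK_PAGE_SHIFT, any value with a non-zero remainder modulo the page size keeps at least one bit inside the low mask. A small standalone illustration (the 4 KiB page size is an assumption):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT_EXAMPLE 12 /* assumption: 4 KiB pages, mirroring OSK_PAGE_SHIFT */

int main(void)
{
	uint64_t aligned   = 0x10000000ULL; /* multiple of 0x1000: passes the check */
	uint64_t unaligned = 0x10000200ULL; /* 0x200 into a page: would be rejected */
	uint64_t mask      = (1ULL << PAGE_SHIFT_EXAMPLE) - 1;

	printf("aligned   & mask = 0x%llx\n", (unsigned long long)(aligned & mask));   /* 0x0   */
	printf("unaligned & mask = 0x%llx\n", (unsigned long long)(unaligned & mask)); /* 0x200 */
	return 0;
}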
Example #9
static void page_fault_worker(struct work_struct *data)
{
	u64 fault_pfn;
	u32 new_pages;
	u32 fault_rel_pfn;
	kbase_as * faulting_as;
	int as_no;
	kbase_context * kctx;
	kbase_device * kbdev;
	kbase_va_region *region;
	mali_error err;

	u32 fault_status;

	faulting_as = container_of(data, kbase_as, work_pagefault);
	fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
	as_no = faulting_as->number;

	kbdev = container_of( faulting_as, kbase_device, as[as_no] );

	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
	 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
	 *
	 * NOTE: NULL can be returned here if we're gracefully handling a spurious interrupt */
	kctx = kbasep_js_runpool_lookup_ctx_noretain( kbdev, as_no );

	if ( kctx == NULL )
	{
		/* Address space has no context, terminate the work */
		u32 reg;
		/* AS transaction begin */
		mutex_lock(&faulting_as->transaction_mutex);
		reg = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), NULL);
		reg = (reg & (~(u32)MMU_TRANSTAB_ADRMODE_MASK)) | ASn_TRANSTAB_ADRMODE_UNMAPPED;
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), reg, NULL);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		mmu_mask_reenable(kbdev, NULL, faulting_as);
		return;
	}

	fault_status = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_FAULTSTATUS), NULL);

	OSK_ASSERT( kctx->kbdev == kbdev );

	kbase_gpu_vm_lock(kctx);

	/* find the region object for this VA */
	region = kbase_region_tracker_find_region_enclosing_address(kctx, faulting_as->fault_addr);
	if (NULL == region || (GROWABLE_FLAGS_REQUIRED != (region->flags & GROWABLE_FLAGS_MASK)))
	{
		kbase_gpu_vm_unlock(kctx);
		/* failed to find the region or mismatch of the flags */
		kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
		goto fault_done;
	}

	if ((((fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_READ) &&
	        !(region->flags & KBASE_REG_GPU_RD)) ||
	    (((fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_WRITE) &&
	        !(region->flags & KBASE_REG_GPU_WR)) ||
	    (((fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_EX) &&
	        (region->flags & KBASE_REG_GPU_NX)))
	{
		OSK_PRINT_WARN(OSK_BASE_MMU, "Access permissions don't match: region->flags=0x%x", region->flags);
		kbase_gpu_vm_unlock(kctx);
		kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
		goto fault_done;
	}

	/* find the size we need to grow it by */
	/* we know the result fits in a u32 due to kbase_region_tracker_find_region_enclosing_address
	 * validating the fault_address to be within a u32 from the start_pfn */
	fault_rel_pfn = fault_pfn - region->start_pfn;
	
	if (fault_rel_pfn < region->nr_alloc_pages)
	{
		OSK_PRINT_WARN(OSK_BASE_MMU, "Fault in allocated region of growable TMEM: Ignoring");
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	new_pages = make_multiple(fault_rel_pfn - region->nr_alloc_pages + 1, region->extent);
	if (new_pages + region->nr_alloc_pages > region->nr_pages)
	{
		/* cap to max vsize */
		new_pages = region->nr_pages - region->nr_alloc_pages;
	}
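	/* Worked example (hypothetical numbers): with region->nr_alloc_pages == 10,
	 * fault_rel_pfn == 13 and region->extent == 8, the shortfall is
	 * 13 - 10 + 1 = 4 pages, which make_multiple() rounds up to the next
	 * multiple of the extent, giving new_pages == 8 (capped by the check
	 * above so it never exceeds the region's nr_pages). */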

	if (0 == new_pages)
	{
		/* Duplicate of a fault we've already handled, nothing to do */
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	if (MALI_ERROR_NONE == kbase_alloc_phy_pages_helper(region, new_pages))
	{
		/* alloc success */
		mali_addr64 lock_addr;
		OSK_ASSERT(region->nr_alloc_pages <= region->nr_pages);

		/* AS transaction begin */
		mutex_lock(&faulting_as->transaction_mutex);

		/* Lock the VA region we're about to update */
		lock_addr = lock_region(kbdev, faulting_as->fault_addr >> PAGE_SHIFT, new_pages);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_LO), lock_addr & 0xFFFFFFFFUL, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_HI), lock_addr >> 32, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_LOCK, kctx);

		/* set up the new pages */
		err = kbase_mmu_insert_pages(kctx, region->start_pfn + region->nr_alloc_pages - new_pages,
		                             &region->phy_pages[region->nr_alloc_pages - new_pages],
		                             new_pages, region->flags);
		if(MALI_ERROR_NONE != err)
		{
			/* failed to insert pages, handle as a normal PF */
			mutex_unlock(&faulting_as->transaction_mutex);
			kbase_gpu_vm_unlock(kctx);
			/* The locked VA region will be unlocked and the cache invalidated in here */
			kbase_mmu_report_fault_and_kill(kctx, faulting_as, faulting_as->fault_addr);
			goto fault_done;
		}

#ifdef CONFIG_MALI_GATOR_SUPPORT
		kbase_trace_mali_page_fault_insert_pages(as_no, new_pages);
#endif /* CONFIG_MALI_GATOR_SUPPORT */
		/* clear the irq */
		/* MUST BE BEFORE THE FLUSH/UNLOCK */
		kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), (1UL << as_no), NULL);

		/* flush L2 and unlock the VA (resumes the MMU) */
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
		{
			kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_FLUSH, kctx);
		}
		else
		{
			kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_FLUSH_PT, kctx);
		}

		/* wait for the flush to complete */
		while (kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_STATUS), kctx) & 1);

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630))
		{
			/* Issue an UNLOCK command to ensure that valid page tables are re-read by the GPU after an update.
			 * The FLUSH command should in principle perform all the necessary actions, but bus logs show that
			 * if multiple page faults occur within an 8-page region the MMU does not always re-read the updated
			 * page table entries for later faults (or only partially reads them), and then raises the page
			 * fault IRQ again for the same addresses. The UNLOCK ensures that the MMU cache is flushed so the
			 * updates can be re-read. As the region is now unlocked, two UNLOCK commands must be issued in
			 * order to flush the MMU/uTLB. See PRLAM-8812.
			 */
			kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UNLOCK, kctx);
			kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UNLOCK, kctx);
		}

		mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		/* reenable this in the mask */
		mmu_mask_reenable(kbdev, kctx, faulting_as);
		kbase_gpu_vm_unlock(kctx);
	}