Example #1
int
elf_cpu_load_file(linker_file_t lf)
{

	/*
	 * The pmap code does not do an icache sync upon establishing executable
	 * mappings in the kernel pmap.  It's an optimization based on the fact
	 * that kernel memory allocations always have EXECUTABLE protection even
	 * when the memory isn't going to hold executable code.  The only time
	 * kernel memory holding instructions does need a sync is after loading
	 * a kernel module, and that's when this function gets called.
	 *
	 * This syncs data and instruction caches after loading a module.  We
	 * don't worry about the kernel itself (lf->id is 1) as locore.S did
	 * that on entry.  Even if data cache maintenance was done by IO code,
	 * the relocation fixup process creates dirty cache entries that we must
	 * write back before doing icache sync. The instruction cache sync also
	 * invalidates the branch predictor cache on platforms that have one.
	 */
	if (lf->id == 1)
		return (0);
#if __ARM_ARCH >= 6
	dcache_wb_pou((vm_offset_t)lf->address, (vm_size_t)lf->size);
	icache_inv_all();
#else
	cpu_dcache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
	cpu_l2cache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
	cpu_icache_sync_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
#endif
	return (0);
}
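
The pattern in both branches is the generic one for making freshly written instructions visible: write dirty data back to the point of unification, then invalidate the instruction cache (and the branch predictor where present). Outside the kernel, GCC and Clang expose the same sequence as a builtin; the sketch below shows that portable form, where flush_code_range is a hypothetical helper name, not something from the example above.

#include <stddef.h>

/*
 * Minimal portable sketch: after writing instructions through a data
 * pointer, write back the dcache and invalidate the icache over the
 * range before jumping to it.  The builtin is provided by both GCC
 * and Clang.
 */
static void
flush_code_range(void *start, size_t len)
{
	__builtin___clear_cache((char *)start, (char *)start + len);
}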
Example #2
/*
 * fiq_installhandler:
 *
 *	Actually install the FIQ handler down at the FIQ vector.
 *
 *	Note: If the FIQ is invoked via an extra layer of
 *	indirection, the actual FIQ code store lives in the
 *	data segment, so there is no need to manipulate
 *	the vector page's protection.
 */
static void
fiq_installhandler(void *func, size_t size)
{
#if !defined(__ARM_FIQ_INDIRECT)
	vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE);
#endif

	memcpy(vector_page + fiqvector, func, size);

#if !defined(__ARM_FIQ_INDIRECT)
	vector_page_setprot(VM_PROT_READ);
	cpu_icache_sync_range((vm_offset_t)(vector_page + fiqvector), size);
#endif
}
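
One detail worth noting in this variant: the cpu_icache_sync_range() call sits inside the !__ARM_FIQ_INDIRECT block, so when the FIQ is dispatched through the extra indirection the freshly copied handler never gets an icache sync, even though it is still executed from its new location. Example #3 below shows the same function in another tree, where the sync was moved past the #endif so it runs unconditionally.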
Example #3
/*
 * fiq_installhandler:
 *
 *	Actually install the FIQ handler down at the FIQ vector.
 *
 *	Note: If the FIQ is invoked via an extra layer of
 *	indirection, the actual FIQ code store lives in the
 *	data segment, so there is no need to manipulate
 *	the vector page's protection.
 */
static void
fiq_installhandler(void *func, size_t size)
{
#if !defined(__ARM_FIQ_INDIRECT)
	vector_page_setprot(PROT_READ | PROT_WRITE | PROT_EXEC);
#endif

	memcpy(fiqvector, func, size);

#if !defined(__ARM_FIQ_INDIRECT)
	vector_page_setprot(PROT_READ | PROT_EXEC);
#endif
	cpu_icache_sync_range((vaddr_t) fiqvector, size);
}
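
Both variants follow the same four-step sequence: make the target page writable, patch in the new code, restore the tighter protection, then sync the icache. The same shape appears in userland JITs and code patchers; below is a minimal sketch using POSIX mprotect(2) and the compiler cache-clear builtin. patch_code is a hypothetical helper, and for simplicity it assumes the patched range lies within a single page.

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

/*
 * Sketch only: flip an executable page to read/write, copy the new
 * code in, restore read/execute, then sync the icache.  Assumes
 * [dst, dst + len) does not cross a page boundary and that pagesz
 * is a power of two.
 */
static int
patch_code(void *dst, const void *src, size_t len, size_t pagesz)
{
	uintptr_t page = (uintptr_t)dst & ~(uintptr_t)(pagesz - 1);

	if (mprotect((void *)page, pagesz, PROT_READ | PROT_WRITE) != 0)
		return (-1);
	memcpy(dst, src, len);
	if (mprotect((void *)page, pagesz, PROT_READ | PROT_EXEC) != 0)
		return (-1);
	__builtin___clear_cache((char *)dst, (char *)dst + len);
	return (0);
}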
Example #4
static int
arm32_sync_icache(struct thread *td, void *args)
{
	struct arm_sync_icache_args ua;
	int error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	cpu_icache_sync_range(ua.addr, ua.len);

	td->td_retval[0] = 0;
	return (0);
}
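
This early version trusts the caller completely: whatever address and length userland passes in go straight to cpu_icache_sync_range(). Example #5 below is the hardened follow-up, which rejects zero lengths and wrapped or out-of-range spans, and on ARMv6+ turns a failed sync of an unmapped page into a SIGSEGV for the calling thread.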
Example #5
static int
arm32_sync_icache(struct thread *td, void *args)
{
	struct arm_sync_icache_args ua;
	int error;
	ksiginfo_t ksi;
#if __ARM_ARCH >= 6
	vm_offset_t rv;
#endif

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	if (ua.len == 0) {
		td->td_retval[0] = 0;
		return (0);
	}

	/*
	 * Validate arguments.  Address and length are unsigned, so
	 * overflow can be detected with a simple wraparound check.
	 */
	if (((ua.addr + ua.len) < ua.addr) ||
	    ((ua.addr + ua.len) > VM_MAXUSER_ADDRESS)) {
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_code = SEGV_ACCERR;
		ksi.ksi_addr = (void *)max(ua.addr, VM_MAXUSER_ADDRESS);
		trapsignal(td, &ksi);
		return (EINVAL);
	}

#if __ARM_ARCH >= 6
	rv = sync_icache(ua.addr, ua.len);
	if (rv != 1) {
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_code = SEGV_MAPERR;
		ksi.ksi_addr = (void *)rv;
		trapsignal(td, &ksi);
		return (EINVAL);
	}
#else
	cpu_icache_sync_range(ua.addr, ua.len);
#endif

	td->td_retval[0] = 0;
	return (0);
}
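
From userland this handler is reached through sysarch(2). A minimal sketch of the calling side on FreeBSD/arm, assuming the ARM_SYNC_ICACHE operation and struct arm_sync_icache_args exported by machine/sysarch.h:

#include <machine/sysarch.h>
#include <stddef.h>
#include <stdint.h>

/* Ask the kernel to sync the icache for a freshly written code buffer. */
static int
user_sync_icache(void *addr, size_t len)
{
	struct arm_sync_icache_args ua;

	ua.addr = (uintptr_t)addr;
	ua.len = len;
	return (sysarch(ARM_SYNC_ICACHE, &ua));
}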
Example #6
int
linux_sys_cacheflush(struct proc *p, void *v, register_t *retval)
{
#ifndef acorn26
	struct linux_sys_cacheflush_args /* {
		syscallarg(uintptr_t) from;
		syscallarg(uintptr_t) to;
	} */ *uap = v;

	cpu_icache_sync_range(SCARG(uap, from),
	    SCARG(uap, to) - SCARG(uap, from) + 1);
#endif
	*retval = 0;
	return 0;
}
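
The handler treats `to` as an inclusive end address, hence the `+ 1` when converting to a length for cpu_icache_sync_range(). On Linux/ARM itself the equivalent operation is the private cacheflush syscall; a sketch of that native calling side, assuming __ARM_NR_cacheflush from the Linux kernel headers (the third argument, flags, must currently be zero):

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>

/* Flush from start to end so newly written instructions become visible. */
static void
arm_cacheflush(void *start, void *end)
{
	syscall(__ARM_NR_cacheflush, start, end, 0);
}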
Example #7
/*
 * arm32_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
void
arm32_vector_init(vaddr_t va, int which)
{
	extern unsigned int page0[], page0_data[];
	unsigned int *vectors = (unsigned int *) va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}
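
The sync length of (ARM_NVEC * 2) * sizeof(u_int) covers both halves of what the loop copied: one instruction word per vector, typically an ldr pc, [pc, #offset] that indirects through the matching data word, plus that data word holding the handler address. A typical MD caller takes over every vector at once; a sketch of such a call, assuming the ARM_VEC_ALL mask defined alongside ARM_NVEC:

	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);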
Example #8
/*
 * Write bytes to kernel address space for debugger.
 */
int
db_write_bytes(vm_offset_t addr, size_t size, char *data)
{
	char *dst;
	size_t remain;

	dst = (char *)addr;
	for (remain = size; remain > 0; remain--) {
		if (db_validate_address((vm_offset_t)dst)) {
			db_printf("address %p is invalid\n", dst);
			return (-1);
		}
		*dst++ = *data++;
	}
	dsb(ish);

	/*
	 * Clean D-cache and invalidate I-cache.  The loop uses a separate
	 * counter so that "size" still holds the full length of the write
	 * when these ranges are computed.
	 */
	cpu_dcache_wb_range(addr, (vm_size_t)size);
	cpu_icache_sync_range(addr, (vm_size_t)size);

	return (0);
}
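
The ordering here is deliberate: the dsb(ish) ensures the debugger's stores have completed before any cache maintenance starts, the D-cache clean pushes the new bytes out to where instruction fetches can observe them, and the I-cache sync drops any stale instructions for the range. The dsb(ish) spelling suggests this variant comes from an ARMv7 or arm64 port, where that barrier is available as an instruction.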
예제 #9
0
/*
 * arm32_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
void
arm32_vector_init(vaddr_t va, int which)
{
#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
	/*
	 * If this processor has the security extension, don't bother
	 * to move/map the vector page.  Simply point VBAR to the copy
	 * that exists in the .text segment.
	 */
#ifndef ARM_HAS_VBAR
	if (va == ARM_VECTORS_LOW
	    && (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0) {
#endif
		extern const uint32_t page0rel[];
		vector_page = (vaddr_t)page0rel;
		KASSERT((vector_page & 0x1f) == 0);
		armreg_vbar_write(vector_page);
#ifdef VERBOSE_INIT_ARM
		printf(" vbar=%p", page0rel);
#endif
		cpu_control(CPU_CONTROL_VECRELOC, 0);
		return;
#ifndef ARM_HAS_VBAR
	}
#endif
#endif
#ifndef ARM_HAS_VBAR
	if (CPU_IS_PRIMARY(curcpu())) {
		extern unsigned int page0[], page0_data[];
		unsigned int *vectors = (unsigned int *) va;
		unsigned int *vectors_data = vectors + (page0_data - page0);
		int vec;

		/*
		 * Loop through the vectors we're taking over, and copy the
		 * vector's insn and data word.
		 */
		for (vec = 0; vec < ARM_NVEC; vec++) {
			if ((which & (1 << vec)) == 0) {
				/* Don't want to take over this vector. */
				continue;
			}
			vectors[vec] = page0[vec];
			vectors_data[vec] = page0_data[vec];
		}

		/* Now sync the vectors. */
		cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

		vector_page = va;
	}

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
#endif
}
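
This later revision adds two things over Example #7. On cores with the Security Extensions, or anything defining ARM_HAS_VBAR, it skips the vector-page copy entirely and points VBAR at the page0rel copy already sitting in .text, clearing CPU_CONTROL_VECRELOC since high vectors are no longer in use; because that copy is never written at runtime, no icache maintenance is needed on that path. And the copy-and-sync path is now restricted to the primary CPU, so secondary CPUs do not redundantly rewrite the shared vector page.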