Example #1
void new_xen_thread(struct vcpu *v,
	unsigned long start_pc,
	unsigned long context)
{
	void *domain_stack;
	struct cpu_info *ci;

	domain_stack = pages_m_alloc(STACK_ORDER);
	if(domain_stack == NULL) {
		return;
	}

	ci = (struct cpu_info *)domain_stack;
	ci->vcpu = v;
	ci->vspsr = PSR_MODE_SVC;
	ci->vsp = 0;
	ci->vdacr = DOMAIN_HYPERVISOR_VALUE;

	domain_stack += STACK_SIZE;

	v->arch.guest_context.user_regs.r13 = (unsigned long) domain_stack;
	v->arch.guest_context.user_regs.r14 = start_pc;

   /* Fix me. */
	v->arch.guest_context.sys_regs.dacr = 0xdf /* DOMAIN_HYPERVISOR_VALUE */;
	v->arch.guest_context.sys_regs.cr = get_cr();
}
Example #2
static int nommu_v7_vectors_init(void)
{
	void *vectors;
	u32 cr;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return 0;

	/*
	 * High vectors cannot be re-mapped, so we have to use normal
	 * vectors
	 */
	cr = get_cr();
	cr &= ~CR_V;
	set_cr(cr);

	arm_fixup_vectors();

	vectors = xmemalign(PAGE_SIZE, PAGE_SIZE);
	memset(vectors, 0, PAGE_SIZE);
	memcpy(vectors, __exceptions_start, __exceptions_size);

	set_vbar((unsigned int)vectors);

	return 0;
}
Example #3
File: exception.c Project: marcan/spmp
void exception_initialize(void)
{
	exc_setup_stack();
	u32 cr = get_cr();
	cr |= 0x2; // Data alignment fault checking enable
	set_cr(cr);
}
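The constant 0x2 set above is bit 1 of the ARM control register, the A (alignment check) bit. A minimal equivalent sketch, assuming a named CR_A definition is available (hypothetical helper, not from the source, which uses the literal):

void exception_enable_alignment_check(void)
{
	u32 cr = get_cr();
	cr |= CR_A;	/* same bit as the literal 0x2 above: data alignment fault checking */
	set_cr(cr);
}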
Example #4
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
#ifndef CONFIG_SQUASHFS_DEBUGGER_AUTO_DIAGNOSE
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example #5
File: cpu.c Project: cpdesign/barebox
/**
 * Disable processor's instruction cache
 */
void icache_disable(void)
{
	u32 r;

	r = get_cr();
	r &= ~CR_I;
	set_cr(r);
}
Example #6
File: cpu.c Project: cpdesign/barebox
/**
 * Enable processor's instruction cache
 */
void icache_enable(void)
{
	u32 r;

	r = get_cr();
	r |= CR_I;
	set_cr(r);
}
Example #7
File: exception.c Project: marcan/spmp
void exc_handler(u32 type, u32 spsr, u32 *regs)
{
	if (type > 8) type = 8;
	debug_printf("\nException %d (%s):\n", type, exceptions[type]);

	u32 pc, fsr;

	switch(type) {
		case 1: // UND
		case 2: // SWI
		case 3: // INSTR ABORT
		case 7: // FIQ
			pc = regs[15] - 4;
			break;
		case 4: // DATA ABORT
			pc = regs[15] - 8;
			break;
		default:
			pc = regs[15];
			break;
	}

	debug_printf("Registers (%p):\n", regs);
	debug_printf("  R0-R3: %08x %08x %08x %08x\n", regs[0], regs[1], regs[2], regs[3]);
	debug_printf("  R4-R7: %08x %08x %08x %08x\n", regs[4], regs[5], regs[6], regs[7]);
	debug_printf(" R8-R11: %08x %08x %08x %08x\n", regs[8], regs[9], regs[10], regs[11]);
	debug_printf("R12-R15: %08x %08x %08x %08x\n", regs[12], regs[13], regs[14], pc);

	debug_printf("SPSR: %08x\n", spsr);
	debug_printf("CPSR: %08x\n", get_cpsr());
	debug_printf("CR:   %08x\n", get_cr());
	debug_printf("TTBR: %08x\n", get_ttbr());
	debug_printf("DACR: %08x\n", get_dacr());

	switch (type) {
		case 3: // INSTR ABORT
		case 4: // DATA ABORT 
			if(type == 3)
				fsr = get_ifsr();
			else
				fsr = get_dfsr();
			debug_printf("Abort type: %s\n", aborts[fsr&0xf]);
			if(domvalid[fsr&0xf])
				debug_printf("Domain: %d\n", (fsr>>4)&0xf);
			if(type == 4)
				debug_printf("Address: 0x%08x\n", get_far());
		break;
		default: break;
	}

	if(type != 3) {
		debug_printf("Code dump:\n");
		debug_printf("%08x:  %08x %08x %08x %08x\n", pc-16, read32(pc-16), read32(pc-12), read32(pc-8), read32(pc-4));
		debug_printf("%08x: *%08x %08x %08x %08x\n", pc, read32(pc), read32(pc+4), read32(pc+8), read32(pc+12));
		debug_printf("%08x:  %08x %08x %08x %08x\n", pc+16, read32(pc+16), read32(pc+20), read32(pc+24), read32(pc+28));
	}
	panic2(0, PANIC_EXCEPTION);
}
Example #8
File: spl_boot.c Project: JamesAng/ub
/*
 * Set/clear program flow prediction and return the previous state.
 */
static int config_branch_prediction(int set_cr_z)
{
	unsigned int cr;

	/* System Control Register: 11th bit Z Branch prediction enable */
	cr = get_cr();
	set_cr(set_cr_z ? cr | CR_Z : cr & ~CR_Z);

	return cr & CR_Z;
}
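Because config_branch_prediction() returns the previous Z bit, a caller can disable prediction around a sensitive sequence and then put back whatever was configured before. A hypothetical usage sketch (the surrounding code is not from the source):

	int old_z;

	old_z = config_branch_prediction(0);	/* clear CR_Z, remember the old state */
	/* ... errata workaround or other prediction-sensitive code ... */
	config_branch_prediction(old_z);	/* restore the previous setting */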
Example #9
File: memory.c Project: BhaaLseN/sneek
void mem_shutdown(void)
{
	u32 cookie = irq_kill();
	_dc_flush();
	_drain_write_buffer();
	u32 cr = get_cr();
	cr &= ~(CR_MMU | CR_DCACHE | CR_ICACHE); //disable ICACHE, DCACHE, MMU
	set_cr(cr);
	_ic_inval();
	_dc_inval();
	_tlb_inval();
	irq_restore(cookie);
}
Example #10
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See <asm/pgtable.h> for more information.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
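Callers normally start from a physical address rather than a page frame number; the usual wrapper splits the address into a PFN plus an in-page offset before calling the function above. A sketch of such a wrapper, assuming the standard __phys_to_pfn() and PAGE_MASK helpers:

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);
	unsigned long last_addr = phys_addr + size - 1;

	/* Reject zero-sized and wrapping requests */
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}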
Example #11
void mmu_early_disable(void)
{
	unsigned int cr;

	cr = get_cr();
	cr &= ~(CR_M | CR_C);

	set_cr(cr);
	v8_flush_dcache_all();
	tlb_invalidate();

	dsb();
	isb();
}
Example #12
File: memory.c Project: BhaaLseN/sneek
void mem_initialize(void)
{
	u32 cr;
	u32 cookie = irq_kill();

	gecko_printf("MEM: cleaning up\n");

	_ic_inval();
	_dc_inval();
	_tlb_inval();

	gecko_printf("MEM: unprotecting memory\n");

	mem_protect(0,NULL,NULL);

	gecko_printf("MEM: mapping sections\n");

	memset32(__page_table, 0, 16384);

	map_section(0x000, 0x000, 0x018, WRITEBACK_CACHE | DOMAIN(0) | AP_RWUSER);
	map_section(0x100, 0x100, 0x040, WRITEBACK_CACHE | DOMAIN(0) | AP_RWUSER);
	map_section(0x0d0, 0x0d0, 0x001, NONBUFFERABLE | DOMAIN(0) | AP_RWUSER);
	map_section(0x0d8, 0x0d8, 0x001, NONBUFFERABLE | DOMAIN(0) | AP_RWUSER);
	map_section(0xfff, 0xfff, 0x001, WRITEBACK_CACHE | DOMAIN(0) | AP_RWUSER);

	set_dacr(0xFFFFFFFF); //manager access for all domains, ignore AP
	set_ttbr((u32)__page_table); //configure translation table

	_drain_write_buffer();

	cr = get_cr();

#ifndef NO_CACHES
	gecko_printf("MEM: enabling caches\n");

	cr |= CR_DCACHE | CR_ICACHE;
	set_cr(cr);

	gecko_printf("MEM: enabling MMU\n");

	cr |= CR_MMU;
	set_cr(cr);
#endif

	gecko_printf("MEM: init done\n");

	irq_restore(cookie);
}
Example #13
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
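adjust_cr() keeps the cached cr_alignment/cr_no_alignment copies in sync with the live register while refusing to touch the A bit, so later alignment-mode switches are not lost. A hypothetical call (not from the source), assuming the usual CR_RR definition:

	/* Request round-robin cache replacement; only bits inside the mask change,
	   and CR_A is always excluded by adjust_cr() itself. */
	adjust_cr(CR_RR, CR_RR);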
Example #14
// Execute EMUL_OP routine
void sheepshaver_cpu::execute_emul_op(uint32 emul_op)
{
	M68kRegisters r68;
	WriteMacInt32(XLM_68K_R25, gpr(25));
	WriteMacInt32(XLM_RUN_MODE, MODE_EMUL_OP);
	for (int i = 0; i < 8; i++)
		r68.d[i] = gpr(8 + i);
	for (int i = 0; i < 7; i++)
		r68.a[i] = gpr(16 + i);
	r68.a[7] = gpr(1);
	uint32 saved_cr = get_cr() & 0xff9fffff; // mask_operand::compute(11, 8)
	uint32 saved_xer = get_xer();
	EmulOp(&r68, gpr(24), emul_op);
	set_cr(saved_cr);
	set_xer(saved_xer);
	for (int i = 0; i < 8; i++)
		gpr(8 + i) = r68.d[i];
	for (int i = 0; i < 7; i++)
		gpr(16 + i) = r68.a[i];
	gpr(1) = r68.a[7];
	WriteMacInt32(XLM_RUN_MODE, MODE_68K);
}
Example #15
File: tc2_pm_psci.c Project: monojo/xu3
static void tc2_pm_psci_power_down(void)
{
	struct psci_power_state power_state;
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	BUG_ON(!psci_ops.cpu_off);

	switch (atomic_dec_return(&tc2_pm_use_count[cpu][cluster])) {
	case 1:
		/*
		 * Overtaken by a power up. Flush caches, exit coherency,
		 * return & fake a reset
		 */
		set_cr(get_cr() & ~CR_C);

		flush_cache_louis();

		asm volatile ("clrex");
		set_auxcr(get_auxcr() & ~(1 << 6));

		return;
	case 0:
		/* A normal request to possibly power down the cluster */
		power_state.id = PSCI_POWER_STATE_ID;
		power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
		power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;

		psci_ops.cpu_off(power_state);

		/* On success this function never returns */
	default:
		/* Any other value is a bug */
		BUG();
	}
}
Example #16
void mmu_early_enable(unsigned long membase, unsigned long memsize,
		      unsigned long ttb)
{
	int el;

	/*
	 * For the early code we only create level 1 pagetables which only
	 * allow for a 1GiB granularity. If our membase is not aligned to that
	 * bail out without enabling the MMU.
	 */
	if (membase & ((1ULL << level2shift(1)) - 1))
		return;

	memset((void *)ttb, 0, GRANULE_SIZE);

	el = current_el();
	set_ttbr_tcr_mair(el, ttb, calc_tcr(el), MEMORY_ATTRIBUTES);
	create_sections((void *)ttb, 0, 0, 1UL << (BITS_PER_VA - 1), UNCACHED_MEM);
	create_sections((void *)ttb, membase, membase, memsize, CACHED_MEM);
	tlb_invalidate();
	isb();
	set_cr(get_cr() | CR_M);
}
Example #17
void new_thread(struct vcpu *v,
	unsigned long start_pc,
	unsigned long start_stack,
	unsigned long start_info)
{
	void *domain_stack;
	struct cpu_info *ci;
	struct cpu_context *cpu_context;

	domain_stack = pages_m_alloc(STACK_ORDER);
	if(domain_stack == NULL) {
		return;
	}

	ci = (struct cpu_info *)domain_stack;
	ci->vcpu = v;
	ci->vspsr = PSR_MODE_USR;
	ci->vsp = 0;
	ci->vdacr = DOMAIN_KERNEL_VALUE;

	domain_stack += (STACK_SIZE - sizeof(struct cpu_context));

	cpu_context = (struct cpu_context *)domain_stack;
	cpu_context->r0 = 0;
	cpu_context->r12 = start_info;
	cpu_context->usp = start_stack;
	cpu_context->ulr = 0;
	cpu_context->ssp = (unsigned long)(domain_stack + sizeof(struct cpu_context));
	cpu_context->pc = start_pc;
	cpu_context->spsr = PSR_MODE_SVC;

	v->arch.guest_context.user_regs.r13 = (unsigned long)domain_stack;
	v->arch.guest_context.user_regs.r14 = return_to_guest;

	v->arch.guest_context.sys_regs.dacr = DOMAIN_KERNEL_VALUE;
	v->arch.guest_context.sys_regs.cr = get_cr();
}
Example #18
File: bs.c Project: klammerj/iverilog
int bitSRead(BitStream *bs,char * file)
{
	FILE * fp;
	int nw,i;
	memset(bs,0,sizeof(*bs));
	dlistInit(&bs->windows);
	fp=fopen(file,"rt");
	assert(fp);
	
	bs->nu=byte_read(fp);
	assert(0==bs->nu);
	
	bs->pe=byte_read(fp);
	assert(bs->pe==0xb7);
	
	for(i=0;i<4;i++)
	{
		bs->cregs[i]=byte_read(fp);
	}
//	bs->idunno=byte_read(fp);
	
	assert(get_cr(28)==0);//reserved==0
	nw=0;
	nw=(byte_read(fp)&0x0ff)<<8;
	nw|=(byte_read(fp)&0x0ff);
	for(i=0;i<nw;i++)
	{
		BitSWindow * w;
		w=window_read(fp);
		dlistInsertLast(&bs->windows,&w->e);
	}
	
	bs->po=byte_read(fp);
	assert(bs->po==0xe7);
	fclose(fp);
	return 0;
}
Example #19
File: bs.c Project: klammerj/iverilog
int bitSWrite(BitStream *bs,char * file)
{
	FILE * fp;
	int nw,i;
	DListE *e;
	fp=fopen(file,"wt");
	assert(fp);
	
	assert(0==bs->nu);
	byte_write(fp,bs->nu);
	
	assert(bs->pe==0xb7);
	byte_write(fp,bs->pe);
	
	assert(get_cr(28)==0);//reserved==0
	for(i=0;i<4;i++)
	{
		byte_write(fp,bs->cregs[i]);
	}
//	byte_write(fp,bs->idunno);
	
	nw=dlistCount(&bs->windows);
	byte_write(fp,(nw>>8)&0x0ff);
	byte_write(fp,nw&0x0ff);
	e=dlistFirst(&bs->windows);
	while(e)
	{
		BitSWindow * w=(BitSWindow *)e;
		window_write(fp,w);
		e=dlistENext(e);
	}
	
	assert(bs->po==0xe7);
	byte_write(fp,bs->po);
	fclose(fp);
	return 0;
}
Example #20
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mapping whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

#if 0 /* HACK - do allow RAM to be mapped, the problems are a bit overrated */
	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
#endif

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example #21
uint32_t authenticate_image(uint32_t ddr_start, uint32_t image_size)
{
    uint32_t load_addr = 0;
    size_t bytes;
    ptrdiff_t ivt_offset = 0;
    int result = 0;
    ulong start;
    hab_rvt_authenticate_image_t *hab_rvt_authenticate_image;
    hab_rvt_entry_t *hab_rvt_entry;
    hab_rvt_exit_t *hab_rvt_exit;

    hab_rvt_authenticate_image = hab_rvt_authenticate_image_p;
    hab_rvt_entry = hab_rvt_entry_p;
    hab_rvt_exit = hab_rvt_exit_p;

    if (is_hab_enabled()) {
        printf("\nAuthenticate image from DDR location 0x%x...\n",
               ddr_start);

        hab_caam_clock_enable(1);

        if (hab_rvt_entry() == HAB_SUCCESS) {
            /* If not already aligned, Align to ALIGN_SIZE */
            ivt_offset = (image_size + ALIGN_SIZE - 1) &
                         ~(ALIGN_SIZE - 1);

            start = ddr_start;
            bytes = ivt_offset + IVT_SIZE + CSF_PAD_SIZE;
#ifdef DEBUG
            printf("\nivt_offset = 0x%x, ivt addr = 0x%x\n",
                   ivt_offset, ddr_start + ivt_offset);
            puts("Dumping IVT\n");
            print_buffer(ddr_start + ivt_offset,
                         (void *)(ddr_start + ivt_offset),
                         4, 0x8, 0);

            puts("Dumping CSF Header\n");
            print_buffer(ddr_start + ivt_offset+IVT_SIZE,
                         (void *)(ddr_start + ivt_offset+IVT_SIZE),
                         4, 0x10, 0);

            get_hab_status();

            puts("\nCalling authenticate_image in ROM\n");
            printf("\tivt_offset = 0x%x\n", ivt_offset);
            printf("\tstart = 0x%08lx\n", start);
            printf("\tbytes = 0x%x\n", bytes);
#endif
            /*
             * If the MMU is enabled, we have to notify the ROM
             * code, or it won't flush the caches when needed.
             * This is done, by setting the "pu_irom_mmu_enabled"
             * word to 1. You can find its address by looking in
             * the ROM map. This is critical for
             * authenticate_image(). If MMU is enabled, without
             * setting this bit, authentication will fail and may
             * crash.
             */
            /* Check MMU enabled */
            if (get_cr() & CR_M) {
                if (is_cpu_type(MXC_CPU_MX6Q) ||
                        is_cpu_type(MXC_CPU_MX6D)) {
                    /*
                     * This won't work on Rev 1.0.0 of
                     * i.MX6Q/D, since their ROM doesn't
                     * do cache flushes. don't think any
                     * exist, so we ignore them.
                     */
                    if (!is_mx6dqp())
                        writel(1, MX6DQ_PU_IROM_MMU_EN_VAR);
                } else if (is_cpu_type(MXC_CPU_MX6DL) ||
                           is_cpu_type(MXC_CPU_MX6SOLO)) {
                    writel(1, MX6DLS_PU_IROM_MMU_EN_VAR);
                } else if (is_cpu_type(MXC_CPU_MX6SL)) {
                    writel(1, MX6SL_PU_IROM_MMU_EN_VAR);
                }
            }

            load_addr = (uint32_t)hab_rvt_authenticate_image(
                            HAB_CID_UBOOT,
                            ivt_offset, (void **)&start,
                            (size_t *)&bytes, NULL);
            if (hab_rvt_exit() != HAB_SUCCESS) {
                puts("hab exit function fail\n");
                load_addr = 0;
            }
        } else {
            puts("hab entry function fail\n");
        }

        hab_caam_clock_enable(0);

        get_hab_status();
    } else {
        puts("hab fuse not enabled\n");
    }

    if ((!is_hab_enabled()) || (load_addr != 0))
        result = 1;

    return result;
}
Example #22
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (pfn_valid(pfn)) {
		printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory.  This leads\n"
		       KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
		       KERN_WARNING "will fail in the next kernel release.  Please fix your driver.\n");
		WARN_ON(1);
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example #23
/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
	return cpu_architecture() >= CPU_ARCH_ARMv6 && get_cr() & CR_U;
}
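A check like this is typically used to decide whether unaligned user accesses can simply be allowed (the v6 model with the U bit set handles them in hardware) or must be fixed up in software. A hypothetical sketch of such a policy helper (not from the source):

static int unaligned_needs_fixup(void)
{
	if (cpu_is_v6_unaligned())
		return 0;	/* hardware performs unaligned loads/stores */
	return 1;		/* pre-v6 model: software fixup (or fault) required */
}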
Example #24
File: cpu.c Project: cpdesign/barebox
/**
 * Detect processor's current instruction cache status
 * @return 0=disabled, 1=enabled
 */
int icache_status(void)
{
	return (get_cr () & CR_I) != 0;
}
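Together with icache_enable()/icache_disable() from the earlier barebox examples, this status helper lets code restore the exact previous cache state. A hypothetical usage sketch (not from the source):

	int was_enabled = icache_status();

	if (was_enabled)
		icache_disable();
	/* ... patch executable code in memory ... */
	if (was_enabled)
		icache_enable();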
Example #25
File: bs.c Project: klammerj/iverilog
int bitSGCR(BitStream *bs,int n)
{
	assert(n>=0);
	assert(n<=31);
	return get_cr(n);
}
Example #26
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
                                        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
    const struct mem_type *type;
    int err;
    unsigned long addr;
    struct vm_struct * area;

#ifndef CONFIG_ARM_LPAE
    if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
        return NULL;
#endif

    type = get_mem_type(mtype);
    if (!type)
        return NULL;

    size = PAGE_ALIGN(offset + size);

    read_lock(&vmlist_lock);
    for (area = vmlist; area; area = area->next) {
        if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
            break;
        if (!(area->flags & VM_ARM_STATIC_MAPPING))
            continue;
        if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
            continue;
        if (__phys_to_pfn(area->phys_addr) > pfn ||
                __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
            continue;

        read_unlock(&vmlist_lock);
        addr = (unsigned long)area->addr;
        addr += __pfn_to_phys(pfn) - area->phys_addr;
        return (void __iomem *) (offset + addr);
    }
    read_unlock(&vmlist_lock);

    if (WARN_ON(pfn_valid(pfn)))
        return NULL;

    area = get_vm_area_caller(size, VM_IOREMAP, caller);
    if (!area)
        return NULL;
    addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
    if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
             cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_supersections(addr, pfn, size, type);
    } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_sections(addr, pfn, size, type);
    } else
#endif
        err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                 __pgprot(type->prot_pte));

    if (err) {
        vunmap((void *)addr);
        return NULL;
    }

    flush_cache_vmap(addr, addr + size);
    return (void __iomem *) (offset + addr);
}