/*
 * Initialize a SYSV (32-bit PowerPC) closure: write a small trampoline
 * into closure->tramp that loads the address of ffi_closure_SYSV and the
 * codeloc context, then branches to it, and record cif/fun/user_data on
 * the closure for the dispatcher.
 */
ffi_status FFI_HIDDEN
ffi_prep_closure_loc_sysv (ffi_closure *closure,
                           ffi_cif *cif,
                           void (*fun) (ffi_cif *, void *, void **, void *),
                           void *user_data,
                           void *codeloc)
{
  unsigned int *tramp;

  /* Reject ABIs outside the valid SYSV range.  */
  if (cif->abi < FFI_SYSV || cif->abi >= FFI_LAST_ABI)
    return FFI_BAD_ABI;

  tramp = (unsigned int *) &closure->tramp[0];
  /* The "bl" jumps over the two data words at tramp[2]/tramp[3] and
     leaves their address in the link register, from which the code at
     offset 0x10 loads the target function and context.  */
  tramp[0] = 0x7c0802a6;  /*   mflr   r0 */
  tramp[1] = 0x4800000d;  /*   bl     10 <trampoline_initial+0x10> */
  tramp[4] = 0x7d6802a6;  /*   mflr   r11 */
  tramp[5] = 0x7c0803a6;  /*   mtlr   r0 */
  tramp[6] = 0x800b0000;  /*   lwz    r0,0(r11) */
  tramp[7] = 0x816b0004;  /*   lwz    r11,4(r11) */
  tramp[8] = 0x7c0903a6;  /*   mtctr  r0 */
  tramp[9] = 0x4e800420;  /*   bctr */
  *(void **) &tramp[2] = (void *) ffi_closure_SYSV; /* function */
  *(void **) &tramp[3] = codeloc;                   /* context */

  /* Flush the icache.  */
  flush_icache ((char *)tramp, (char *)codeloc, FFI_TRAMPOLINE_SIZE);

  closure->cif = cif;
  closure->fun = fun;
  closure->user_data = user_data;

  return FFI_OK;
}
/*
 * Flush the instruction cache for [address, address+length).
 *
 * On some systems iflush is emulated in the kernel and this is
 * unnecessary; setting CMUCL_NO_SPARC_IFLUSH in the environment skips
 * the flush.  CMUCL_TRACE_SPARC_IFLUSH makes every flush print a trace
 * line.  Both environment variables are consulted only once and the
 * answers cached in function-local statics.
 */
void os_flush_icache(os_vm_address_t address, os_vm_size_t length)
{
#ifndef i386
    static int flushit = -1;

    if (flushit == -1)
        flushit = (getenv("CMUCL_NO_SPARC_IFLUSH") == 0);

    if (!flushit)
        return;

    {
        static int traceit = -1;

        if (traceit == -1)
            traceit = (getenv("CMUCL_TRACE_SPARC_IFLUSH") != 0);
        if (traceit)
            fprintf(stderr, ";;;iflush %p - %lx\n", (void *) address, length);
    }

    flush_icache((unsigned int *) address, length);
#endif
}
/*
 * Entry point of the single-image PBL: relocate ourselves if necessary,
 * move the appended piggydata out of the way of the decompression
 * target when they would overlap, decompress barebox to its runtime
 * address and jump to it.  Never returns.
 */
__noreturn void barebox_single_pbl_start(unsigned long membase,
		unsigned long memsize, void *boarddata)
{
	uint32_t offset;
	uint32_t pg_start, pg_end, pg_len;
	void __noreturn (*barebox)(unsigned long, unsigned long, void *);
	uint32_t endmem = membase + memsize;
	unsigned long barebox_base;

	endmem -= STACK_SIZE; /* stack */

	if (IS_ENABLED(CONFIG_PBL_RELOCATABLE))
		relocate_to_current_adr();

	/* Get offset between linked address and runtime address */
	offset = get_runtime_offset();

	/* Runtime location of the compressed payload (piggydata). */
	pg_start = (uint32_t)&input_data - offset;
	pg_end = (uint32_t)&input_data_end - offset;
	pg_len = pg_end - pg_start;

	if (IS_ENABLED(CONFIG_RELOCATABLE))
		barebox_base = arm_barebox_image_place(membase + memsize);
	else
		barebox_base = TEXT_BASE;

	/* Decompression would overwrite its own input (or a copy is
	   forced): move the piggydata to its link address first. */
	if (offset &&
	    (IS_ENABLED(CONFIG_PBL_FORCE_PIGGYDATA_COPY) ||
	     region_overlap(pg_start, pg_len, barebox_base, pg_len * 4))) {
		/*
		 * copy piggydata binary to its link address
		 */
		memcpy(&input_data, (void *)pg_start, pg_len);
		pg_start = (uint32_t)&input_data;
	}

	setup_c();

	if (IS_ENABLED(CONFIG_MMU_EARLY)) {
		endmem &= ~0x3fff;	/* 16k-align the translation table */
		endmem -= SZ_16K;	/* ttb */
		mmu_early_enable(membase, memsize, endmem);
	}

	endmem -= SZ_128K;	/* early malloc */
	free_mem_ptr = endmem;
	free_mem_end_ptr = free_mem_ptr + SZ_128K;

	pbl_barebox_uncompress((void*)barebox_base, (void *)pg_start, pg_len);

	/* Make the freshly written image visible to instruction fetch. */
	arm_early_mmu_cache_flush();
	flush_icache();

	/* Thumb-2 entry points carry bit 0 set. */
	if (IS_ENABLED(CONFIG_THUMB2_BAREBOX))
		barebox = (void *)(barebox_base + 1);
	else
		barebox = (void *)barebox_base;

	barebox(membase, memsize, boarddata);
}
/*
 * Drain `sz` bytes of an OUT transfer from endpoint `ep`'s FIFO into the
 * current out_data buffer, then — once the whole payload has arrived —
 * execute the pending command.
 */
static void read_out_data(unsigned ep, unsigned sz)
{
	unsigned dwords = DIV_ROUND_UP(sz, 4);
	uint32_t val;

	while (dwords--) {
		val = read_ep_fifo(ep);
		if (out_data.size >= 4) {
			/* Whole 32-bit word fits in the buffer. */
			debug("<< 0x");
			debug_hex(val, 8);
			debug("\n");
			*((volatile uint32_t *)out_data.data) = val;
			out_data.data += 4;
			out_data.size -= 4;
		} else
			/* Buffer tail shorter than a word: store the
			   remaining bytes one at a time, low byte first. */
			while (out_data.size > 0) {
				debug("<< 0x");
				debug_hex(val & 0xff, 2);
				debug("\n");
				*((volatile uint8_t *)out_data.data) = val;
				val >>= 8;
				out_data.data++;
				out_data.size--;
			}
	}

	/* Payload not complete yet; wait for more FIFO data. */
	if (out_data.size)
		return;

	/* Entire payload received: dispatch on the pending request. */
	switch (cmd) {
	case FW_REQ_MEM_SET:
		memset(cmd_data.mem_set.base, cmd_data.mem_set.args.c,
		       cmd_data.mem_set.args.length);
		break;
	case FW_REQ_CACHE_FLUSH:
		switch (cmd_data.cache_flush.cache) {
		case CACHE_D:
			flush_dcache(cmd_data.cache_flush.args.base,
				     cmd_data.cache_flush.args.size);
			break;
		case CACHE_I:
			flush_icache(cmd_data.cache_flush.args.base,
				     cmd_data.cache_flush.args.size);
			break;
		}
		break;
	case FW_REQ_MTC0:
		dynamic_mtc0(cmd_data.mtc0.reg, cmd_data.mtc0.sel,
			     cmd_data.mtc0.value);
		break;
	}
}
/**
 * m68k_setup_user_interrupt
 * @vec: first user vector interrupt to handle
 * @cnt: number of active user vector interrupts
 *
 * Activates @cnt user vector interrupts starting at vector @vec; only
 * after this can they be requested (note: this is different from auto
 * vector interrupts).
 */
void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt)
{
	unsigned int irq;

	BUG_ON(IRQ_USER + cnt > NR_IRQS);

	m68k_first_user_vec = vec;
	for (irq = IRQ_USER; irq < IRQ_USER + cnt; irq++)
		irq_set_chip(irq, &user_irq_chip);

	/* Patch the vector-to-irq translation in the entry code, then
	   make the modified instructions visible. */
	*user_irqvec_fixup = vec - IRQ_USER;
	flush_icache();
}
/*
 * PBL decompression entry: locate the compressed barebox payload that is
 * appended after __image_end (a 32-bit length word followed by the
 * data), decompress it to its destination and jump there.
 */
static void noinline uncompress(uint32_t membase, uint32_t memsize,
				uint32_t boarddata)
{
	uint32_t offset;
	uint32_t pg_len;
	void __noreturn (*barebox)(uint32_t, uint32_t, uint32_t);
	uint32_t endmem = membase + memsize;
	unsigned long barebox_base;
	uint32_t *ptr;
	void *pg_start;

	arm_early_mmu_cache_invalidate();

	endmem -= STACK_SIZE;	/* stack */

	if (IS_ENABLED(CONFIG_PBL_RELOCATABLE))
		relocate_to_current_adr();

	/* Get offset between linked address and runtime address */
	/* NOTE(review): offset is computed but never used in this
	   function — confirm whether it is still needed. */
	offset = get_runtime_offset();

	if (IS_ENABLED(CONFIG_RELOCATABLE))
		barebox_base = arm_barebox_image_place(membase + memsize);
	else
		barebox_base = TEXT_BASE;

	setup_c();

	if (IS_ENABLED(CONFIG_MMU_EARLY)) {
		endmem &= ~0x3fff;	/* 16k-align the translation table */
		endmem -= SZ_16K;	/* ttb */
		mmu_early_enable(membase, memsize, endmem);
	}

	endmem -= SZ_128K;	/* early malloc */
	free_mem_ptr = endmem;
	free_mem_end_ptr = free_mem_ptr + SZ_128K;

	/* __image_end holds the payload length word; the data follows. */
	ptr = (void *)__image_end;
	pg_start = ptr + 1;
	pg_len = *(ptr);

	pbl_barebox_uncompress((void*)barebox_base, pg_start, pg_len);

	/* Make the freshly written image visible to instruction fetch. */
	arm_early_mmu_cache_flush();
	flush_icache();

	/* Thumb-2 entry points carry bit 0 set. */
	if (IS_ENABLED(CONFIG_THUMB2_BAREBOX))
		barebox = (void *)(barebox_base + 1);
	else
		barebox = (void *)barebox_base;

	barebox(membase, memsize, boarddata);
}
/**
 * m68k_setup_user_interrupt
 * @vec: first user vector interrupt to handle
 * @cnt: number of active user vector interrupts
 * @handler: optional handler called from user vector interrupts
 *
 * Activates @cnt user vector interrupts starting at vector @vec; only
 * after this can they be requested (note: this is different from auto
 * vector interrupts).  If @handler is non-NULL it is installed in place
 * of the default m68k_handle_int() and will be called with irq numbers
 * starting from IRQ_USER.
 */
void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
				      void (*handler)(unsigned int, struct pt_regs *))
{
	unsigned int n;

	m68k_first_user_vec = vec;
	for (n = 0; n < cnt; n++)
		irq_controller[IRQ_USER + n] = &user_irq_controller;

	/* Patch the entry code: vector base and, optionally, the handler;
	   then make the modified instructions visible. */
	*user_irqvec_fixup = vec - IRQ_USER;
	if (handler != NULL)
		*user_irqhandler_fixup = (u32)handler;
	flush_icache();
}
/* Re-point a callback stub at the current compiled code of its word,
   then flush the icache over the rewritten stub. */
void callback_heap::update(callback *stub)
{
	/* CALLBACK_STUB describes the stub template: element 1 is the
	   relocation class, element 3 the offset of the patch site inside
	   the stub (presumably the branch instruction — verify against
	   the template's definition). */
	tagged<array> code_template(parent->userenv[CALLBACK_STUB]);
	cell rel_class = untag_fixnum(array_nth(code_template.untagged(),1));
	cell offset = untag_fixnum(array_nth(code_template.untagged(),3));

	/* stub + 1 / stub->compiled + 1 skip the respective headers to
	   reach the executable payloads. */
	parent->store_address_in_code_block(rel_class,
		(cell)(stub + 1) + offset,
		(cell)(stub->compiled + 1));

	flush_icache((cell)stub,stub->size);
}
/*
 * Build a closure trampoline.  On POWERPC64 the function descriptor of
 * ffi_closure_LINUX64 is copied and its environment word replaced with
 * codeloc; on 32-bit SYSV a small instruction sequence is written into
 * closure->tramp.  cif/fun/user_data are recorded for the dispatcher.
 */
ffi_status
ffi_prep_closure_loc (ffi_closure *closure,
		      ffi_cif *cif,
		      void (*fun) (ffi_cif *, void *, void **, void *),
		      void *user_data,
		      void *codeloc)
{
#ifdef POWERPC64
  void **tramp = (void **) &closure->tramp[0];

  if (cif->abi != FFI_LINUX64)
    return FFI_BAD_ABI;
  /* Copy function address and TOC from ffi_closure_LINUX64.  */
  memcpy (tramp, (char *) ffi_closure_LINUX64, 16);
  tramp[2] = codeloc;
#else
  unsigned int *tramp;

  if (! (cif->abi == FFI_GCC_SYSV
	 || cif->abi == FFI_SYSV
	 || cif->abi == FFI_LINUX
	 || cif->abi == FFI_LINUX_SOFT_FLOAT))
    return FFI_BAD_ABI;

  tramp = (unsigned int *) &closure->tramp[0];
  /* The "bl" jumps over the two data words at tramp[2]/tramp[3] and
     leaves their address in the link register.  */
  tramp[0] = 0x7c0802a6;  /*   mflr   r0 */
  tramp[1] = 0x4800000d;  /*   bl     10 <trampoline_initial+0x10> */
  tramp[4] = 0x7d6802a6;  /*   mflr   r11 */
  tramp[5] = 0x7c0803a6;  /*   mtlr   r0 */
  tramp[6] = 0x800b0000;  /*   lwz    r0,0(r11) */
  tramp[7] = 0x816b0004;  /*   lwz    r11,4(r11) */
  tramp[8] = 0x7c0903a6;  /*   mtctr  r0 */
  tramp[9] = 0x4e800420;  /*   bctr */
  *(void **) &tramp[2] = (void *) ffi_closure_SYSV; /* function */
  *(void **) &tramp[3] = codeloc;                   /* context */

  /* Flush the icache.  */
  flush_icache ((char *)tramp, (char *)codeloc, FFI_TRAMPOLINE_SIZE);
#endif

  closure->cif = cif;
  closure->fun = fun;
  closure->user_data = user_data;

  return FFI_OK;
}
/*
 * Boot a NIOS2 Linux kernel: resolve the optional device tree, flush
 * the caches and call the kernel entry point.  Returns 1 only when the
 * requested bootm stage is not handled (the kernel call never returns).
 */
int do_bootm_linux(int flag, int argc, char * const argv[],
		   bootm_headers_t *images)
{
	void (*kernel)(int, int, int, char *) = (void *)images->ep;
	char *commandline = getenv("bootargs");
	ulong initrd_start = images->rd_start;
	ulong initrd_end = images->rd_end;
	char *of_flat_tree = NULL;
#if defined(CONFIG_OF_LIBFDT)
	/* did generic code already find a device tree? */
	if (images->ft_len)
		of_flat_tree = images->ft_addr;
#endif
	/* fall back to an fdt address passed on the command line */
	if (!of_flat_tree && argc > 1)
		of_flat_tree = (char *)simple_strtoul(argv[1], NULL, 16);
	/* the fdt takes the initrd_end slot (see register map below) */
	if (of_flat_tree)
		initrd_end = (ulong)of_flat_tree;

	/* only handle the final "go" stage */
	if ((flag != 0) && (flag != BOOTM_STATE_OS_GO))
		return 1;

	/* flushes data and instruction caches before calling the kernel */
	disable_interrupts();
	flush_dcache((ulong)kernel, CONFIG_SYS_DCACHE_SIZE);
	flush_icache((ulong)kernel, CONFIG_SYS_ICACHE_SIZE);

	debug("bootargs=%s @ 0x%lx\n", commandline, (ulong)&commandline);
	debug("initrd=0x%lx-0x%lx\n", (ulong)initrd_start, (ulong)initrd_end);

	/* kernel parameters passing
	 * r4 : NIOS magic
	 * r5 : initrd start
	 * r6 : initrd end or fdt
	 * r7 : kernel command line
	 * fdt is passed to kernel via r6, the same as initrd_end. fdt will be
	 * verified with fdt magic. when both initrd and fdt are used at the
	 * same time, fdt must follow immediately after initrd.
	 */
	kernel(NIOS_MAGIC, initrd_start, initrd_end, commandline);

	/* does not return */
	return 1;
}
/* Copy all literals referenced from a code block to newspace */
void collect_literals_step(F_COMPILED *compiled, CELL code_start, CELL literals_start)
{
	/* Skip blocks already scanned during this (or a younger)
	   collection. */
	if(collecting_gen >= compiled->last_scan)
	{
		CELL scan;
		CELL literal_end = literals_start + compiled->literals_length;

		/* Record the generation at which this block was scanned so
		   the next collection can decide whether to rescan it. */
		if(collecting_accumulation_gen_p())
			compiled->last_scan = collecting_gen;
		else
			compiled->last_scan = collecting_gen + 1;

		/* Forward every literal slot of the block. */
		for(scan = literals_start; scan < literal_end; scan += CELLS)
			copy_handle((CELL*)scan);

		if(compiled->relocation != F)
		{
			copy_handle(&compiled->relocation);

			F_BYTE_ARRAY *relocation = untag_object(compiled->relocation);

			F_REL *rel = (F_REL *)(relocation + 1);
			F_REL *rel_end = (F_REL *)((char *)rel + byte_array_capacity(relocation));

			/* Literals embedded directly in the machine code may
			   have moved: re-apply each RT_IMMEDIATE relocation
			   with the literal's new value. */
			while(rel < rel_end)
			{
				if(REL_TYPE(rel) == RT_IMMEDIATE)
				{
					CELL offset = rel->offset + code_start;
					F_FIXNUM absolute_value = get(CREF(literals_start,REL_ARGUMENT(rel)));
					apply_relocation(REL_CLASS(rel),offset,absolute_value);
				}
				rel++;
			}
		}

		/* The code was patched in place; flush the icache over it. */
		flush_icache(code_start,literals_start - code_start);
	}
}
/*
 * Build the trampoline for a LINUX64 closure.  Under ELFv2 a four
 * instruction sequence plus two data words is written into the closure;
 * under ELFv1 a function descriptor (entry, TOC, environment) is
 * assembled from ffi_closure_LINUX64's own descriptor with codeloc as
 * the environment word.
 */
ffi_status FFI_HIDDEN
ffi_prep_closure_loc_linux64 (ffi_closure *closure,
			      ffi_cif *cif,
			      void (*fun) (ffi_cif *, void *, void **, void *),
			      void *user_data,
			      void *codeloc)
{
#if _CALL_ELF == 2
  unsigned int *tramp = (unsigned int *) &closure->tramp[0];

  if (cif->abi < FFI_LINUX || cif->abi >= FFI_LAST_ABI)
    return FFI_BAD_ABI;

  tramp[0] = 0xe96c0018;	/* 0:	ld	11,2f-0b(12)	*/
  tramp[1] = 0xe98c0010;	/*	ld	12,1f-0b(12)	*/
  tramp[2] = 0x7d8903a6;	/*	mtctr	12		*/
  tramp[3] = 0x4e800420;	/*	bctr			*/
				/* 1:	.quad	function_addr	*/
				/* 2:	.quad	context		*/
  *(void **) &tramp[4] = (void *) ffi_closure_LINUX64;
  *(void **) &tramp[6] = codeloc;
  /* Only the four instructions are flushed (4 * 4 bytes); the data
     words at tramp[4..7] are never executed. */
  flush_icache ((char *) tramp, (char *) codeloc, 4 * 4);
#else
  void **tramp = (void **) &closure->tramp[0];

  if (cif->abi < FFI_LINUX || cif->abi >= FFI_LAST_ABI)
    return FFI_BAD_ABI;

  /* Copy function address and TOC from ffi_closure_LINUX64 OPD.  */
  memcpy (&tramp[0], (void **) ffi_closure_LINUX64, sizeof (void *));
  tramp[1] = codeloc;
  memcpy (&tramp[2], (void **) ffi_closure_LINUX64 + 1, sizeof (void *));
#endif

  closure->cif = cif;
  closure->fun = fun;
  closure->user_data = user_data;

  return FFI_OK;
}
/*
 * Boot a NIOS2 Linux kernel: flush the caches and call the kernel entry
 * point with the NIOS magic, initrd range and command line.  Returns 1
 * only when the requested bootm stage is not handled (the kernel call
 * never returns).
 */
int do_bootm_linux(int flag, int argc, char *argv[], bootm_headers_t *images)
{
	void (*kernel)(int, int, int, char *) = (void *)images->ep;
	char *commandline = getenv("bootargs");
	ulong initrd_start = images->rd_start;
	ulong initrd_end = images->rd_end;

	/* only handle the final "go" stage */
	if ((flag != 0) && (flag != BOOTM_STATE_OS_GO))
		return 1;

	/* flushes data and instruction caches before calling the kernel */
	disable_interrupts();
	flush_dcache((ulong)kernel, CONFIG_SYS_DCACHE_SIZE);
	flush_icache((ulong)kernel, CONFIG_SYS_ICACHE_SIZE);

	debug("bootargs=%s @ 0x%lx\n", commandline, (ulong)&commandline);
	debug("initrd=0x%lx-0x%lx\n", (ulong)initrd_start, (ulong)initrd_end);

	/* arguments land in r4 (magic), r5 (initrd start), r6 (initrd end)
	   and r7 (command line) — see the fdt-aware variant of this
	   function for the full register map. */
	kernel(NIOS_MAGIC, initrd_start, initrd_end, commandline);

	/* does not return */
	return 1;
}
uint32_t * trap_install(unsigned idx, void (*trap_func)(), const struct exc_copy_block *pref) { uint32_t *start; uint32_t *vector; unsigned size; size = pref->size; if(trap_func != NULL) size += 4*sizeof(vector[0]); start = exc_vector_address(idx, size); vector = start; copy_code(vector, pref); vector = (uint32_t *)((uintptr_t)vector + pref->size); // // KLUDGE: We need to rework the kernel entry sequences to make // the following stuff cleaner. // // Careful with these instructions sequences. See trap_chain_addr() // below for rationale. // if(trap_func != NULL) { // lis %r3, func >> 16 // ori %r3, func & 0xffff // mtlr %r3 // ba PPC_KERENTRY_COMMON vector[0] = MK_OPCODE(15, 3, 0, (uint32_t)trap_func >> 16); vector[1] = MK_OPCODE(24, 3, 3, (uint32_t)trap_func & 0xffff); vector[2] = 0x7c6803a6; vector[3] = 0x48000002 | PPC_KERENTRY_COMMON; flush_icache(vector, 4*sizeof(vector[0])); }
/**
 * m68k_setup_auto_interrupt
 * @handler: called from auto vector interrupts
 *
 * Installs @handler to be called from auto vector interrupts instead of
 * the standard m68k_handle_int(); it will be invoked with irq numbers in
 * the range IRQ_AUTO_1 - IRQ_AUTO_7.
 */
void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *))
{
	if (handler != NULL)
		*auto_irqhandler_fixup = (u32)handler;
	/* The fixup patches live code, so invalidate the icache. */
	flush_icache();
}
/*
 * Refer to ARM Procedure Call Standard (APCS) for more info.
 *
 * Build a native trampoline for the given managed signature: the
 * generated code unpacks the argument vector into ARM registers and the
 * stack per the APCS, calls the target function and stores the return
 * value through the retval pointer.  Works in two passes: first size
 * the code buffer and outgoing stack area, then emit the instructions.
 */
MonoPIFunc mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
{
	MonoType* param;
	MonoPIFunc code_buff;
	arminstr_t* p;
	guint32 code_size, stack_size;
	guint32 simple_type;
	int i, hasthis, aregs, regc, stack_offs;
	int this_loaded;
	guchar reg_alloc [ARM_NUM_ARG_REGS];

	/* pessimistic estimation for prologue/epilogue size */
	code_size = 16 + 16;
	/* push/pop work regs */
	code_size += 2;
	/* call */
	code_size += 2;
	/* handle retval */
	code_size += 2;

	stack_size = 0;
	hasthis = sig->hasthis ? 1 : 0;

	/* argument registers left after 'this' takes one */
	aregs = ARM_NUM_ARG_REGS - hasthis;

	/* --- pass 1: size the code and the outgoing stack area ------- */
	for (i = 0, regc = aregs; i < sig->param_count; ++i) {
		param = sig->params [i];

		/* keep track of argument sizes */
		if (i < ARM_NUM_ARG_REGS) reg_alloc [i] = 0;

		if (param->byref) {
			if (regc > 0) {
				code_size += 1;
				reg_alloc [i] = regc;
				--regc;
			} else {
				code_size += 2;
				stack_size += sizeof(gpointer);
			}
		} else {
			simple_type = param->type;
enum_calc_size:
			switch (simple_type) {
			case MONO_TYPE_BOOLEAN:
			case MONO_TYPE_CHAR:
			case MONO_TYPE_I1:
			case MONO_TYPE_U1:
			case MONO_TYPE_I2:
			case MONO_TYPE_U2:
			case MONO_TYPE_I4:
			case MONO_TYPE_U4:
			case MONO_TYPE_I:
			case MONO_TYPE_U:
			case MONO_TYPE_PTR:
			case MONO_TYPE_R4:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_CLASS:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_STRING:
				if (regc > 0) {
					/* register arg */
					code_size += 1;
					reg_alloc [i] = regc;
					--regc;
				} else {
					/* stack arg */
					code_size += 2;
					stack_size += 4;
				}
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_U8:
			case MONO_TYPE_R8:
				/* keep track of argument sizes */
				if (regc > 1) {
					/* fits into registers, two LDRs */
					code_size += 2;
					reg_alloc [i] = regc;
					regc -= 2;
				} else if (regc > 0) {
					/* first half fits into register, one LDR */
					code_size += 1;
					reg_alloc [i] = regc;
					--regc;
					/* the rest on the stack, LDR/STR */
					code_size += 2;
					stack_size += 4;
				} else {
					/* stack arg, 4 instrs - 2x(LDR/STR) */
					code_size += 4;
					stack_size += 2 * 4;
				}
				break;
			case MONO_TYPE_VALUETYPE:
				if (param->data.klass->enumtype) {
					/* resize as the enum's underlying type */
					simple_type = param->data.klass->enum_basetype->type;
					goto enum_calc_size;
				}
				if (mono_class_value_size(param->data.klass, NULL) != 4) {
					g_error("can only marshal enums, not generic structures (size: %d)", mono_class_value_size(param->data.klass, NULL));
				}
				if (regc > 0) {
					/* register arg */
					code_size += 1;
					reg_alloc [i] = regc;
					--regc;
				} else {
					/* stack arg */
					code_size += 2;
					stack_size += 4;
				}
				break;
			default :
				break;
			}
		}
	}

	/* --- pass 2: emit the trampoline ----------------------------- */
	code_buff = (MonoPIFunc)alloc_code_buff(code_size);
	p = (arminstr_t*)code_buff;

	/* prologue */
	p = arm_emit_lean_prologue(p, stack_size,
		/* save workset (r4-r7) */
		(1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7));

	/* copy args into workset */
	/* callme - always present */
	ARM_MOV_REG_REG(p, ARMREG_R4, ARMREG_A1);
	/* retval */
	if (sig->ret->byref || string_ctor || (sig->ret->type != MONO_TYPE_VOID)) {
		ARM_MOV_REG_REG(p, ARMREG_R5, ARMREG_A2);
	}
	/* this_obj */
	if (sig->hasthis) {
		this_loaded = 0;
		if (stack_size == 0) {
			/* no stack args: A1 can hold 'this' right away */
			ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_A3);
			this_loaded = 1;
		} else {
			ARM_MOV_REG_REG(p, ARMREG_R6, ARMREG_A3);
		}
	}
	/* args */
	if (sig->param_count != 0) {
		ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_A4);
	}

	stack_offs = stack_size;

	/* handle arguments */
	/* in reverse order so we could use r0 (arg1) for memory transfers */
	for (i = sig->param_count; --i >= 0;) {
		param = sig->params [i];
		if (param->byref) {
			if (i < aregs && reg_alloc[i] > 0) {
				ARM_LDR_IMM(p, ARMREG_A1 + i, REG_ARGP, i*ARG_SIZE);
			} else {
				stack_offs -= sizeof(armword_t);
				ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
				ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
			}
		} else {
			simple_type = param->type;
enum_marshal:
			switch (simple_type) {
			case MONO_TYPE_BOOLEAN:
			case MONO_TYPE_CHAR:
			case MONO_TYPE_I1:
			case MONO_TYPE_U1:
			case MONO_TYPE_I2:
			case MONO_TYPE_U2:
			case MONO_TYPE_I4:
			case MONO_TYPE_U4:
			case MONO_TYPE_I:
			case MONO_TYPE_U:
			case MONO_TYPE_PTR:
			case MONO_TYPE_R4:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_CLASS:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_STRING:
				if (i < aregs && reg_alloc [i] > 0) {
					/* pass in register */
					ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
				} else {
					stack_offs -= sizeof(armword_t);
					ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
					ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
				}
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_U8:
			case MONO_TYPE_R8:
				if (i < aregs && reg_alloc [i] > 0) {
					if (reg_alloc [i] > 1) {
						/* pass in registers */
						ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
						ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]) + 1, REG_ARGP, i*ARG_SIZE + 4);
					} else {
						/* low word in the last register,
						   high word on the stack */
						stack_offs -= sizeof(armword_t);
						ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4);
						ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
						ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
					}
				} else {
					/* two words transferred on the stack */
					stack_offs -= 2*sizeof(armword_t);
					ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
					ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
					ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4);
					ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs + 4);
				}
				break;
			case MONO_TYPE_VALUETYPE:
				if (param->data.klass->enumtype) {
					/* it's an enum value, proceed based on its base type */
					simple_type = param->data.klass->enum_basetype->type;
					goto enum_marshal;
				} else {
					if (i < aregs && reg_alloc[i] > 0) {
						/* NOTE(review): 'hasthis' is added
						   twice here, unlike every other
						   register-index computation above —
						   verify this is intentional. */
						int vtreg = ARMREG_A1 + hasthis + hasthis + (aregs - reg_alloc[i]);
						ARM_LDR_IMM(p, vtreg, REG_ARGP, i * ARG_SIZE);
						ARM_LDR_IMM(p, vtreg, vtreg, 0);
					} else {
						stack_offs -= sizeof(armword_t);
						ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i * ARG_SIZE);
						ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R0, 0);
						ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (sig->hasthis && !this_loaded) {
		/* [this] always passed in A1, regardless of sig->call_convention */
		ARM_MOV_REG_REG(p, ARMREG_A1, REG_THIS);
	}

	/* call [func] */
	ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
	ARM_MOV_REG_REG(p, ARMREG_PC, REG_FUNC_ADDR);

	/* handle retval */
	if (sig->ret->byref || string_ctor) {
		ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
	} else {
		simple_type = sig->ret->type;
enum_retvalue:
		switch (simple_type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			ARM_STRB_IMM(p, ARMREG_R0, REG_RETVAL, 0);
			break;
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			ARM_STRH_IMM(p, ARMREG_R0, REG_RETVAL, 0);
			break;
		/*
		 * A 32-bit integer and integer-equivalent return value
		 * is returned in R0.
		 * Single-precision floating-point values are returned in R0.
		 */
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_STRING:
			ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
			break;
		/*
		 * A 64-bit integer is returned in R0 and R1.
		 * Double-precision floating-point values are returned in R0 and R1.
		 */
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
		case MONO_TYPE_R8:
			ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
			ARM_STR_IMM(p, ARMREG_R1, REG_RETVAL, 4);
			break;
		case MONO_TYPE_VALUETYPE:
			if (sig->ret->data.klass->enumtype) {
				simple_type = sig->ret->data.klass->enum_basetype->type;
				goto enum_retvalue;
			}
			break;
		case MONO_TYPE_VOID:
			break;
		default:
			break;
		}
	}

	p = arm_emit_std_epilogue(p, stack_size,
		/* restore R4-R7 */
		(1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7));

	/* make the emitted code visible to instruction fetch */
	flush_icache();

#ifdef ARM_DUMP_DISASM
	_armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff));
#endif

	return code_buff;
}
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	{
		extern unsigned long availmem;
		printk ("start of paging_init (%p, %lx)\n",
			kernel_pg_dir, availmem);
	}
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	/* Determine the overall physical memory range, discarding any
	   chunk that lies below the first one. */
	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct mem_info));
			continue;
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	/* Patch the kernel's fixup sites and flush the icache since live
	   code was modified. */
	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_low_pfn = max_addr >> PAGE_SHIFT;

	/* Initialize the bootmem allocator on every node. */
	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
		availmem = PAGE_ALIGN(availmem);
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem,
			  min(INIT_MAPPED_SIZE, size) - (availmem - addr));
	map_node(0);
	if (size > INIT_MAPPED_SIZE)
		free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE,
				  size - INIT_MAPPED_SIZE);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	/* Hand each node's page range to the zone allocator. */
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, pg_data_map + i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
	}
}
int main(void) { /* clear bss segment */ do{*tmpPtr ++ = 0;}while(tmpPtr <= (char *)&__bss_end); #ifdef MMU_OPENED //move other storage to sram: saved_resume_pointer(virtual addr), saved_mmu_state mem_memcpy((void *)&mem_para_info, (void *)(DRAM_BACKUP_BASE_ADDR1), sizeof(mem_para_info)); #else mem_preload_tlb_nommu(); /*switch stack*/ //save_mem_status_nommu(RESUME1_START |0x02); //move other storage to sram: saved_resume_pointer(virtual addr), saved_mmu_state mem_memcpy((void *)&mem_para_info, (void *)(DRAM_BACKUP_BASE_ADDR1_PA), sizeof(mem_para_info)); /*restore mmu configuration*/ restore_mmu_state(&(mem_para_info.saved_mmu_state)); //disable_dcache(); #endif //serial_init(); if(unlikely((mem_para_info.debug_mask)&PM_STANDBY_PRINT_RESUME)){ serial_puts("after restore mmu. \n"); } if (unlikely((mem_para_info.debug_mask)&PM_STANDBY_PRINT_CHECK_CRC)) { standby_dram_crc(1); } //after open mmu mapping #ifdef FLUSH_TLB //busy_waiting(); mem_flush_tlb(); mem_preload_tlb(); #endif #ifdef FLUSH_ICACHE //clean i cache flush_icache(); #endif //twi freq? setup_twi_env(); mem_twi_init(AXP_IICBUS); #ifdef POWER_OFF restore_ccmu(); #endif /*restore pmu config*/ #ifdef POWER_OFF if (likely(mem_para_info.axp_enable)) { mem_power_exit(mem_para_info.axp_event); } /* disable watch-dog: coresponding with boot0 */ mem_tmr_disable_watchdog(); #endif //before jump to late_resume #ifdef FLUSH_TLB mem_flush_tlb(); #endif #ifdef FLUSH_ICACHE //clean i cache flush_icache(); #endif if (unlikely((mem_para_info.debug_mask)&PM_STANDBY_PRINT_CHECK_CRC)) { serial_puts("before jump_to_resume. \n"); } //before jump, invalidate data jump_to_resume((void *)mem_para_info.resume_pointer, mem_para_info.saved_runtime_context_svc); return; }
/*
 * Load the ELF application named in argv[0], flush the caches and
 * transfer control to its entry point with (argc, argv).
 * Returns 0 on success, -1 on any failure (missing/empty file,
 * allocation, read or ELF load error).
 */
__s32 eGon2_run_app(__s32 argc, char **argv)
{
	void *paddr;
	H_FILE pfile;
	__u32 entry;
	app_func func;
	__s32 ret;
	__u32 length;

	if(argc <= 0)
	{
		return -1;
	}
	/* open the file */
	pfile = FS_fopen(&argv[0][0], "r+");
	if(!pfile)
	{
		eGon2_printf("can't find %s\n", argv[0]);
		return -1;
	}
	/* get the file length */
	length = FS_filelen(pfile);
	if(!length)
	{
		eGon2_printf("error: file %s length is 0\n", argv[0]);
		FS_fclose(pfile);
		return -1;
	}
	paddr = eGon2_malloc(length);
	if(!paddr)
	{
		eGon2_printf("unable to malloc memory for install driver\n");
		FS_fclose(pfile);
		return -1;
	}
	if(!FS_fread(paddr, length, 1, pfile))
	{
		eGon2_printf("read %s fail\n", argv[0]);
		FS_fclose(pfile);
		eGon2_free(paddr);
		return -1;
	}
	FS_fclose(pfile);
	/* load the ELF image; the entry address is returned via &entry */
	ret = elf_loader(paddr, &entry);
	eGon2_free(paddr);
	if(ret < 0)
	{
		eGon2_printf("elf file %s load fail\n", argv[0]);
		return -1;
	}
	func = (app_func)entry;
	/* flush the caches */
	/* NOTE(review): the icache is flushed before the dcache here;
	   usually the dcache is cleaned first so the freshly loaded code
	   has reached memory — confirm the intended order. */
	flush_icache();
	flush_dcache();

	func(argc, argv);

	return 0;
}
/*
 * Copy an exception-vector code block to `dst` and flush the icache over
 * it so the new instructions become visible to instruction fetch.
 */
void copy_code(uint32_t *dst, const struct exc_copy_block *src)
{
	const unsigned nbytes = src->size;

	memcpy(dst, src->code, nbytes);
	flush_icache(dst, nbytes);
}
/* Flush the instruction cache over the given code block so freshly
   written machine code becomes visible for execution. */
void factor_vm::flush_icache_for(code_block *block)
{
	cell start = (cell)block;
	cell len = block->size();
	flush_icache(start, len);
}
/*
 * Entry point of the multi-image PBL: relocate ourselves to a writable
 * location, find the compressed barebox payload appended after the
 * image (length word followed by data), decompress it and jump to it.
 * Never returns.
 */
void __noreturn barebox_multi_pbl_start(unsigned long membase,
		unsigned long memsize, void *boarddata)
{
	uint32_t pg_len;
	void __noreturn (*barebox)(unsigned long, unsigned long, void *);
	uint32_t endmem = membase + memsize;
	unsigned long barebox_base;
	uint32_t *image_end;
	void *pg_start;
	unsigned long pc = get_pc();

	image_end = (void *)ld_var(__image_end) - get_runtime_offset();

	if (IS_ENABLED(CONFIG_PBL_RELOCATABLE)) {
		/*
		 * If we run from inside the memory just relocate the binary
		 * to the current address. Otherwise it may be a readonly location.
		 * Copy and relocate to the start of the memory in this case.
		 */
		if (pc > membase && pc - membase < memsize)
			relocate_to_current_adr();
		else
			relocate_to_adr(membase);
	}

	/*
	 * image_end is the first location after the executable. It contains
	 * the size of the appended compressed binary followed by the binary.
	 */
	pg_start = image_end + 1;
	pg_len = *(image_end);

	if (IS_ENABLED(CONFIG_RELOCATABLE))
		barebox_base = arm_mem_barebox_image(membase, endmem,
						     pg_len);
	else
		barebox_base = TEXT_BASE;

	setup_c();

	pr_debug("memory at 0x%08lx, size 0x%08lx\n", membase, memsize);

	if (IS_ENABLED(CONFIG_MMU_EARLY)) {
		unsigned long ttb = arm_mem_ttb(membase, endmem);
		pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
		mmu_early_enable(membase, memsize, ttb);
	}

	free_mem_ptr = arm_mem_early_malloc(membase, endmem);
	free_mem_end_ptr = arm_mem_early_malloc_end(membase, endmem);

	pr_debug("uncompressing barebox binary at 0x%p (size 0x%08x) to 0x%08lx\n",
			pg_start, pg_len, barebox_base);

	pbl_barebox_uncompress((void*)barebox_base, pg_start, pg_len);

	/* Make the freshly written image visible to instruction fetch. */
	arm_early_mmu_cache_flush();
	flush_icache();

	/* Thumb-2 entry points carry bit 0 set. */
	if (IS_ENABLED(CONFIG_THUMB2_BAREBOX))
		barebox = (void *)(barebox_base + 1);
	else
		barebox = (void *)barebox_base;

	pr_debug("jumping to uncompressed image at 0x%p\n", barebox);

	barebox(membase, memsize, boarddata);
}