/* Extract one SB section: raw data sections are dumped to
 * "<prefix><secname>.bin"; executable instruction streams are rebuilt into
 * one or more ELF images (a new image is started after every CALL/JUMP). */
static void extract_sb_section(struct sb_section_t *sec)
{
    if(sec->is_data)
    {
        /* sec_name is a 4-char tag plus NUL (filled by sb_fill_section_name) */
        char sec_name[5];
        char *filename = xmalloc(strlen(g_out_prefix) + 32);
        sb_fill_section_name(sec_name, sec->identifier);
        sprintf(filename, "%s%s.bin", g_out_prefix, sec_name);
        FILE *fd = fopen(filename, "wb");
        if(fd == NULL)
            bugp("Cannot open %s for writing\n", filename);
        if(g_debug)
            printf("Write data section %s to %s\n", sec_name, filename);
        free(filename);
        /* a data section must contain only SB_INST_DATA instructions */
        for(int j = 0; j < sec->nr_insts; j++)
        {
            assert(sec->insts[j].inst == SB_INST_DATA);
            /* NOTE(review): fwrite result is unchecked — short writes are
             * silently ignored */
            fwrite(sec->insts[j].data, sec->insts[j].size, 1, fd);
        }
        fclose(fd);
    }
    /* Rebuild ELF image(s) from the load/fill/call/jump instruction stream. */
    int elf_count = 0;
    struct elf_params_t elf;
    elf_init(&elf);
    for(int i = 0; i < sec->nr_insts; i++)
    {
        struct sb_inst_t *inst = &sec->insts[i];
        switch(inst->inst)
        {
            case SB_INST_LOAD:
                elf_add_load_section(&elf, inst->addr, inst->size, inst->data);
                break;
            case SB_INST_FILL:
                elf_add_fill_section(&elf, inst->addr, inst->size, inst->pattern);
                break;
            case SB_INST_CALL:
            case SB_INST_JUMP:
                /* a call/jump terminates the current image: set its entry
                 * point, write it out, and start a fresh one */
                elf_set_start_addr(&elf, inst->addr);
                extract_elf_section(&elf, elf_count++, sec->identifier);
                elf_release(&elf);
                elf_init(&elf);
                break;
            default:
                /* ignore mode and nop */
                break;
        }
    }
    /* flush trailing sections that were not followed by a call/jump */
    if(!elf_is_empty(&elf))
        extract_elf_section(&elf, elf_count, sec->identifier);
    elf_release(&elf);
}
/*
 * Locate libpthread in the traced child's address space and resolve the
 * addresses of its "__stack_user" and "stack_used" symbols.
 *
 * sym_stack_used / sym_stack_user: out-parameters receiving the resolved
 * addresses.  If libpthread is not mapped in the child, both are left
 * untouched and the function returns silently.
 *
 * Fix: the original strncpy(pe.fn, ..., 256) does not guarantee NUL
 * termination when the path is 256 bytes or longer (CERT STR32-C);
 * snprintf always terminates.
 */
static void fix_libpthread(uint32_t * sym_stack_used, uint32_t * sym_stack_user)
{
    int err;
    struct proc_entry pe;
    memset(&pe, 0, sizeof(pe));
    /* bounded, always NUL-terminated copy of the configured .so path */
    snprintf(pe.fn, 256, "%s", opts->pthread_so_fn);
    pe.bits = PE_FILE;
    err = proc_fill_entry(&pe, child_pid);
    /* if err != 0, then there is no libpthread. */
    if (err != 0) {
        SYS_TRACE("no %s found, needn't fix libpthread\n", opts->pthread_so_fn);
        return;
    }
    SYS_TRACE("find %s mapped ", opts->pthread_so_fn);
    /* find symbol __stack_user and stack_used */
    void * img = load_file(pe.fn);
    struct elf_handler * lp_so = elf_init(img, pe.start);
    *sym_stack_user = elf_get_symbol_address(lp_so, "__stack_user");
    *sym_stack_used = elf_get_symbol_address(lp_so, "stack_used");
    elf_cleanup(lp_so);
    free(img);
}
/* Resolve a source id in the command file and load it as an ELF image.
 * Idempotent: a source already loaded as ELF is left alone; a source already
 * classified as something other than unknown/ELF is a fatal error (bug()
 * does not return). */
static void load_elf_by_id(struct cmd_file_t *cmd_file, const char *id)
{
    struct cmd_source_t *src = db_find_source_by_id(cmd_file, id);
    if(src == NULL)
        bug("undefined reference to source '%s'\n", id);
    /* avoid reloading */
    if(src->type == CMD_SRC_ELF && src->loaded)
        return;
    /* a source cannot be used both as an ELF and as a binary blob */
    if(src->type != CMD_SRC_UNK)
        bug("source '%s' seen both as elf and binary file\n", id);
    /* resolve potential extern file */
    resolve_extern(src);
    /* load it */
    src->type = CMD_SRC_ELF;
    FILE *fd = fopen(src->filename, "rb");
    if(fd == NULL)
        bug("cannot open '%s' (id '%s')\n", src->filename, id);
    if(g_debug)
        printf("Loading ELF file '%s'...\n", src->filename);
    elf_init(&src->elf);
    src->loaded = elf_read_file(&src->elf, elf_read, elf_printf, fd);
    fclose(fd);
    if(!src->loaded)
        bug("error loading elf file '%s' (id '%s')\n", src->filename, id);
    /* convert virtual to physical addresses for downstream consumers */
    elf_translate_addresses(&src->elf);
}
/* Load and relocate the kernel image from KERNEL_PATH on the given boot
 * volume into gKernelArgs.kernel_image.
 * Returns B_OK on success, or a negative error code from open/load/relocate. */
status_t load_kernel(stage2_args *args, Directory *volume)
{
    int fd = open_from(volume, KERNEL_PATH, O_RDONLY);
    if (fd < B_OK)
        return fd;
    dprintf("load kernel...\n");
    elf_init();
    status_t status = elf_load_image(fd, &gKernelArgs.kernel_image);
    close(fd);
    if (status < B_OK) {
        dprintf("loading kernel failed: %lx!\n", status);
        return status;
    }
    status = elf_relocate_image(&gKernelArgs.kernel_image);
    if (status < B_OK) {
        dprintf("relocating kernel failed: %lx!\n", status);
        return status;
    }
    /* record the image name so the kernel can identify itself later */
    gKernelArgs.kernel_image.name = kernel_args_strdup(KERNEL_IMAGE);
    return B_OK;
}
/* Pretty-print a Rockchip nano-stage image header and its load sections,
 * and extract each section as a standalone ELF image.
 * Returns 0 on success, 1 if the buffer is too small to hold a section.
 * NOTE(review): only the minimum size is checked; per-section code/data
 * offsets derived from sec->*_pa are not validated against `size` — a
 * malformed image could read out of bounds. Verify against callers. */
static int do_nanostage_image(uint8_t *buf, unsigned long size)
{
    if(size < sizeof(struct rknano_stage_section_t))
        return 1;
    struct rknano_stage_header_t *hdr = (void *)buf;
    cprintf(BLUE, "Header\n");
    cprintf(GREEN, " Base Address: ");
    cprintf(YELLOW, "%#08x\n", hdr->addr);
    cprintf(GREEN, " Load count: ");
    cprintf(YELLOW, "%d\n", hdr->count);
    /* section descriptors immediately follow the header */
    struct rknano_stage_section_t *sec = (void *)(hdr + 1);
    for(unsigned i = 0; i < hdr->count; i++, sec++)
    {
        cprintf(BLUE, "Section %d\n", i);
        /* physical -> virtual mapping of the text range */
        cprintf(GREEN, " Code: ");
        cprintf(YELLOW, "0x%08x", sec->code_pa);
        cprintf(RED, "-(txt)-");
        cprintf(YELLOW, "0x%08x", sec->code_pa + sec->code_sz);
        cprintf(BLUE, " |--> ");
        cprintf(YELLOW, "0x%08x", sec->code_va);
        cprintf(RED, "-(txt)-");
        cprintf(YELLOW, "0x%08x\n", sec->code_va + sec->code_sz);
        /* physical -> virtual mapping of the data range */
        cprintf(GREEN, " Data: ");
        cprintf(YELLOW, "0x%08x", sec->data_pa);
        cprintf(RED, "-(dat)-");
        cprintf(YELLOW, "0x%08x", sec->data_pa + sec->data_sz);
        cprintf(BLUE, " |--> ");
        cprintf(YELLOW, "0x%08x", sec->data_va);
        cprintf(RED, "-(dat)-");
        cprintf(YELLOW, "0x%08x\n", sec->data_va + sec->data_sz);
        /* bss has no physical backing, only a virtual range */
        cprintf(GREEN, " Data: ");
        cprintf(RED, " ");
        cprintf(BLUE, " |--> ");
        cprintf(YELLOW, "0x%08x", sec->bss_va);
        cprintf(RED, "-(bss)-");
        cprintf(YELLOW, "0x%08x\n", sec->bss_va + sec->bss_sz);
#if 0
        struct rknano_blob_t blob;
        blob.offset = sec->code_pa - hdr->addr;
        blob.size = sec->code_sz;
        save_blob(&blob, buf, size, "entry.", i, NO_ENC);
#else
        /* rebuild the section as an ELF: text + data loaded from the image,
         * bss as a zero-fill section */
        struct elf_params_t elf;
        elf_init(&elf);
        elf_add_load_section(&elf, sec->code_va, sec->code_sz, buf + sec->code_pa - hdr->addr);
        elf_add_load_section(&elf, sec->data_va, sec->data_sz, buf + sec->data_pa - hdr->addr);
        elf_add_fill_section(&elf, sec->bss_va, sec->bss_sz, 0);
        extract_elf_section(&elf, i);
        elf_release(&elf);
#endif
    }
    return 0;
}
/* Locate, load and relocate the kernel from the boot volume, storing the
 * resulting preloaded image in gKernelArgs.kernel_image.
 * Returns B_OK on success, or a negative error code. */
status_t load_kernel(stage2_args* args, BootVolume& volume)
{
    const char *name;
    int fd = find_kernel(volume, &name);
    if (fd < B_OK)
        return fd;
    dprintf("load kernel %s...\n", name);
    elf_init();
    preloaded_image *image;
    status_t status = elf_load_image(fd, &image);
    close(fd);
    if (status < B_OK) {
        dprintf("loading kernel failed: %lx!\n", status);
        return status;
    }
    gKernelArgs.kernel_image = image;
    status = elf_relocate_image(gKernelArgs.kernel_image);
    if (status < B_OK) {
        dprintf("relocating kernel failed: %lx!\n", status);
        return status;
    }
    /* remember which kernel file was actually found */
    gKernelArgs.kernel_image->name = kernel_args_strdup(name);
    return B_OK;
}
/* Entry point: extract a function's opcodes from an ELF file (or read a flat
 * binary) and print them as shellcode in the format selected by
 * options.format (python / hexdump / raw / default C array).
 * Returns 0 on success, 1 if the ELF file cannot be initialized. */
int main(int argc, char *argv[])
{
    struct elf_handle *handle;
    char *file, *bytes;
    int size;
    /* parse_args fills the global `options` and returns the input path */
    file = parse_args(argc, argv);
    if (!options.flat_binary) {
        if ((handle = elf_init(file)) == NULL) {
            fprintf(stderr, "Failed to initialize elf file %s: %s\n", file, strerror(errno));
            return 1;
        }
        bytes = get_bytes(handle, options.function, &size);
        elf_free(handle);
    } else
        /* NOTE(review): bytes is not checked for NULL before printing —
         * presumably read_flat_binary/get_bytes abort on failure; confirm. */
        bytes = read_flat_binary(file, &size);
    fprintf(stderr, "Printing shellcode (%d bytes)\n", size);
    /* dispatch on the requested output format (case-insensitive) */
    if (!strcasecmp(options.format, "python"))
        print_opcodes_py(bytes, size);
    else if (!strcasecmp(options.format, "hexdump"))
        print_opcodes_hex(bytes, size);
    else if (!strcasecmp(options.format, "raw"))
        print_opcodes_raw(bytes, size);
    else
        print_opcodes_C(bytes, size);
    fflush(stdout);
    free(bytes);
    return 0;
}
/* Parse the guest kernel blob as an ELF image: validate it, extract the Xen
 * ELF notes into dom->parms, record the kernel's virtual address range and
 * guest type. The elf_binary is kept in dom->private_loader for the later
 * load phase. Returns 0 on success or a negative errno value. */
static int xc_dom_parse_elf_kernel(struct xc_dom_image *dom)
{
    struct elf_binary *elf;
    int rc;
    rc = check_elf_kernel(dom, 1);
    if ( rc != 0 )
        return rc;
    /* NOTE(review): xc_dom_malloc result is not NULL-checked before use;
     * presumably elf_init tolerates it or allocation failure aborts — verify. */
    elf = xc_dom_malloc(dom, sizeof(*elf));
    dom->private_loader = elf;
    rc = elf_init(elf, dom->kernel_blob, dom->kernel_size);
    xc_elf_set_logfile(dom->xch, elf, 1);
    if ( rc != 0 )
    {
        xc_dom_panic(dom->xch, XC_INVALID_KERNEL, "%s: corrupted ELF image", __FUNCTION__);
        return rc;
    }
    /* Find the section-header strings table. */
    if ( elf->sec_strtab == NULL )
    {
        xc_dom_panic(dom->xch, XC_INVALID_KERNEL, "%s: ELF image" " has no shstrtab", __FUNCTION__);
        return -EINVAL;
    }
    /* parse binary and get xen meta info */
    elf_parse_binary(elf);
    if ( (rc = elf_xen_parse(elf, &dom->parms)) != 0 )
        return rc;
    /* refuse kernels that only support privileged (Dom0) operation */
    if ( elf_xen_feature_get(XENFEAT_dom0, dom->parms.f_required) )
    {
        xc_dom_panic(dom->xch, XC_INVALID_KERNEL, "%s: Kernel does not" " support unprivileged (DomU) operation", __FUNCTION__);
        return -EINVAL;
    }
    /* find kernel segment */
    dom->kernel_seg.vstart = dom->parms.virt_kstart;
    dom->kernel_seg.vend = dom->parms.virt_kend;
    if ( dom->parms.bsd_symtab )
        xc_dom_load_elf_symtab(dom, elf, 0);
    dom->guest_type = xc_dom_guest_type(dom, elf);
    DOMPRINTF("%s: %s: 0x%" PRIx64 " -> 0x%" PRIx64 "", __FUNCTION__, dom->guest_type, dom->kernel_seg.vstart, dom->kernel_seg.vend);
    return 0;
}
/* Rebuild ELF image(s) from an SB1 instruction stream: LOAD instructions
 * become numbered .textN sections, FILL instructions become .bssN sections,
 * and each CALL/JUMP closes the current image (setting its entry point) and
 * starts a new one. Trailing sections without a call/jump are flushed at
 * the end. */
static void extract_sb1_file(struct sb1_file_t *file)
{
    int elf_count = 0;
    struct elf_params_t elf;
    elf_init(&elf);
    int bss_idx = 0, text_idx = 0;
    char secname[32];
    for(int i = 0; i < file->nr_insts; i++)
    {
        struct sb1_inst_t *inst = &file->insts[i];
        switch(inst->cmd)
        {
            case SB1_INST_LOAD:
                sprintf(secname, ".text%d", text_idx++);
                elf_add_load_section(&elf, inst->addr, inst->size, inst->data, secname);
                break;
            case SB1_INST_FILL:
                sprintf(secname, ".bss%d", bss_idx++);
                elf_add_fill_section(&elf, inst->addr, inst->size, inst->pattern, secname);
                break;
            case SB1_INST_CALL:
            case SB1_INST_JUMP:
                /* entry point reached: emit the accumulated image and reset */
                elf_set_start_addr(&elf, inst->addr);
                extract_elf(&elf, elf_count++);
                elf_release(&elf);
                elf_init(&elf);
                break;
            default:
                /* ignore mode and nop */
                break;
        }
    }
    if(!elf_is_empty(&elf))
        extract_elf(&elf, elf_count);
    elf_release(&elf);
}
/* libFuzzer entry point: feed arbitrary bytes through the libelf
 * init/parse/xen-parse pipeline. Always returns 0 per the libFuzzer
 * contract.
 * Fixes: (1) elf_init()'s return value was ignored, so the parse functions
 * ran on a rejected/uninitialized image; (2) `parms` was passed to
 * elf_xen_parse() without being zeroed. */
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
{
    struct elf_binary elf_buf, *elf;
    struct elf_dom_parms parms;
    elf = &elf_buf;
    memset(elf, 0, sizeof(*elf));
    memset(&parms, 0, sizeof(parms));
    /* bail out early on inputs elf_init rejects */
    if ( elf_init(elf, (const char *)data, size) != 0 )
        return 0;
    elf_parse_binary(elf);
    elf_xen_parse(elf, &parms);
    return 0;
}
/* Parse the guest kernel blob as an ELF image (older logfile-based variant):
 * validate it, extract Xen ELF notes into dom->parms, record the kernel's
 * virtual range and guest type. The elf_binary lives in dom->private_loader
 * for the load phase. Returns 0 on success or a negative errno value. */
static int xc_dom_parse_elf_kernel(struct xc_dom_image *dom)
{
    struct elf_binary *elf;
    int rc;
    rc = check_elf_kernel(dom, 1);
    if ( rc != 0 )
        return rc;
    /* NOTE(review): xc_dom_malloc result is not NULL-checked — confirm
     * allocation failure policy with the rest of libxc. */
    elf = xc_dom_malloc(dom, sizeof(*elf));
    dom->private_loader = elf;
    rc = elf_init(elf, dom->kernel_blob, dom->kernel_size);
    if ( xc_dom_logfile )
        elf_set_logfile(elf, xc_dom_logfile, 1);
    if ( rc != 0 )
    {
        xc_dom_panic(XC_INVALID_KERNEL, "%s: corrupted ELF image\n", __FUNCTION__);
        return rc;
    }
    /* Find the section-header strings table. */
    if ( elf->sec_strtab == NULL )
    {
        xc_dom_panic(XC_INVALID_KERNEL, "%s: ELF image has no shstrtab\n", __FUNCTION__);
        return -EINVAL;
    }
    /* parse binary and get xen meta info */
    elf_parse_binary(elf);
    if ( (rc = elf_xen_parse(elf, &dom->parms)) != 0 )
        return rc;
    /* find kernel segment */
    dom->kernel_seg.vstart = dom->parms.virt_kstart;
    dom->kernel_seg.vend = dom->parms.virt_kend;
    if ( dom->parms.bsd_symtab )
        xc_dom_load_elf_symtab(dom, elf, 0);
    dom->guest_type = xc_dom_guest_type(dom, elf);
    xc_dom_printf("%s: %s: 0x%" PRIx64 " -> 0x%" PRIx64 "\n", __FUNCTION__, dom->guest_type, dom->kernel_seg.vstart, dom->kernel_seg.vend);
    return 0;
}
/* Run the ELF section-parsing pipeline over the binary at binary_path.
 * On success the global elf_hdr points at the mapped ELF header and the
 * section tables have been parsed. Returns 0 on success, -1 on any failure. */
int parse_elf_section(const char *binary_path)
{
    if (elf_init(binary_path) == -1)
        return -1;

    elf_hdr = (Elf64_Ehdr *)elf_mem;

    /* validate the header, then locate section names, then parse sections;
     * short-circuit preserves the original step ordering */
    if (check_elf_header(elf_hdr) == -1 ||
        parse_elf_section_name(elf_hdr) == -1 ||
        __parse_elf_section(elf_hdr) == -1)
        return -1;

    return 0;
}
/* Load an ELF file and append its contents to an SB1 image: each LOAD
 * section becomes an sb1 load command, each FILL section a fill command,
 * and if `act` is SB1_INST_JUMP/CALL a jump/call to the ELF entry point is
 * appended. Returns the result of the final jump/call (0 otherwise); bug()
 * aborts on any fatal error. */
static int load_elf(struct sb1_file_t *sb, const char *filename, int act)
{
    struct elf_params_t elf;
    FILE *fd = fopen(filename, "rb");
    if(fd == NULL)
        bug("cannot open '%s'\n", filename);
    if(g_debug)
        printf("Loading elf file '%s'...\n", filename);
    elf_init(&elf);
    bool loaded = elf_read_file(&elf, elf_std_read, generic_std_printf, fd);
    fclose(fd);
    if(!loaded)
        bug("error loading elf file '%s'\n", filename);
    /* emit sections in ascending address order */
    elf_sort_by_address(&elf);
    struct elf_section_t *esec = elf.first_section;
    while(esec)
    {
        if(esec->type == EST_LOAD)
            sb1_add_load(sb, esec->section, esec->size, esec->addr);
        else if(esec->type == EST_FILL)
            sb1_add_fill(sb, esec->pattern, esec->size, esec->addr);
        esec = esec->next;
    }
    int ret = 0;
    if(act == SB1_INST_JUMP || act == SB1_INST_CALL)
    {
        /* a jump/call needs an entry point from the ELF header */
        if(!elf.has_start_addr)
            bug("Cannot jump/call: '%s' has no start address!\n", filename);
        if(act == SB1_INST_JUMP)
            ret = sb1_add_jump(sb, elf.start_addr, g_jump_arg);
        else
            ret = sb1_add_call(sb, elf.start_addr, g_jump_arg);
    }
    elf_release(&elf);
    return ret;
}
/* Probe whether the kernel blob is a Xen-capable ELF image.
 * Returns 0 if the image initializes cleanly AND carries Xen ELF notes,
 * otherwise a negative errno value. */
static elf_negerrnoval xc_dom_probe_elf_kernel(struct xc_dom_image *dom)
{
    struct elf_binary elf;
    int rc = check_elf_kernel(dom, 0);

    if ( rc == 0 )
        rc = elf_init(&elf, dom->kernel_blob, dom->kernel_size);
    if ( rc != 0 )
        return rc;

    /*
     * We need to check that it contains Xen ELFNOTES,
     * or else we might be trying to load a plain ELF.
     */
    elf_parse_binary(&elf);
    return elf_xen_parse(&elf, &dom->parms);
}
/* Load an ELF program binary described by binp: initialize the loader,
 * load the image, bind it against the exported symbol table, and fill in
 * the entry point / allocation / stack-size fields of binp.
 * Returns OK on success or a negated errno-style code; partially acquired
 * loader state is released on the goto error paths. */
static int elf_loadbinary(struct binary_s *binp)
{
    struct elf_loadinfo_s loadinfo;  /* Contains globals for libelf */
    int ret;
    bvdbg("Loading file: %s\n", binp->filename);
    /* Initialize the ELF library to load the program binary. */
    ret = elf_init(binp->filename, &loadinfo);
    elf_dumploadinfo(&loadinfo);
    if (ret != 0)
    {
        bdbg("Failed to initialize for load of ELF program: %d\n", ret);
        goto errout;
    }
    /* Load the program binary */
    ret = elf_load(&loadinfo);
    elf_dumploadinfo(&loadinfo);
    if (ret != 0)
    {
        bdbg("Failed to load ELF program binary: %d\n", ret);
        goto errout_with_init;
    }
    /* Bind the program to the exported symbol table */
    ret = elf_bind(&loadinfo, binp->exports, binp->nexports);
    if (ret != 0)
    {
        bdbg("Failed to bind symbols program binary: %d\n", ret);
        goto errout_with_load;
    }
    /* Return the load information: entry point is the allocation base plus
     * the ELF e_entry offset. */
    binp->entrypt = (main_t)(loadinfo.elfalloc + loadinfo.ehdr.e_entry);
    binp->alloc[0] = (FAR void *)loadinfo.elfalloc;
    binp->stacksize = CONFIG_ELF_STACKSIZE;
#ifdef CONFIG_BINFMT_CONSTRUCTORS
    /* Save information about constructors. NOTE: destructors are not
     * yet supported. */
    binp->alloc[1] = loadinfo.ctoralloc;
    binp->ctors = loadinfo.ctors;
    binp->nctors = loadinfo.nctors;
    binp->alloc[2] = loadinfo.dtoralloc;
    binp->dtors = loadinfo.dtors;
    binp->ndtors = loadinfo.ndtors;
#endif
    elf_dumpbuffer("Entry code", (FAR const uint8_t*)binp->entrypt, MIN(loadinfo.allocsize - loadinfo.ehdr.e_entry, 512));
    elf_uninit(&loadinfo);
    return OK;
errout_with_load:
    elf_unload(&loadinfo);
errout_with_init:
    elf_uninit(&loadinfo);
errout:
    return ret;
}
int __init construct_dom0( struct domain *d, const module_t *image, unsigned long image_headroom, module_t *initrd, void *(*bootstrap_map)(const module_t *), char *cmdline) { int i, cpu, rc, compatible, compat32, order, machine; struct cpu_user_regs *regs; unsigned long pfn, mfn; unsigned long nr_pages; unsigned long nr_pt_pages; unsigned long alloc_spfn; unsigned long alloc_epfn; unsigned long initrd_pfn = -1, initrd_mfn = 0; unsigned long count; struct page_info *page = NULL; start_info_t *si; struct vcpu *v = d->vcpu[0]; unsigned long long value; char *image_base = bootstrap_map(image); unsigned long image_len = image->mod_end; char *image_start = image_base + image_headroom; unsigned long initrd_len = initrd ? initrd->mod_end : 0; #if CONFIG_PAGING_LEVELS < 4 module_t mpt; void *mpt_ptr; #else l4_pgentry_t *l4tab = NULL, *l4start = NULL; #endif l3_pgentry_t *l3tab = NULL, *l3start = NULL; l2_pgentry_t *l2tab = NULL, *l2start = NULL; l1_pgentry_t *l1tab = NULL, *l1start = NULL; /* * This fully describes the memory layout of the initial domain. All * *_start address are page-aligned, except v_start (and v_end) which are * superpage-aligned. */ struct elf_binary elf; struct elf_dom_parms parms; unsigned long vkern_start; unsigned long vkern_end; unsigned long vinitrd_start; unsigned long vinitrd_end; unsigned long vphysmap_start; unsigned long vphysmap_end; unsigned long vstartinfo_start; unsigned long vstartinfo_end; unsigned long vstack_start; unsigned long vstack_end; unsigned long vpt_start; unsigned long vpt_end; unsigned long v_start; unsigned long v_end; /* Machine address of next candidate page-table page. */ paddr_t mpt_alloc; /* Sanity! 
*/ BUG_ON(d->domain_id != 0); BUG_ON(d->vcpu[0] == NULL); BUG_ON(v->is_initialised); printk("*** LOADING DOMAIN 0 ***\n"); d->max_pages = ~0U; if ( (rc = bzimage_parse(image_base, &image_start, &image_len)) != 0 ) return rc; if ( (rc = elf_init(&elf, image_start, image_len)) != 0 ) return rc; #ifdef VERBOSE elf_set_verbose(&elf); #endif elf_parse_binary(&elf); if ( (rc = elf_xen_parse(&elf, &parms)) != 0 ) return rc; /* compatibility check */ compatible = compat32 = 0; machine = elf_uval(&elf, elf.ehdr, e_machine); switch (CONFIG_PAGING_LEVELS) { case 3: /* x86_32p */ if (parms.pae == PAEKERN_bimodal) parms.pae = PAEKERN_extended_cr3; printk(" Xen kernel: 32-bit, PAE, lsb\n"); if (elf_32bit(&elf) && parms.pae && machine == EM_386) compatible = 1; break; case 4: /* x86_64 */ printk(" Xen kernel: 64-bit, lsb, compat32\n"); if (elf_32bit(&elf) && parms.pae == PAEKERN_bimodal) parms.pae = PAEKERN_extended_cr3; if (elf_32bit(&elf) && parms.pae && machine == EM_386) { compat32 = 1; compatible = 1; } if (elf_64bit(&elf) && machine == EM_X86_64) compatible = 1; break; } printk(" Dom0 kernel: %s%s, %s, paddr 0x%" PRIx64 " -> 0x%" PRIx64 "\n", elf_64bit(&elf) ? "64-bit" : "32-bit", parms.pae ? ", PAE" : "", elf_msb(&elf) ? 
"msb" : "lsb", elf.pstart, elf.pend); if ( elf.bsd_symtab_pstart ) printk(" Dom0 symbol map 0x%" PRIx64 " -> 0x%" PRIx64 "\n", elf.bsd_symtab_pstart, elf.bsd_symtab_pend); if ( !compatible ) { printk("Mismatch between Xen and DOM0 kernel\n"); return -EINVAL; } if ( parms.elf_notes[XEN_ELFNOTE_SUPPORTED_FEATURES].type != XEN_ENT_NONE && !test_bit(XENFEAT_dom0, parms.f_supported) ) { printk("Kernel does not support Dom0 operation\n"); return -EINVAL; } #if defined(__x86_64__) if ( compat32 ) { d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1; v->vcpu_info = (void *)&d->shared_info->compat.vcpu_info[0]; if ( setup_compat_arg_xlat(v) != 0 ) BUG(); } #endif nr_pages = compute_dom0_nr_pages(d, &parms, initrd_len); if ( parms.pae == PAEKERN_extended_cr3 ) set_bit(VMASST_TYPE_pae_extended_cr3, &d->vm_assist); if ( (parms.virt_hv_start_low != UNSET_ADDR) && elf_32bit(&elf) ) { unsigned long mask = (1UL << L2_PAGETABLE_SHIFT) - 1; value = (parms.virt_hv_start_low + mask) & ~mask; BUG_ON(!is_pv_32bit_domain(d)); #if defined(__i386__) if ( value > HYPERVISOR_VIRT_START ) panic("Domain 0 expects too high a hypervisor start address.\n"); #else if ( value > __HYPERVISOR_COMPAT_VIRT_START ) panic("Domain 0 expects too high a hypervisor start address.\n"); HYPERVISOR_COMPAT_VIRT_START(d) = max_t(unsigned int, m2p_compat_vstart, value); #endif }
static int xc_dom_load_elf_symtab(struct xc_dom_image *dom, struct elf_binary *elf, int load) { struct elf_binary syms; const elf_shdr *shdr, *shdr2; xen_vaddr_t symtab, maxaddr; char *hdr; size_t size; int h, count, type, i, tables = 0; if ( elf_swap(elf) ) { xc_dom_printf("%s: non-native byte order, bsd symtab not supported\n", __FUNCTION__); return 0; } if ( load ) { if ( !dom->bsd_symtab_start ) return 0; size = dom->kernel_seg.vend - dom->bsd_symtab_start; hdr = xc_dom_vaddr_to_ptr(dom, dom->bsd_symtab_start); *(int *)hdr = size - sizeof(int); } else { size = sizeof(int) + elf_size(elf, elf->ehdr) + elf_shdr_count(elf) * elf_size(elf, shdr); hdr = xc_dom_malloc(dom, size); if ( hdr == NULL ) return 0; dom->bsd_symtab_start = elf_round_up(&syms, dom->kernel_seg.vend); } memcpy(hdr + sizeof(int), elf->image, elf_size(elf, elf->ehdr)); memcpy(hdr + sizeof(int) + elf_size(elf, elf->ehdr), elf->image + elf_uval(elf, elf->ehdr, e_shoff), elf_shdr_count(elf) * elf_size(elf, shdr)); if ( elf_64bit(elf) ) { Elf64_Ehdr *ehdr = (Elf64_Ehdr *)(hdr + sizeof(int)); ehdr->e_phoff = 0; ehdr->e_phentsize = 0; ehdr->e_phnum = 0; ehdr->e_shoff = elf_size(elf, elf->ehdr); ehdr->e_shstrndx = SHN_UNDEF; } else { Elf32_Ehdr *ehdr = (Elf32_Ehdr *)(hdr + sizeof(int)); ehdr->e_phoff = 0; ehdr->e_phentsize = 0; ehdr->e_phnum = 0; ehdr->e_shoff = elf_size(elf, elf->ehdr); ehdr->e_shstrndx = SHN_UNDEF; } if ( elf_init(&syms, hdr + sizeof(int), size - sizeof(int)) ) return -1; if ( xc_dom_logfile ) elf_set_logfile(&syms, xc_dom_logfile, 1); symtab = dom->bsd_symtab_start + sizeof(int); maxaddr = elf_round_up(&syms, symtab + elf_size(&syms, syms.ehdr) + elf_shdr_count(&syms) * elf_size(&syms, shdr)); xc_dom_printf("%s/%s: bsd_symtab_start=%" PRIx64 ", kernel.end=0x%" PRIx64 " -- symtab=0x%" PRIx64 ", maxaddr=0x%" PRIx64 "\n", __FUNCTION__, load ? 
"load" : "parse", dom->bsd_symtab_start, dom->kernel_seg.vend, symtab, maxaddr); count = elf_shdr_count(&syms); for ( h = 0; h < count; h++ ) { shdr = elf_shdr_by_index(&syms, h); type = elf_uval(&syms, shdr, sh_type); if ( type == SHT_STRTAB ) { /* Look for a strtab @i linked to symtab @h. */ for ( i = 0; i < count; i++ ) { shdr2 = elf_shdr_by_index(&syms, i); if ( (elf_uval(&syms, shdr2, sh_type) == SHT_SYMTAB) && (elf_uval(&syms, shdr2, sh_link) == h) ) break; } /* Skip symtab @h if we found no corresponding strtab @i. */ if ( i == count ) { if ( elf_64bit(&syms) ) *(Elf64_Off*)(&shdr->e64.sh_offset) = 0; else *(Elf32_Off*)(&shdr->e32.sh_offset) = 0; continue; } } if ( (type == SHT_STRTAB) || (type == SHT_SYMTAB) ) { /* Mangled to be based on ELF header location. */ if ( elf_64bit(&syms) ) *(Elf64_Off*)(&shdr->e64.sh_offset) = maxaddr - symtab; else *(Elf32_Off*)(&shdr->e32.sh_offset) = maxaddr - symtab; size = elf_uval(&syms, shdr, sh_size); maxaddr = elf_round_up(&syms, maxaddr + size); tables++; xc_dom_printf("%s: h=%d %s, size=0x%zx, maxaddr=0x%" PRIx64 "\n", __FUNCTION__, h, type == SHT_SYMTAB ? "symtab" : "strtab", size, maxaddr); if ( load ) { shdr2 = elf_shdr_by_index(elf, h); memcpy((void*)elf_section_start(&syms, shdr), elf_section_start(elf, shdr2), size); } } /* Name is NULL. */ if ( elf_64bit(&syms) ) *(Elf64_Half*)(&shdr->e64.sh_name) = 0; else *(Elf32_Word*)(&shdr->e32.sh_name) = 0; } if ( tables == 0 ) { xc_dom_printf("%s: no symbol table present\n", __FUNCTION__); dom->bsd_symtab_start = 0; return 0; } if ( !load ) dom->kernel_seg.vend = maxaddr; return 0; }
/* readnotes entry point: map (and, if gzipped, decompress) an ELF image,
 * print the Xen notes found in PT_NOTE segments — falling back to SHT_NOTE
 * sections — and dump the legacy __xen_guest section if present.
 * Returns 0 on success, 1 on any setup error. */
int main(int argc, char **argv)
{
    const char *f;
    int fd,h,size,usize,count;
    void *image,*tmp;
    struct stat st;
    struct elf_binary elf;
    const elf_shdr *shdr;
    int notes_found = 0;
    if (argc != 2)
    {
        fprintf(stderr, "Usage: readnotes <elfimage>\n");
        return 1;
    }
    f = argv[1];
    fd = open(f, O_RDONLY);
    if (fd == -1)
    {
        fprintf(stderr, "Unable to open %s: %s\n", f, strerror(errno));
        return 1;
    }
    if (fstat(fd, &st) == -1)
    {
        fprintf(stderr, "Unable to determine size of %s: %s\n", f, strerror(errno));
        return 1;
    }
    image = mmap(0, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
    if (image == MAP_FAILED)
    {
        fprintf(stderr, "Unable to map %s: %s\n", f, strerror(errno));
        return 1;
    }
    size = st.st_size;
    /* transparently gunzip compressed images into a heap buffer;
     * NOTE(review): malloc and xc_dom_do_gunzip results are unchecked, and
     * the original mapping is never munmap'd (acceptable for a short-lived
     * CLI tool, but worth confirming). */
    usize = xc_dom_check_gzip(image, st.st_size);
    if (usize)
    {
        tmp = malloc(usize);
        xc_dom_do_gunzip(image, st.st_size, tmp, usize);
        image = tmp;
        size = usize;
    }
    if (0 != elf_init(&elf, image, size))
    {
        fprintf(stderr, "File %s is not an ELF image\n", f);
        return 1;
    }
    elf_set_logfile(&elf, stderr, 0);
    /* first preference: notes from PT_NOTE program headers */
    count = elf_phdr_count(&elf);
    for ( h=0; h < count; h++)
    {
        const elf_phdr *phdr;
        phdr = elf_phdr_by_index(&elf, h);
        if (elf_uval(&elf, phdr, p_type) != PT_NOTE)
            continue;
        /* Some versions of binutils do not correctly set
         * p_offset for note segments. */
        if (elf_uval(&elf, phdr, p_offset) == 0)
            continue;
        notes_found = print_notes(&elf, elf_segment_start(&elf, phdr), elf_segment_end(&elf, phdr));
    }
    /* fallback: notes from SHT_NOTE sections */
    if ( notes_found == 0 )
    {
        count = elf_shdr_count(&elf);
        for ( h=0; h < count; h++)
        {
            const elf_shdr *shdr;
            shdr = elf_shdr_by_index(&elf, h);
            if (elf_uval(&elf, shdr, sh_type) != SHT_NOTE)
                continue;
            notes_found = print_notes(&elf, elf_section_start(&elf, shdr), elf_section_end(&elf, shdr));
            if ( notes_found )
                fprintf(stderr, "using notes from SHT_NOTE section\n");
        }
    }
    /* legacy guest info string */
    shdr = elf_shdr_by_name(&elf, "__xen_guest");
    if (shdr)
        printf("__xen_guest: %s\n", (char*)elf_section_start(&elf, shdr));
    return 0;
}
/* One-time loader setup: currently just initializes the ELF subsystem. */
void loader_init (void)
{
  elf_init ();
}
int do_extract(const char *output, int argc, char **argv) { if(argc != 1) { printf("You need to specify exactly one input file to extract from.\n"); return 3; } FILE *fout = NULL; if(output) { fout = fopen(output, "w"); if(fout == NULL) { printf("Cannot open output file '%s'\n", output); return 4; } } /* read elf file */ g_elf_buf = read_file(argv[0], g_elf_size); if(g_elf_buf == nullptr) { printf("Cannot open input file '%s'\n", argv[0]); return 1; } if(!elf_init()) { printf("This is not a valid ELF file\n"); return 1; } if(g_elf_symtab == nullptr) { printf("This ELF file does not have a symbol table\n"); return 1; } /* look for symbol 'AreaInfo' */ Elf32_Sym *sym_AreaInfo = elf_get_symbol_by_name("AreaInfo"); if(sym_AreaInfo == nullptr) { printf("Cannot find symbol 'AreaInfo'\n"); return 1; } printf("AreaInfo:\n"); if(g_verbose) { printf("[%u bytes at address %#x in section %u (%s)]\n", (unsigned)sym_AreaInfo->st_size, (unsigned)sym_AreaInfo->st_value, (unsigned)sym_AreaInfo->st_shndx, elf_get_section_name(sym_AreaInfo->st_shndx)); } /* guess version */ int ver = guess_version(elf_get_symbol_ptr(sym_AreaInfo, sizeof(area_info_v1_t))); if(g_verbose) printf("[guessed version: %d]\n", ver); size_t sizeof_area_info = (ver == 1) ? sizeof(area_info_v1_t) : sizeof(area_info_v2_t); size_t sizeof_zone_info = (ver == 1) ? 
sizeof(zone_info_v1_t) : sizeof(zone_info_v2_t); /* sanity check AreaInfo */ size_t area_count = sym_AreaInfo->st_size / sizeof_area_info; if(!g_unsafe && (sym_AreaInfo->st_size % sizeof_area_info) != 0) { printf("AreaInfo size (%u) is a not a multiple of area_info_t size (%zu).\n", (unsigned)sym_AreaInfo->st_size, sizeof_area_info); printf("Use unsafe option to override this check\n"); return 1; } area_info_v1_t *AreaInfo_v1 = (area_info_v1_t *)elf_get_symbol_ptr(sym_AreaInfo, sym_AreaInfo->st_size); area_info_v2_t *AreaInfo_v2 = (area_info_v2_t *)AreaInfo_v1; if(AreaInfo_v1 == nullptr) { printf("Symbol does not point to a valid address\n"); return 1; } for(size_t i = 0; i < area_count; i++) { uint32_t type; uint32_t *zoneinfo_ptr; uint32_t zonecount; uint32_t *name_ptr; if(ver == 1) { type = AreaInfo_v1[i].type; zoneinfo_ptr = &AreaInfo_v1[i].zoneinfo; zonecount = AreaInfo_v1[i].zonecount; name_ptr = &AreaInfo_v1[i].name; } else { type = AreaInfo_v2[i].type; zoneinfo_ptr = &AreaInfo_v2[i].zoneinfo; zonecount = AreaInfo_v2[i].zonecount; name_ptr = &AreaInfo_v2[i].name; } if(g_verbose) { printf(" [type=%u info=%#x count=%u name=%#x]\n", type, *zoneinfo_ptr, zonecount, *name_ptr); } /* translate name address */ const char *name = (const char *)elf_reloc_addr32_ptr(name_ptr); if(name == nullptr || !elf_is_str_ptr_safe(name)) { printf(" Entry name is not a string\n"); continue; } /* skip reserved entries */ if(*zoneinfo_ptr == 0) { printf(" %s\n", name); continue; } /* relocate the zoneinfo pointer */ void *Zone = elf_reloc_addr32_ptr(zoneinfo_ptr);; if(Zone == nullptr) { printf(" %s\n", name); printf(" Zone info pointer is not valid\n"); continue; } /* in safe mode, make sure the zone info pointer is a symbol */ Elf32_Sym *zoneinfo_sym = elf_get_symbol_by_ptr((void *)Zone); const char *zoneinfo_sym_name = "<no symbol>"; if(zoneinfo_sym) zoneinfo_sym_name = elf_get_symbol_name(zoneinfo_sym); printf(" %s (%s)\n", name, zoneinfo_sym_name); if(!g_unsafe && !zoneinfo_sym) 
{ printf(" Zone info pointer does not correspond to any symbol.\n"); printf(" Use unsafe option to override this check\n"); continue; } /* if we have the symbol, make sure the claimed size match */ if(!g_unsafe && zoneinfo_sym) { if(zoneinfo_sym->st_size != sizeof_zone_info * zonecount) { printf(" Zone info symbol size (%u) does not match expected size (%zu)\n", (unsigned)zoneinfo_sym->st_size, sizeof_zone_info * zonecount); printf(" Use unsafe option to override this check\n"); continue; } } /* sanity check */ if(!elf_is_ptr_safe((void *)Zone, sizeof_zone_info * zonecount)) { printf(" Zone info pointer is not valid\n"); continue; } /* read zone */ zone_info_v1_t *Zone_v1 = (zone_info_v1_t *)Zone; zone_info_v2_t *Zone_v2 = (zone_info_v2_t *)Zone; for(size_t j = 0; j < zonecount; j++) { uint32_t node, start, count, size; uint32_t *name_ptr; if(ver == 1) { node = Zone_v1[j].node; start = Zone_v1[j].start; count = Zone_v1[j].count; size = Zone_v1[j].size; name_ptr = &Zone_v1[j].name; } else { node = Zone_v2[j].node; start = Zone_v2[j].start; count = Zone_v2[j].count; size = Zone_v2[j].size; name_ptr = &Zone_v2[j].name; } if(g_verbose) { printf(" [node=%u start=%#x count=%u size=%u name=%#x]\n", node, start, count, size, *name_ptr); } /* translate name address */ const char *name = (const char *)elf_reloc_addr32_ptr(name_ptr); if(name == nullptr || !elf_is_str_ptr_safe(name)) { printf(" Entry name is not a string\n"); continue; } printf(" %s: node %03u, size %u\n", name, node, size); if(fout) fprintf(fout, "%u,%u,%s\n", node, size, name); } } if(fout) fclose(fout); /* success */ return 0; }
/* Load a PVH dom0 kernel: decompress/parse the ELF image, copy it into the
 * guest physical address space, then lay out initrd, command line, module
 * list and the hvm_start_info structure contiguously above the kernel.
 * On success *entry is the XEN_ELFNOTE_PHYS32_ENTRY address and
 * *start_info_addr the guest-physical address of the start_info block.
 * Returns 0 on success or a negative error code. */
static int __init pvh_load_kernel(struct domain *d, const module_t *image,
                                  unsigned long image_headroom,
                                  module_t *initrd, void *image_base,
                                  char *cmdline, paddr_t *entry,
                                  paddr_t *start_info_addr)
{
    void *image_start = image_base + image_headroom;
    unsigned long image_len = image->mod_end;
    struct elf_binary elf;
    struct elf_dom_parms parms;
    paddr_t last_addr;
    struct hvm_start_info start_info = { 0 };
    struct hvm_modlist_entry mod = { 0 };
    struct vcpu *v = d->vcpu[0];
    int rc;

    if ( (rc = bzimage_parse(image_base, &image_start, &image_len)) != 0 )
    {
        printk("Error trying to detect bz compressed kernel\n");
        return rc;
    }

    if ( (rc = elf_init(&elf, image_start, image_len)) != 0 )
    {
        printk("Unable to init ELF\n");
        return rc;
    }
#ifdef VERBOSE
    elf_set_verbose(&elf);
#endif
    elf_parse_binary(&elf);
    if ( (rc = elf_xen_parse(&elf, &parms)) != 0 )
    {
        printk("Unable to parse kernel for ELFNOTES\n");
        return rc;
    }

    /* PVH requires an explicit 32-bit physical entry point note */
    if ( parms.phys_entry == UNSET_ADDR32 )
    {
        printk("Unable to find XEN_ELFNOTE_PHYS32_ENTRY address\n");
        return -EINVAL;
    }

    printk("OS: %s version: %s loader: %s bitness: %s\n", parms.guest_os,
           parms.guest_ver, parms.loader,
           elf_64bit(&elf) ? "64-bit" : "32-bit");

    /* Copy the OS image and free temporary buffer. */
    elf.dest_base = (void *)(parms.virt_kstart - parms.virt_base);
    elf.dest_size = parms.virt_kend - parms.virt_kstart;

    elf_set_vcpu(&elf, v);
    rc = elf_load_binary(&elf);
    if ( rc < 0 )
    {
        printk("Failed to load kernel: %d\n", rc);
        printk("Xen dom0 kernel broken ELF: %s\n", elf_check_broken(&elf));
        return rc;
    }

    /* everything below is placed page-aligned above the kernel image */
    last_addr = ROUNDUP(parms.virt_kend - parms.virt_base, PAGE_SIZE);

    if ( initrd != NULL )
    {
        rc = hvm_copy_to_guest_phys(last_addr, mfn_to_virt(initrd->mod_start),
                                    initrd->mod_end, v);
        if ( rc )
        {
            printk("Unable to copy initrd to guest\n");
            return rc;
        }

        mod.paddr = last_addr;
        mod.size = initrd->mod_end;
        last_addr += ROUNDUP(initrd->mod_end, PAGE_SIZE);
    }

    /* Free temporary buffers. */
    discard_initial_images();

    if ( cmdline != NULL )
    {
        rc = hvm_copy_to_guest_phys(last_addr, cmdline, strlen(cmdline) + 1, v);
        if ( rc )
        {
            printk("Unable to copy guest command line\n");
            return rc;
        }
        start_info.cmdline_paddr = last_addr;
        /*
         * Round up to 32/64 bits (depending on the guest kernel bitness) so
         * the modlist/start_info is aligned.
         */
        last_addr += ROUNDUP(strlen(cmdline) + 1, elf_64bit(&elf) ? 8 : 4);
    }
    if ( initrd != NULL )
    {
        rc = hvm_copy_to_guest_phys(last_addr, &mod, sizeof(mod), v);
        if ( rc )
        {
            printk("Unable to copy guest modules\n");
            return rc;
        }
        start_info.modlist_paddr = last_addr;
        start_info.nr_modules = 1;
        last_addr += sizeof(mod);
    }

    start_info.magic = XEN_HVM_START_MAGIC_VALUE;
    start_info.flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
    rc = hvm_copy_to_guest_phys(last_addr, &start_info, sizeof(start_info), v);
    if ( rc )
    {
        printk("Unable to copy start info to guest\n");
        return rc;
    }

    *entry = parms.phys_entry;
    *start_info_addr = last_addr;

    return 0;
}
/*
 * Map the syscall-wrapper shared object into the traced child process at
 * load_bias, mimicking what the kernel ELF loader does for each PT_LOAD
 * segment (page-aligned mmap2 with MAP_FIXED, via ptrace'd syscalls).
 *
 * Outputs (both optional, may be NULL):
 *   *pvdso_entrance - address of the "syscall_wrapper_entrace" symbol
 *   *pvdso_ehdr     - the load bias, i.e. where the ELF header landed
 *
 * Errors are reported through the assert_*_throw exception macros.
 */
static void map_wrap_so(const char * so_file, uintptr_t load_bias,
        uint32_t * pvdso_entrance, uint32_t * pvdso_ehdr)
{
    uint32_t name_pos;
    int fd, err;
    struct stat s;
    /* NOTE(review): err is never inspected directly; assert_errno_throw
     * presumably checks errno from the stat() call — confirm the macro. */
    err = stat(so_file, &s);
    assert_errno_throw("stat file %s failed", so_file);
    assert_throw(S_ISREG(s.st_mode), "file %s not a regular file", so_file);

    /* don't use off_t, it may not be a 32 bit word! */
    int32_t fsize = s.st_size;
    SYS_TRACE("desired so file length is %d\n", fsize);

    /* elf operations: parse the so image locally to learn its layout */
    void * so_image = load_file(so_file);
    struct elf_handler * h = elf_init(so_image, load_bias);

    /* load program headers */
    int nr_phdr = 0;
    struct elf32_phdr * phdr = elf_get_phdr_table(h, &nr_phdr);
    assert_throw(((phdr != NULL) && (nr_phdr != 0)),
            "load phdr of file %s failed\n", so_file);

    /* find the entry symbol (sic: "entrace" is the symbol's actual name) */
    uintptr_t entry_addr = elf_get_symbol_address(h, "syscall_wrapper_entrace");
    SYS_TRACE("wrapper func address will be 0x%x\n", entry_addr);

    /* push the filename onto the child's stack, then open it in the child */
    name_pos = ptrace_push(so_file, strlen(so_file), TRUE);
    fd = ptrace_syscall(open, 3, name_pos, O_RDONLY, 0);
    assert_throw(fd >= 0, "open sofile for child failed, return %d", fd);
    SYS_TRACE("open so file for child, fd=%d\n", fd);

    /* for each program header: mmap every PT_LOAD segment into the child */
    for (int i = 0; i < nr_phdr; i++, phdr ++) {
        SYS_FORCE("phdr %d, type=0x%x, flag=0x%x\n", i, phdr->p_type,
                phdr->p_flags);
        if (phdr->p_type != PT_LOAD)
            continue;

        /* translate ELF segment flags into mmap protection bits */
        int elf_prot = 0, elf_flags = 0;
        if (phdr->p_flags & PF_R)
            elf_prot |= PROT_READ;
        if (phdr->p_flags & PF_W)
            elf_prot |= PROT_WRITE;
        if (phdr->p_flags & PF_X)
            elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_EXECUTABLE;

        /* align start/offset down to a page boundary, like the kernel does */
        unsigned long size = phdr->p_filesz + ELF_PAGEOFFSET(phdr->p_vaddr);
        unsigned long off = phdr->p_offset - ELF_PAGEOFFSET(phdr->p_vaddr);
        int32_t map_addr = load_bias + phdr->p_vaddr -
                ELF_PAGEOFFSET(phdr->p_vaddr);
        map_addr = ptrace_syscall(mmap2, 6, map_addr, size, elf_prot,
                elf_flags | MAP_FIXED, fd, off);
        assert_throw(map_addr != 0xffffffff,
                "map wrap so failed, return 0x%x", map_addr);
    }

    elf_cleanup(h);
    free(so_image);

    if (pvdso_ehdr)
        *pvdso_ehdr = load_bias;
    if (pvdso_entrance)
        *pvdso_entrance = entry_addr;
}
/****************************************************************************
 * Name: elf_loadbinary
 *
 * Description:
 *   Verify that the file at binp->filename is an ELF binary, load it into
 *   memory, bind it against the exported symbol table, and fill in the
 *   binary_s structure (entry point, stack size, allocations and, when
 *   configured, constructor/destructor lists).
 *
 * Returned Value:
 *   OK (0) on success; a non-zero error value from the libelf layer on
 *   failure, with all partially acquired resources released.
 *
 ****************************************************************************/

static int elf_loadbinary(FAR struct binary_s *binp)
{
  struct elf_loadinfo_s loadinfo;  /* Contains globals for libelf */
  int ret;

  bvdbg("Loading file: %s\n", binp->filename);

  /* Initialize the ELF library to load the program binary. */

  ret = elf_init(binp->filename, &loadinfo);
  elf_dumploadinfo(&loadinfo);
  if (ret != 0)
    {
      bdbg("Failed to initialize for load of ELF program: %d\n", ret);
      goto errout;
    }

  /* Load the program binary */

  ret = elf_load(&loadinfo);
  elf_dumploadinfo(&loadinfo);
  if (ret != 0)
    {
      bdbg("Failed to load ELF program binary: %d\n", ret);
      goto errout_with_init;
    }

  /* Bind the program to the exported symbol table */

  ret = elf_bind(&loadinfo, binp->exports, binp->nexports);
  if (ret != 0)
    {
      bdbg("Failed to bind symbols program binary: %d\n", ret);
      goto errout_with_load;
    }

  /* Return the load information */

  binp->entrypt   = (main_t)(loadinfo.textalloc + loadinfo.ehdr.e_entry);
  binp->stacksize = CONFIG_ELF_STACKSIZE;

  /* Add the ELF allocation to the alloc[] only if there is no address
   * environment.  If there is an address environment, it will automatically
   * be freed when the function exits
   *
   * REVISIT:  If the module is loaded then unloaded, wouldn't this cause
   * a memory leak?
   */

#ifdef CONFIG_ARCH_ADDRENV
#  warning "REVISIT"
#else
  binp->alloc[0]  = (FAR void *)loadinfo.textalloc;
#endif

#ifdef CONFIG_BINFMT_CONSTRUCTORS
  /* Save information about constructors.  NOTE:  destructors are not
   * yet supported.
   */

  binp->alloc[1]  = loadinfo.ctoralloc;
  binp->ctors     = loadinfo.ctors;
  binp->nctors    = loadinfo.nctors;

  binp->alloc[2]  = loadinfo.dtoralloc;
  binp->dtors     = loadinfo.dtors;
  binp->ndtors    = loadinfo.ndtors;
#endif

#ifdef CONFIG_ARCH_ADDRENV
  /* Save the address environment in the binfmt structure.  This will be
   * needed when the module is executed.
   */

  up_addrenv_clone(&loadinfo.addrenv, &binp->addrenv);
#endif

  elf_dumpentrypt(binp, &loadinfo);
  elf_uninit(&loadinfo);
  return OK;

  /* Unwind in reverse order of acquisition on failure */

errout_with_load:
  elf_unload(&loadinfo);
errout_with_init:
  elf_uninit(&loadinfo);
errout:
  return ret;
}
/*
 * Dump one SB section.  The raw section bytes always go to
 * "<out_prefix><name>.bin".  For boot (non-data) sections the instruction
 * stream is additionally decoded, pretty-printed (with per-instruction
 * checksum/CRC verification) and reconstructed into ELF files, one per
 * CALL/JUMP boundary, via extract_elf_section().
 */
static void extract_section(int data_sec, char name[5], byte *buf, int size,
    const char *indent)
{
    char filename[PREFIX_SIZE + 32];
    snprintf(filename, sizeof filename, "%s%s.bin", out_prefix, name);
    FILE *fd = fopen(filename, "wb");
    /* NOTE(review): a failed fopen is silently ignored — the raw dump is
     * treated as best-effort. */
    if (fd != NULL)
    {
        fwrite(buf, size, 1, fd);
        fclose(fd);
    }
    if(data_sec)
        return;

    /* Base name (without .bin) used by extract_elf_section() below. */
    snprintf(filename, sizeof filename, "%s%s", out_prefix, name);

    /* elf construction */
    struct elf_params_t elf;
    elf_init(&elf);
    int elf_count = 0;

    /* Pretty print the content */
    int pos = 0;
    while(pos < size)
    {
        struct sb_instruction_header_t *hdr = (struct sb_instruction_header_t *)&buf[pos];
        printf("%s", indent);
        /* each instruction header carries its own checksum byte */
        uint8_t checksum = instruction_checksum(hdr);
        if(checksum != hdr->checksum)
        {
            color(GREY);
            printf("[Bad checksum]");
        }
        if(hdr->opcode == SB_INST_LOAD)
        {
            struct sb_instruction_load_t *load = (struct sb_instruction_load_t *)&buf[pos];
            color(RED); printf("LOAD"); color(OFF); printf(" | ");
            color(BLUE); printf("addr=0x%08x", load->addr); color(OFF); printf(" | ");
            color(GREEN); printf("len=0x%08x", load->len); color(OFF); printf(" | ");
            color(YELLOW); printf("crc=0x%08x", load->crc);
            /* data is padded to 16-byte boundary with random data and crc'ed with it */
            uint32_t computed_crc = crc(&buf[pos + sizeof(struct sb_instruction_load_t)],
                ROUND_UP(load->len, 16));
            color(RED);
            if(load->crc == computed_crc)
                printf(" Ok\n");
            else
                printf(" Failed (crc=0x%08x)\n", computed_crc);
            /* elf construction */
            elf_add_load_section(&elf, load->addr, load->len,
                &buf[pos + sizeof(struct sb_instruction_load_t)]);
            pos += load->len + sizeof(struct sb_instruction_load_t);
            // unsure about rounding
            pos = ROUND_UP(pos, 16);
        }
        else if(hdr->opcode == SB_INST_FILL)
        {
            struct sb_instruction_fill_t *fill = (struct sb_instruction_fill_t *)&buf[pos];
            color(RED); printf("FILL"); color(OFF); printf(" | ");
            color(BLUE); printf("addr=0x%08x", fill->addr); color(OFF); printf(" | ");
            color(GREEN); printf("len=0x%08x", fill->len); color(OFF); printf(" | ");
            color(YELLOW); printf("pattern=0x%08x\n", fill->pattern); color(OFF);
            /* elf construction */
            elf_add_fill_section(&elf, fill->addr, fill->len, fill->pattern);
            pos += sizeof(struct sb_instruction_fill_t);
            // fixme: useless as pos is a multiple of 16 and fill struct is 4-bytes wide ?
            pos = ROUND_UP(pos, 16);
        }
        else if(hdr->opcode == SB_INST_CALL || hdr->opcode == SB_INST_JUMP)
        {
            int is_call = (hdr->opcode == SB_INST_CALL);
            struct sb_instruction_call_t *call = (struct sb_instruction_call_t *)&buf[pos];
            color(RED);
            if(is_call) printf("CALL");
            else printf("JUMP");
            color(OFF); printf(" | ");
            color(BLUE); printf("addr=0x%08x", call->addr); color(OFF); printf(" | ");
            color(GREEN); printf("arg=0x%08x\n", call->arg); color(OFF);
            /* elf construction: a CALL/JUMP terminates the current ELF image;
             * flush it and start accumulating a fresh one */
            elf_set_start_addr(&elf, call->addr);
            extract_elf_section(&elf, elf_count++, filename, indent);
            elf_release(&elf);
            elf_init(&elf);
            pos += sizeof(struct sb_instruction_call_t);
            // fixme: useless as pos is a multiple of 16 and call struct is 4-bytes wide ?
            pos = ROUND_UP(pos, 16);
        }
        else
        {
            color(RED);
            printf("Unknown instruction %d at address 0x%08lx\n", hdr->opcode,
                (unsigned long)pos);
            break;
        }
    }

    /* flush any trailing sections not terminated by a CALL/JUMP */
    if(!elf_is_empty(&elf))
        extract_elf_section(&elf, elf_count++, filename, indent);
    elf_release(&elf);
}
/*
 * Extract one SB section and register it in the command-file database.
 * Data sections are dumped verbatim to "<g_out_prefix><name>.bin"; code
 * sections are rebuilt into ELF images (one per CALL/JUMP boundary), with
 * synthetic .textN/.bssN section names, via extract_elf_section().
 */
static void extract_sb_section(struct sb_section_t *sec, struct cmd_file_t *cmd_file)
{
    /* record the section and its attributes in the command file */
    struct cmd_section_t *db_sec = db_add_section(cmd_file, sec->identifier,
        sec->is_data);
    db_add_int_opt(&db_sec->opt_list, "alignment", sec->alignment);
    db_add_int_opt(&db_sec->opt_list, "cleartext", sec->is_cleartext);
    db_add_int_opt(&db_sec->opt_list, "sectionFlags", sec->other_flags);
    if(sec->is_data)
    {
        char sec_name[5];
        char *filename = xmalloc(strlen(g_out_prefix) + 32);
        sb_fill_section_name(sec_name, sec->identifier);
        sprintf(filename, "%s%s.bin", g_out_prefix, sec_name);
        /* the database stores the path relative to the output prefix */
        db_add_source(cmd_file, sec_name, filename + strlen(g_out_prefix));
        db_sec->source_id = strdup(sec_name);
        FILE *fd = fopen(filename, "wb");
        if(fd == NULL)
            bugp("Cannot open %s for writing\n", filename);
        if(g_debug)
            printf("Write data section %s to %s\n", sec_name, filename);
        free(filename);
        for(int j = 0; j < sec->nr_insts; j++)
        {
            /* a data section may only contain DATA instructions */
            assert(sec->insts[j].inst == SB_INST_DATA);
            fwrite(sec->insts[j].data, sec->insts[j].size, 1, fd);
        }
        fclose(fd);
    }
    int elf_count = 0;
    struct elf_params_t elf;
    elf_init(&elf);
    /* per-image counters used to synthesize unique ELF section names */
    int bss_idx = 0, text_idx = 0;
    char secname[32];
    for(int i = 0; i < sec->nr_insts; i++)
    {
        struct sb_inst_t *inst = &sec->insts[i];
        switch(inst->inst)
        {
            case SB_INST_LOAD:
                sprintf(secname, ".text%d", text_idx++);
                elf_add_load_section(&elf, inst->addr, inst->size, inst->data,
                    secname);
                break;
            case SB_INST_FILL:
                sprintf(secname, ".bss%d", bss_idx++);
                elf_add_fill_section(&elf, inst->addr, inst->size,
                    inst->pattern, secname);
                break;
            case SB_INST_CALL:
            case SB_INST_JUMP:
                /* CALL/JUMP closes the current ELF image: flush it and
                 * start over with fresh section counters */
                elf_set_start_addr(&elf, inst->addr);
                extract_elf_section(&elf, elf_count++, sec->identifier,
                    cmd_file, db_sec, inst->inst == SB_INST_CALL,
                    inst->argument);
                elf_release(&elf);
                elf_init(&elf);
                bss_idx = text_idx = 0;
                break;
            default:
                /* ignore mode and nop */
                break;
        }
    }
    /* flush trailing sections not terminated by a CALL/JUMP */
    if(!elf_is_empty(&elf))
        extract_elf_section(&elf, elf_count, sec->identifier, cmd_file,
            db_sec, false, 0);
    elf_release(&elf);
}
int __init construct_dom0( struct domain *d, unsigned long _image_start, unsigned long image_len, unsigned long _initrd_start, unsigned long initrd_len, char *cmdline) { int i, rc, compatible, compat32, order, machine; struct cpu_user_regs *regs; unsigned long pfn, mfn; unsigned long nr_pages; unsigned long nr_pt_pages; unsigned long alloc_spfn; unsigned long alloc_epfn; unsigned long count; struct page_info *page = NULL; start_info_t *si; struct vcpu *v = d->vcpu[0]; unsigned long long value; #if defined(__i386__) char *image_start = (char *)_image_start; /* use lowmem mappings */ char *initrd_start = (char *)_initrd_start; /* use lowmem mappings */ #elif defined(__x86_64__) char *image_start = __va(_image_start); char *initrd_start = __va(_initrd_start); #endif #if CONFIG_PAGING_LEVELS >= 4 l4_pgentry_t *l4tab = NULL, *l4start = NULL; #endif l3_pgentry_t *l3tab = NULL, *l3start = NULL; l2_pgentry_t *l2tab = NULL, *l2start = NULL; l1_pgentry_t *l1tab = NULL, *l1start = NULL; /* * This fully describes the memory layout of the initial domain. All * *_start address are page-aligned, except v_start (and v_end) which are * superpage-aligned. */ struct elf_binary elf; struct elf_dom_parms parms; unsigned long vkern_start; unsigned long vkern_end; unsigned long vinitrd_start; unsigned long vinitrd_end; unsigned long vphysmap_start; unsigned long vphysmap_end; unsigned long vstartinfo_start; unsigned long vstartinfo_end; unsigned long vstack_start; unsigned long vstack_end; unsigned long vpt_start; unsigned long vpt_end; unsigned long v_start; unsigned long v_end; /* Machine address of next candidate page-table page. */ unsigned long mpt_alloc; /* Sanity! 
*/ BUG_ON(d->domain_id != 0); BUG_ON(d->vcpu[0] == NULL); BUG_ON(v->is_initialised); printk("*** LOADING DOMAIN 0 ***\n"); d->max_pages = ~0U; nr_pages = compute_dom0_nr_pages(); if ( (rc = elf_init(&elf, image_start, image_len)) != 0 ) return rc; #ifdef VERBOSE elf_set_verbose(&elf); #endif elf_parse_binary(&elf); if ( (rc = elf_xen_parse(&elf, &parms)) != 0 ) return rc; /* compatibility check */ compatible = 0; compat32 = 0; machine = elf_uval(&elf, elf.ehdr, e_machine); switch (CONFIG_PAGING_LEVELS) { case 3: /* x86_32p */ if (parms.pae == PAEKERN_bimodal) parms.pae = PAEKERN_extended_cr3; printk(" Xen kernel: 32-bit, PAE, lsb\n"); if (elf_32bit(&elf) && parms.pae && machine == EM_386) compatible = 1; break; case 4: /* x86_64 */ printk(" Xen kernel: 64-bit, lsb, compat32\n"); if (elf_32bit(&elf) && parms.pae == PAEKERN_bimodal) parms.pae = PAEKERN_extended_cr3; if (elf_32bit(&elf) && parms.pae && machine == EM_386) { compat32 = 1; compatible = 1; } if (elf_64bit(&elf) && machine == EM_X86_64) compatible = 1; break; } printk(" Dom0 kernel: %s%s, %s, paddr 0x%" PRIx64 " -> 0x%" PRIx64 "\n", elf_64bit(&elf) ? "64-bit" : "32-bit", parms.pae ? ", PAE" : "", elf_msb(&elf) ? "msb" : "lsb", elf.pstart, elf.pend); if ( elf.bsd_symtab_pstart ) printk(" Dom0 symbol map 0x%" PRIx64 " -> 0x%" PRIx64 "\n", elf.bsd_symtab_pstart, elf.bsd_symtab_pend); if ( !compatible ) { printk("Mismatch between Xen and DOM0 kernel\n"); return -EINVAL; } #if defined(__x86_64__) if ( compat32 ) { l1_pgentry_t gdt_l1e; d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1; v->vcpu_info = (void *)&d->shared_info->compat.vcpu_info[0]; if ( nr_pages != (unsigned int)nr_pages ) nr_pages = UINT_MAX; /* * Map compatibility Xen segments into every VCPU's GDT. See * arch_domain_create() for further comments. 
*/ gdt_l1e = l1e_from_page(virt_to_page(compat_gdt_table), PAGE_HYPERVISOR); for ( i = 0; i < MAX_VIRT_CPUS; i++ ) d->arch.mm_perdomain_pt[((i << GDT_LDT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE)] = gdt_l1e; flush_tlb_one_local(GDT_LDT_VIRT_START + FIRST_RESERVED_GDT_BYTE); } #endif if ( parms.pae == PAEKERN_extended_cr3 ) set_bit(VMASST_TYPE_pae_extended_cr3, &d->vm_assist); if ( (parms.virt_hv_start_low != UNSET_ADDR) && elf_32bit(&elf) ) { unsigned long mask = (1UL << L2_PAGETABLE_SHIFT) - 1; value = (parms.virt_hv_start_low + mask) & ~mask; BUG_ON(!is_pv_32bit_domain(d)); #if defined(__i386__) if ( value > HYPERVISOR_VIRT_START ) panic("Domain 0 expects too high a hypervisor start address.\n"); #else if ( value > __HYPERVISOR_COMPAT_VIRT_START ) panic("Domain 0 expects too high a hypervisor start address.\n"); HYPERVISOR_COMPAT_VIRT_START(d) = max_t(unsigned int, m2p_compat_vstart, value); #endif }
/*
 * Restore a checkpointed process image under ptrace and hand control to
 * the injected shared object's debug entry point.
 *
 * Protocol with the injected so (order matters): the saved eip, then
 * sym_stack_used, then sym_stack_user are pushed onto the child's stack
 * before jumping to debug_entry.
 */
static void gdbloader_main(const char * target_fn)
{
    /* check: target_fn should be same as argv[0] */
    if (strcmp(target_fn, cf->cmdline[0]) != 0) {
        SYS_FATAL("target should be %s, not %s\n", cf->cmdline[0], target_fn);
        THROW(EXCEPTION_FATAL, "cmdline error");
    }

    /* execve child */
    child_pid = ptrace_execve(target_fn, cf->cmdline, cf->environ);

    /* inject memory */
    /* before we inject memory, we need to restore heap */
    uint32_t heap_end;
    heap_end = ptrace_syscall(brk, 1, cf->state->brk);
    CTHROW(heap_end == cf->state->brk, "restore heap failed: %d", heap_end);
    SYS_TRACE("restore heap to 0x%x\n", heap_end);

    inject_memory();

    /* Next, locate the injected so file and enter it from __debug_entry.
     * We push nothing for it: the process can retrieve everything from the
     * state vector, and we cannot use the stack now. */
    /* NOTICE: the state_vector should be saved in the ckpt memory; we
     * needn't restore it in the ptrace process — the injected so does it. */

    /* from the opts get the so-file bias */
    uint32_t inj_bias = opts->inj_bias;

    /* use procutils to get the file mapped at that bias */
    struct proc_entry e;
    e.start = inj_bias;
    e.bits = PE_START;
    proc_fill_entry(&e, child_pid);
    SYS_TRACE("inject so is %s\n", e.fn);

    /* use elfutils to retrieve the entry symbol */
    void * img = load_file(e.fn);
    struct elf_handler * inj_so = elf_init(img, inj_bias);
    uintptr_t debug_entry = elf_get_symbol_address(inj_so, opts->entry);
    SYS_TRACE("symbol %s at 0x%x\n", opts->entry, debug_entry);

    /* inject the injector opts */
    inject_injopts(inj_so);
    elf_cleanup(inj_so);
    free(img);

    /* we have to restore registers here... */
    SYS_FORCE("pid=%d\n", child_pid);
    SYS_FORCE("eip=0x%x\n", cf->state->regs.eip);
    ptrace_pokeuser(cf->state->regs);
    SYS_TRACE("eax=0x%x\n", cf->state->regs.eax);
    SYS_TRACE("ebx=0x%x\n", cf->state->regs.ebx);
    SYS_TRACE("ecx=0x%x\n", cf->state->regs.ecx);
    SYS_TRACE("edx=0x%x\n", cf->state->regs.edx);
    SYS_TRACE("esi=0x%x\n", cf->state->regs.esi);
    SYS_TRACE("edi=0x%x\n", cf->state->regs.edi);
    SYS_TRACE("ebp=0x%x\n", cf->state->regs.ebp);
    SYS_TRACE("esp=0x%x\n", cf->state->regs.esp);
//  SYS_TRACE("gs=0x%x\n", cf->state->regs.gs);
//  SYS_TRACE("es=0x%x\n", cf->state->regs.es);

    /* we push eip at the top of the new stack */
    ptrace_push(&cf->state->regs.eip, sizeof(uint32_t), FALSE);

    /* fix libpthread problem:
     *
     * when gdb attaches to the target, if it finds libpthread, gdb will
     * try to use libthread_db to retrieve thread-local info.  Some data,
     * like `errno', is TLS and needs that info.
     *
     * When gdb does that work, it uses ptrace to peek memory from the
     * target image, so gdb will see the original thread info: the tid is
     * different from the current pid, therefore gdb will think there are
     * at least 2 threads and try to attach to the 'old' one and
     * definitely fail.  When this failure occurs, gdb prints a warning.
     *
     * We have 2 ways to solve this problem:
     *
     * 1. add a syscall into the kernel's code to change the pid — simple.
     * 2. change the image when gdb attaches.
     *
     * We choose the 2nd one because we prefer a user-space solution.
     */
    uint32_t sym_stack_used = 0, sym_stack_user = 0;
    if (opts->fix_pthread_tid) {
        fix_libpthread(&sym_stack_used, &sym_stack_user);
        SYS_WARNING("sym_stack_used=0x%x, sym_stack_user=0x%x\n",
                sym_stack_used, sym_stack_user);
    }

    /* we push those 2 addresses onto the stack */
    ptrace_push(&sym_stack_used, sizeof(uint32_t), FALSE);
    ptrace_push(&sym_stack_user, sizeof(uint32_t), FALSE);

    /* move eip and detach, let the target process run */
    ptrace_goto(debug_entry);

    /* detach in main */
    return;
}
/*
 * Kernel entry point, called once per CPU by the boot loader.
 *
 * CPU 0 drives the ordered bring-up of every kernel subsystem while the
 * other CPUs wait in smp_trap_non_boot_cpus(); rendezvous points keep all
 * CPUs in lock step.  The initialization order below is significant.
 * Never returns in practice — ends in the per-CPU idle loop.
 */
extern "C" int
_start(kernel_args *bootKernelArgs, int currentCPU)
{
	if (bootKernelArgs->kernel_args_size != sizeof(kernel_args)
		|| bootKernelArgs->version != CURRENT_KERNEL_ARGS_VERSION) {
		// This is something we cannot handle right now - release kernels
		// should always be able to handle the kernel_args of earlier
		// released kernels.
		debug_early_boot_message("Version mismatch between boot loader and "
			"kernel!\n");
		return -1;
	}

	smp_set_num_cpus(bootKernelArgs->num_cpus);

	// wait for all the cpus to get here
	smp_cpu_rendezvous(&sCpuRendezvous, currentCPU);

	// the passed in kernel args are in a non-allocated range of memory
	if (currentCPU == 0)
		memcpy(&sKernelArgs, bootKernelArgs, sizeof(kernel_args));

	smp_cpu_rendezvous(&sCpuRendezvous2, currentCPU);

	// do any pre-booting cpu config
	cpu_preboot_init_percpu(&sKernelArgs, currentCPU);
	thread_preboot_init_percpu(&sKernelArgs, currentCPU);

	// if we're not a boot cpu, spin here until someone wakes us up
	if (smp_trap_non_boot_cpus(currentCPU, &sCpuRendezvous3)) {
		// boot CPU only: bring up every subsystem in dependency order

		// init platform
		arch_platform_init(&sKernelArgs);

		// setup debug output
		debug_init(&sKernelArgs);
		set_dprintf_enabled(true);
		dprintf("Welcome to kernel debugger output!\n");
		dprintf("Haiku revision: %lu\n", get_haiku_revision());

		// init modules
		TRACE("init CPU\n");
		cpu_init(&sKernelArgs);
		cpu_init_percpu(&sKernelArgs, currentCPU);
		TRACE("init interrupts\n");
		int_init(&sKernelArgs);

		TRACE("init VM\n");
		vm_init(&sKernelArgs);
			// Before vm_init_post_sem() is called, we have to make sure that
			// the boot loader allocated region is not used anymore
		boot_item_init();
		debug_init_post_vm(&sKernelArgs);
		low_resource_manager_init();

		// now we can use the heap and create areas
		arch_platform_init_post_vm(&sKernelArgs);
		lock_debug_init();
		TRACE("init driver_settings\n");
		driver_settings_init(&sKernelArgs);
		debug_init_post_settings(&sKernelArgs);
		TRACE("init notification services\n");
		notifications_init();
		TRACE("init teams\n");
		team_init(&sKernelArgs);
		TRACE("init ELF loader\n");
		elf_init(&sKernelArgs);
		TRACE("init modules\n");
		module_init(&sKernelArgs);
		TRACE("init semaphores\n");
		haiku_sem_init(&sKernelArgs);
		TRACE("init interrupts post vm\n");
		int_init_post_vm(&sKernelArgs);
		cpu_init_post_vm(&sKernelArgs);
		commpage_init();
		TRACE("init system info\n");
		system_info_init(&sKernelArgs);

		TRACE("init SMP\n");
		smp_init(&sKernelArgs);
		TRACE("init timer\n");
		timer_init(&sKernelArgs);
		TRACE("init real time clock\n");
		rtc_init(&sKernelArgs);

		TRACE("init condition variables\n");
		condition_variable_init();

		// now we can create and use semaphores
		TRACE("init VM semaphores\n");
		vm_init_post_sem(&sKernelArgs);
		TRACE("init generic syscall\n");
		generic_syscall_init();
		smp_init_post_generic_syscalls();
		TRACE("init scheduler\n");
		scheduler_init();
		TRACE("init threads\n");
		thread_init(&sKernelArgs);
		TRACE("init kernel daemons\n");
		kernel_daemon_init();
		arch_platform_init_post_thread(&sKernelArgs);
		TRACE("init I/O interrupts\n");
		int_init_io(&sKernelArgs);
		TRACE("init VM threads\n");
		vm_init_post_thread(&sKernelArgs);
		low_resource_manager_init_post_thread();
		TRACE("init VFS\n");
		vfs_init(&sKernelArgs);
#if ENABLE_SWAP_SUPPORT
		TRACE("init swap support\n");
		swap_init();
#endif
		TRACE("init POSIX semaphores\n");
		realtime_sem_init();
		xsi_sem_init();
		xsi_msg_init();

		// Start a thread to finish initializing the rest of the system. Note,
		// it won't be scheduled before calling scheduler_start() (on any CPU).
		TRACE("spawning main2 thread\n");
		thread_id thread = spawn_kernel_thread(&main2, "main2",
			B_NORMAL_PRIORITY, NULL);
		resume_thread(thread);

		// We're ready to start the scheduler and enable interrupts on all
		// CPUs.
		scheduler_enable_scheduling();

		// bring up the AP cpus in a lock step fashion
		TRACE("waking up AP cpus\n");
		sCpuRendezvous = sCpuRendezvous2 = 0;
		smp_wake_up_non_boot_cpus();
		smp_cpu_rendezvous(&sCpuRendezvous, 0); // wait until they're booted

		// exit the kernel startup phase (mutexes, etc work from now on out)
		TRACE("exiting kernel startup\n");
		gKernelStartup = false;

		smp_cpu_rendezvous(&sCpuRendezvous2, 0);
			// release the AP cpus to go enter the scheduler

		TRACE("starting scheduler on cpu 0 and enabling interrupts\n");
		scheduler_start();
		enable_interrupts();
	} else {
		// lets make sure we're in sync with the main cpu
		// the boot processor has probably been sending us
		// tlb sync messages all along the way, but we've
		// been ignoring them
		arch_cpu_global_TLB_invalidate();

		// this is run for each non boot processor after they've been set loose
		cpu_init_percpu(&sKernelArgs, currentCPU);
		smp_per_cpu_init(&sKernelArgs, currentCPU);

		// wait for all other AP cpus to get to this point
		smp_cpu_rendezvous(&sCpuRendezvous, currentCPU);
		smp_cpu_rendezvous(&sCpuRendezvous2, currentCPU);

		// welcome to the machine
		scheduler_start();
		enable_interrupts();
	}

#ifdef TRACE_BOOT
	// We disable interrupts for this dprintf(), since otherwise dprintf()
	// would acquires a mutex, which is something we must not do in an idle
	// thread, or otherwise the scheduler would be seriously unhappy.
	disable_interrupts();
	TRACE("main: done... begin idle loop on cpu %d\n", currentCPU);
	enable_interrupts();
#endif

	for (;;)
		arch_cpu_idle();

	return 0;
}
/*
 * Parse and print the ELF64 header of the binary at binary_path.
 *
 * Maps the file via elf_init() (which is assumed to set the global
 * elf_mem on success — TODO confirm against elf_init()), validates the
 * header, prints every header field, and releases the mapping with
 * elf_exit() on every path.
 *
 * Returns 0 on success, -1 on any failure.
 *
 * Fixes over the previous version:
 *  - the function used to fall off the end without returning a value
 *    (undefined behavior when the caller inspects the result);
 *  - every early "return -1" after a successful elf_init() skipped
 *    elf_exit(), leaking whatever resources elf_init() acquired.
 */
int parse_elf_header(const char *binary_path)
{
	if (elf_init(binary_path) == -1)
		return -1;

	elf_hdr = (Elf64_Ehdr *)elf_mem;

	if (check_elf_header(elf_hdr) == -1)
		goto err;

	print_elf_hdr_ident(elf_hdr);
	if (print_elf_hdr_class(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_endian(elf_hdr) == -1)
		goto err;
	print_elf_hdr_version(elf_hdr);
	if (print_elf_hdr_osabi(elf_hdr) == -1)
		goto err;
	print_elf_hdr_abiversion(elf_hdr);
	if (print_elf_hdr_object_type(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_machine(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_elf_version(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_entry(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_phoff(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_shoff(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_flags(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_size(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_phentsize(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_phnum(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_shentsize(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_shnum(elf_hdr) == -1)
		goto err;
	if (print_elf_hdr_shstrndx(elf_hdr) == -1)
		goto err;

	elf_exit();
	return 0;

err:
	/* release the mapping acquired by elf_init() before failing */
	elf_exit();
	return -1;
}