static char *module_get_string(struct module *mod, const size_t name) {
  // If the strings table is empty we shouldn't ask for a symbol's name
  TRY_PTR(mod->strings);
  TRY_TRUE(name < mod->strings_sz);
  return mod->strings + name;
CATCH:
  return NULL;
}
static uintptr_t module_get_symbol_addr(struct module *mod, const symbol_t *symbol,
    const uint32_t type) {
  // Skip symbols from special sections and those whose type does not match
  if (IS_VALID_SHNDX(symbol->st_shndx) && (SYM_ST_INFO(symbol->st_info) & type)) {
    struct section *section;
    TRY_PTR(section = module_get_section(mod, symbol->st_shndx));
    return section->addr + symbol->st_value;
  }
  // Skipped symbols fall through to return 0
CATCH:
  return 0;
}
static int do_relocation(struct module *mod, struct section *dest_section,
    const Elf32_Rel *relocation, getsym_t getsym_fun, void *getsym_arg) {
  size_t symbol_idx = ELF32_R_SYM(relocation->r_info);
  TRY_TRUE(symbol_idx < mod->symbols_sz);
  symbol_t *symbol = mod->symbols + symbol_idx;
  TRY_TRUE(!IS_RES_SHNDX(symbol->st_shndx));
  TRY_TRUE(SYM_ST_INFO(symbol->st_info) & ST_ANY_ALLOWED);
  uint32_t symbol_addr = 0;
  if (symbol->st_shndx == SHN_UNDEF) {
    // Undefined symbol: resolve it through the caller-provided callback
    const char *sym_name;
    TRY_PTR(sym_name = module_get_string(mod, symbol->st_name));
    symbol_addr = (uint32_t) getsym_fun(getsym_arg, sym_name);
    TRY_PTR(symbol_addr);
  } else {
    struct section *section;
    TRY_PTR(section = module_get_section(mod, symbol->st_shndx));
    if (!section_is_alloc(section)) {
      return 0;
    }
    symbol_addr = (uint32_t) (section->addr + symbol->st_value);
  }
  uint32_t *destination = (uint32_t *) (dest_section->addr + relocation->r_offset);
  switch (ELF32_R_TYPE(relocation->r_info)) {
    case R_386_32:   // S + A
      *destination = *destination + symbol_addr;
      break;
    case R_386_PC32: // S + A - P
      *destination = *destination + symbol_addr - (uint32_t) destination;
      break;
    default:
      // Unrecognized relocation type encountered
      TRY_TRUE(0);
  }
  return 0;
CATCH:
  return -1;
}
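// Illustrative only: a minimal resolver matching the way getsym_fun is
// invoked in do_relocation() above -- called as getsym_fun(getsym_arg, name)
// and expected to return a non-zero address on success and NULL/0 on failure.
// The actual getsym_t typedef lives elsewhere in this codebase; the signature
// and the table-based lookup below are assumptions for the sake of example.
// Assumes <string.h> for strcmp().
struct example_sym {
  const char *name; // exported symbol name
  void *addr;       // address handed back to the relocation code
};

static void *example_getsym(void *arg, const char *name) {
  const struct example_sym *table = arg;
  for (; table->name != NULL; table++) {
    if (strcmp(table->name, name) == 0) {
      return table->addr; // resolved: do_relocation() patches with this address
    }
  }
  return NULL; // unresolved: do_relocation() reports failure
}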
static inline __must_check int initFreeMinorNumbersPool(void) {
  int i;
  LOCK_MINOR_NUMBERS_POOL();
  TRY_PTR(OR_GOTO(fail), freeMinorNumbers,
      kmalloc(XORDEV_MAX_DEVS * sizeof(int), GFP_KERNEL),
      "allocate character device minor numbers pool");
  for (i = 0; i < XORDEV_MAX_DEVS; ++i) {
    freeMinorNumbers[i] = 3 * i;
  }
  UNLOCK_MINOR_NUMBERS_POOL();
  return 0;
fail:
  UNLOCK_MINOR_NUMBERS_POOL();
  return -ENOMEM;
}
static inline __must_check int setup3DeviceDataStructures(struct xordev *dev,
    int minorNumber) {
  int i;
  struct semaphore *mutex;
  TRY_PTR(OR_RETURN(-ENOMEM), mutex,
      kmalloc(sizeof(struct semaphore), GFP_KERNEL),
      "allocate memory for mutex for device with MINOR=%d", minorNumber);
  sema_init(mutex, 1);
  for (i = 0; i < 3; ++i) {
    printk(KERN_INFO "xordev: Setting up char device MAJOR=%d MINOR=%d\n",
        xordev_major, minorNumber + i);
    dev[i].number = MKDEV(xordev_major, minorNumber + i);
    dev[i].mutex = mutex;
  }
  return 0;
}
static int module_read_strings(struct module *mod, const Elf32_Shdr *elf_shdr,
    FILE *elf_file) {
  TRY_TRUE(mod->strings == NULL);
  TRY_TRUE(elf_shdr->sh_type == SHT_STRTAB);
  // Assuming there is only one symbols table (which implies only one
  // associated strings table)
  mod->strings_sz = elf_shdr->sh_size;
  // An empty string table section is permitted.
  if (mod->strings_sz > 0) {
    TRY_PTR(mod->strings = malloc(mod->strings_sz));
    TRY_TRUE(fseek(elf_file, elf_shdr->sh_offset, SEEK_SET) == 0);
    TRY_TRUE(fread(mod->strings, mod->strings_sz, 1, elf_file) == 1);
    // A valid string table starts and ends with a NUL byte
    TRY_TRUE(mod->strings[0] == '\0');
    TRY_TRUE(mod->strings[mod->strings_sz - 1] == '\0');
  }
  return 0;
CATCH:
  free(mod->strings);
  mod->strings = NULL;
  return -1;
}
static int module_read_symbols(struct module *mod, const Elf32_Shdr *elf_shdr,
    FILE *elf_file) {
  TRY_TRUE(mod->symbols == NULL);
  TRY_TRUE(elf_shdr->sh_type == SHT_SYMTAB);
  TRY_TRUE(sizeof(Elf32_Sym) == elf_shdr->sh_entsize);
  // The section size must be a whole multiple of the entry size
  TRY_TRUE(elf_shdr->sh_size % elf_shdr->sh_entsize == 0);
  // Assuming there is only one symbols table
  TRY_TRUE(fseek(elf_file, elf_shdr->sh_offset, SEEK_SET) == 0);
  mod->symbols_sz = elf_shdr->sh_size / elf_shdr->sh_entsize;
  // We read symbols as Elf32_Sym but store them internally as symbol_t;
  // Elf32_Sym is known only to the loading code
  TRY_TRUE(sizeof(Elf32_Sym) == sizeof(symbol_t));
  TRY_PTR(mod->symbols = malloc(mod->symbols_sz * sizeof(symbol_t)));
  TRY_TRUE(fread(mod->symbols, sizeof(symbol_t), mod->symbols_sz, elf_file)
      == mod->symbols_sz);
  return 0;
CATCH:
  free(mod->symbols);
  mod->symbols = NULL;
  return -1;
}
static inline __must_check int allocQueues(struct xordev *dev) {
  dev->dmaSource1 = NULL;
  dev->dmaSource2 = NULL;
  dev->dmaDestination = NULL;
  dev->source1 = NULL;
  dev->source2 = NULL;
  dev->destination = NULL;
  dev->dmaSize = NULL;
  dev->deviceState = NULL;
  dev->deviceStateSpinlock = NULL;
  dev->waitSource1 = NULL;
  dev->waitSource2 = NULL;
  dev->waitDestination = NULL;
  pci_set_master(dev->pciDev);
  TRY_NORES(OR_GOTO(fail), pci_set_dma_mask(dev->pciDev, DMA_BIT_MASK(32)),
      "set dma mask");
  TRY_PTR(OR_GOTO(fail), dev->dmaSource1PciAddr,
      kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource2PciAddr,
      kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaDestinationPciAddr,
      kmalloc(sizeof(dma_addr_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource1,
      dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
          dev->dmaSource1PciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSource2,
      dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
          dev->dmaSource2PciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaDestination,
      dma_alloc_coherent(&dev->pciDev->dev, DMA_BUFFER_BYTES,
          dev->dmaDestinationPciAddr, GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->dmaSize, kmalloc(sizeof(size_t), GFP_KERNEL));
  TRY_PTR(OR_GOTO(fail), dev->deviceState, kmalloc(sizeof(int), GFP_KERNEL));
  *dev->deviceState = DEVICE_UNOCCUPIED;
  TRY_PTR(OR_GOTO(fail), dev->deviceStateSpinlock,
      kmalloc(sizeof(spinlock_t), GFP_KERNEL));
  spin_lock_init(dev->deviceStateSpinlock);
  // These are wait queue heads, so allocate a full wait_queue_head_t
  // (not a wait_queue_t entry) for init_waitqueue_head()
  TRY_PTR(OR_GOTO(fail), dev->waitSource1,
      kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitSource1);
  TRY_PTR(OR_GOTO(fail), dev->waitSource2,
      kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitSource2);
  TRY_PTR(OR_GOTO(fail), dev->waitDestination,
      kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL));
  init_waitqueue_head(dev->waitDestination);
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->source1), "create source1 memory queue");
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->source2), "create source2 memory queue");
  TRY_NORES(OR_GOTO(fail), memfifoNew(&dev->destination),
      "create destination memory queue");
  TRY_NORES(OR_GOTO(fail), pci_request_region(dev->pciDev, 0, "xordev"),
      "request BAR0");
  TRY_PTR(OR_GOTO(fail), dev->bar0, pci_iomap(dev->pciDev, 0, BAR0_SIZE),
      "map pci iomem");
  return 0;
fail:
  memfifoDelete(&dev->destination);
  memfifoDelete(&dev->source2);
  memfifoDelete(&dev->source1);
  return -ENOMEM;
}
/* export { */
struct module *module_load(const char *filename, getsym_t getsym_fun,
    void *getsym_arg) {
  FILE *elf_file = NULL;
  Elf32_Shdr *section_headers = NULL;
  struct module *mod = NULL;
  TRY_PTR(mod = malloc(sizeof(struct module)));
  module_init(mod);
  TRY_PTR(elf_file = fopen(filename, "rb"));
  Elf32_Ehdr elf_header;
  TRY_TRUE(fread(&elf_header, sizeof(Elf32_Ehdr), 1, elf_file) == 1);
  TRY_TRUE(elf_header.e_ident[EI_MAG0] == ELFMAG0
      && elf_header.e_ident[EI_MAG1] == ELFMAG1
      && elf_header.e_ident[EI_MAG2] == ELFMAG2
      && elf_header.e_ident[EI_MAG3] == ELFMAG3);
  TRY_TRUE(elf_header.e_ident[EI_CLASS] == ELFCLASS32);
  TRY_TRUE(elf_header.e_ident[EI_DATA] == ELFDATA2LSB);
  TRY_TRUE(elf_header.e_type == ET_REL);
  TRY_TRUE(elf_header.e_machine == EM_386);
  TRY_PTR(elf_header.e_shoff);
  TRY_TRUE(fseek(elf_file, elf_header.e_shoff, SEEK_SET) == 0);
  TRY_TRUE(elf_header.e_shentsize == sizeof(Elf32_Shdr));
  // If the number of sections is greater than or equal to SHN_LORESERVE
  // (0xff00), e_shnum has the value SHN_UNDEF (0) and the actual number of
  // section header table entries is contained in the sh_size field of the
  // section header at index 0. We do not handle this extension.
  TRY_TRUE(elf_header.e_shnum != SHN_UNDEF);
  TRY_TRUE(elf_header.e_shnum < SHN_LORESERVE);
  TRY_PTR(section_headers = malloc(sizeof(Elf32_Shdr) * elf_header.e_shnum));
  TRY_TRUE(fread(section_headers, elf_header.e_shentsize, elf_header.e_shnum,
      elf_file) == elf_header.e_shnum);
  // Count and create sections
  mod->sections_sz = elf_header.e_shnum;
  TRY_PTR(mod->sections = malloc(sizeof(struct section) * mod->sections_sz));
  memset(mod->sections, 0, sizeof(struct section) * mod->sections_sz);
  // Not actually the index of the first global symbol, but we will treat all
  // of them as global
  size_t global_sym_idx = 0;
  size_t symtab_idx = 0;
  // Load sections
  for (size_t idx = 0; idx < elf_header.e_shnum; idx++) {
    Elf32_Shdr *shdr = section_headers + idx;
    struct section *section;
    TRY_PTR(section = module_get_section(mod, idx));
    switch (shdr->sh_type) {
      case SHT_NULL:
      case SHT_STRTAB: // Read together with the associated symbol table
      case SHT_REL:    // Relocations are performed later on
        break;
      case SHT_SYMTAB:
        TRY_TRUE(symtab_idx == 0);
        symtab_idx = idx;
        TRY_SYS(module_read_symbols(mod, shdr, elf_file));
        global_sym_idx = shdr->sh_info;
        // Field sh_link contains the section header index of the associated
        // string table
        TRY_TRUE(IS_VALID_SHNDX(shdr->sh_link)
            && shdr->sh_link < elf_header.e_shnum);
        TRY_SYS(module_read_strings(mod, section_headers + shdr->sh_link,
            elf_file));
        break;
      case SHT_NOBITS:
        if ((shdr->sh_flags & SHF_ALLOC) && shdr->sh_size > 0) {
          TRY_SYS(section_alloc(section, shdr));
          memset((void *) section->addr, 0, shdr->sh_size);
        }
        break;
      case SHT_PROGBITS:
      default:
        if ((shdr->sh_flags & SHF_ALLOC) && shdr->sh_size > 0) {
          TRY_SYS(section_alloc(section, shdr));
          TRY_TRUE(fseek(elf_file, shdr->sh_offset, SEEK_SET) == 0);
          TRY_TRUE(fread((void *) section->addr, shdr->sh_size, 1, elf_file) == 1);
        }
        break;
    }
  }
  // An empty string table section is permitted.
  // TRY_PTR(mod->strings);
  TRY_PTR(mod->symbols);
  // Perform relocations
  for (size_t idx = 0; idx < elf_header.e_shnum; idx++) {
    Elf32_Shdr *shdr = section_headers + idx;
    if (shdr->sh_type == SHT_REL && shdr->sh_link == symtab_idx) {
      // Missing check: (shdr->sh_flags & SHF_INFO_LINK)
      TRY_TRUE(fseek(elf_file, shdr->sh_offset, SEEK_SET) == 0);
      TRY_TRUE(sizeof(Elf32_Rel) == shdr->sh_entsize);
      size_t rel_num = shdr->sh_size / shdr->sh_entsize;
      TRY_TRUE(shdr->sh_size == rel_num * shdr->sh_entsize);
      struct section *dest_section;
      TRY_PTR(dest_section = module_get_section(mod, shdr->sh_info));
      if (section_is_alloc(dest_section)) {
        for (size_t rel_idx = 0; rel_idx < rel_num; rel_idx++) {
          Elf32_Rel relocation;
          TRY_TRUE(fread(&relocation, sizeof(Elf32_Rel), 1, elf_file) == 1);
          TRY_SYS(do_relocation(mod, dest_section, &relocation,
              getsym_fun, getsym_arg));
        }
      }
    }
  }
  // Compress the symbol table (remove local symbols after relocations)
  TRY_TRUE(global_sym_idx < mod->symbols_sz);
  mod->symbols_sz -= global_sym_idx;
  memmove(mod->symbols, mod->symbols + global_sym_idx,
      mod->symbols_sz * sizeof(symbol_t));
  TRY_PTR(mod->symbols = realloc(mod->symbols,
      mod->symbols_sz * sizeof(symbol_t)));
  // Set up section memory protections
  for (size_t idx = 0; idx < elf_header.e_shnum; idx++) {
    struct section *section;
    TRY_PTR(section = module_get_section(mod, idx));
    if (section_is_alloc(section)) {
      TRY_SYS(mprotect((void *) section->mmap_start, section->mmap_length,
          section->mmap_prot));
    }
  }
  free(section_headers);
  fclose(elf_file);
  return mod;
CATCH:
  free(section_headers);
  if (elf_file) {
    fclose(elf_file);
  }
  module_unload(mod);
  return NULL;
}
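// Illustrative usage sketch, not part of the loader: one possible way to
// drive module_load() with the example resolver sketched after
// do_relocation(). "hello.o", the exported printf entry, and the helper name
// are made-up values; module_unload() is assumed to be the matching cleanup
// entry point, as used in module_load()'s CATCH path. Assumes <stdio.h>.
static struct module *example_load_hello(void) {
  static struct example_sym syms[] = {
    { "printf", (void *) printf },  // expose libc printf to the loaded object
    { NULL, NULL },                 // table terminator for example_getsym()
  };
  struct module *mod = module_load("hello.o", example_getsym, syms);
  if (mod == NULL) {
    // open, parse, or relocation failure was already reported by the loader
    return NULL;
  }
  return mod; // caller releases it with module_unload(mod)
}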