int _nisam_read_static_record(register N_INFO *info, register ulong pos, register byte *record) { int error; if (pos != NI_POS_ERROR) { if (info->opt_flag & WRITE_CACHE_USED && info->rec_cache.pos_in_file <= pos && flush_io_cache(&info->rec_cache)) return(-1); info->rec_cache.seek_not_done=1; /* We have done a seek */ error=my_pread(info->dfile,(char*) record,info->s->base.reclength, pos,MYF(MY_NABP)) != 0; if (info->s->r_locks == 0 && info->s->w_locks == 0) VOID(_nisam_writeinfo(info,0)); if (! error) { if (!*record) return(1); /* Record is deleted */ info->update|= HA_STATE_AKTIV; /* Record is read */ my_errno=HA_ERR_RECORD_DELETED; return(0); } return(-1); /* Error on read */ } VOID(_nisam_writeinfo(info,0)); /* No such record */ return(-1); } /* _nisam_read_record */
/*
  Read one symbol table (section 'symh') and its string table ('strh')
  from the ELF image open on 'fd'.

  Returns a heap-allocated symlist on success (caller owns it), or NULL
  on any error.  On failure all partially-built buffers are released.
*/
static struct symlist* get_syms(int fd, Elf32_Shdr *symh, Elf32_Shdr *strh)
{
	struct symlist *sl, *ret;
	int rv;

	ret = NULL;
	sl = (struct symlist *) xmalloc(sizeof(struct symlist));
	sl->str = NULL;
	sl->sym = NULL;

	/* sanity: section size must be a whole number of symbol entries */
	if (symh->sh_size % sizeof(Elf32_Sym)) {
		//printf("elf_error\n");
		goto out;
	}

	/* symbol table */
	sl->num = symh->sh_size / sizeof(Elf32_Sym);
	sl->sym = (Elf32_Sym *) xmalloc(symh->sh_size);
	rv = my_pread(fd, sl->sym, symh->sh_size, symh->sh_offset);
	if (0 > rv) {
		//perror("read");
		goto out;
	}
	if (rv != symh->sh_size) {
		//printf("elf error\n");
		goto out;
	}

	/* string table */
	sl->str = (char *) xmalloc(strh->sh_size);
	rv = my_pread(fd, sl->str, strh->sh_size, strh->sh_offset);
	if (0 > rv) {
		//perror("read");
		goto out;
	}
	if (rv != strh->sh_size) {
		//printf("elf error");
		goto out;
	}

	ret = sl;

out:
	if (ret == NULL) {
		/*
		  BUG FIX: the original leaked sl and its sym/str buffers on
		  every error path.  free(NULL) is a no-op, so the partially
		  initialized members are safe to free unconditionally here.
		*/
		free(sl->sym);
		free(sl->str);
		free(sl);
	}
	return ret;
}
/*
  Read 'length' bytes that logically live at file position 'pos' into 'buff',
  serving as much as possible from the IO_CACHE read buffer and falling back
  to my_pread() for the rest.

  flag bits:
    READING_NEXT   - caller reads sequentially; refill the cache through
                     info->read_function instead of a raw pread.
    READING_HEADER - a short read is tolerated as long as at least 3 bytes
                     of the block-info header were obtained; the rest of the
                     header buffer is zero-filled.

  Returns 0 on success, 1 on failure (my_errno set on read errors).
*/
int _mi_read_cache(IO_CACHE *info, byte *buff, my_off_t pos, uint length, int flag)
{
  uint read_length,in_buff_length;
  my_off_t offset;
  char *in_buff_pos;
  DBUG_ENTER("_mi_read_cache");

  /* Part (or all) of the request lies before the cached window:
     read that part straight from the file. */
  if (pos < info->pos_in_file)
  {
    read_length=length;
    if ((my_off_t) read_length > (my_off_t) (info->pos_in_file-pos))
      read_length=(uint) (info->pos_in_file-pos);
    info->seek_not_done=1;			/* pread bypassed the cache */
    if (my_pread(info->file,buff,read_length,pos,MYF(MY_NABP)))
      DBUG_RETURN(1);
    if (!(length-=read_length))
      DBUG_RETURN(0);
    pos+=read_length;
    buff+=read_length;
  }
  /* Copy whatever overlaps the cached buffer. */
  if (pos >= info->pos_in_file &&
      (offset= (my_off_t) (pos - info->pos_in_file)) <
      (my_off_t) (info->read_end - info->request_pos))
  {
    in_buff_pos=info->request_pos+(uint) offset;
    in_buff_length= min(length,(uint) (info->read_end-in_buff_pos));
    memcpy(buff,info->request_pos+(uint) offset,(size_t) in_buff_length);
    if (!(length-=in_buff_length))
      DBUG_RETURN(0);
    pos+=in_buff_length;
    buff+=in_buff_length;
  }
  else
    in_buff_length=0;

  /* The tail of the request lies beyond the cached window. */
  if (flag & READING_NEXT)
  {
    if (pos !=
	(info->pos_in_file + (uint) (info->read_end - info->request_pos)))
    {
      info->pos_in_file=pos;			/* Force start here */
      info->read_pos=info->read_end=info->request_pos;	/* Everything used */
      info->seek_not_done=1;
    }
    else
      info->read_pos=info->read_end;		/* All block used */
    if (!(*info->read_function)(info,buff,length))
      DBUG_RETURN(0);
    read_length=info->error;
  }
  else
  {
    info->seek_not_done=1;
    if ((read_length=my_pread(info->file,buff,length,pos,MYF(0))) == length)
      DBUG_RETURN(0);
  }
  /* Short read: only acceptable when reading a block-info header and at
     least 3 bytes were assembled in total. */
  if (!(flag & READING_HEADER) || (int) read_length == -1 ||
      read_length+in_buff_length < 3)
  {
    DBUG_PRINT("error",
               ("Error %d reading next-multi-part block (Got %d bytes)",
                my_errno, (int) read_length));
    if (!my_errno || my_errno == -1)
      my_errno=HA_ERR_WRONG_IN_RECORD;
    DBUG_RETURN(1);
  }
  /* Zero-fill the unread remainder of the header buffer. */
  bzero(buff+read_length,
        MI_BLOCK_INFO_HEADER_LENGTH - in_buff_length - read_length);
  DBUG_RETURN(0);
}						/* _mi_read_cache */
/*
  Read the ELF section headers from 'fd' and populate 'symtab' with the
  static (.symtab/.strtab) and dynamic (.dynsym/.dynstr) symbol tables
  via get_syms().

  Returns 0 on success, -1 on any error.  The scratch buffers (section
  header table and section-name string table) are always freed.
*/
static int do_load(int fd, symtab_t symtab)
{
    int rv;
    size_t size;
    Elf32_Ehdr ehdr;
    Elf32_Shdr *shdr = NULL, *p;
    Elf32_Shdr *dynsymh, *dynstrh;
    Elf32_Shdr *symh, *strh;
    char *shstrtab = NULL;
    int i;
    int ret = -1;

    /* elf header */
    rv = read(fd, &ehdr, sizeof(ehdr));
    if (0 > rv) {
        ALOGD("read\n");
        goto out;
    }
    if (rv != sizeof(ehdr)) {
        ALOGD("elf error 1\n");
        goto out;
    }
    if (strncmp((const char *) ELFMAG, (const char *) ehdr.e_ident, SELFMAG)) { /* sanity */
        ALOGD("not an elf\n");
        goto out;
    }
    if (sizeof(Elf32_Shdr) != ehdr.e_shentsize) { /* sanity */
        ALOGD("elf error 2\n");
        goto out;
    }
    /*
     * BUG FIX: e_shstrndx is used below to index shdr[]; a corrupt file
     * with an out-of-range value would cause an out-of-bounds read.
     */
    if (ehdr.e_shstrndx >= ehdr.e_shnum) {
        ALOGD("elf error: bad e_shstrndx\n");
        goto out;
    }

    /* section header table */
    size = ehdr.e_shentsize * ehdr.e_shnum;
    shdr = (Elf32_Shdr *) xmalloc(size);
    rv = my_pread(fd, shdr, size, ehdr.e_shoff);
    if (0 > rv) {
        ALOGD("read\n");
        goto out;
    }
    if ((size_t) rv != size) {
        /* BUG FIX: 'size' is size_t; printing it with %d is undefined
           behavior on LP64 — use %zu (rv >= 0 here, cast is safe). */
        ALOGD("elf error 3 %d %zu\n", rv, size);
        goto out;
    }

    /* section header string table (used to match section names below) */
    size = shdr[ehdr.e_shstrndx].sh_size;
    shstrtab = (char *) xmalloc(size);
    rv = my_pread(fd, shstrtab, size, shdr[ehdr.e_shstrndx].sh_offset);
    if (0 > rv) {
        ALOGD("read\n");
        goto out;
    }
    if ((size_t) rv != size) {
        ALOGD("elf error 4 %d %zu\n", rv, size);
        goto out;
    }

    /* symbol table headers: accept at most one SYMTAB, one DYNSYM, and
       their string tables identified by section name */
    symh = dynsymh = NULL;
    strh = dynstrh = NULL;
    for (i = 0, p = shdr; i < ehdr.e_shnum; i++, p++) {
        if (SHT_SYMTAB == p->sh_type) {
            if (symh) {
                ALOGD("too many symbol tables\n");
                goto out;
            }
            symh = p;
        } else if (SHT_DYNSYM == p->sh_type) {
            if (dynsymh) {
                ALOGD("too many symbol tables\n");
                goto out;
            }
            dynsymh = p;
        } else if (SHT_STRTAB == p->sh_type
                   && !strncmp(shstrtab + p->sh_name, ".strtab", 7)) {
            if (strh) {
                ALOGD("too many string tables\n");
                goto out;
            }
            strh = p;
        } else if (SHT_STRTAB == p->sh_type
                   && !strncmp(shstrtab + p->sh_name, ".dynstr", 7)) {
            if (dynstrh) {
                ALOGD("too many string tables\n");
                goto out;
            }
            dynstrh = p;
        }
    }

    /* sanity checks: a symbol table without its string table (or the
       reverse) is malformed */
    if ((!dynsymh && dynstrh) || (dynsymh && !dynstrh)) {
        ALOGD("bad dynamic symbol table\n");
        goto out;
    }
    if ((!symh && strh) || (symh && !strh)) {
        ALOGD("bad symbol table\n");
        goto out;
    }
    if (!dynsymh && !symh) {
        ALOGD("no symbol table\n");
        goto out;
    }

    /* symbol tables */
    if (dynsymh)
        symtab->dyn = get_syms(fd, dynsymh, dynstrh);
    if (symh)
        symtab->st = get_syms(fd, symh, strh);
    ret = 0;

out:
    free(shstrtab);
    free(shdr);
    return ret;
}
/* ===========================================================================
  Opens a gzip (.gz) file for reading or writing. The mode parameter is as
  in fopen ("rb" or "wb"). The file is given either by file descriptor or
  path name (if fd == -1). az_open returns NULL if the file could not be
  opened or if there was insufficient memory to allocate the (de)compression
  state; errno can be checked to distinguish the two cases (if errno is
  zero, the zlib error is Z_MEM_ERROR).
*/
int az_open (azio_stream *s, const char *path, int Flags, File fd)
{
  int err;
  int level = Z_DEFAULT_COMPRESSION; /* compression level */
  int strategy = Z_DEFAULT_STRATEGY; /* compression strategy */

  /* Reset the full stream state before (re)use. */
  s->stream.zalloc = (alloc_func)0;
  s->stream.zfree = (free_func)0;
  s->stream.opaque = (voidpf)0;
  memset(s->inbuf, 0, AZ_BUFSIZE_READ);
  memset(s->outbuf, 0, AZ_BUFSIZE_WRITE);
  s->stream.next_in = s->inbuf;
  s->stream.next_out = s->outbuf;
  s->stream.avail_in = s->stream.avail_out = 0;
  s->z_err = Z_OK;
  s->z_eof = 0;
  s->in = 0;
  s->out = 0;
  s->back = EOF;
  s->crc = crc32(0L, Z_NULL, 0);
  s->transparent = 0;
  s->mode = 'r';
  s->version = (unsigned char)az_magic[1]; /* this needs to be a define to version */
  s->minor_version= (unsigned char) az_magic[2]; /* minor version */
  s->dirty= AZ_STATE_CLEAN;

  /*
    We do our own version of append by nature.
    We must always have write access to take card of the header.

    NOTE(review): these asserts use bitwise OR, so they are tautologies
    (always non-zero for any non-trivial Flags) and never check anything.
    The apparent intent is '&', but changing them would make debug builds
    fire on read-only opens — confirm intended semantics before "fixing".
  */
  DBUG_ASSERT(Flags | O_APPEND);
  DBUG_ASSERT(Flags | O_WRONLY);

  if (Flags & O_RDWR)
    s->mode = 'w';

  if (s->mode == 'w')
  {
    err = deflateInit2(&(s->stream), level,
                       Z_DEFLATED, -MAX_WBITS, 8, strategy);
    /* windowBits is passed < 0 to suppress zlib header */

    s->stream.next_out = s->outbuf;
    if (err != Z_OK)
    {
      destroy(s);
      return Z_NULL;
    }
  }
  else
  {
    s->stream.next_in = s->inbuf;

    err = inflateInit2(&(s->stream), -MAX_WBITS);
    /* windowBits is passed < 0 to tell that there is no zlib header.
     * Note that in this case inflate *requires* an extra "dummy" byte
     * after the compressed stream in order to complete decompression and
     * return Z_STREAM_END. Here the gzip CRC32 ensures that 4 bytes are
     * present after the compressed stream.
     */
    if (err != Z_OK)
    {
      destroy(s);
      return Z_NULL;
    }
  }
  s->stream.avail_out = AZ_BUFSIZE_WRITE;

  errno = 0;
  s->file = fd < 0 ? my_open(path, Flags, MYF(0)) : fd;

  if (s->file < 0 )
  {
    destroy(s);
    return Z_NULL;
  }

  if (Flags & O_CREAT || Flags & O_TRUNC)
  {
    /* New (or truncated) file: initialize fresh metadata and write the
       header before positioning at end-of-file. */
    s->rows= 0;
    s->forced_flushes= 0;
    s->shortest_row= 0;
    s->longest_row= 0;
    s->auto_increment= 0;
    s->check_point= 0;
    s->comment_start_pos= 0;
    s->comment_length= 0;
    s->frm_start_pos= 0;
    s->frm_length= 0;
    s->dirty= 1; /* We create the file dirty */
    s->start = AZHEADER_SIZE + AZMETA_BUFFER_SIZE;
    write_header(s);
    my_seek(s->file, 0, MY_SEEK_END, MYF(0));
  }
  else if (s->mode == 'w')
  {
    /* Existing file opened for write: load its metadata header first. */
    uchar buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
    my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0,
             MYF(0));
    read_header(s, buffer); /* skip the .az header */
    my_seek(s->file, 0, MY_SEEK_END, MYF(0));
  }
  else
  {
    check_header(s); /* skip the .az header */
  }

  return 1;
}
/*
  Preload the index file (or just its non-leaf blocks) into the key cache.

  info           open MyISAM table handle
  key_map        bitmap of keys to consider; nothing is done if none active
  ignore_leaves  when TRUE only blocks that pass mi_test_if_nod() (non-leaf
                 pages) are inserted; requires all indexes to share one
                 block size

  Returns 0 on success, otherwise my_errno (also assigned as the return
  value): HA_ERR_NON_UNIQUE_BLOCK_SIZE, HA_ERR_OUT_OF_MEM, or the errno
  captured after a read/insert failure.
*/
int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
{
  uint i;
  ulong length, block_length= 0;
  uchar *buff= NULL;
  MYISAM_SHARE* share= info->s;
  uint keys= share->state.header.keys;
  MI_KEYDEF *keyinfo= share->keyinfo;
  my_off_t key_file_length= share->state.state.key_file_length;
  my_off_t pos= share->base.keystart;
  DBUG_ENTER("mi_preload");

  /* Nothing to do: no keys, none requested, or empty index area. */
  if (!keys || !mi_is_any_key_active(key_map) || key_file_length == pos)
    DBUG_RETURN(0);

  block_length= keyinfo[0].block_length;

  if (ignore_leaves)
  {
    /* Check whether all indexes use the same block size */
    for (i= 1 ; i < keys ; i++)
    {
      if (keyinfo[i].block_length != block_length)
        DBUG_RETURN(my_errno= HA_ERR_NON_UNIQUE_BLOCK_SIZE);
    }
  }
  else
    block_length= share->key_cache->key_cache_block_size;

  /* Round the preload buffer down to a whole number of blocks,
     but never below one block. */
  length= info->preload_buff_size/block_length * block_length;
  set_if_bigger(length, block_length);

  if (!(buff= (uchar *) my_malloc(length, MYF(MY_WME))))
    DBUG_RETURN(my_errno= HA_ERR_OUT_OF_MEM);

  /* Drop any stale cached copies before re-reading from disk. */
  if (flush_key_blocks(share->key_cache,share->kfile, FLUSH_RELEASE))
    goto err;

  do
  {
    /* Read the next block of index file into the preload buffer */
    if ((my_off_t) length > (key_file_length-pos))
      length= (ulong) (key_file_length-pos);
    if (my_pread(share->kfile, (uchar*) buff, length, pos,
                 MYF(MY_FAE|MY_FNABP)))
      goto err;

    if (ignore_leaves)
    {
      /* Walk the buffer block by block, caching only non-leaf pages;
         restore buff to the buffer start afterwards. */
      uchar *end= buff+length;
      do
      {
        if (mi_test_if_nod(buff))
        {
          if (key_cache_insert(share->key_cache,
                               share->kfile, pos, DFLT_INIT_HITS,
                               (uchar*) buff, block_length))
            goto err;
        }
        pos+= block_length;
      }
      while ((buff+= block_length) != end);
      buff= end-length;
    }
    else
    {
      /* Insert the whole buffer in one call. */
      if (key_cache_insert(share->key_cache,
                           share->kfile, pos, DFLT_INIT_HITS,
                           (uchar*) buff, length))
        goto err;
      pos+= length;
    }
  }
  while (pos != key_file_length);

  my_free((char*) buff, MYF(0));
  DBUG_RETURN(0);

err:
  my_free((char*) buff, MYF(MY_ALLOW_ZERO_PTR));
  DBUG_RETURN(my_errno= errno);
}
/* * Open an ELF file and load it into memory. */ static Elf32_Addr load_elf_file(const char *filename, size_t pagesize, Elf32_Addr *out_base, Elf32_Addr *out_phdr, Elf32_Addr *out_phnum, const char **out_interp) { int fd = open_program(filename); if (fd < 0) { fprintf(stderr, "Cannot open %s: %s\n", filename, strerror(errno)); exit(2); } uintptr_t pread_pos = 0; Elf32_Ehdr ehdr; my_pread(filename, "Failed to read ELF header from file! ", fd, &ehdr, sizeof(ehdr), 0, &pread_pos); if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 || ehdr.e_version != EV_CURRENT || ehdr.e_ehsize != sizeof(ehdr) || ehdr.e_phentsize != sizeof(Elf32_Phdr)) { fprintf(stderr, "%s has no valid ELF header!\n", filename); exit(1); } switch (ehdr.e_machine) { #if defined(__i386__) case EM_386: #elif defined(__x86_64__) case EM_X86_64: #elif defined(__arm__) case EM_ARM: #elif defined(__mips__) case EM_MIPS: #else # error "Don't know the e_machine value for this architecture!" #endif break; default: fprintf(stderr, "%s: ELF file has wrong architecture (e_machine=%u)\n", filename, ehdr.e_machine); exit(1); } Elf32_Phdr phdr[MAX_PHNUM]; if (ehdr.e_phnum > sizeof(phdr) / sizeof(phdr[0]) || ehdr.e_phnum < 1) { fprintf(stderr, "%s: ELF file has unreasonable e_phnum=%u\n", filename, ehdr.e_phnum); exit(1); } bool anywhere; switch (ehdr.e_type) { case ET_EXEC: anywhere = false; break; case ET_DYN: anywhere = true; break; default: fprintf(stderr, "%s: ELF file has unexpected e_type=%u\n", filename, ehdr.e_type); exit(1); } my_pread(filename, "Failed to read program headers from ELF file! ", fd, phdr, sizeof(phdr[0]) * ehdr.e_phnum, ehdr.e_phoff, &pread_pos); size_t i = 0; while (i < ehdr.e_phnum && phdr[i].p_type != PT_LOAD) ++i; if (i == ehdr.e_phnum) { fprintf(stderr, "%s: ELF file has no PT_LOAD header!", filename); exit(1); } /* * ELF requires that PT_LOAD segments be in ascending order of p_vaddr. * Find the last one to calculate the whole address span of the image. 
*/ const Elf32_Phdr *first_load = &phdr[i]; const Elf32_Phdr *last_load = &phdr[ehdr.e_phnum - 1]; while (last_load > first_load && last_load->p_type != PT_LOAD) --last_load; /* * For NaCl, the first load segment must always be the code segment. */ if (first_load->p_flags != (PF_R | PF_X)) { fprintf(stderr, "%s: First PT_LOAD has p_flags=%#x (expecting RX=%#x)\n", filename, first_load->p_flags, PF_R | PF_X); exit(1); } if (first_load->p_filesz != first_load->p_memsz) { fprintf(stderr, "%s: Code segment has p_filesz %u != p_memsz %u\n", filename, first_load->p_filesz, first_load->p_memsz); exit(1); } /* * Decide where to load the image and reserve the portions of the address * space where it will reside. */ Elf32_Addr load_bias = choose_load_bias(filename, pagesize, first_load, last_load, anywhere); DEBUG_PRINTF("XXX load_bias (%s) %#x\n", anywhere ? "anywhere" : "fixed", load_bias); /* * Map the code segment in. */ my_mmap(filename, "code segment", first_load - phdr, load_bias + round_down(first_load->p_vaddr, pagesize), first_load->p_memsz, prot_from_phdr(first_load), MAP_PRIVATE | MAP_FIXED, fd, round_down(first_load->p_offset, pagesize)); Elf32_Addr last_end = first_load->p_vaddr + load_bias + first_load->p_memsz; Elf32_Addr last_page_end = round_up(last_end, pagesize); /* * Map the remaining segments, and protect any holes between them. * The large hole after the code segment does not need to be * protected (and cannot be). It covers the whole large tail of the * dynamic text area, which cannot be touched by mprotect. */ const Elf32_Phdr *ph; for (ph = first_load + 1; ph <= last_load; ++ph) { if (ph->p_type == PT_LOAD) { Elf32_Addr start = round_down(ph->p_vaddr + load_bias, pagesize); if (start > last_page_end && ph > first_load + 1) { if (mprotect((void *) last_page_end, start - last_page_end, PROT_NONE) != 0) { fprintf(stderr, "%s: Failed to mprotect segment %u hole! 
(%s)\n", filename, ph - phdr, strerror(errno)); exit(1); } } last_end = ph->p_vaddr + load_bias + ph->p_memsz; last_page_end = round_up(last_end, pagesize); Elf32_Addr map_end = last_page_end; /* * Unlike POSIX mmap, NaCl's mmap does not reliably handle COW * faults in the remainder of the final partial page. So to get * the expected behavior for the unaligned boundary between data * and bss, it's necessary to allocate the final partial page of * data as anonymous memory rather than mapping it from the file. */ Elf32_Addr file_end = ph->p_vaddr + load_bias + ph->p_filesz; if (ph->p_memsz > ph->p_filesz) map_end = round_down(file_end, pagesize); if (map_end > start) { my_mmap(filename, "segment", ph - phdr, start, map_end - start, prot_from_phdr(ph), MAP_PRIVATE | MAP_FIXED, fd, round_down(ph->p_offset, pagesize)); } if (map_end < last_page_end) { /* * Handle the "bss" portion of a segment, where the memory size * exceeds the file size and we zero-fill the difference. We map * anonymous pages for all the pages containing bss space. Then, * if there is any partial-page tail of the file data, we read that * into the first such page. * * This scenario is invalid for an unwritable segment. */ if ((ph->p_flags & PF_W) == 0) { fprintf(stderr, "%s: Segment %u has p_memsz %u > p_filesz %u but no PF_W!\n", filename, ph - phdr, ph->p_memsz, ph->p_filesz); exit(1); } my_mmap(filename, "bss segment", ph - phdr, map_end, last_page_end - map_end, prot_from_phdr(ph), MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0); if (file_end > map_end) { /* * There is a partial page of data to read in. */ my_pread(filename, "Failed to read final partial page of data! ", fd, (void *) map_end, file_end - map_end, round_down(ph->p_offset + ph->p_filesz, pagesize), &pread_pos); } } } } /* * We've finished with the file now. */ close(fd); /* * Find the PT_INTERP header, if there is one. 
*/ const Elf32_Phdr *interp = NULL; if (out_interp != NULL) { for (i = 0; i < ehdr.e_phnum; ++i) { if (phdr[i].p_type == PT_INTERP) { interp = &phdr[i]; break; } } } /* * Find the PT_LOAD segments containing the PT_INTERP data and the phdrs. */ for (ph = first_load; ph <= last_load && (interp != NULL || out_phdr != NULL); ++ph) { if (interp != NULL && segment_contains(ph, interp->p_offset, interp->p_filesz)) { *out_interp = (const char *) (interp->p_vaddr + load_bias); interp = NULL; } if (out_phdr != NULL && segment_contains(ph, ehdr.e_phoff, ehdr.e_phnum * sizeof(phdr[0]))) { *out_phdr = ehdr.e_phoff - ph->p_offset + ph->p_vaddr + load_bias; out_phdr = NULL; } } if (interp != NULL) { fprintf(stderr, "%s: PT_INTERP not within any PT_LOAD segment\n", filename); exit(1); } if (out_phdr != NULL) { *out_phdr = 0; fprintf(stderr, "Warning: %s: ELF program headers not within any PT_LOAD segment\n", filename); } if (out_phnum != NULL) *out_phnum = ehdr.e_phnum; if (out_base != NULL) *out_base = load_bias; return ehdr.e_entry + load_bias; }