/*
 * esp8266_Read - sqlite3 VFS xRead: read 'amount' bytes at 'offset'.
 *
 * Seeks to the (31-bit clamped) offset, then reads. Returns SQLITE_OK on a
 * full read, SQLITE_IOERR_SHORT_READ on a short read or failed seek, and
 * SQLITE_IOERR_READ on a read error.
 *
 * Fix: nRead was size_t, making the `nRead >= 0` test always true — a
 * negative error from vfs_read() wrapped to a huge unsigned value, the
 * SQLITE_IOERR_READ branch was unreachable, and `%u` mismatched size_t.
 * nRead is now signed so errors are distinguished from short reads.
 */
static int esp8266_Read(sqlite3_file *id, void *buffer, int amount, sqlite3_int64 offset)
{
	sint32_t nRead;
	sint32_t ofst, iofst;
	esp8266_file *file = (esp8266_file*) id;

	/* Clamp the 64-bit sqlite offset into the signed 32-bit VFS range. */
	iofst = (sint32_t)(offset & 0x7FFFFFFF);

	dbg_printf("esp8266_Read: 1r %s %d %d %lld[%ld] \n", file->name, file->fd, amount, offset, iofst);
	ofst = vfs_lseek(file->fd, iofst, VFS_SEEK_SET);
	if (ofst != iofst) {
		dbg_printf("esp8266_Read: 2r %ld != %ld FAIL\n", ofst, iofst);
		return SQLITE_IOERR_SHORT_READ /* SQLITE_IOERR_SEEK */;
	}

	nRead = vfs_read(file->fd, buffer, amount);
	if (nRead == amount) {
		dbg_printf("esp8266_Read: 3r %s %d %d OK\n", file->name, nRead, amount);
		return SQLITE_OK;
	} else if (nRead >= 0) {
		/* Short read: sqlite expects the remainder of the buffer handled
		 * by the caller; report it distinctly from a hard error. */
		dbg_printf("esp8266_Read: 3r %s %d %d FAIL\n", file->name, nRead, amount);
		return SQLITE_IOERR_SHORT_READ;
	}

	/* nRead < 0: vfs_read() reported an error. */
	dbg_printf("esp8266_Read: 4r %s FAIL\n", file->name);
	return SQLITE_IOERR_READ;
}
/*
 * fuse_read - FUSE read callback: seek to 'offset' and read up to 'size'
 * bytes into 'buf'. Returns the byte count from vfs_read(), or a negative
 * error from either the seek or the read. 'path' is unused (the open file
 * handle in fi->fh is used instead).
 */
static int fuse_read(const char *path, char *buf, size_t size, off_t offset, struct fuse_file_info *fi)
{
	int rc = vfs_lseek(fi->fh, offset, 0);

	if (rc < 0)
		return rc;

	/* vfs_read()'s result (count or negative error) is returned as-is. */
	return vfs_read(fi->fh, buf, size);
}
/*
 * sys_lseek - lseek syscall entry: validate 'fd' against the current
 * process's open-file table and forward to vfs_lseek().
 * Returns vfs_lseek()'s result, or -1 for an invalid descriptor.
 */
int sys_lseek(int fd, off_t offset, int whence)
{
	struct process *proc = process_get_current();

	/* Reject out-of-range or unopened descriptors up front. */
	if (fd < 0 || fd >= PROCESS_MAX_FILE || proc->files[fd] == NULL) {
		printk(LOG_DEBUG, "sys_lseek: invalid fd\n");
		return -1;
	}

	return vfs_lseek(proc->files[fd], offset, whence);
}
/*
 * elf_program_entries_read - read the whole ELF program-header table.
 *
 * Allocates a buffer, seeks to e_phoff and reads e_phnum entries into it.
 * On success stores the buffer in *p_entries (ownership passes to the
 * caller, who frees it via kmem_free) and returns 0; otherwise frees the
 * buffer and returns an error code (EINVAL, ENOMEM, or the seek/read error).
 *
 * Fix: the table size was computed as e_phoff * e_phnum — multiplying the
 * table's *file offset* by the entry count, which is not a size at all.
 * The correct size is one Elf32_Phdr per entry.
 */
static error_t elf_program_entries_read(struct vfs_file_s *file, Elf32_Ehdr *e_header, Elf32_Phdr **p_entries)
{
	register error_t err;
	register size_t size;
	register ssize_t count;
	uint8_t *buff;
	kmem_req_t req;

	/* Program-header table size: e_phnum entries of sizeof(Elf32_Phdr).
	 * NOTE(review): if files with e_phentsize != sizeof(Elf32_Phdr) must
	 * be supported, use e_header->e_phentsize here — confirm. */
	size = e_header->e_phnum * sizeof(Elf32_Phdr);

	if(size == 0)
	{
		printk(ERROR, "\nERROR: elf_program_entries_read: no program entries found\n");
		return EINVAL;
	}

	req.type  = KMEM_GENERIC;
	req.size  = size;
	req.flags = AF_USER;

	if((buff = kmem_alloc(&req)) == NULL)
		return ENOMEM;

	if((err=vfs_lseek(file, e_header->e_phoff, VFS_SEEK_SET, NULL)))
	{
		printk(ERROR, "\nERROR: elf_program_entries_read: faild to localise entries\n");
		goto ELF_PROG_ENTRIES_READ_ERR;
	}

	if((count=vfs_read(file, buff, size)) != size)
	{
		/* Cast to int: printk's %d does not accept ssize_t/size_t. */
		printk(ERROR, "\nERROR: elf_program_entries_read: faild to read program entries, got %d bytes, expected %d bytes\n", (int)count, (int)size);
		err = (error_t) count;
		goto ELF_PROG_ENTRIES_READ_ERR;
	}

	*p_entries = (Elf32_Phdr*) buff;
	return 0;

ELF_PROG_ENTRIES_READ_ERR:
	req.ptr = buff;
	kmem_free(&req);
	return err;
}
/*
 * esp8266_Write - sqlite3 VFS xWrite: write 'amount' bytes at 'offset'.
 *
 * Seeks to the (31-bit clamped) offset, then writes. Returns SQLITE_OK on a
 * full write, SQLITE_IOERR_SEEK if the seek lands elsewhere, and
 * SQLITE_IOERR_WRITE on a short or failed write.
 *
 * Fix: nWrite was size_t but printed with %u (format mismatch / UB on
 * targets where size_t != unsigned int), and a negative error from
 * vfs_write() silently wrapped. nWrite is now signed like the Read path.
 */
static int esp8266_Write(sqlite3_file *id, const void *buffer, int amount, sqlite3_int64 offset)
{
	sint32_t nWrite;
	sint32_t ofst, iofst;
	esp8266_file *file = (esp8266_file*) id;

	/* Clamp the 64-bit sqlite offset into the signed 32-bit VFS range. */
	iofst = (sint32_t)(offset & 0x7FFFFFFF);

	dbg_printf("esp8266_Write: 1w %s %d %d %lld[%ld] \n", file->name, file->fd, amount, offset, iofst);
	ofst = vfs_lseek(file->fd, iofst, VFS_SEEK_SET);
	if (ofst != iofst) {
		return SQLITE_IOERR_SEEK;
	}

	/* Any result other than a full write (short write or negative error)
	 * is reported as SQLITE_IOERR_WRITE, matching the original contract. */
	nWrite = vfs_write(file->fd, buffer, amount);
	if (nWrite != amount) {
		dbg_printf("esp8266_Write: 2w %s %d %d\n", file->name, nWrite, amount);
		return SQLITE_IOERR_WRITE;
	}

	dbg_printf("esp8266_Write: 3w %s OK\n", file->name);
	return SQLITE_OK;
}
/*
 * _read_handler - shell command: hex-dump part of a file.
 *
 * Usage: read <path> [bytes] [offset]
 *   argv[1] = file path (required)
 *   argv[2] = number of bytes to dump (default: 16)
 *   argv[3] = starting offset (default: 0)
 *
 * Output is a classic hex dump: 16 bytes per line, grouped in byte pairs,
 * followed by a printable-ASCII column. Returns 0 on success, a small
 * positive error code otherwise (1 usage, 3 open, 4 seek, 5 path/read,
 * 6 internal overrun).
 *
 * Note: 'buf' is reused as scratch space for _errno_string() messages on
 * error paths — safe because the dump data is no longer needed then.
 */
static int _read_handler(int argc, char **argv)
{
    uint8_t buf[16];                 /* one dump line; also errno-string scratch */
    size_t nbytes = sizeof(buf);
    off_t offset = 0;
    /* argv[argc] is guaranteed NULL, so this read is safe even if argc < 2;
     * path is only dereferenced after the argc check below. */
    char *path = argv[1];
    if (argc < 2) {
        puts("vfs read: missing file name");
        return 1;
    }
    if (argc > 2) {
        nbytes = atoi(argv[2]);
    }
    if (argc > 3) {
        offset = atoi(argv[3]);
    }
    int res;
    /* Normalize in place; the result is never longer than the input. */
    res = vfs_normalize_path(path, path, strlen(path) + 1);
    if (res < 0) {
        _errno_string(res, (char *)buf, sizeof(buf));
        printf("Invalid path \"%s\": %s\n", path, buf);
        return 5;
    }
    int fd = vfs_open(path, O_RDONLY, 0);
    if (fd < 0) {
        _errno_string(fd, (char *)buf, sizeof(buf));
        printf("Error opening file \"%s\": %s\n", path, buf);
        return 3;
    }
    res = vfs_lseek(fd, offset, SEEK_SET);
    if (res < 0) {
        _errno_string(res, (char *)buf, sizeof(buf));
        printf("Seek error: %s\n", buf);
        vfs_close(fd);
        return 4;
    }
    while (nbytes > 0) {
        /* Zero the line buffer so a short read leaves trailing zeros. */
        memset(buf, 0, sizeof(buf));
        size_t line_len = (nbytes < sizeof(buf) ? nbytes : sizeof(buf));
        res = vfs_read(fd, buf, line_len);
        if (res < 0) {
            _errno_string(res, (char *)buf, sizeof(buf));
            printf("Read error: %s\n", buf);
            vfs_close(fd);
            return 5;
        }
        else if ((size_t)res > line_len) {
            /* Defensive: the driver wrote more than requested. */
            printf("BUFFER OVERRUN! %d > %lu\n", res, (unsigned long)line_len);
            vfs_close(fd);
            return 6;
        }
        else if (res == 0) {
            /* EOF */
            printf("-- EOF --\n");
            break;
        }
        /* Address column. */
        printf("%08lx:", (unsigned long)offset);
        /* Hex column: a space before every even-indexed byte groups pairs. */
        for (int k = 0; k < res; ++k) {
            if ((k % 2) == 0) {
                putchar(' ');
            }
            printf("%02x", buf[k]);
        }
        /* Pad a short final line so the ASCII column stays aligned:
         * two spaces per missing byte, plus the pair-grouping space. */
        for (unsigned k = res; k < sizeof(buf); ++k) {
            if ((k % 2) == 0) {
                putchar(' ');
            }
            putchar(' ');
            putchar(' ');
        }
        /* Gap between hex and ASCII columns. */
        putchar(' ');
        putchar(' ');
        /* ASCII column: non-printable bytes shown as '.'. */
        for (int k = 0; k < res; ++k) {
            if (isprint(buf[k])) {
                putchar(buf[k]);
            }
            else {
                putchar('.');
            }
        }
        puts("");
        offset += res;
        nbytes -= res;
    }
    vfs_close(fd);
    return 0;
}
/*
 * vfs_ungetc - "push back" one byte by rewinding the file position by one.
 *
 * Returns vfs_lseek()'s result: the new offset, or a negative error.
 *
 * NOTE(review): unlike stdio ungetc(), the character argument 'c' is
 * ignored — there is no pushback buffer, so this only behaves correctly
 * when the caller ungets the byte that was actually just read from 'fd'.
 * It also cannot unget at offset 0. Confirm callers rely only on that
 * limited contract.
 */
int vfs_ungetc( int c, int fd ) { return vfs_lseek( fd, -1, VFS_SEEK_CUR ); }
static int cmd_vfs_run(struct vmm_chardev *cdev, const char *path) { int fd, rc; u32 len; size_t buf_rd; char buf[VFS_LOAD_BUF_SZ]; struct stat st; u32 tok_len; char *token, *save; const char *delim = "\n"; u32 end, cleanup = 0; fd = vfs_open(path, O_RDONLY, 0); if (fd < 0) { vmm_cprintf(cdev, "Failed to open %s\n", path); return fd; } rc = vfs_fstat(fd, &st); if (rc) { vfs_close(fd); vmm_cprintf(cdev, "Failed to stat %s\n", path); return rc; } if (!(st.st_mode & S_IFREG)) { vfs_close(fd); vmm_cprintf(cdev, "Cannot read %s\n", path); return VMM_EINVALID; } len = st.st_size; while (len) { memset(buf, 0, sizeof(buf)); buf_rd = (len < VFS_LOAD_BUF_SZ) ? len : VFS_LOAD_BUF_SZ; buf_rd = vfs_read(fd, buf, buf_rd); if (buf_rd < 1) { break; } end = buf_rd - 1; while (buf[end] != '\n') { buf[end] = 0; end--; cleanup++; } if (cleanup) { vfs_lseek(fd, (buf_rd - cleanup), SEEK_SET); cleanup = 0; } for (token = strtok_r(buf, delim, &save); token; token = strtok_r(NULL, delim, &save)) { tok_len = strlen(token); if (*token != '#' && *token != '\n') { vmm_cmdmgr_execute_cmdstr(cdev, token, NULL); } len -= (tok_len + 1); } } rc = vfs_close(fd); if (rc) { vmm_cprintf(cdev, "Failed to close %s\n", path); return rc; } return VMM_OK; }
/*
 * elf_segments_load - map every PT_LOAD segment of an ELF image into the
 * task's address space.
 *
 * The first PT_LOAD segment encountered is treated as the text segment,
 * the second as the data segment (which also seeds the heap bounds); any
 * further PT_LOAD segments are skipped. Segment boundaries must be
 * page-aligned and lie inside [USR_OFFSET, USR_LIMIT).
 *
 * Returns 0 on success; EPERM / EACCES / a seek error / the thread's errno
 * (when vmm_mmap fails) otherwise. On success each mapped segment takes a
 * reference on 'file' (atomic_add on f_count).
 */
static error_t elf_segments_load(struct vfs_file_s *file, Elf32_Ehdr *e_header, Elf32_Phdr *p_entry, struct task_s *task)
{
	register error_t err;
	register uint_t index;
	register size_t size;
	register uint_t start;
	register uint_t limit;
	uint_t proto;
	uint_t flags;

	for(index = 0; index < e_header->e_phnum; index++, p_entry++)
	{
		/* Only loadable segments are mapped. */
		if(p_entry->p_type != PT_LOAD)
			continue;
#if 1
		/* Reject segments outside the user virtual-address window. */
		if((p_entry->p_vaddr < USR_OFFSET) || (p_entry->p_vaddr >= USR_LIMIT))
		{
			err = EPERM;
			printk(ERROR, "\nERROR: %s: p_vaddr %x, index %d [ EPERM ]\n", __FUNCTION__, p_entry->p_vaddr, index);
			return err;
		}
#endif
		if((err=vfs_lseek(file, p_entry->p_offset, VFS_SEEK_SET, NULL)))
		{
			printk(ERROR, "\nERROR: %s: faild to localise segment @index %d\n", __FUNCTION__, index);
			return err;
		}

		size = 0; /* NOTE(review): 'size' is never used after this point */
		start = p_entry->p_vaddr;
		limit = p_entry->p_vaddr + p_entry->p_memsz;

		/* Both ends of the segment must be page-aligned. */
		if((start & PMM_PAGE_MASK) || (limit & PMM_PAGE_MASK))
			return EACCES;

		if(task->vmm.text_start == 0)
		{
			/* First PT_LOAD: text — shared, fixed, executable mapping. */
			proto = VM_REG_RD | VM_REG_EX;
			flags = VM_REG_SHARED | VM_REG_FIXED | VM_REG_INST;
			task->vmm.text_start = start;
			task->vmm.text_end   = limit;
			printk(INFO, "INFO: %s: Text <0x%x - 0x%x>\n", __FUNCTION__, start, limit);
		}
		else
		{
			/* Only one data segment is supported; skip any extras. */
			if(task->vmm.data_start != 0)
				continue;
			/* Second PT_LOAD: data — private, writable; the heap starts
			 * right after it. */
			proto = VM_REG_RD | VM_REG_WR;
			flags = VM_REG_PRIVATE | VM_REG_FIXED;
			task->vmm.data_start   = start;
			task->vmm.data_end     = limit;
			task->vmm.heap_start   = limit;
			task->vmm.heap_current = limit;
			printk(INFO, "INFO: %s: Data <0x%x - 0x%x>\n", __FUNCTION__, start, limit);
		}

		err = (error_t) vmm_mmap(task, file, (void*)start, limit - start, proto, flags, p_entry->p_offset);
		if(err == (error_t)VM_FAILED)
		{
			printk(WARNING,"WARNING: %s: Faild to map segment <0x%x - 0x%x>, proto %x, file name %x\n", __FUNCTION__, start, limit, proto, file->f_node->n_name);
			return current_thread->info.errno;
		}

		/* The mapping holds a reference on the backing file. */
		atomic_add(&file->f_count, 1);
	}
	return 0;
}
static int load_bin_elf(struct exe_params *params, struct irq_frame *frame) { struct elf_header head; struct elf_prog_section sect; int ret, i; off_t current_off; struct address_space *new_addrspc; struct task *current; ret = vfs_read(params->exe, &head, sizeof(head)); if (ret != sizeof(head)) return -ENOEXEC; if (head.magic != ELF_MAGIC) return -ENOEXEC; new_addrspc = kmalloc(sizeof(*new_addrspc), PAL_KERNEL); address_space_init(new_addrspc); kp(KP_TRACE, "Parsing ELF binary... frame: %p, state: %d\n", frame, cpu_get_local()->current->state); /* The idea is that you loop over every header, and if a header's type is * 'LOAD' then we make a vm_map for it and load it into memory. */ for (i = 0, current_off = head.prog_head_pos; i < head.prog_head_count; i++, current_off += sizeof(struct elf_prog_section)) { kp(KP_TRACE, "Reading ELF section...\n"); vfs_lseek(params->exe, current_off, SEEK_SET); ret = vfs_read(params->exe, §, sizeof(sect)); kp(KP_TRACE, "Reading ret: %d\n", ret); if (ret != sizeof(sect)) return -ENOEXEC; if (sect.type != ELF_PROG_TYPE_LOAD) continue; kp(KP_TRACE, "Creating new vm_map...\n"); struct vm_map *new_sect = kmalloc(sizeof(struct vm_map), PAL_KERNEL); vm_map_init(new_sect); new_sect->addr.start = va_make(sect.vaddr); new_sect->addr.end = va_make(sect.vaddr + sect.mem_size); if (sect.flags & ELF_PROG_FLAG_EXEC) flag_set(&new_sect->flags, VM_MAP_EXE); if (sect.flags & ELF_PROG_FLAG_READ) flag_set(&new_sect->flags, VM_MAP_READ); if (sect.flags & ELF_PROG_FLAG_WRITE) flag_set(&new_sect->flags, VM_MAP_WRITE); kp(KP_TRACE, "Map from %p to %p\n", new_sect->addr.start, new_sect->addr.end); if (!new_addrspc->code) new_addrspc->code = new_sect; else if (new_addrspc->code->addr.start > new_sect->addr.start) new_addrspc->code = new_sect; if (!new_addrspc->data) new_addrspc->data = new_sect; else if (new_addrspc->data->addr.start < new_sect->addr.start) new_addrspc->data = new_sect; /* f_size is the size in the file, mem_size is the size in memory of * 
our copy. * * If mem_size > f_size, then the empty space is filled with zeros. */ int pages = PG_ALIGN(sect.mem_size) / PG_SIZE; int k; off_t starting_offset = sect.f_off - PG_ALIGN_DOWN(sect.f_off); off_t file_size = sect.f_size + starting_offset; off_t file_offset = sect.f_off - starting_offset; kp(KP_TRACE,"Starting_offset: %ld\n", starting_offset); /* This could be cleaned-up. The complexity comes from the fact that * sect.f_off doesn't have to be page aligned, even if the section it * is in has to be page aligned - This is more then likely due to * sections being stripped out, leaving the other sections at odd offsets. * * Thus, we handle the first page separate from the rest of the pages, * and handle it's offset into virtual memory manually. Then, we loop * to handle the rest of the pages, using 'file_size' and 'file_offset' * which are adjusted values to skip the first part of the file that we * already read. */ for (k = 0; k < pages; k++) { off_t len; struct page *p = palloc(0, PAL_KERNEL); if (PG_SIZE * k + PG_SIZE < file_size) len = PG_SIZE - starting_offset; else if (file_size > PG_SIZE * k) len = file_size - starting_offset - PG_SIZE * k; else len = 0; if (len) { vfs_lseek(params->exe, file_offset + starting_offset + (k * PG_SIZE), SEEK_SET); vfs_read(params->exe, p->virt + starting_offset, len); } len += starting_offset; starting_offset = 0; if (len < PG_SIZE) memset(p->virt + len, 0, PG_SIZE - len); starting_offset = 0; list_add_tail(&new_sect->page_list, &p->page_list_node); } address_space_vm_map_add(new_addrspc, new_sect); } /* If we detected both the code and data segments to be the same segment, * then that means we don't actually have a data segment, so we set it to * NULL. This only actually matters for setting the BRK, and we'll just * make a new vm_map if the data is NULL in that case. 
*/ if (new_addrspc->code == new_addrspc->data) new_addrspc->data = NULL; struct vm_map *stack = kmalloc(sizeof(struct vm_map), PAL_KERNEL); vm_map_init(stack); stack->addr.end = KMEM_PROG_STACK_END; stack->addr.start = KMEM_PROG_STACK_START; flag_set(&stack->flags, VM_MAP_READ); flag_set(&stack->flags, VM_MAP_WRITE); flag_set(&stack->flags, VM_MAP_EXE); palloc_unordered(&stack->page_list, KMEM_STACK_LIMIT, PAL_KERNEL); address_space_vm_map_add(new_addrspc, stack); new_addrspc->stack = stack; kp(KP_TRACE, "New code segment: %p-%p\n", new_addrspc->code->addr.start, new_addrspc->code->addr.end); if (new_addrspc->data) kp(KP_TRACE, "New data segment: %p-%p\n", new_addrspc->data->addr.start, new_addrspc->data->addr.end); kp(KP_TRACE, "New stack segment: %p-%p\n", new_addrspc->stack->addr.start, new_addrspc->stack->addr.end); current = cpu_get_local()->current; arch_task_change_address_space(new_addrspc); irq_frame_initalize(current->context.frame); irq_frame_set_stack(current->context.frame, new_addrspc->stack->addr.end); irq_frame_set_ip(current->context.frame, va_make(head.entry_vaddr)); return 0; }
/*
 * vlseek - emulator seek wrapper: validate the arguments, then forward to
 * vfs_lseek(). Returns -9 (bad descriptor) for a negative emufd, -22
 * (invalid argument) for an out-of-range whence or a negative absolute
 * offset, otherwise vfs_lseek()'s result.
 */
static sint32 EMU_CALL vlseek(void *vfsstate, sint32 emufd, sint32 offset, sint32 whence) {
  if (emufd < 0) return -9;

  switch (whence) {
  case 0:
    /* Absolute seek: a negative target position is invalid. */
    if (offset < 0) return -22;
    break;
  case 1:
  case 2:
    /* Relative / end-relative: any offset is acceptable here. */
    break;
  default:
    return -22;
  }

  return vfs_lseek(vfsstate, emufd, offset, whence);
}
/*
 * get_user_hash - scan CONFIG_LIBAUTH_FILE line by line and, for each
 * non-comment line, let process_auth_entry() try to match 'user' and fill
 * dst_hash/dst_len. Returns VMM_OK on a match, VMM_EFAIL otherwise.
 *
 * Fixes vs. the original chunking loop (same pattern as cmd_vfs_run):
 *  - buf_rd is now signed so a negative vfs_read() error cannot wrap and
 *    overrun 'buf'.
 *  - A chunk with no newline no longer underflows the trim loop.
 *  - The rewind over a trailing partial line uses SEEK_CUR; SEEK_SET with
 *    an in-chunk offset was wrong after the first chunk.
 *  - One spare byte guarantees NUL-termination for strtok_r().
 *  - The fd is closed before the early VMM_OK return (it used to leak).
 */
static int get_user_hash(const char *user, u8 *dst_hash, u32 dst_len)
{
	int fd, rc;
	u32 len;
	ssize_t buf_rd;
	char buf[VFS_LOAD_BUF_SZ];
	struct stat st;
	u32 tok_len;
	char *token, *save;
	const char *delim = "\n";
	u32 end, cleanup;
	const char *path = CONFIG_LIBAUTH_FILE;

	fd = vfs_open(path, O_RDONLY, 0);
	if (fd < 0) {
		return VMM_EFAIL;
	}

	rc = vfs_fstat(fd, &st);
	if (rc) {
		vfs_close(fd);
		return VMM_EFAIL;
	}

	if (!(st.st_mode & S_IFREG)) {
		vfs_close(fd);
		return VMM_EFAIL;
	}

	len = st.st_size;
	while (len) {
		memset(buf, 0, sizeof(buf));

		/* Keep one spare byte so the chunk is always NUL-terminated. */
		buf_rd = (len < (VFS_LOAD_BUF_SZ - 1)) ? len : (VFS_LOAD_BUF_SZ - 1);
		buf_rd = vfs_read(fd, buf, buf_rd);
		if (buf_rd < 1) {
			break;
		}

		/* Find the last newline so only complete lines are parsed. */
		end = buf_rd;
		while (end && (buf[end - 1] != '\n')) {
			end--;
		}
		cleanup = buf_rd - end;
		if (end && cleanup) {
			/* Drop the trailing partial line and rewind so it is
			 * re-read at the start of the next chunk. */
			memset(&buf[end], 0, cleanup);
			vfs_lseek(fd, -((long long)cleanup), SEEK_CUR);
		}
		/* If end == 0 the chunk holds no newline; parse it whole
		 * rather than rewinding forever. */

		for (token = strtok_r(buf, delim, &save); token;
		     token = strtok_r(NULL, delim, &save)) {
			tok_len = strlen(token);
			if (*token != '#' && *token != '\n') {
				if (process_auth_entry(token, user, dst_hash, dst_len) == VMM_OK) {
					/* Close before returning — the fd used to leak here. */
					vfs_close(fd);
					return VMM_OK;
				}
			}
			/* +1 accounts for the '\n' strtok_r consumed. */
			len -= (tok_len + 1);
		}
	}

	rc = vfs_close(fd);
	if (rc) {
		return VMM_EFAIL;
	}

	/* No matching entry found. */
	return VMM_EFAIL;
}