/*
 * Verify that every 64-bit division/remainder routine raises SIGFPE on a
 * zero divisor.  Uses the file-global operands i (dividend) and j, which
 * must be zero on entry.  The SIGFPE handler longjmps back to
 * jmpbuf_SIGFPE and clears expect_SIGFPE; reaching the code after the
 * switch therefore means no signal arrived and ERR records the failure.
 */
static void testdiv0(void)
{
	int funcidx;

	assert(cmp64u(j, 0) == 0);

	/* loop through the 5 different division functions */
	for (funcidx = 0; funcidx < 5; funcidx++) {
		expect_SIGFPE = 1;
		if (setjmp(jmpbuf_SIGFPE) == 0) {
			/* divide by zero using various functions */
			switch (funcidx) {
			case 0: div64(i, j); ERR; break;
			case 1: div64u64(i, ex64lo(j)); ERR; break;
			case 2: div64u(i, ex64lo(j)); ERR; break;
			case 3: rem64(i, j); ERR; break;
			case 4: rem64u(i, ex64lo(j)); ERR; break;
			default: assert(0); ERR; break;
			}

			/* if we reach this point there was no signal and an
			 * error has been recorded
			 */
			expect_SIGFPE = 0;
		} else {
			/* a signal has been received and expect_SIGFPE has
			 * been reset; all is ok now
			 */
			assert(!expect_SIGFPE);
		}
	}
}
/*
 * Handle a read (DEV_GATHER_S) transfer request for the counter device:
 * copy the remainder of the countNum string, starting at 'position', into
 * the caller's grant.  Returns OK, EINVAL for unsupported opcodes, or the
 * sys_safecopyto() result.
 */
PRIVATE int counter_transfer(endpoint_t endpt, int opcode, u64_t position,
	iovec_t *iov, unsigned nr_req, endpoint_t UNUSED(user_endpt))
{
	int bytes, ret;

	printf("counter_transfer()\n");

	if (nr_req != 1) {
		/* This should never trigger for character drivers at the moment. */
		printf("COUNTER: vectored transfer request, using first element only\n");
	}

	/* Clamp the transfer to the bytes of countNum left past 'position'. */
	bytes = strlen(countNum) - ex64lo(position) < iov->iov_size ?
		strlen(countNum) - ex64lo(position) : iov->iov_size;

	/* Nothing left to transfer (position at or past end of string). */
	if (bytes <= 0) {
		return OK;
	}

	switch (opcode) {
	case DEV_GATHER_S:
		/* BUG FIX: a stray trailing comma after the final 'D'
		 * argument made this call a syntax error. */
		ret = sys_safecopyto(endpt, (cp_grant_id_t) iov->iov_addr, 0,
			(vir_bytes) (countNum + ex64lo(position)), bytes, D);
		iov->iov_size -= bytes;
		break;
	default:
		return EINVAL;
	}

	return ret;
}
/*===========================================================================* * do_lseek * *===========================================================================*/ int do_lseek() { /* Perform the lseek(ls_fd, offset, whence) system call. */ register struct filp *rfilp; int r = OK, seekfd, seekwhence; off_t offset; u64_t pos, newpos; seekfd = job_m_in.ls_fd; seekwhence = job_m_in.whence; offset = (off_t) job_m_in.offset_lo; /* Check to see if the file descriptor is valid. */ if ( (rfilp = get_filp(seekfd, VNODE_READ)) == NULL) return(err_code); /* No lseek on pipes. */ if (S_ISFIFO(rfilp->filp_vno->v_mode)) { unlock_filp(rfilp); return(ESPIPE); } /* The value of 'whence' determines the start position to use. */ switch(seekwhence) { case SEEK_SET: pos = cvu64(0); break; case SEEK_CUR: pos = rfilp->filp_pos; break; case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size); break; default: unlock_filp(rfilp); return(EINVAL); } if (offset >= 0) newpos = add64ul(pos, offset); else newpos = sub64ul(pos, -offset); /* Check for overflow. */ if (ex64hi(newpos) != 0) { r = EOVERFLOW; } else if ((off_t) ex64lo(newpos) < 0) { /* no negative file size */ r = EOVERFLOW; } else { /* insert the new position into the output message */ m_out.reply_l1 = ex64lo(newpos); if (cmp64(newpos, rfilp->filp_pos) != 0) { rfilp->filp_pos = newpos; /* Inhibit read ahead request */ r = req_inhibread(rfilp->filp_vno->v_fs_e, rfilp->filp_vno->v_inode_nr); } } unlock_filp(rfilp); return(r); }
/*
 * Handle read/write transfer requests for the hello device.  Reads copy
 * from the_secret to the caller; writes copy from the caller into
 * the_secret.  Transfers are clamped to SECRET_SIZE bytes from 'position'.
 * Returns OK, EINVAL for unknown opcodes, or the sys_safecopy* result.
 */
static int hello_transfer(endpoint_t endpt, int opcode, u64_t position,
	iovec_t *iov, unsigned nr_req, endpoint_t user_endpt,
	unsigned int UNUSED(flags))
{
	int bytes, ret;

	if (nr_req != 1) {
		/* This should never trigger for character drivers at the moment. */
		printf("HELLO: vectored transfer request, using first element only\n");
	}

	/* Clamp the transfer so it stays within the secret buffer. */
	bytes = SECRET_SIZE - ex64lo(position) < iov->iov_size ?
		SECRET_SIZE - ex64lo(position) : iov->iov_size;

	/* Nothing to do at or past the end of the buffer. */
	if (bytes <= 0) {
		return OK;
	}

	switch (opcode) {
	case DEV_SCATTER_S:
		printf("transfer() WRITE...\n");
		ret = sys_safecopyfrom(user_endpt,
			(cp_grant_id_t) iov->iov_addr, 0,
			(vir_bytes) (the_secret + ex64lo(position)), bytes);
		/* BUG FIX: the remaining vector size must shrink by the
		 * number of bytes consumed; the original added 'bytes'
		 * instead of subtracting it (compare the DEV_GATHER_S
		 * case below). */
		iov->iov_size -= bytes;
		printf("the secret: %s\n", the_secret);
		break;
	case DEV_GATHER_S:
		printf("transfer() READ...\n");
		ret = sys_safecopyto(endpt, (cp_grant_id_t) iov->iov_addr, 0,
			(vir_bytes) (the_secret + ex64lo(position)), bytes);
		iov->iov_size -= bytes;
		printf("the secret: %s\n", the_secret);
		break;
	default:
		fprintf(stderr, "Unknown opcode: %d\n", opcode);
		return EINVAL;
	}

	return ret;
}
PRIVATE void dump_bkl_usage(void) { unsigned cpu; printf("--- BKL usage ---\n"); for (cpu = 0; cpu < ncpus; cpu++) { printf("cpu %3d kernel ticks 0x%x%08x bkl ticks 0x%x%08x succ %d tries %d\n", cpu, ex64hi(kernel_ticks[cpu]), ex64lo(kernel_ticks[cpu]), ex64hi(bkl_ticks[cpu]), ex64lo(bkl_ticks[cpu]), bkl_succ[cpu], bkl_tries[cpu]); } }
/*
 * Record a test failure: dump the source line and the current values of
 * the global 64-bit operands i, j, and k, then abort the whole test run
 * once more than MAX_ERROR failures have accumulated.
 */
static void err(int line)
{
	/* print error information */
	printf("error line %d; i=0x%.8x%.8x; j=0x%.8x%.8x; k=0x%.8x%.8x\n",
		line,
		ex64hi(i), ex64lo(i),
		ex64hi(j), ex64lo(j),
		ex64hi(k), ex64lo(k));

	/* quit after too many errors */
	if (errct++ > MAX_ERROR) {
		printf("Too many errors; test aborted\n");
		quit();
	}
}
/*===========================================================================* * do_fstat * *===========================================================================*/ int do_fstat() { /* Perform the fstat(fd, buf) system call. */ register struct filp *rfilp; int r, pipe_pos = 0, old_stat = 0, rfd; vir_bytes statbuf; statbuf = (vir_bytes) job_m_in.buffer; rfd = job_m_in.fd; if (job_call_nr == PREV_FSTAT) old_stat = 1; /* Is the file descriptor valid? */ if ((rfilp = get_filp(rfd, VNODE_READ)) == NULL) return(err_code); /* If we read from a pipe, send position too */ if (rfilp->filp_vno->v_pipe == I_PIPE) { if (rfilp->filp_mode & R_BIT) if (ex64hi(rfilp->filp_pos) != 0) { panic("do_fstat: bad position in pipe"); } pipe_pos = ex64lo(rfilp->filp_pos); } r = req_stat(rfilp->filp_vno->v_fs_e, rfilp->filp_vno->v_inode_nr, who_e, statbuf, pipe_pos, old_stat); unlock_filp(rfilp); return(r); }
/*
 * Write to the framebuffer of the given minor device.  Clamps the request
 * to the device size, reports the byte count via *io_bytes, and copies the
 * data in from the caller's grant.  Returns OK, EINVAL past end of device,
 * EAGAIN while the restart notice is shown, or the copy result.
 */
static int fb_do_write(endpoint_t ep, iovec_t *iov, int minor,
	u64_t pos, size_t *io_bytes)
{
	struct device dev;

	arch_get_device(minor, &dev);

	/* Writes starting at or past the end of the device are invalid. */
	if (pos >= dev.dv_size) {
		return EINVAL;
	}

	/* Transfer at most the number of bytes remaining on the device. */
	*io_bytes = (dev.dv_size - pos < iov->iov_size) ?
		dev.dv_size - pos : iov->iov_size;

	if (*io_bytes <= 0) {
		return OK;
	}

	/* While recovering from a restart, possibly keep showing a notice
	 * instead of accepting new contents. */
	if (has_restarted && keep_displaying_restarted()) {
		return EAGAIN;
	}

	return sys_safecopyfrom(ep, (cp_grant_id_t) iov->iov_addr, 0,
		(vir_bytes) (dev.dv_base + ex64lo(pos)), *io_bytes);
}
/*
 * Ask VM to create a memory mapping on behalf of process 'forwhom' (or the
 * caller itself when SELF).  Builds a VM_MMAP request and returns the
 * mapped address, or MAP_FAILED on error.
 */
void *minix_mmap_for(endpoint_t forwhom,
	void *addr, size_t len, int prot, int flags, int fd, u64_t offset)
{
	message m;
	int r;

	m.VMM_ADDR = (vir_bytes) addr;
	m.VMM_LEN = len;
	m.VMM_PROT = prot;
	m.VMM_FLAGS = flags;
	m.VMM_FD = fd;
	m.VMM_OFFSET_LO = ex64lo(offset);

	if(forwhom != SELF) {
		/* Third-party mapping: flag it and pass the target endpoint.
		 * NOTE(review): VMM_FORWHOM appears to occupy the message
		 * slot otherwise used for VMM_OFFSET_HI, which is why the
		 * high offset word is only sent for SELF mappings — confirm
		 * against the message field layout. */
		m.VMM_FLAGS |= MAP_THIRDPARTY;
		m.VMM_FORWHOM = forwhom;
	} else {
		m.VMM_OFFSET_HI = ex64hi(offset);
	}

	r = _syscall(VM_PROC_NR, VM_MMAP, &m);

	if(r != OK) {
		return MAP_FAILED;
	}

	/* VM returns the actual mapped address in the reply. */
	return (void *) m.VMM_RETADDR;
}
/*===========================================================================*
 *				fs_breadwrite_s				     *
 *===========================================================================*/
PUBLIC int fs_breadwrite_s(void)
{
/* Read or write raw blocks on the device named in the request, using a
 * safe-grant for the data.  A pseudo block-special inode is faked so that
 * rw_chunk_s addresses the device directly.  The final position and the
 * cumulative byte count are returned in the reply message.
 */
  int r, rw_flag, chunk, block_size;
  cp_grant_id_t gid;
  int nrbytes;
  u64_t position;
  unsigned int off, cum_io;
  mode_t mode_word;	/* NOTE(review): unused here, as is r2 below */
  int completed, r2 = OK;

  /* Pseudo inode for rw_chunk */
  struct inode rip;

  r = OK;

  /* Get the values from the request message */
  rw_flag = (fs_m_in.m_type == REQ_BREAD_S ? READING : WRITING);
  gid = fs_m_in.REQ_XFD_GID;
  position = make64(fs_m_in.REQ_XFD_POS_LO, fs_m_in.REQ_XFD_POS_HI);
  nrbytes = (unsigned) fs_m_in.REQ_XFD_NBYTES;
  block_size = get_block_size(fs_m_in.REQ_XFD_BDEV);

  /* Fake a block-special inode referring to the requested device. */
  rip.i_zone[0] = fs_m_in.REQ_XFD_BDEV;
  rip.i_mode = I_BLOCK_SPECIAL;
  rip.i_size = 0;

  rdwt_err = OK;		/* set to EIO if disk error occurs */

  cum_io = 0;
  /* Split the transfer into chunks that don't span two blocks. */
  while (nrbytes != 0) {
	  off = rem64u(position, block_size);	/* offset in blk*/

	  chunk = MIN(nrbytes, block_size - off);
	  /* NOTE(review): chunk can only go negative through signed/unsigned
	   * mixing in MIN; this guard falls back to the block remainder. */
	  if (chunk < 0) chunk = block_size - off;

	  /* Read or write 'chunk' bytes. */
	  r = rw_chunk_s(&rip, position, off, chunk, (unsigned) nrbytes,
		rw_flag, gid, cum_io, block_size, &completed);

	  if (r != OK) break;	/* EOF reached */
	  if (rdwt_err < 0) break;

	  /* Update counters and pointers. */
	  nrbytes -= chunk;	/* bytes yet to be read */
	  cum_io += chunk;	/* bytes read so far */
	  position= add64ul(position, chunk);	/* position within the file */
  }

  fs_m_out.RES_XFD_POS_LO = ex64lo(position);
  fs_m_out.RES_XFD_POS_HI = ex64hi(position);

  if (rdwt_err != OK) r = rdwt_err;	/* check for disk error */
  if (rdwt_err == END_OF_FILE) r = OK;
  fs_m_out.RES_XFD_CUM_IO = cum_io;

  return(r);
}
/*
 * Dump one line of debugging state for process 'pp': scheduling fields,
 * consumed CPU cycles, the page-table base register (architecture
 * dependent), flag strings, and — if the process is blocked — the endpoint
 * it is blocked on.
 */
void print_proc(struct proc *pp)
{
	endpoint_t dep;

	printf("%d: %s %d prio %d time %d/%d cycles 0x%x%08x cpu %2d "
			"pdbr 0x%lx rts %s misc %s sched %s ",
		proc_nr(pp), pp->p_name, pp->p_endpoint,
		pp->p_priority, pp->p_user_time,
		pp->p_sys_time, ex64hi(pp->p_cycles),
		ex64lo(pp->p_cycles), pp->p_cpu,
		/* the page-table base register lives in an arch-specific
		 * field of the segment descriptor */
#if defined(__i386__)
		pp->p_seg.p_cr3,
#elif defined(__arm__)
		pp->p_seg.p_ttbr,
#endif
		rtsflagstr(pp->p_rts_flags), miscflagstr(pp->p_misc_flags),
		schedulerstr(pp->p_scheduler));

	print_sigmgr(pp);

	dep = P_BLOCKEDON(pp);
	if(dep != NONE) {
		printf(" blocked on: ");
		print_endpoint(dep);
	}
	printf("\n");
}
/*
 * Prepare a single-buffer read or write request message for the block
 * driver that owns 'dev'.  A memory grant covering 'buf' is allocated and
 * stored in the message.  Returns OK, EDEADSRCDST if no driver is known,
 * or EINVAL if the grant could not be created.
 */
static int bdev_rdwt_setup(int req, dev_t dev, u64_t pos, char *buf,
	size_t count, int flags, message *m)
{
	endpoint_t driver;
	cp_grant_id_t gid;
	int perm;

	assert((ssize_t) count >= 0);

	driver = bdev_driver_get(dev);
	if (driver == NONE)
		return EDEADSRCDST;

	/* A read lets the driver write into our buffer, and vice versa. */
	perm = (req == BDEV_READ) ? CPF_WRITE : CPF_READ;

	gid = cpf_grant_direct(driver, (vir_bytes) buf, count, perm);
	if (!GRANT_VALID(gid)) {
		printf("bdev: unable to allocate grant!\n");
		return EINVAL;
	}

	memset(m, 0, sizeof(*m));
	m->m_type = req;
	m->BDEV_MINOR = minor(dev);
	m->BDEV_POS_LO = ex64lo(pos);
	m->BDEV_POS_HI = ex64hi(pos);
	m->BDEV_COUNT = count;
	m->BDEV_GRANT = gid;
	m->BDEV_FLAGS = flags;

	return OK;
}
/*
 * Hash a (p1, 64-bit p2) pair into a bucket index in [0, HASHSIZE).
 * hash_mix and hash_final are macros that modify their arguments in
 * place, so the 64-bit value is first split into two 32-bit halves.
 */
static __inline u32_t makehash(u32_t p1, u64_t p2)
{
	u32_t offlo = ex64lo(p2), offhi = ex64hi(p2),
		v = 0x12345678;	/* arbitrary seed for the final mix */

	hash_mix(p1, offlo, offhi);
	hash_final(offlo, offhi, v);

	return v % HASHSIZE;
}
/*===========================================================================*
 *				fs_breadwrite				     *
 *===========================================================================*/
int fs_breadwrite(void)
{
/* Read or write raw blocks on the device named by REQ_DEV2, using the
 * caller's grant for the data.  A pseudo block-special inode is faked so
 * that rw_chunk addresses the device directly.  The final position and the
 * cumulative byte count are returned in the reply message.
 */
  int r, rw_flag, completed;
  cp_grant_id_t gid;
  u64_t position;
  unsigned int off, cum_io, chunk, block_size;
  size_t nrbytes;

  /* Pseudo inode for rw_chunk */
  struct inode rip;

  r = OK;

  /* Get the values from the request message */
  rw_flag = (fs_m_in.m_type == REQ_BREAD ? READING : WRITING);
  gid = (cp_grant_id_t) fs_m_in.REQ_GRANT;
  position = make64((unsigned long) fs_m_in.REQ_SEEK_POS_LO,
		    (unsigned long) fs_m_in.REQ_SEEK_POS_HI);
  nrbytes = (size_t) fs_m_in.REQ_NBYTES;

  block_size = get_block_size( (dev_t) fs_m_in.REQ_DEV2);

  /* Fake a block-special inode referring to the requested device. */
  rip.i_block[0] = (block_t) fs_m_in.REQ_DEV2;
  rip.i_mode = I_BLOCK_SPECIAL;
  rip.i_size = 0;

  rdwt_err = OK;		/* set to EIO if disk error occurs */

  cum_io = 0;
  /* Split the transfer into chunks that don't span two blocks. */
  while (nrbytes > 0) {
	  off = rem64u(position, block_size);	/* offset in blk*/
	  chunk = min(nrbytes, block_size - off);

	  /* Read or write 'chunk' bytes. */
	  r = rw_chunk(&rip, position, off, chunk, nrbytes, rw_flag, gid,
		cum_io, block_size, &completed);

	  if (r != OK) break;	/* EOF reached */
	  if (rdwt_err < 0) break;

	  /* Update counters and pointers. */
	  nrbytes -= chunk;	/* bytes yet to be read */
	  cum_io += chunk;	/* bytes read so far */
	  position = add64ul(position, chunk);	/* position within the file */
  }

  fs_m_out.RES_SEEK_POS_LO = ex64lo(position);
  fs_m_out.RES_SEEK_POS_HI = ex64hi(position);

  if (rdwt_err != OK) r = rdwt_err;	/* check for disk error */
  if (rdwt_err == END_OF_FILE) r = OK;

  fs_m_out.RES_NBYTES = cum_io;

  return(r);
}
/*===========================================================================* * do_read * *===========================================================================*/ int do_read() { /* Read data from a file. */ struct inode *ino; u64_t pos; size_t count, size; vir_bytes off; char *ptr; int r, chunk; if ((ino = find_inode(m_in.REQ_INODE_NR)) == NULL) return EINVAL; if (IS_DIR(ino)) return EISDIR; if ((r = get_handle(ino)) != OK) return r; pos = make64(m_in.REQ_SEEK_POS_LO, m_in.REQ_SEEK_POS_HI); count = m_in.REQ_NBYTES; assert(count > 0); /* Use the buffer from below to eliminate extra copying. */ size = sffs_table->t_readbuf(&ptr); off = 0; while (count > 0) { chunk = MIN(count, size); if ((r = sffs_table->t_read(ino->i_file, ptr, chunk, pos)) <= 0) break; chunk = r; r = sys_safecopyto(m_in.m_source, m_in.REQ_GRANT, off, (vir_bytes) ptr, chunk, D); if (r != OK) break; count -= chunk; off += chunk; pos = add64u(pos, chunk); } if (r < 0) return r; m_out.RES_SEEK_POS_HI = ex64hi(pos); m_out.RES_SEEK_POS_LO = ex64lo(pos); m_out.RES_NBYTES = off; return OK; }
/*===========================================================================*
 *				action_io_corrupt			     *
 *===========================================================================*/
static void action_io_corrupt(struct fbd_rule *rule, char *buf, size_t size,
	u64_t pos, int UNUSED(flag))
{
/* Corrupt the part of the I/O buffer that falls within 'rule', according
 * to the rule's corruption type (zero fill, position-stable pattern, or
 * random bytes). */
	u64_t skip;
	u32_t val;

	/* Narrow buf/size to the overlap with the rule; 'skip' receives the
	 * number of rule bytes that precede this request. */
	buf += get_range(rule, pos, &size, &skip);

	switch (rule->params.corrupt.type) {
	case FBD_CORRUPT_ZERO:
		memset(buf, 0, size);

		break;

	case FBD_CORRUPT_PERSIST:
		/* Non-dword-aligned positions and sizes are not supported;
		 * not by us, and not by the driver.
		 */
		if (ex64lo(pos) & (sizeof(val) - 1)) break;
		if (size & (sizeof(val) - 1)) break;

		/* Consistently produce the same pattern for the same range. */
		val = ex64lo(skip);

		for ( ; size >= sizeof(val); size -= sizeof(val)) {
			*((u32_t *) buf) = val ^ 0xdeadbeefUL;

			val += sizeof(val);
			buf += sizeof(val);
		}

		break;

	case FBD_CORRUPT_RANDOM:
		while (size--)
			*buf++ = get_rand(255);

		break;

	default:
		printf("FBD: unknown corruption type %d\n",
			rule->params.corrupt.type);
	}
}
/*===========================================================================*
 *				get_range				     *
 *===========================================================================*/
static size_t get_range(struct fbd_rule *rule, u64_t pos, size_t *size,
	u64_t *skip)
{
	/* Compute the range within the given request range that is affected
	 * by the given rule, and optionally the number of bytes preceding
	 * the range that are also affected by the rule.
	 * On return *size is the number of affected bytes and the return
	 * value is the offset of the first affected byte in the request.
	 */
	u64_t delta;
	size_t off;
	int to_eof;

	/* A rule whose start is at or past its end extends to end-of-file. */
	to_eof = cmp64(rule->start, rule->end) >= 0;

	if (cmp64(pos, rule->start) > 0) {
		/* The request begins inside the rule: report how far in. */
		if (skip != NULL) *skip = sub64(pos, rule->start);

		off = 0;
	}
	else {
		if (skip != NULL) *skip = cvu64(0);

		/* The rule begins inside the request; the offset of the
		 * rule start within the request must fit in 32 bits. */
		delta = sub64(rule->start, pos);

		assert(ex64hi(delta) == 0);

		off = ex64lo(delta);
	}

	if (!to_eof) {
		/* Clip the affected size to the end of the rule. */
		assert(cmp64(pos, rule->end) < 0);

		delta = sub64(rule->end, pos);

		if (cmp64u(delta, *size) < 0)
			*size = ex64lo(delta);
	}

	/* The caller guarantees some overlap between request and rule. */
	assert(*size > off);

	*size -= off;

	return off;
}
/*===========================================================================* * do_readsuper * *===========================================================================*/ PUBLIC int do_readsuper() { /* Mount the file system. */ char path[PATH_MAX]; struct inode *ino; struct hgfs_attr attr; int r; dprintf(("HGFS: readsuper (dev %x, flags %x)\n", (dev_t) m_in.REQ_DEV, m_in.REQ_FLAGS)); if (m_in.REQ_FLAGS & REQ_ISROOT) { printf("HGFS: attempt to mount as root device\n"); return EINVAL; } state.read_only = !!(m_in.REQ_FLAGS & REQ_RDONLY); state.dev = m_in.REQ_DEV; init_dentry(); ino = init_inode(); attr.a_mask = HGFS_ATTR_MODE | HGFS_ATTR_SIZE; /* We cannot continue if we fail to get the properties of the root inode at * all, because we cannot guess the details of the root node to return to * VFS. Print a (hopefully) helpful error message, and abort the mount. */ if ((r = verify_inode(ino, path, &attr)) != OK) { if (r == EAGAIN) printf("HGFS: shared folders disabled\n"); else if (opt.prefix[0] && (r == ENOENT || r == EACCES)) printf("HGFS: unable to access the given prefix directory\n"); else printf("HGFS: unable to access shared folders\n"); return r; } m_out.RES_INODE_NR = INODE_NR(ino); m_out.RES_MODE = get_mode(ino, attr.a_mode); m_out.RES_FILE_SIZE_HI = ex64hi(attr.a_size); m_out.RES_FILE_SIZE_LO = ex64lo(attr.a_size); m_out.RES_UID = opt.uid; m_out.RES_GID = opt.gid; m_out.RES_DEV = NO_DEV; m_out.RES_CONREQS = 1; /* We can handle only 1 request at a time */ state.mounted = TRUE; return OK; }
/*===========================================================================*
 *				scall_lseek				     *
 *===========================================================================*/
int scall_lseek()
{
/* Perform the lseek(ls_fd, offset, whence) system call.
 * On success the new position (which must fit in 32 bits) is returned;
 * on failure a negative errno value is returned.
 */
  register struct filp *rfilp;
  int r;
  long offset;
  u64_t pos, newpos;

  /* Check to see if the file descriptor is valid. */
  if ( (rfilp = get_filp(m_in.ls_fd)) == NIL_FILP) return(err_code);

  /* No lseek on pipes. */
  if (rfilp->filp_vno->v_pipe == I_PIPE)
	return -ESPIPE;

  /* The value of 'whence' determines the start position to use. */
  switch(m_in.whence) {
    case SEEK_SET: pos = cvu64(0); break;
    case SEEK_CUR: pos = rfilp->filp_pos; break;
    case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size); break;
    default: return(-EINVAL);
  }

  /* Apply the signed 32-bit offset to the 64-bit base position. */
  offset= m_in.offset_lo;
  if (offset >= 0)
	newpos= add64ul(pos, offset);
  else
	newpos= sub64ul(pos, -offset);

  /* Check for overflow. */
  if (ex64hi(newpos) != 0)
	return -EINVAL;	/* the result must fit in 32 bits */

  if (cmp64(newpos, rfilp->filp_pos) != 0) {
	/* Inhibit read ahead request */
	r = req_inhibread(rfilp->filp_vno->v_fs_e,
		rfilp->filp_vno->v_inode_nr);
	if (r != 0)
		return r;
  }

  /* Commit the new position only after the FS accepted the inhibit. */
  rfilp->filp_pos = newpos;

  return ex64lo(newpos);
}
/*
 * Test 64-bit multiplication on the global operands i and j: bound the
 * product's bit width, compare against 32-bit multiplication where no
 * overflow can occur, and check algebraic identities (commutativity,
 * associativity, distributivity) over every generated third operand k.
 * ERR records a failure at the current source line.
 */
static void testmul(void)
{
	int kdone, kidx;
	u32_t ilo = ex64lo(i), jlo = ex64lo(j);
	u64_t prod = mul64(i, j);
	int prodbits;

	/* compute maximum index of highest-order bit */
	prodbits = bsr64(i) + bsr64(j) + 1;
	if (cmp64u(i, 0) == 0 || cmp64u(j, 0) == 0) prodbits = -1;
	if (bsr64(prod) > prodbits) ERR;

	/* compare to 32-bit multiplication if possible */
	if (ex64hi(i) == 0 && ex64hi(j) == 0) {
		if (cmp64(prod, mul64u(ilo, jlo)) != 0) ERR;

		/* if there is no overflow we can check against pure 32-bit */
		if (prodbits < 32 && cmp64u(prod, ilo * jlo) != 0) ERR;
	}

	/* in 32-bit arith low-order DWORD matches regardless of overflow */
	if (ex64lo(prod) != ilo * jlo) ERR;

	/* multiplication by zero yields zero */
	if (prodbits < 0 && cmp64u(prod, 0) != 0) ERR;

	/* if there is no overflow, check absence of zero divisors */
	if (prodbits >= 0 && prodbits < 64 && cmp64u(prod, 0) == 0) ERR;

	/* commutativity */
	if (cmp64(prod, mul64(j, i)) != 0) ERR;

	/* loop though all argument value combinations for third argument */
	for (kdone = 0, kidx = 0; k = getargval(kidx, &kdone), !kdone; kidx++) {
		/* associativity */
		if (cmp64(mul64(mul64(i, j), k),
			mul64(i, mul64(j, k))) != 0) ERR;

		/* left and right distributivity */
		if (cmp64(mul64(add64(i, j), k),
			add64(mul64(i, k), mul64(j, k))) != 0) ERR;
		if (cmp64(mul64(i, add64(j, k)),
			add64(mul64(i, j), mul64(i, k))) != 0) ERR;
	}
}
/*===========================================================================*
 *				print64					     *
 *===========================================================================*/
char *print64(u64_t p)
{
/* Format a 64-bit value as hexadecimal.  Results are handed out from NB
 * rotating static buffers so that up to NB calls can coexist in a single
 * printf argument list. */
#define NB 10
	static int n = 0;
	static char buf[NB][100];
	u32_t lo, hi;

	lo = ex64lo(p);
	hi = ex64hi(p);

	/* Advance to the next slot before formatting into it. */
	n = (n+1) % NB;

	if (hi == 0)
		sprintf(buf[n], "%lx", lo);
	else
		sprintf(buf[n], "%lx%08lx", hi, lo);

	return buf[n];
}
/*===========================================================================*
 *				fs_bread				     *
 *===========================================================================*/
int fs_bread(void)
{
/* Raw block read from the ISO9660 volume; the medium is read-only, so
 * write requests are rejected with EIO.  The final position and the
 * cumulative byte count are returned in the reply message. */
  int r, rw_flag, chunk, block_size;
  cp_grant_id_t gid;
  int nrbytes;
  u64_t position;
  unsigned int off, cum_io;
  int completed;
  struct dir_record *dir;

  r = OK;

  /* Get the values from the request message */
  rw_flag = (fs_m_in.m_type == REQ_BREAD ? READING : WRITING);
  gid = fs_m_in.REQ_GRANT;
  position = make64(fs_m_in.REQ_SEEK_POS_LO, fs_m_in.REQ_SEEK_POS_HI);
  nrbytes = (unsigned) fs_m_in.REQ_NBYTES;

  block_size = v_pri.logical_block_size_l;
  dir = v_pri.dir_rec_root;

  if(rw_flag == WRITING) return (EIO);	/* Not supported */
  rdwt_err = OK;		/* set to EIO if disk error occurs */

  cum_io = 0;
  /* Split the transfer into chunks that don't span two blocks. */
  while (nrbytes != 0) {
	  off = rem64u(position, block_size);	/* offset in blk*/

	  chunk = MIN(nrbytes, block_size - off);
	  /* NOTE(review): chunk can only go negative through signed/unsigned
	   * mixing in MIN; this guard falls back to the block remainder. */
	  if (chunk < 0) chunk = block_size - off;

	  /* Read 'chunk' bytes. */
	  r = read_chunk(dir, position, off, chunk, (unsigned) nrbytes,
		gid, cum_io, block_size, &completed);

	  if (r != OK) break;	/* EOF reached */
	  if (rdwt_err < 0) break;

	  /* Update counters and pointers. */
	  nrbytes -= chunk;	/* bytes yet to be read */
	  cum_io += chunk;	/* bytes read so far */
	  position= add64ul(position, chunk);	/* position within the file */
  }

  fs_m_out.RES_SEEK_POS_LO = ex64lo(position);
  fs_m_out.RES_SEEK_POS_HI = ex64hi(position);

  if (rdwt_err != OK) r = rdwt_err;	/* check for disk error */
  if (rdwt_err == END_OF_FILE) r = OK;

  fs_m_out.RES_NBYTES = cum_io;
  return(r);
}
/*===========================================================================*
 *				actual_llseek				     *
 *===========================================================================*/
int actual_llseek(struct fproc *rfp, message *m_out, int seekfd,
	int seekwhence, u64_t offset)
{
/* Perform the llseek(ls_fd, offset, whence) system call.  'offset' is a
 * 64-bit value whose high word ('off_hi') determines its sign; the new
 * 64-bit position is returned in reply_l1 (low) and reply_l2 (high). */
  register struct filp *rfilp;
  u64_t pos, newpos;
  int r = OK;
  long off_hi = ex64hi(offset);

  /* Check to see if the file descriptor is valid. */
  if ( (rfilp = get_filp2(rfp, seekfd, VNODE_READ)) == NULL) {
	return(err_code);
  }

  /* No lseek on pipes. */
  if (S_ISFIFO(rfilp->filp_vno->v_mode)) {
	unlock_filp(rfilp);
	return(ESPIPE);
  }

  /* The value of 'whence' determines the start position to use. */
  switch(seekwhence) {
    case SEEK_SET: pos = cvu64(0); break;
    case SEEK_CUR: pos = rfilp->filp_pos; break;
    case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size); break;
    default: unlock_filp(rfilp); return(EINVAL);
  }

  /* Unsigned 64-bit addition; wrap-around is detected just below. */
  newpos = pos + offset;

  /* Check for overflow. */
  if ((off_hi > 0) && cmp64(newpos, pos) < 0)
	r = EINVAL;	/* positive offset wrapped around */
  else if ((off_hi < 0) && cmp64(newpos, pos) > 0)
	r = EINVAL;	/* negative offset moved past zero */
  else {
	/* insert the new position into the output message */
	m_out->reply_l1 = ex64lo(newpos);
	m_out->reply_l2 = ex64hi(newpos);

	if (cmp64(newpos, rfilp->filp_pos) != 0) {
		rfilp->filp_pos = newpos;

		/* Inhibit read ahead request */
		r = req_inhibread(rfilp->filp_vno->v_fs_e,
			rfilp->filp_vno->v_inode_nr);
	}
  }

  unlock_filp(rfilp);
  return(r);
}
/*===========================================================================*
 *				fbd_transfer				     *
 *===========================================================================*/
static int fbd_transfer(dev_t UNUSED(minor), int do_write, u64_t position,
	endpoint_t endpt, iovec_t *iov, unsigned int nr_req, int flags)
{
	/* Transfer data from or to the device.  Fault-injection rules that
	 * match the request range may rewrite the request (pre hook), force
	 * the data through a local buffer (I/O hook), or alter the result
	 * (post hook).
	 */
	unsigned count;
	size_t size, osize;
	int i, hooks;
	ssize_t r;

	/* Compute the total size of the request. */
	for (size = i = 0; i < nr_req; i++)
		size += iov[i].iov_size;

	osize = size;	/* keep the original size for the post hook */
	count = nr_req;

	/* Find the hooks that apply to this range and direction. */
	hooks = rule_find(position, size, do_write ? FBD_FLAG_WRITE :
		FBD_FLAG_READ);

#if DEBUG
	printf("FBD: %s operation for pos %lx:%08lx size %u -> hooks %x\n",
		do_write ? "write" : "read", ex64hi(position),
		ex64lo(position), size, hooks);
#endif

	/* The pre hook may rewrite the vector, count, size and position. */
	if (hooks & PRE_HOOK)
		rule_pre_hook(iov, &count, &size, &position);

	if (count > 0) {
		if (hooks & IO_HOOK) {
			/* Stage through a local buffer so the data can be
			 * inspected or modified on the way through. */
			r = fbd_transfer_copy(do_write, position, endpt, iov,
				count, size, flags);
		} else {
			/* Pass the request through to the real driver. */
			r = fbd_transfer_direct(do_write, position, endpt,
				iov, count, flags);
		}
	}
	else r = 0;

	/* The post hook may modify the result. */
	if (hooks & POST_HOOK)
		rule_post_hook(osize, &r);

#if DEBUG
	printf("FBD: returning %d\n", r);
#endif

	return r;
}
/*===========================================================================* * do_llseek * *===========================================================================*/ PUBLIC int do_llseek() { /* Perform the llseek(ls_fd, offset, whence) system call. */ register struct filp *rfilp; u64_t pos, newpos; int r = OK; /* Check to see if the file descriptor is valid. */ if ( (rfilp = get_filp(m_in.ls_fd, VNODE_READ)) == NULL) return(err_code); /* No lseek on pipes. */ if (rfilp->filp_vno->v_pipe == I_PIPE) { unlock_filp(rfilp); return(ESPIPE); } /* The value of 'whence' determines the start position to use. */ switch(m_in.whence) { case SEEK_SET: pos = cvu64(0); break; case SEEK_CUR: pos = rfilp->filp_pos; break; case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size); break; default: unlock_filp(rfilp); return(EINVAL); } newpos = add64(pos, make64(m_in.offset_lo, m_in.offset_high)); /* Check for overflow. */ if (( (long) m_in.offset_high > 0) && cmp64(newpos, pos) < 0) r = EINVAL; else if (( (long) m_in.offset_high < 0) && cmp64(newpos, pos) > 0) r = EINVAL; else { rfilp->filp_pos = newpos; /* insert the new position into the output message */ m_out.reply_l1 = ex64lo(newpos); m_out.reply_l2 = ex64hi(newpos); if (cmp64(newpos, rfilp->filp_pos) != 0) { /* Inhibit read ahead request */ r = req_inhibread(rfilp->filp_vno->v_fs_e, rfilp->filp_vno->v_inode_nr); } } unlock_filp(rfilp); return(r); }
static double make_double(u64_t d)
{
/* Convert a 64-bit fixed point value into a double.
 * This whole thing should be replaced by something better eventually.
 *
 * BUG FIX: the old code multiplied the high word by 65536.0 once per two
 * bytes of 'unsigned long', i.e. scaled it by 2^(8*sizeof(unsigned long)).
 * That is 2^32 only on ILP32 targets and silently wrong (2^64) where long
 * is 64 bits.  The high word returned by ex64hi() is always exactly 32
 * bits, so scale by a fixed 2^32 instead; results are unchanged on the
 * 32-bit targets the old code was written for.
 */
	return (double) ex64hi(d) * 4294967296.0 /* 2^32 */
		+ (double) ex64lo(d);
}
/*===========================================================================*
 *				vfs_request				     *
 *===========================================================================*/
int vfs_request(int reqno, int fd, struct vmproc *vmp, u64_t offset,
	u32_t len, vfs_callback_t reply_callback, void *cbarg,
	void *state, int statelen)
{
/* Perform an asynchronous request to VFS.
 * We send a message of type VFS_VMCALL to VFS. VFS will respond
 * with message type VM_VFS_REPLY. We send the request asynchronously
 * and then handle the reply as it if were a VM_VFS_REPLY request.
 * Returns OK once the request is queued, or ENOMEM.
 */
	message *m;
	static int reqid = 0;	/* monotonically increasing request id,
				 * used to match replies to requests */
	struct vfs_request_node *reqnode;

	reqid++;

	assert(statelen <= STATELEN);

	if(!SLABALLOC(reqnode)) {
		printf("vfs_request: no memory for request node\n");
		return ENOMEM;
	}

	/* Build the request message inside the queue node. */
	m = &reqnode->reqmsg;
	m->m_type = VFS_VMCALL;
	m->VFS_VMCALL_REQ = reqno;
	m->VFS_VMCALL_FD = fd;
	m->VFS_VMCALL_REQID = reqid;
	m->VFS_VMCALL_ENDPOINT = vmp->vm_endpoint;
	m->VFS_VMCALL_OFFSET_LO = ex64lo(offset);
	m->VFS_VMCALL_OFFSET_HI = ex64hi(offset);
	m->VFS_VMCALL_LENGTH = len;

	/* Record callback info and push the node onto the pending list. */
	reqnode->who = vmp->vm_endpoint;
	reqnode->req_id = reqid;
	reqnode->next = first_queued;
	reqnode->callback = reply_callback;
	reqnode->opaque = cbarg;
	if(state) memcpy(reqnode->reqstate, state, statelen);
	first_queued = reqnode;

	/* Send the request message if none pending. */
	if(!active)
		activate();

	return OK;
}
/*===========================================================================*
 *				req_bpeek				     *
 *===========================================================================*/
int req_bpeek(endpoint_t fs_e, dev_t dev, u64_t pos, unsigned int num_of_bytes)
{
/* Ask the file server 'fs_e' to peek (prefetch into its cache) a range of
 * 'num_of_bytes' bytes on block device 'dev' starting at 'pos'.
 * Returns the reply status of the synchronous request.
 */
  message m;

  memset(&m, 0, sizeof(m));

  /* Fill in request message */
  m.m_type = REQ_BPEEK;
  m.REQ_DEV2 = dev;
  m.REQ_SEEK_POS_LO = ex64lo(pos);
  m.REQ_SEEK_POS_HI = ex64hi(pos);
  m.REQ_NBYTES = num_of_bytes;

  /* Send/rec request.  (An unreachable 'return(OK);' that followed this
   * statement has been removed.) */
  return fs_sendrec(fs_e, &m);
}
/*===========================================================================*
 *				req_getdents				     *
 *===========================================================================*/
PUBLIC int req_getdents(
  endpoint_t fs_e,
  ino_t inode_nr,
  u64_t pos,
  char *buf,
  size_t size,
  u64_t *new_pos,
  int direct
)
{
/* Ask file server 'fs_e' for directory entries of 'inode_nr' starting at
 * 'pos'.  On success *new_pos is updated and the byte count is returned;
 * otherwise the (negative) request status is returned. */
  int r;
  message m;
  cp_grant_id_t gid;

  /* Grant the FS write access to the destination buffer: directly when it
   * is our own buffer, or via a magic grant on behalf of the caller. */
  gid = direct ?
	cpf_grant_direct(fs_e, (vir_bytes) buf, size, CPF_WRITE) :
	cpf_grant_magic(fs_e, who_e, (vir_bytes) buf, size, CPF_WRITE);

  if (gid < 0)
	panic("req_getdents: cpf_grant_direct/cpf_grant_magic failed: %d",
		gid);

  m.m_type = REQ_GETDENTS;
  m.REQ_INODE_NR = inode_nr;
  m.REQ_GRANT = gid;
  m.REQ_MEM_SIZE = size;
  m.REQ_SEEK_POS_LO = ex64lo(pos);
  m.REQ_SEEK_POS_HI = 0;	/* Not used for now, so clear it. */

  r = fs_sendrec(fs_e, &m);
  cpf_revoke(gid);

  if (r == OK) {
	*new_pos = cvul64(m.RES_SEEK_POS_LO);
	r = m.RES_NBYTES;
  }

  return(r);
}
/*===========================================================================*
 *				fbd_transfer_direct			     *
 *===========================================================================*/
static ssize_t fbd_transfer_direct(int do_write, u64_t position,
	endpoint_t endpt, iovec_t *iov, unsigned int count, int flags)
{
	/* Forward the entire transfer request, without any intervention.
	 * The caller's grants are re-granted indirectly to the underlying
	 * driver, which is then asked to perform the vectored transfer.
	 */
	iovec_s_t iovec[NR_IOREQS];
	cp_grant_id_t grant;
	message m;
	int i, r;

	/* Turn each caller grant into an indirect grant for the driver. */
	for (i = 0; i < count; i++) {
		iovec[i].iov_size = iov[i].iov_size;
		iovec[i].iov_grant = cpf_grant_indirect(driver_endpt, endpt,
			iov[i].iov_addr);
		assert(iovec[i].iov_grant != GRANT_INVALID);
	}

	/* Grant the driver read access to the local vector itself. */
	grant = cpf_grant_direct(driver_endpt, (vir_bytes) iovec,
		count * sizeof(iovec[0]), CPF_READ);
	assert(grant != GRANT_INVALID);

	m.m_type = do_write ? BDEV_SCATTER : BDEV_GATHER;
	m.BDEV_MINOR = driver_minor;
	m.BDEV_COUNT = count;
	m.BDEV_GRANT = grant;
	m.BDEV_FLAGS = flags;
	m.BDEV_ID = 0;
	m.BDEV_POS_LO = ex64lo(position);
	m.BDEV_POS_HI = ex64hi(position);

	/* Synchronously forward the request and wait for the reply. */
	if ((r = sendrec(driver_endpt, &m)) != OK)
		panic("sendrec to driver failed (%d)\n", r);

	if (m.m_type != BDEV_REPLY)
		panic("invalid reply from driver (%d)\n", m.m_type);

	/* Revoke all grants before returning the driver's status. */
	cpf_revoke(grant);

	for (i = 0; i < count; i++)
		cpf_revoke(iovec[i].iov_grant);

	return m.BDEV_STATUS;
}