/* Decrement the number of uses left on the key and report whether the key
 * is still normal, past its soft limit, or expired (hard limit reached).
 */
key_event_t key_limit_update (key_limit_t key) {
#ifdef NO_64BIT_MATH
  if (low32(key->num_left) == 0) {
	/* borrow: the low word wraps to 0xffffffff, so take one from the
	 * high word
	 */
	key->num_left = make64(high32(key->num_left) - 1,
			       low32(key->num_left) - 1);
  } else {
	/* no borrow needed */
	key->num_left = make64(high32(key->num_left),
			       low32(key->num_left) - 1);
  }
  if (high32(key->num_left) != 0
      || low32(key->num_left) >= soft_limit) {
	return key_event_normal;   /* we're above the soft limit */
  }
#else
  key->num_left--;
  if (key->num_left >= soft_limit) {
	return key_event_normal;   /* we're above the soft limit */
  }
#endif
  if (key->state == key_state_normal) {
	/* we just passed the soft limit, so change the state */
	key->state = key_state_past_soft_limit;
  }
#ifdef NO_64BIT_MATH
  /* BUG FIX: the original wrote high32(key->num_left == 0), which takes the
   * high half of a boolean (always zero) and so could report the hard limit
   * while the high word was still nonzero; parenthesis corrected.
   */
  if (low32(key->num_left) == 0 && high32(key->num_left) == 0)
#else
  if (key->num_left < 1)
#endif
  {
	/* we just hit the hard limit */
	key->state = key_state_expired;
	return key_event_hard_limit;
  }
  return key_event_soft_limit;
}
/*===========================================================================*
 *				hgfs_queryvol				     *
 *===========================================================================*/
int hgfs_queryvol(const char *path, u64_t *free, u64_t *total)
{
/* Return the free and total byte counts of the volume on which the given
 * path resides.
 */
  u32_t word_lo, word_hi;
  int r;

  RPC_REQUEST(HGFS_REQ_QUERYVOL);

  path_put(path);

  /* It appears that this call always fails with EACCES ("permission denied")
   * on read-only folders. As far as I can tell, this is a VMware bug.
   */
  r = rpc_query();
  if (r != OK)
	return r;

  /* The reply carries each 64-bit value as a low word followed by a high
   * word; consume them in that exact order.
   */
  word_lo = RPC_NEXT32;
  word_hi = RPC_NEXT32;
  *free = make64(word_lo, word_hi);

  word_lo = RPC_NEXT32;
  word_hi = RPC_NEXT32;
  *total = make64(word_lo, word_hi);

  return OK;
}
int bdev_restart_asyn(bdev_call_t *call)
{
/* The driver for the given call has restarted, and may now have a new
 * endpoint. Recreate and resend the request for the given call.
 */
  int type, r = OK;

  /* Update and check the retry limit for driver restarts first. */
  if (++call->driver_tries >= DRIVER_TRIES)
	return EDEADSRCDST;

  /* Recreate all grants for the new endpoint. */
  type = call->msg.m_type;

  switch (type) {
  case BDEV_READ:
  case BDEV_WRITE:
	/* Single-buffer transfer: tear down the old grant and create a new
	 * one for the same buffer, position, count and flags.
	 */
	bdev_rdwt_cleanup(&call->msg);

	r = bdev_rdwt_setup(type, call->dev,
	  make64(call->msg.BDEV_POS_LO, call->msg.BDEV_POS_HI),
	  (char *) call->vec[0].iov_addr, call->msg.BDEV_COUNT,
	  call->msg.BDEV_FLAGS, &call->msg);

	break;

  case BDEV_GATHER:
  case BDEV_SCATTER:
	/* Vectored transfer: recreate the grants for the whole I/O vector. */
	bdev_vrdwt_cleanup(&call->msg, call->gvec);

	r = bdev_vrdwt_setup(type, call->dev,
	  make64(call->msg.BDEV_POS_LO, call->msg.BDEV_POS_HI),
	  call->vec, call->msg.BDEV_COUNT, call->msg.BDEV_FLAGS,
	  &call->msg, call->gvec);

	break;

  case BDEV_IOCTL:
	/* I/O control: recreate the grant for the argument buffer. */
	bdev_ioctl_cleanup(&call->msg);

	r = bdev_ioctl_setup(call->dev, call->msg.BDEV_REQUEST,
	  (char *) call->vec[0].iov_addr, &call->msg);

	break;

  default:
	/* No other message types are ever queued for resending. */
	assert(0);
  }

  if (r != OK)
	return r;

  /* Try to resend the request. */
  return bdev_senda(call->dev, &call->msg, call->id);
}
/*===========================================================================*
 *				fs_breadwrite_s				     *
 *===========================================================================*/
PUBLIC int fs_breadwrite_s(void)
{
/* Transfer bytes directly between a block device and a caller-granted
 * buffer, bypassing any file. The transfer is split into chunks that do not
 * cross block boundaries.
 */
  int r, rw_flag, chunk, block_size;
  cp_grant_id_t gid;
  int nrbytes;
  u64_t position;
  unsigned int off, cum_io;
  mode_t mode_word;
  int completed, r2 = OK;

  /* Pseudo inode for rw_chunk */
  struct inode rip;

  r = OK;

  /* Get the values from the request message */
  rw_flag = (fs_m_in.m_type == REQ_BREAD_S ? READING : WRITING);
  gid = fs_m_in.REQ_XFD_GID;
  position = make64(fs_m_in.REQ_XFD_POS_LO, fs_m_in.REQ_XFD_POS_HI);
  nrbytes = (unsigned) fs_m_in.REQ_XFD_NBYTES;

  block_size = get_block_size(fs_m_in.REQ_XFD_BDEV);

  /* Fake an inode that refers to the block device itself. */
  rip.i_zone[0] = fs_m_in.REQ_XFD_BDEV;
  rip.i_mode = I_BLOCK_SPECIAL;
  rip.i_size = 0;

  rdwt_err = OK;		/* set to EIO if disk error occurs */

  cum_io = 0;
  /* Split the transfer into chunks that don't span two blocks. */
  while (nrbytes != 0) {
	  off = rem64u(position, block_size);	/* offset in blk*/

	  chunk = MIN(nrbytes, block_size - off);
	  /* Guard against signed wrap-around of the int chunk value. */
	  if (chunk < 0) chunk = block_size - off;

	  /* Read or write 'chunk' bytes. */
	  r = rw_chunk_s(&rip, position, off, chunk, (unsigned) nrbytes,
			 rw_flag, gid, cum_io, block_size, &completed);

	  if (r != OK) break;	/* EOF reached */
	  if (rdwt_err < 0) break;

	  /* Update counters and pointers. */
	  nrbytes -= chunk;	/* bytes yet to be read */
	  cum_io += chunk;	/* bytes read so far */
	  position= add64ul(position, chunk);	/* position within the file */
  }

  /* Report the final position and total byte count back to the caller. */
  fs_m_out.RES_XFD_POS_LO = ex64lo(position);
  fs_m_out.RES_XFD_POS_HI = ex64hi(position);

  if (rdwt_err != OK) r = rdwt_err;	/* check for disk error */
  if (rdwt_err == END_OF_FILE) r = OK;

  fs_m_out.RES_XFD_CUM_IO = cum_io;

  return(r);
}
/*===========================================================================*
 *				do_rdwt					     *
 *===========================================================================*/
static int do_rdwt(struct chardriver *cdp, message *mp)
{
/* Perform a single read or write transfer on behalf of the caller. */
  iovec_t iov;
  u64_t pos;
  int opcode, r;

  /* Reject negative byte counts up front. */
  if (mp->COUNT < 0) return(EINVAL);

  /* Let the driver prepare for the transfer on this device. */
  if ((*cdp->cdr_prepare)(mp->DEVICE) == NULL) return(ENXIO);

  /* Build a one-element scatter/gather vector for the user buffer. */
  opcode = (mp->m_type == DEV_READ_S) ? DEV_GATHER_S : DEV_SCATTER_S;

  iov.iov_addr = (vir_bytes) mp->IO_GRANT;
  iov.iov_size = mp->COUNT;

  /* Hand the actual transfer to the driver. */
  pos = make64(mp->POSITION, mp->HIGHPOS);
  r = (*cdp->cdr_transfer)(mp->m_source, opcode, pos, &iov, 1,
	mp->USER_ENDPT, mp->FLAGS);

  /* On success, the residual iov_size tells how much was NOT transferred;
   * report the number of bytes that were.
   */
  if (r == OK)
	return (int) (mp->COUNT - iov.iov_size);

  return r;
}
PRIVATE void estimate_cpu_freq(void)
{
/* Estimate this CPU's clock frequency by counting TSC cycles across a fixed
 * number of legacy timer (PIT, IRQ 0) ticks.
 */
  u64_t tsc_delta;
  u64_t cpu_freq;

  irq_hook_t calib_cpu;

  /* set the probe, we use the legacy timer, IRQ 0 */
  put_irq_handler(&calib_cpu, CLOCK_IRQ, calib_cpu_handler);

  /* just in case we are in an SMP single cpu fallback mode */
  BKL_UNLOCK();
  /* set the PIC timer to get some time */
  intr_enable();

  /* loop for some time to get a sample; the probe handler records the TSC
   * values (tsc0/tsc1) and advances probe_ticks
   */
  while(probe_ticks < PROBE_TICKS) {
	intr_enable();
  }

  intr_disable();
  /* just in case we are in an SMP single cpu fallback mode */
  BKL_LOCK();

  /* remove the probe */
  rm_irq_handler(&calib_cpu);

  /* TSC cycles elapsed over (PROBE_TICKS - 1) full tick intervals. */
  tsc_delta = sub64(tsc1, tsc0);

  /* cycles per tick times ticks per second gives cycles per second (Hz) */
  cpu_freq = mul64(div64u64(tsc_delta, PROBE_TICKS - 1),
		make64(system_hz, 0));
  cpu_set_freq(cpuid, cpu_freq);
  /* also record the frequency in MHz for reporting */
  cpu_info[cpuid].freq = div64u(cpu_freq, 1000000);
  BOOT_VERBOSE(cpu_print_freq(cpuid));
}
/*===========================================================================*
 *				do_rdwt					     *
 *===========================================================================*/
static int do_rdwt(struct blockdriver *bdp, message *mp)
{
/* Handle one read or write request on a single buffer. */
  iovec_t iov;
  u64_t pos;
  int writing;
  ssize_t r;

  /* A negative byte count is invalid. */
  if (mp->BDEV_COUNT < 0) return EINVAL;

  /* Describe the caller's buffer as a one-element I/O vector. */
  iov.iov_addr = mp->BDEV_GRANT;
  iov.iov_size = mp->BDEV_COUNT;

  /* Ask the driver to perform the transfer at the requested position. */
  writing = (mp->m_type == BDEV_WRITE);
  pos = make64(mp->BDEV_POS_LO, mp->BDEV_POS_HI);

  r = (*bdp->bdr_transfer)(mp->BDEV_MINOR, writing, pos, mp->m_source,
	&iov, 1, mp->BDEV_FLAGS);

  /* Return the number of bytes transferred or an error code. */
  return r;
}
/* Reset the extended sequence number to zero. */
void index_init(xtd_seq_num_t *pi)
{
#ifdef NO_64BIT_MATH
  /* no native 64-bit type: build the zero value from two 32-bit halves */
  *pi = make64(0, 0);
#else
  *pi = 0;
#endif
}
/*===========================================================================*
 *				fs_breadwrite				     *
 *===========================================================================*/
int fs_breadwrite(void)
{
/* Transfer bytes directly between a block device and a caller-granted
 * buffer, bypassing any file. The transfer is split into chunks that do not
 * cross block boundaries.
 */
  int r, rw_flag, completed;
  cp_grant_id_t gid;
  u64_t position;
  unsigned int off, cum_io, chunk, block_size;
  size_t nrbytes;

  /* Pseudo inode for rw_chunk */
  struct inode rip;

  r = OK;

  /* Get the values from the request message */
  rw_flag = (fs_m_in.m_type == REQ_BREAD ? READING : WRITING);
  gid = (cp_grant_id_t) fs_m_in.REQ_GRANT;
  position = make64((unsigned long) fs_m_in.REQ_SEEK_POS_LO,
  		    (unsigned long) fs_m_in.REQ_SEEK_POS_HI);
  nrbytes = (size_t) fs_m_in.REQ_NBYTES;

  block_size = get_block_size( (dev_t) fs_m_in.REQ_DEV2);

  /* Fake an inode that refers to the block device itself. */
  rip.i_block[0] = (block_t) fs_m_in.REQ_DEV2;
  rip.i_mode = I_BLOCK_SPECIAL;
  rip.i_size = 0;

  rdwt_err = OK;		/* set to EIO if disk error occurs */

  cum_io = 0;
  /* Split the transfer into chunks that don't span two blocks. */
  while (nrbytes > 0) {
	  off = rem64u(position, block_size);	/* offset in blk*/
	  chunk = min(nrbytes, block_size - off);

	  /* Read or write 'chunk' bytes. */
	  r = rw_chunk(&rip, position, off, chunk, nrbytes, rw_flag, gid,
	  	       cum_io, block_size, &completed);

	  if (r != OK) break;	/* EOF reached */
	  if (rdwt_err < 0) break;

	  /* Update counters and pointers. */
	  nrbytes -= chunk;	/* bytes yet to be read */
	  cum_io += chunk;	/* bytes read so far */
	  position = add64ul(position, chunk);	/* position within the file */
  }

  /* Report the final position and total byte count back to the caller. */
  fs_m_out.RES_SEEK_POS_LO = ex64lo(position);
  fs_m_out.RES_SEEK_POS_HI = ex64hi(position);

  if (rdwt_err != OK) r = rdwt_err;	/* check for disk error */
  if (rdwt_err == END_OF_FILE) r = OK;

  fs_m_out.RES_NBYTES = cum_io;

  return(r);
}
/*===========================================================================*
 *				do_read					     *
 *===========================================================================*/
int do_read()
{
/* Read data from a file. */
  struct inode *ino;
  u64_t pos;
  size_t count, size;
  vir_bytes off;
  char *ptr;
  int r, chunk;

  /* Look up the inode; reading from directories is not allowed. */
  if ((ino = find_inode(m_in.REQ_INODE_NR)) == NULL)
	return EINVAL;

  if (IS_DIR(ino)) return EISDIR;

  /* Make sure we have an open host handle for this inode. */
  if ((r = get_handle(ino)) != OK)
	return r;

  pos = make64(m_in.REQ_SEEK_POS_LO, m_in.REQ_SEEK_POS_HI);
  count = m_in.REQ_NBYTES;

  assert(count > 0);

  /* Use the buffer from below to eliminate extra copying. */
  size = sffs_table->t_readbuf(&ptr);
  off = 0;

  while (count > 0) {
	/* Never read more than the shared buffer can hold. */
	chunk = MIN(count, size);

	/* Read from the host; a nonpositive result (EOF or error) ends the
	 * loop.
	 */
	if ((r = sffs_table->t_read(ino->i_file, ptr, chunk, pos)) <= 0)
		break;

	chunk = r;	/* number of bytes actually read */

	/* Copy the data into the caller's grant at the current offset. */
	r = sys_safecopyto(m_in.m_source, m_in.REQ_GRANT, off,
		(vir_bytes) ptr, chunk, D);

	if (r != OK)
		break;

	count -= chunk;
	off += chunk;
	pos = add64u(pos, chunk);
  }

  if (r < 0)
	return r;

  /* Report the new position and the number of bytes transferred. */
  m_out.RES_SEEK_POS_HI = ex64hi(pos);
  m_out.RES_SEEK_POS_LO = ex64lo(pos);
  m_out.RES_NBYTES = off;

  return OK;
}
/*===========================================================================* * do_vrdwt * *===========================================================================*/ static int do_vrdwt(int flag_rw) { size_t size, size_ret; int grants; int r, i; u64_t pos; iovec_t iov_proc[NR_IOREQS]; /* Extract informations. */ grants = m_in.BDEV_COUNT; if((r = sys_safecopyfrom(who_e, grant_id, 0, (vir_bytes) iov_proc, grants * sizeof(iovec_t))) != OK) { panic("copying in grant vector failed: %d", r); } pos = make64(m_in.BDEV_POS_LO, m_in.BDEV_POS_HI); for(size = 0, i = 0; i < grants; i++) size += iov_proc[i].iov_size; if (rem64u(pos, SECTOR_SIZE) != 0 || size % SECTOR_SIZE != 0) { printf("Filter: unaligned request from caller!\n"); return EINVAL; } buffer = flt_malloc(size, buf_array, BUF_SIZE); if(flag_rw == FLT_WRITE) vcarry(grants, iov_proc, flag_rw, size); reset_kills(); for (;;) { size_ret = size; r = transfer(pos, buffer, &size_ret, flag_rw); if(r != RET_REDO) break; #if DEBUG printf("Filter: transfer yielded RET_REDO, checking drivers\n"); #endif if((r = check_driver(DRIVER_MAIN)) != OK) break; if((r = check_driver(DRIVER_BACKUP)) != OK) break; } if(r != OK) { flt_free(buffer, size, buf_array); return r; } if(flag_rw == FLT_READ) vcarry(grants, iov_proc, flag_rw, size_ret); flt_free(buffer, size, buf_array); return size_ret; }
/* Advance the extended sequence number *pi by s. */
void index_advance(xtd_seq_num_t *pi, sequence_number_t s)
{
#ifdef NO_64BIT_MATH
  /* a > ~b means a+b will generate a carry */
  /* s is uint16 here */
  /* propagate the carry from the low word into the high word by hand */
  *pi = make64(high32(*pi) + (s > ~low32(*pi) ? 1 : 0),low32(*pi) + s);
#else
  *pi += s;
#endif
}
/*===========================================================================*
 *				do_llseek				     *
 *===========================================================================*/
int do_llseek()
{
/* Perform the llseek(ls_fd, offset, whence) system call. */
  register struct filp *rfilp;
  u64_t pos, newpos;
  int r = OK, seekfd, seekwhence;
  long off_hi, off_lo;

  seekfd = job_m_in.ls_fd;
  seekwhence = job_m_in.whence;
  off_hi = job_m_in.offset_high;
  off_lo = job_m_in.offset_lo;

  /* Check to see if the file descriptor is valid. */
  if ( (rfilp = get_filp(seekfd, VNODE_READ)) == NULL) return(err_code);

  /* No lseek on pipes. */
  if (S_ISFIFO(rfilp->filp_vno->v_mode)) {
	unlock_filp(rfilp);
	return(ESPIPE);
  }

  /* The value of 'whence' determines the start position to use. */
  switch(seekwhence) {
    case SEEK_SET: pos = cvu64(0); break;
    case SEEK_CUR: pos = rfilp->filp_pos; break;
    case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size); break;
    default: unlock_filp(rfilp); return(EINVAL);
  }

  newpos = add64(pos, make64(off_lo, off_hi));

  /* Check for overflow: a positive offset must not move the position
   * backward and a negative offset must not move it forward.
   */
  if ((off_hi > 0) && cmp64(newpos, pos) < 0)
      r = EINVAL;
  else if ((off_hi < 0) && cmp64(newpos, pos) > 0)
      r = EINVAL;
  else {
	/* insert the new position into the output message */
	m_out.reply_l1 = ex64lo(newpos);
	m_out.reply_l2 = ex64hi(newpos);

	if (cmp64(newpos, rfilp->filp_pos) != 0) {
		rfilp->filp_pos = newpos;

		/* Inhibit read ahead request */
		r = req_inhibread(rfilp->filp_vno->v_fs_e,
				  rfilp->filp_vno->v_inode_nr);
	}
  }

  /* Release the filp lock taken by get_filp() on every exit path. */
  unlock_filp(rfilp);
  return(r);
}
/*===========================================================================* * update_idle_time * *===========================================================================*/ static void update_idle_time(void) { int i; struct proc * idl = proc_addr(IDLE); idl->p_cycles = make64(0, 0); for (i = 0; i < CONFIG_MAX_CPUS ; i++) { idl->p_cycles += get_cpu_var(i, idle_proc).p_cycles; } }
/*===========================================================================*
 *				fs_bread				     *
 *===========================================================================*/
int fs_bread(void)
{
/* Transfer bytes from the underlying device of this (read-only) file system
 * directly into a caller-granted buffer. Writes are rejected with EIO.
 */
  int r, rw_flag, chunk, block_size;
  cp_grant_id_t gid;
  int nrbytes;
  u64_t position;
  unsigned int off, cum_io;
  int completed;
  struct dir_record *dir;

  r = OK;

  rw_flag = (fs_m_in.m_type == REQ_BREAD ? READING : WRITING);
  gid = fs_m_in.REQ_GRANT;
  position = make64(fs_m_in.REQ_SEEK_POS_LO, fs_m_in.REQ_SEEK_POS_HI);
  nrbytes = (unsigned) fs_m_in.REQ_NBYTES;
  block_size = v_pri.logical_block_size_l;
  dir = v_pri.dir_rec_root;

  if(rw_flag == WRITING) return (EIO);	/* Not supported */
  rdwt_err = OK;		/* set to EIO if disk error occurs */

  cum_io = 0;
  /* Split the transfer into chunks that don't span two blocks. */
  while (nrbytes != 0) {
	  off = rem64u(position, block_size);	/* offset in blk*/
	  chunk = MIN(nrbytes, block_size - off);
	  /* Guard against signed wrap-around of the int chunk value. */
	  if (chunk < 0) chunk = block_size - off;

	  /* Read 'chunk' bytes. */
	  r = read_chunk(dir, position, off, chunk, (unsigned) nrbytes,
	  		 gid, cum_io, block_size, &completed);

	  if (r != OK) break;	/* EOF reached */
	  if (rdwt_err < 0) break;

	  /* Update counters and pointers. */
	  nrbytes -= chunk;	/* bytes yet to be read */
	  cum_io += chunk;	/* bytes read so far */
	  position= add64ul(position, chunk);	/* position within the file */
  }

  /* Report the final position and total byte count back to the caller. */
  fs_m_out.RES_SEEK_POS_LO = ex64lo(position);
  fs_m_out.RES_SEEK_POS_HI = ex64hi(position);

  if (rdwt_err != OK) r = rdwt_err;	/* check for disk error */
  if (rdwt_err == END_OF_FILE) r = OK;

  fs_m_out.RES_NBYTES = cum_io;

  return(r);
}
/*===========================================================================*
 *				time_init				     *
 *===========================================================================*/
PUBLIC void time_init()
{
/* Set up the time conversion module.
 *
 * HGFS timestamps use Windows' FILETIME format: 100ns units counted from
 * Jan 1, 1601 UTC. The offset built below equals 116444736000000000, the
 * number of 100ns units between the FILETIME epoch and the Unix epoch,
 * assembled from its low and high 32-bit halves.
 */
/* FIXME: we currently do not take into account timezones. */

  time_offset = make64(3577643008UL, 27111902UL);
}
/* Return the index'th 64-bit test operand, cycling through corner cases,
 * random values, small/big numbers and powers of two; set *done once the
 * sequence is exhausted.
 */
static u64_t getargval(int index, int *done)
{
  u32_t values[] = {
	/* corner cases */
	0, 1, 0x7fffffff, 0x80000000, 0x80000001, 0xffffffff,
	/* random values */
	0xa9, 0x0d88, 0x242811, 0xeb44d1bc,
	0x5b, 0xfb50, 0x569c02, 0xb23c8f7d,
	0xc3, 0x2366, 0xfabb73, 0xcb4e8aef,
	0xe9, 0xffdc, 0x05842d, 0x3fff902d};

  assert(done);

  /* values with corner case and random 32-bit components */
  if (index < LENGTHOF(values) * LENGTHOF(values))
	return make64(values[index / LENGTHOF(values)],
		values[index % LENGTHOF(values)]);
  index -= LENGTHOF(values) * LENGTHOF(values);

  /* small numbers */
  if (index < 16) return make64(index + 2, 0);
  index -= 16;

  /* big numbers */
  if (index < 16) return make64(-index - 2, -1);
  index -= 16;

  /* powers of two.
   * BUG FIX: shift an unsigned 1 (1UL) instead of a signed int; with the
   * original "1 << ...", reaching a shift count of 31 (index*2+5 == 31 and
   * index*2+1 == 31) left-shifts into the sign bit, which is undefined
   * behavior for signed int.
   */
  if (index < 14) return make64(1UL << (index * 2 + 5), 0);
  index -= 14;

  if (index < 16) return make64(0, 1UL << (index * 2 + 1));
  index -= 16;

  /* done */
  *done = 1;
  return make64(0, 0);
}
/*===========================================================================* * do_rdwt * *===========================================================================*/ static int do_rdwt(int flag_rw) { size_t size, size_ret; u64_t pos; int r; pos = make64(m_in.BDEV_POS_LO, m_in.BDEV_POS_HI); size = m_in.BDEV_COUNT; if (rem64u(pos, SECTOR_SIZE) != 0 || size % SECTOR_SIZE != 0) { printf("Filter: unaligned request from caller!\n"); return EINVAL; } buffer = flt_malloc(size, buf_array, BUF_SIZE); if(flag_rw == FLT_WRITE) carry(size, flag_rw); reset_kills(); for (;;) { size_ret = size; r = transfer(pos, buffer, &size_ret, flag_rw); if(r != RET_REDO) break; #if DEBUG printf("Filter: transfer yielded RET_REDO, checking drivers\n"); #endif if((r = check_driver(DRIVER_MAIN)) != OK) break; if((r = check_driver(DRIVER_BACKUP)) != OK) break; } if(r == OK && flag_rw == FLT_READ) carry(size_ret, flag_rw); flt_free(buffer, size, buf_array); if (r != OK) return r; return size_ret; }
/*===========================================================================*
 *				do_vrdwt				     *
 *===========================================================================*/
static int do_vrdwt(struct chardriver *cdp, message *mp)
{
/* Carry out an device read or write to/from a vector of user addresses.
 * The "user addresses" are assumed to be safe, i.e. FS transferring to/from
 * its own buffers, so they are not checked.
 */
  iovec_t iovec[NR_IOREQS];
  phys_bytes iovec_size;
  unsigned nr_req;
  int r, opcode;
  u64_t position;

  nr_req = mp->COUNT;	/* Length of I/O vector */

  /* Copy the vector from the caller to kernel space. The vector is silently
   * truncated to the maximum number of requests handled at once.
   */
  if (nr_req > NR_IOREQS) nr_req = NR_IOREQS;
  iovec_size = (phys_bytes) (nr_req * sizeof(iovec[0]));

  if (OK != sys_safecopyfrom(mp->m_source, (vir_bytes) mp->IO_GRANT, 0,
		(vir_bytes) iovec, iovec_size, D)) {
	printf("bad I/O vector by: %d\n", mp->m_source);
	return(EINVAL);
  }

  /* Prepare for I/O. */
  if ((*cdp->cdr_prepare)(mp->DEVICE) == NULL) return(ENXIO);

  /* Transfer bytes from/to the device. */
  opcode = mp->m_type;
  position= make64(mp->POSITION, mp->HIGHPOS);
  r = (*cdp->cdr_transfer)(mp->m_source, opcode, position, iovec, nr_req,
	mp->USER_ENDPT, mp->FLAGS);

  /* Copy the I/O vector back to the caller, so it can see the per-element
   * residual sizes left by the transfer.
   */
  if (OK != sys_safecopyto(mp->m_source, (vir_bytes) mp->IO_GRANT, 0,
		(vir_bytes) iovec, iovec_size, D)) {
	printf("couldn't return I/O vector: %d\n", mp->m_source);
	return(EINVAL);
  }

  return(r);
}
/*===========================================================================*
 *				req_breadwrite				     *
 *===========================================================================*/
PUBLIC int req_breadwrite(
  endpoint_t fs_e,
  endpoint_t user_e,
  dev_t dev,
  u64_t pos,
  unsigned int num_of_bytes,
  char *user_addr,
  int rw_flag,
  u64_t *new_posp,
  unsigned int *cum_iop
)
{
/* Ask file server fs_e to transfer raw blocks between device dev and a
 * buffer in process user_e; return the new position and cumulative byte
 * count through the output pointers.
 */
  message m;
  cp_grant_id_t grant_id;
  int r;

  /* Grant the FS access to the user buffer for the duration of the call. */
  grant_id = cpf_grant_magic(fs_e, user_e, (vir_bytes) user_addr,
	num_of_bytes, (rw_flag == READING ? CPF_WRITE : CPF_READ));
  if (grant_id == -1)
	panic("req_breadwrite: cpf_grant_magic failed");

  /* Build the request message. */
  m.m_type = (rw_flag == READING) ? REQ_BREAD : REQ_BWRITE;
  m.REQ_DEV2 = dev;
  m.REQ_GRANT = grant_id;
  m.REQ_SEEK_POS_LO = ex64lo(pos);
  m.REQ_SEEK_POS_HI = ex64hi(pos);
  m.REQ_NBYTES = num_of_bytes;

  /* Perform the request, then revoke the grant regardless of the outcome. */
  r = fs_sendrec(fs_e, &m);
  cpf_revoke(grant_id);
  if (r != OK) return(r);

  /* Pass the results back to the caller. */
  *new_posp = make64(m.RES_SEEK_POS_LO, m.RES_SEEK_POS_HI);
  *cum_iop = m.RES_NBYTES;

  return(OK);
}
/*===========================================================================*
 *				do_llseek				     *
 *===========================================================================*/
PUBLIC int do_llseek()
{
/* Perform the llseek(ls_fd, offset, whence) system call. */
  register struct filp *rfilp;
  u64_t pos, newpos;
  int r;

  /* Check to see if the file descriptor is valid. */
  if ( (rfilp = get_filp(m_in.ls_fd)) == NULL) return(err_code);

  /* No lseek on pipes. */
  if (rfilp->filp_vno->v_pipe == I_PIPE) return(ESPIPE);

  /* The value of 'whence' determines the start position to use. */
  switch(m_in.whence) {
    case SEEK_SET: pos = cvu64(0); break;
    case SEEK_CUR: pos = rfilp->filp_pos; break;
    case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size); break;
    default: return(EINVAL);
  }

  newpos = add64(pos, make64(m_in.offset_lo, m_in.offset_high));

  /* Check for overflow: a positive offset must not move the position
   * backward, and a negative offset must not move it forward.
   */
  if (((long)m_in.offset_high > 0) && cmp64(newpos, pos) < 0)
	return(EINVAL);
  if (((long)m_in.offset_high < 0) && cmp64(newpos, pos) > 0)
	return(EINVAL);

  if (cmp64(newpos, rfilp->filp_pos) != 0) { /* Inhibit read ahead request */
	r = req_inhibread(rfilp->filp_vno->v_fs_e,
			  rfilp->filp_vno->v_inode_nr);
	if (r != OK) return(r);
  }

  rfilp->filp_pos = newpos;

  /* Return the new position split across two reply words. */
  m_out.reply_l1 = ex64lo(newpos);
  m_out.reply_l2 = ex64hi(newpos);
  return(OK);
}
PUBLIC short cpu_load(void) { u64_t current_tsc, *current_idle; u64_t tsc_delta, idle_delta, busy; struct proc *idle; short load; #ifdef CONFIG_SMP unsigned cpu = cpuid; #endif u64_t *last_tsc, *last_idle; last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc); last_idle = get_cpu_var_ptr(cpu, cpu_last_idle); idle = get_cpu_var_ptr(cpu, idle_proc);; read_tsc_64(¤t_tsc); current_idle = &idle->p_cycles; /* ptr to idle proc */ /* calculate load since last cpu_load invocation */ if (!is_zero64(*last_tsc)) { tsc_delta = sub64(current_tsc, *last_tsc); idle_delta = sub64(*current_idle, *last_idle); busy = sub64(tsc_delta, idle_delta); busy = mul64(busy, make64(100, 0)); load = ex64lo(div64(busy, tsc_delta)); if (load > 100) load = 100; } else load = 0; *last_tsc = current_tsc; *last_idle = *current_idle; return load; }
/*===========================================================================*
 *				do_vrdwt				     *
 *===========================================================================*/
static int do_vrdwt(struct blockdriver *bdp, message *mp, thread_id_t id)
{
/* Carry out an device read or write to/from a vector of buffers. */
  iovec_t iovec[NR_IOREQS];
  unsigned nr_req;
  u64_t position;
  int i, do_write;
  ssize_t r, size;

  /* Copy the vector from the caller to kernel space. The vector is silently
   * truncated to the maximum number of requests handled at once.
   */
  nr_req = mp->BDEV_COUNT;	/* Length of I/O vector */
  if (nr_req > NR_IOREQS) nr_req = NR_IOREQS;

  if (OK != sys_safecopyfrom(mp->m_source, (vir_bytes) mp->BDEV_GRANT, 0,
	(vir_bytes) iovec, nr_req * sizeof(iovec[0]))) {
	printf("blockdriver: bad I/O vector by: %d\n", mp->m_source);
	return EINVAL;
  }

  /* Check for overflow condition, and update the size for block tracing. */
  for (i = size = 0; i < nr_req; i++) {
	/* Reject vectors whose total size wraps a signed ssize_t. */
	if ((ssize_t) (size + iovec[i].iov_size) < size) return EINVAL;
	size += iovec[i].iov_size;
  }

  trace_setsize(id, size);

  /* Transfer bytes from/to the device. */
  do_write = (mp->m_type == BDEV_SCATTER);
  position = make64(mp->BDEV_POS_LO, mp->BDEV_POS_HI);

  r = (*bdp->bdr_transfer)(mp->BDEV_MINOR, do_write, position, mp->m_source,
	iovec, nr_req, mp->BDEV_FLAGS);

  /* Return the number of bytes transferred or an error code. */
  return r;
}
/*===========================================================================*
 *				get_block				     *
 *===========================================================================*/
struct buf *get_block(
  register dev_t dev,		/* on which device is the block? */
  register block_t block,	/* which block is wanted? */
  int only_search		/* if NO_READ, don't read, else act normal */
)
{
/* Check to see if the requested block is in the block cache. If so, return
 * a pointer to it. If not, evict some other block and fetch it (unless
 * 'only_search' is 1). All the blocks in the cache that are not in use
 * are linked together in a chain, with 'front' pointing to the least recently
 * used block and 'rear' to the most recently used block. If 'only_search' is
 * 1, the block being requested will be overwritten in its entirety, so it is
 * only necessary to see if it is in the cache; if it is not, any free buffer
 * will do. It is not necessary to actually read the block in from disk.
 * If 'only_search' is PREFETCH, the block need not be read from the disk,
 * and the device is not to be marked on the block, so callers can tell if
 * the block returned is valid.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */
  int b;
  static struct buf *bp, *prev_ptr;
  /* getid identifies this (dev, block) pair to the VM second-level cache;
   * yieldid identifies the contents of an evicted buffer, if any.
   */
  u64_t yieldid = VM_BLOCKID_NONE, getid = make64(dev, block);

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  /* Search the hash chain for (dev, block). Do_read() can use
   * get_block(NO_DEV ...) to get an unnamed block to fill with zeros when
   * someone wants to read from a hole in a file, in which case this search
   * is skipped
   */
  if (dev != NO_DEV) {
	b = BUFHASH(block);
	bp = buf_hash[b];
	while (bp != NULL) {
		if (bp->b_blocknr == block && bp->b_dev == dev) {
			/* Block needed has been found. */
			if (bp->b_count == 0) rm_lru(bp);
			bp->b_count++;	/* record that block is in use */
			ASSERT(bp->b_bytes == fs_block_size);
			ASSERT(bp->b_dev == dev);
			ASSERT(bp->b_dev != NO_DEV);
			ASSERT(bp->bp);
			return(bp);
		} else {
			/* This block is not the one sought. */
			bp = bp->b_hash; /* move to next block on hash chain */
		}
	}
  }

  /* Desired block is not on available chain. Take oldest block ('front'). */
  if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);

  /* Lazily allocate the buffer's data area at the current block size. */
  if(bp->b_bytes < fs_block_size) {
	ASSERT(!bp->bp);
	ASSERT(bp->b_bytes == 0);
	if(!(bp->bp = alloc_contig( (size_t) fs_block_size, 0, NULL))) {
		printf("MFS: couldn't allocate a new block.\n");
		/* Allocation failed; fall back to the oldest buffer that
		 * already has a full-size data area.
		 */
		for(bp = front;
			bp && bp->b_bytes < fs_block_size; bp = bp->b_next)
			;
		if(!bp) {
			panic("no buffer available");
		}
	} else {
		bp->b_bytes = fs_block_size;
	}
  }

  ASSERT(bp);
  ASSERT(bp->bp);
  ASSERT(bp->b_bytes == fs_block_size);
  ASSERT(bp->b_count == 0);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->b_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->b_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->b_hash != NULL)
		if (prev_ptr->b_hash == bp) {
			prev_ptr->b_hash = bp->b_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->b_hash;	/* keep looking */
		}
  }

  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->b_dev != NO_DEV) {
	if (ISDIRTY(bp)) flushall(bp->b_dev);

	/* Are we throwing out a block that contained something?
	 * Give it to VM for the second-layer cache.
	 */
	yieldid = make64(bp->b_dev, bp->b_blocknr);
	assert(bp->b_bytes == fs_block_size);
	BP_CLEARDEV(bp);
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  if(dev == NO_DEV) BP_CLEARDEV(bp);
  else BP_SETDEV(bp, dev);
  bp->b_blocknr = block;	/* fill in block number */
  bp->b_count++;		/* record that block is being used */
  b = BUFHASH(bp->b_blocknr);
  bp->b_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  if(dev == NO_DEV) {
	/* Hand the evicted contents (if any) over to VM's cache before
	 * returning the unnamed block.
	 */
	if(vmcache && cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		vm_yield_block_get_block(yieldid, VM_BLOCKID_NONE,
			bp->bp, fs_block_size);
	}
	return(bp);	/* If the caller wanted a NO_DEV block, work is done. */
  }

  /* Go get the requested block unless searching or prefetching. */
  if(only_search == PREFETCH || only_search == NORMAL) {
	/* Block is not found in our cache, but we do want it
	 * if it's in the vm cache.
	 */
	if(vmcache) {
		/* If we can satisfy the PREFETCH or NORMAL request
		 * from the vm cache, work is done.
		 */
		if(vm_yield_block_get_block(yieldid, getid,
			bp->bp, fs_block_size) == OK) {
			return bp;
		}
	}
  }

  if(only_search == PREFETCH) {
	/* PREFETCH: don't do i/o. */
	BP_CLEARDEV(bp);
  } else if (only_search == NORMAL) {
	read_block(bp);
  } else if(only_search == NO_READ) {
	/* we want this block, but its contents
	 * will be overwritten. VM has to forget
	 * about it.
	 */
	if(vmcache) {
		vm_forgetblock(getid);
	}
  } else
	panic("unexpected only_search value: %d", only_search);

  assert(bp->bp);

  return(bp);	/* return the newly acquired block */
}
/*===========================================================================*
 *				    main                                     *
 *===========================================================================*/
int main(int argc, char **argv)
{
/* Benchmark requestor: measures the per-page cost of safemap or safecopy
 * operations against a granter process, synchronizing over two FIFOs.
 */
  endpoint_t ep_self, ep_child;
  size_t size = BUF_SIZE;
  int i, r, pid;
  int status;
  u64_t start, end, diff;
  double micros;
  char nr_pages_str[10], is_map_str[2], is_write_str[2];
  int nr_pages, is_map, is_write;

  /* SEF local startup. */
  env_setargs(argc, argv);
  sef_local_startup();

  /* Parse the command line: number of pages, map-vs-copy, read-vs-write. */
  r = env_get_param("pages", nr_pages_str, sizeof(nr_pages_str));
  errno = 0;
  nr_pages = atoi(nr_pages_str);
  if (r != OK || errno || nr_pages <=0) {
	exit_usage();
  }
  if(nr_pages > TEST_PAGE_NUM) {
	printf("REQUESTOR: too many pages. Max allowed: %d\n",
		TEST_PAGE_NUM);
	exit_usage();
  }
  r = env_get_param("map", is_map_str, sizeof(is_map_str));
  errno = 0;
  is_map = atoi(is_map_str);
  if (r != OK || errno || (is_map!=0 && is_map!=1)) {
	exit_usage();
  }
  r = env_get_param("write", is_write_str, sizeof(is_write_str));
  errno = 0;
  is_write = atoi(is_write_str);
  if (r != OK || errno || (is_write!=0 && is_write!=1)) {
	exit_usage();
  }
  printf("REQUESTOR: Running tests with pages=%d map=%d write=%d...\n",
	nr_pages, is_map, is_write);

  /* Prepare work: page-align the buffer and open the two FIFOs. */
  buf = (char*) CLICK_CEIL(buf_buf);
  fid_get = open(FIFO_GRANTOR, O_RDONLY);
  fid_send = open(FIFO_REQUESTOR, O_WRONLY);
  if(fid_get < 0 || fid_send < 0) {
	printf("REQUESTOR: can't open fifo files.\n");
	return 1;
  }

  /* Send the endpoint to the granter, in order to let him to
   * create the grant.
   */
  ep_self = getprocnr();
  write(fid_send, &ep_self, sizeof(ep_self));
  dprint("REQUESTOR: sending my endpoint: %d\n", ep_self);

  /* Get the granter's endpoint and gid. */
  read(fid_get, &ep_granter, sizeof(ep_granter));
  read(fid_get, &gid, sizeof(gid));
  dprint("REQUESTOR: getting granter's endpoint %d and gid %d\n",
	ep_granter, gid);

  FIFO_WAIT(fid_get);

  /* Accumulate total elapsed TSC cycles over all iterations in diff. */
  diff = make64(0, 0);

  if(is_map) {
	/* Test safemap: map the granter's pages in, touch them, unmap. */
	for(i=0;i<NR_TEST_ITERATIONS;i++) {
		read_tsc_64(&start);
		r = sys_safemap(ep_granter, gid, 0, (long)buf,
			nr_pages*CLICK_SIZE, D, 1);
		if(r != OK) {
			printf("REQUESTOR: safemap error: %d\n", r);
			return 1;
		}
		read_write_buff(buf, nr_pages*CLICK_SIZE, is_write);
		read_tsc_64(&end);

		diff = add64(diff, (sub64(end, start)));

		r = sys_safeunmap(D, (long)buf);
		if(r != OK) {
			printf("REQUESTOR: safeunmap error: %d\n", r);
			return 1;
		}
	}
	/* Average microseconds per page per iteration. */
	micros = ((double)tsc_64_to_micros(diff))
		/ (NR_TEST_ITERATIONS*nr_pages);
	REPORT_TEST("REQUESTOR", "SAFEMAP", micros);
  } else {
	/* Test safecopy: copy the granter's pages in and touch them. */
	for(i=0;i<NR_TEST_ITERATIONS;i++) {
		read_tsc_64(&start);
		r = sys_safecopyfrom(ep_granter, gid, 0, (long)buf,
			nr_pages*CLICK_SIZE, D);
		if(r != OK) {
			printf("REQUESTOR: safecopy error: %d\n", r);
			return 1;
		}
		read_write_buff(buf, nr_pages*CLICK_SIZE, is_write);
		read_tsc_64(&end);

		diff = add64(diff, (sub64(end, start)));
	}
	/* Average microseconds per page per iteration. */
	micros = ((double)tsc_64_to_micros(diff))
		/ (NR_TEST_ITERATIONS*nr_pages);
	REPORT_TEST("REQUESTOR", "SAFECOPY", micros);
  }

  /* Tell the granter we are done. */
  FIFO_NOTIFY(fid_send);

  return 0;
}
int do_llseek(message *m_out)
{
/* Forward the llseek request to the generic implementation, assembling the
 * 64-bit offset from the two message words.
 */
  u64_t offset;

  offset = make64(job_m_in.offset_lo, job_m_in.offset_high);

  return actual_llseek(fp, m_out, job_m_in.ls_fd, job_m_in.whence, offset);
}
static void parse_file(pid_t pid)
{
/* Parse one /proc/<pid>/psinfo file and fill in the corresponding slot of
 * the proc table; silently skip the process on any read/format error.
 */
  char path[PATH_MAX], name[256], type, state;
  int version, endpt, effuid;
  unsigned long cycles_hi, cycles_lo;
  FILE *fp;
  struct proc *p;
  int slot;
  int i;

  sprintf(path, "%d/psinfo", pid);

  if ((fp = fopen(path, "r")) == NULL)
	return;

  if (fscanf(fp, "%d", &version) != 1) {
	fclose(fp);
	return;
  }

  /* The psinfo format is versioned; refuse to parse other versions. */
  if (version != PSINFO_VERSION) {
	fputs("procfs version mismatch!\n", stderr);
	exit(1);
  }

  if (fscanf(fp, " %c %d", &type, &endpt) != 2) {
	fclose(fp);
	return;
  }

  slot = SLOT_NR(endpt);

  if(slot < 0 || slot >= nr_total) {
	fprintf(stderr, "top: unreasonable endpoint number %d\n", endpt);
	fclose(fp);
	return;
  }

  p = &proc[slot];

  if (type == TYPE_TASK)
	p->p_flags |= IS_TASK;
  else if (type == TYPE_SYSTEM)
	p->p_flags |= IS_SYSTEM;

  p->p_endpoint = endpt;
  p->p_pid = pid;

  if (fscanf(fp, " %255s %c %d %d %u %*u %lu %lu",
	name, &state, &p->p_blocked, &p->p_priority,
	&p->p_user_time, &cycles_hi, &cycles_lo) != 7) {

	fclose(fp);
	return;
  }

  strncpy(p->p_name, name, sizeof(p->p_name)-1);
  p->p_name[sizeof(p->p_name)-1] = 0;	/* force NUL termination */

  if (state != STATE_RUN)
	p->p_flags |= BLOCKED;

  /* Cycle counters are stored as high word then low word in the file. */
  p->p_cpucycles[0] = make64(cycles_lo, cycles_hi);
  p->p_memory = 0L;

  if (!(p->p_flags & IS_TASK)) {
	int j;
	/* Non-task processes carry extra memory/uid/nice fields. */
	if ((j=fscanf(fp, " %lu %*u %*u %*c %*d %*u %u %*u %d %*c %*d %*u",
		&p->p_memory, &effuid, &p->p_nice)) != 3) {

		fclose(fp);
		return;
	}

	p->p_effuid = effuid;
  } else p->p_effuid = 0;

  /* Read the remaining per-category cycle counters; missing entries are
   * treated as zero.
   */
  for(i = 1; i < CPUTIMENAMES; i++) {
	if(fscanf(fp, " %lu %lu",
		&cycles_hi, &cycles_lo) == 2) {
		p->p_cpucycles[i] = make64(cycles_lo, cycles_hi);
	} else	{
		p->p_cpucycles[i] = 0;
	}
  }

  if ((p->p_flags & IS_TASK)) {
	/* For kernel tasks the memory field comes at the end. */
	if(fscanf(fp, " %lu", &p->p_memory) != 1) {
		p->p_memory = 0;
	}
  }

  p->p_flags |= USED;

  fclose(fp);
}
/*
 * Report the device region backing the hello device: it starts at byte
 * offset zero and is exactly as long as the secret message, so every read
 * is clipped to that string.
 */
static struct device *hello_prepare(dev_t UNUSED(dev))
{
	size_t len;

	len = strlen(the_secret);
	hello_device.dv_size = make64(len, 0);
	hello_device.dv_base = make64(0, 0);

	return &hello_device;
}
/*===========================================================================*
 *				do_vm_call				     *
 *===========================================================================*/
int do_vm_call(message *m_out)
{
/* A call that VM does to VFS.
 * We must reply with the fixed type VM_VFS_REPLY (and put our result info
 * in the rest of the message) so VM can tell the difference between a
 * request from VFS and a reply to this call.
 */
	int req = job_m_in.VFS_VMCALL_REQ;		/* VM sub-request code */
	int req_fd = job_m_in.VFS_VMCALL_FD;		/* fd the request refers to */
	u32_t req_id = job_m_in.VFS_VMCALL_REQID;	/* echoed back so VM can match the reply */
	endpoint_t ep = job_m_in.VFS_VMCALL_ENDPOINT;	/* process the fd belongs to */
	/* 64-bit file offset arrives split into two 32-bit message fields. */
	u64_t offset = make64(job_m_in.VFS_VMCALL_OFFSET_LO,
		job_m_in.VFS_VMCALL_OFFSET_HI);
	u32_t length = job_m_in.VFS_VMCALL_LENGTH;
	int result = OK;
	int slot;
	struct fproc *rfp, *vmf;
	struct filp *f = NULL;
	int r;

	/* Only VM itself may use this call. */
	if(job_m_in.m_source != VM_PROC_NR) return ENOSYS;

	/* Resolve the referenced process; a stale endpoint yields rfp == NULL
	 * and is reported per-request below.
	 */
	if(isokendpt(ep, &slot) != OK) rfp = NULL;
	else rfp = &fproc[slot];
	vmf = &fproc[VM_PROC_NR];
	assert(fp == vmf);		/* we are running on behalf of VM */
	assert(rfp != vmf);		/* VM may not target itself */

	switch(req) {
		case VMVFSREQ_FDLOOKUP:
		{
			int procfd;

			/* Lookup fd in referenced process. 
			 */
			if(!rfp) {
				printf("VFS: why isn't ep %d here?!\n", ep);
				result = ESRCH;
				goto reqdone;
			}
			/* Duplicate the fd into VM's context; also yields the
			 * (locked) filp in f, unlocked at reqdone.
			 */
			if((result = dupvm(rfp, req_fd, &procfd, &f)) != OK) {
				printf("vfs: dupvm failed\n");
				goto reqdone;
			}
			if(S_ISBLK(f->filp_vno->v_mode)) {
				/* Block device: identify by device number; no
				 * inode, and no meaningful size limit.
				 */
				assert(f->filp_vno->v_sdev != NO_DEV);
				m_out->VMV_DEV = f->filp_vno->v_sdev;
				m_out->VMV_INO = VMC_NO_INODE;
				m_out->VMV_SIZE_PAGES = LONG_MAX;
			} else {
				/* Regular file: report dev/inode and the file
				 * size rounded up to whole pages.
				 */
				m_out->VMV_DEV = f->filp_vno->v_dev;
				m_out->VMV_INO = f->filp_vno->v_inode_nr;
				m_out->VMV_SIZE_PAGES =
					roundup(f->filp_vno->v_size,
						PAGE_SIZE)/PAGE_SIZE;
			}
			m_out->VMV_FD = procfd;
			result = OK;
			break;
		}
		case VMVFSREQ_FDCLOSE:
		{
			/* Close a previously duplicated fd in VM's context. */
			result = close_fd(fp, req_fd);
			if(result != OK) {
				printf("VFS: VM fd close for fd %d, %d (%d)\n",
					req_fd, fp->fp_endpoint, result);
			}
			break;
		}
		case VMVFSREQ_FDIO:
		{
			/* Seek to the requested offset, then peek (prefetch
			 * without copying out) 'length' bytes.  The seek reply
			 * message is discarded.
			 */
			message dummy_out;

			result = actual_llseek(fp, &dummy_out, req_fd,
				SEEK_SET, offset);

			if(result == OK) {
				result = actual_read_write_peek(fp, PEEKING,
					req_fd, NULL, length);
			}

			break;
		}
		default:
			panic("VFS: bad request code from VM\n");
			break;
	}

reqdone:
	if(f)
		unlock_filp(f);

	/* fp is VM still. */
	assert(fp == vmf);
	m_out->VMV_ENDPOINT = ep;
	m_out->VMV_RESULT = result;
	m_out->VMV_REQID = req_id;

	/* reply asynchronously as VM may not be able to receive
	 * a sendnb() message
	 */
	m_out->m_type = VM_VFS_REPLY;
	r = asynsend3(VM_PROC_NR, m_out, 0);
	if(r != OK) printf("VFS: couldn't asynsend3() to VM\n");

	/* VFS does not reply any further */
	return SUSPEND;
}
/*
 * Report the device region backing the counter device.  The region starts
 * at byte zero and covers exactly the current contents of countNum, so a
 * read returns that string and nothing more.
 */
PRIVATE struct device * counter_prepare(dev_t UNUSED(dev))
{
	size_t len;

	len = strlen(countNum);
	counter_device.dv_size = make64(len, 0);
	counter_device.dv_base = make64(0, 0);

	return &counter_device;
}