u32_t
fs_bufs_heuristic(int minbufs, u32_t btotal, u32_t bfree,
	int blocksize, dev_t majordev)
{
	struct vm_stats_info vsi;
	int bufs;
	u32_t kbytes_used_fs, kbytes_total_fs, kbcache, kb_fsmax;
	u32_t kbytes_remain_mem, bused;

	bused = btotal - bfree;

	/* but we simply need minbufs no matter what, and we don't
	 * want more than that if we're a memory device
	 */
	if(majordev == MEMORY_MAJOR) {
		return minbufs;
	}

	/* set a reasonable cache size; cache at most a certain
	 * portion of the used FS, and at most a certain %age of remaining
	 * memory
	 */
	if(vm_info_stats(&vsi) != OK) {
		bufs = 1024;
		printf("fslib: heuristic info fail: default to %d bufs\n",
			bufs);
		return bufs;
	}

	kbytes_remain_mem =
		div64u(mul64u(vsi.vsi_free, vsi.vsi_pagesize), 1024);

	/* check fs usage. */
	kbytes_used_fs = div64u(mul64u(bused, blocksize), 1024);
	kbytes_total_fs = div64u(mul64u(btotal, blocksize), 1024);

	/* heuristic for a desired cache size based on FS usage;
	 * but never bigger than half of the total filesystem
	 */
	kb_fsmax = sqrt_approx(kbytes_used_fs) * 40;
	kb_fsmax = MIN(kb_fsmax, kbytes_total_fs / 2);

	/* heuristic for a maximum usage - 10% of remaining memory */
	kbcache = MIN(kbytes_remain_mem / 10, kb_fsmax);
	bufs = kbcache * 1024 / blocksize;

	/* but we simply need MINBUFS no matter what */
	if(bufs < minbufs)
		bufs = minbufs;

	return bufs;
}
PRIVATE void estimate_cpu_freq(void)
{
	u64_t tsc_delta;
	u64_t cpu_freq;

	irq_hook_t calib_cpu;

	/* set the probe, we use the legacy timer, IRQ 0 */
	put_irq_handler(&calib_cpu, CLOCK_IRQ, calib_cpu_handler);

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();

	/* set the PIC timer to get some time */
	intr_enable();

	/* loop for some time to get a sample */
	while(probe_ticks < PROBE_TICKS) {
		intr_enable();
	}

	intr_disable();

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_LOCK();

	/* remove the probe */
	rm_irq_handler(&calib_cpu);

	tsc_delta = sub64(tsc1, tsc0);

	cpu_freq = mul64(div64u64(tsc_delta, PROBE_TICKS - 1),
			make64(system_hz, 0));
	cpu_set_freq(cpuid, cpu_freq);
	cpu_info[cpuid].freq = div64u(cpu_freq, 1000000);
	BOOT_VERBOSE(cpu_print_freq(cpuid));
}
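/* A minimal sketch of the calibration arithmetic above, using plain uint64_t
 * instead of the kernel's u64_t helpers. The TSC delta between the first and
 * last probe interrupt spans PROBE_TICKS - 1 full timer periods, hence the
 * division by PROBE_TICKS - 1. All numbers here are hypothetical. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tsc_delta = 100000000ULL; /* cycles between first and last probe */
	unsigned probe_ticks = 6;          /* hypothetical PROBE_TICKS */
	unsigned system_hz = 100;          /* timer interrupts per second */

	/* tsc_delta covers probe_ticks - 1 timer periods of 1/system_hz s each */
	uint64_t cpu_freq = (tsc_delta / (probe_ticks - 1)) * system_hz;

	printf("estimated CPU frequency: %llu MHz\n",
	    (unsigned long long)(cpu_freq / 1000000)); /* prints 2000 */
	return 0;
}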
/*
 * Convert a VirtualBox timestamp to a POSIX timespec structure.
 * VirtualBox timestamps are in nanoseconds since the UNIX epoch.
 */
static void get_time(struct timespec *tsp, u64_t nsecs)
{
	tsp->tv_sec = div64u(nsecs, 1000000000);
	tsp->tv_nsec = rem64u(nsecs, 1000000000);
}
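/* A small usage sketch of the same split in plain C: dividing a nanosecond
 * timestamp by 10^9 yields the seconds, the remainder is the sub-second
 * part. The timestamp value is hypothetical. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t nsecs = 1500000000123456789ULL; /* hypothetical timestamp */
	uint64_t tv_sec  = nsecs / 1000000000ULL;
	uint64_t tv_nsec = nsecs % 1000000000ULL;

	/* prints: 1500000000 s + 123456789 ns */
	printf("%llu s + %llu ns\n",
	    (unsigned long long)tv_sec, (unsigned long long)tv_nsec);
	return 0;
}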
static void testdiv0(void)
{
	int funcidx;

	assert(cmp64u(j, 0) == 0);

	/* loop through the 5 different division functions */
	for (funcidx = 0; funcidx < 5; funcidx++) {
		expect_SIGFPE = 1;
		if (setjmp(jmpbuf_SIGFPE) == 0) {
			/* divide by zero using various functions */
			switch (funcidx) {
			case 0: div64(i, j);		ERR; break;
			case 1: div64u64(i, ex64lo(j));	ERR; break;
			case 2: div64u(i, ex64lo(j));	ERR; break;
			case 3: rem64(i, j);		ERR; break;
			case 4: rem64u(i, ex64lo(j));	ERR; break;
			default: assert(0);		ERR; break;
			}

			/* if we reach this point there was no signal and an
			 * error has been recorded
			 */
			expect_SIGFPE = 0;
		} else {
			/* a signal has been received and expect_SIGFPE has
			 * been reset; all is ok now
			 */
			assert(!expect_SIGFPE);
		}
	}
}
void cpu_print_freq(unsigned cpu)
{
	u64_t freq;

	freq = cpu_get_freq(cpu);
	printf("CPU %u freq %lu MHz\n", cpu, div64u(freq, 1000000));
}
u32_t
fs_bufs_heuristic(int minbufs, u32_t btotal, u32_t bfree,
	int blocksize, dev_t majordev)
{
	struct vm_stats_info vsi;
	int bufs;
	u32_t kbytes_used_fs, kbytes_total_fs, kbcache, kb_fsmax;
	u32_t kbytes_remain_mem, bused;

	bused = btotal - bfree;

	/* set a reasonable cache size; cache at most a certain
	 * portion of the used FS, and at most a certain %age of remaining
	 * memory
	 */
	if(vm_info_stats(&vsi) != OK) {
		bufs = 1024;
		if(!quiet)
			printf("fslib: heuristic info fail: default to %d bufs\n",
				bufs);
		return bufs;
	}

	/* remaining free memory is unused memory plus memory in use for
	 * cache, as the cache can be evicted
	 */
	kbytes_remain_mem = (u64_t)(vsi.vsi_free + vsi.vsi_cached) *
		vsi.vsi_pagesize / 1024;

	/* check fs usage. */
	kbytes_used_fs = div64u(mul64u(bused, blocksize), 1024);
	kbytes_total_fs = div64u(mul64u(btotal, blocksize), 1024);

	/* heuristic for a desired cache size based on FS usage;
	 * but never bigger than half of the total filesystem
	 */
	kb_fsmax = sqrt_approx(kbytes_used_fs) * 40;
	kb_fsmax = MIN(kb_fsmax, kbytes_total_fs / 2);

	/* heuristic for a maximum usage - 10% of remaining memory */
	kbcache = MIN(kbytes_remain_mem / 10, kb_fsmax);
	bufs = kbcache * 1024 / blocksize;

	/* but we simply need MINBUFS no matter what */
	if(bufs < minbufs)
		bufs = minbufs;

	return bufs;
}
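/* A worked sketch of the heuristic above with hypothetical numbers, to show
 * the order of magnitude it produces. Not part of the filesystem library. */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned kbytes_total_fs = 4000000;  /* 4 GB filesystem */
	unsigned kbytes_remain_mem = 500000; /* 500 MB evictable memory */
	unsigned blocksize = 4096;           /* bytes per block */

	/* with 1 GB of the FS in use, sqrt_approx(1000000) is about 1000,
	 * so the desired cache is 1000 * 40 = 40000 KB */
	unsigned kb_fsmax = 1000 * 40;
	kb_fsmax = MIN(kb_fsmax, kbytes_total_fs / 2);

	/* cap at 10% of remaining memory: MIN(50000, 40000) = 40000 KB */
	unsigned kbcache = MIN(kbytes_remain_mem / 10, kb_fsmax);

	/* 40000 KB in 4 KB blocks = 10000 buffers */
	printf("bufs = %u\n", kbcache * 1024 / blocksize);
	return 0;
}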
static void testdiv(void)
{
	u64_t q, r;
#if TIMED
	struct timeval tvstart, tvend;

	printf("i=0x%.8x%.8x; j=0x%.8x%.8x\n",
		ex64hi(i), ex64lo(i),
		ex64hi(j), ex64lo(j));
	fflush(stdout);
	if (gettimeofday(&tvstart, NULL) < 0) ERR;
#endif

	/* division by zero has a separate test */
	if (cmp64u(j, 0) == 0) {
		testdiv0();
		return;
	}

	/* perform division, store q in k to make ERR more informative */
	q = div64(i, j);
	r = rem64(i, j);
	k = q;

#if TIMED
	if (gettimeofday(&tvend, NULL) < 0) ERR;
	tvend.tv_sec -= tvstart.tv_sec;
	tvend.tv_usec -= tvstart.tv_usec;
	if (tvend.tv_usec < 0) {
		tvend.tv_sec -= 1;
		tvend.tv_usec += 1000000;
	}
	printf("q=0x%.8x%.8x; r=0x%.8x%.8x; time=%d.%.6d\n",
		ex64hi(q), ex64lo(q),
		ex64hi(r), ex64lo(r),
		tvend.tv_sec, tvend.tv_usec);
	fflush(stdout);
#endif

	/* compare to 64/32-bit division if possible */
	if (!ex64hi(j)) {
		if (cmp64(q, div64u64(i, ex64lo(j))) != 0) ERR;
		if (!ex64hi(q)) {
			if (cmp64u(q, div64u(i, ex64lo(j))) != 0) ERR;
		}
		if (cmp64u(r, rem64u(i, ex64lo(j))) != 0) ERR;

		/* compare to 32-bit division if possible */
		if (!ex64hi(i)) {
			if (cmp64u(q, ex64lo(i) / ex64lo(j)) != 0) ERR;
			if (cmp64u(r, ex64lo(i) % ex64lo(j)) != 0) ERR;
		}
	}

	/* check results using i = q * j + r and r < j */
	if (cmp64(i, add64(mul64(q, j), r)) != 0) ERR;
	if (cmp64(r, j) >= 0) ERR;
}
u32_t micros_to_ticks(u32_t micros)
{
	u32_t ticks;

	ticks = div64u(mul64u(micros, sys_hz()), 1000000);
	if(ticks < 1) ticks = 1;

	return ticks;
}
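/* A sketch of the same conversion in plain C, with hypothetical values. The
 * 64-bit intermediate avoids overflow of micros * hz, and the result is
 * clamped to at least one tick so that short delays are never lost. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t micros = 5000; /* 5 ms */
	uint32_t hz = 60;       /* hypothetical system clock frequency */

	uint32_t ticks = (uint32_t)(((uint64_t)micros * hz) / 1000000);
	if (ticks < 1) ticks = 1; /* 5000 us * 60 / 1e6 = 0 -> clamp to 1 */

	printf("ticks = %u\n", ticks); /* prints 1 */
	return 0;
}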
/*================================================================
 *	sizeup  -  determine device size
 *===============================================================*/
block_t sizeup(char * device)
{
	block_t d;
#if defined(__minix)
	u64_t bytes, resize;
	u32_t rem;
#else
	off_t size;
#endif

	if ((fd = open(device, O_RDONLY)) == -1) {
		if (errno != ENOENT)
			perror("sizeup open");
		return 0;
	}

#if defined(__minix)
	if(minix_sizeup(device, &bytes) < 0) {
		perror("sizeup");
		return 0;
	}

	d = div64u(bytes, block_size);
	rem = rem64u(bytes, block_size);

	resize = add64u(mul64u(d, block_size), rem);
	if(cmp64(resize, bytes) != 0) {
		/* Assume block_t is unsigned */
		d = (block_t)(-1ul);
		fprintf(stderr, "%s: truncating FS at %lu blocks\n",
			progname, (unsigned long)d);
	}
#else
	size = lseek(fd, 0, SEEK_END);
	if (size == (off_t) -1)
		err(1, "cannot get device size fd=%d: %s", fd, device);
	/* Assume block_t is unsigned */
	if (size / block_size > (block_t)(-1ul)) {
		d = (block_t)(-1ul);
		fprintf(stderr, "%s: truncating FS at %lu blocks\n",
			progname, (unsigned long)d);
	} else
		d = size / block_size;
#endif

	return d;
}
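/* A sketch of the reconstruct-and-compare truncation check used above: if
 * the quotient was truncated when narrowed to 32 bits, then quotient *
 * block_size + remainder no longer equals the original byte count. The
 * device size here is hypothetical. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bytes = 5ULL << 40; /* hypothetical 5 TB device */
	uint32_t block_size = 1024;

	uint32_t d = (uint32_t)(bytes / block_size);   /* may truncate */
	uint32_t rem = (uint32_t)(bytes % block_size);

	if ((uint64_t)d * block_size + rem != bytes)
		printf("truncated: clamping to %lu blocks\n",
		    (unsigned long)(uint32_t)-1);
	return 0;
}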
/*===========================================================================*
 *				do_dioctl				     *
 *===========================================================================*/
static int do_dioctl(struct blockdriver *bdp, dev_t minor,
	unsigned int request, endpoint_t endpt, cp_grant_id_t grant)
{
/* Carry out a disk-specific I/O control request. */
	struct device *dv;
	struct part_geom entry;
	int r = EINVAL;

	switch (request) {
	case DIOCSETP:
		/* Copy just this one partition table entry. */
		r = sys_safecopyfrom(endpt, grant, 0, (vir_bytes) &entry,
			sizeof(entry));
		if (r != OK)
			return r;

		if ((dv = (*bdp->bdr_part)(minor)) == NULL)
			return ENXIO;

		dv->dv_base = entry.base;
		dv->dv_size = entry.size;
		break;

	case DIOCGETP:
		/* Return a partition table entry and the geometry of the
		 * drive.
		 */
		if ((dv = (*bdp->bdr_part)(minor)) == NULL)
			return ENXIO;

		entry.base = dv->dv_base;
		entry.size = dv->dv_size;
		if (bdp->bdr_geometry) {
			(*bdp->bdr_geometry)(minor, &entry);
		} else {
			/* The driver doesn't care -- make up fake geometry. */
			entry.cylinders = div64u(entry.size, SECTOR_SIZE);
			entry.heads = 64;
			entry.sectors = 32;
		}

		r = sys_safecopyto(endpt, grant, 0, (vir_bytes) &entry,
			sizeof(entry));
		break;
	}

	return r;
}
void print_proc(struct tp *tp, struct mproc *mpr, u32_t tcyc)
{
	int euid = 0;
	static struct passwd *who = NULL;
	static int last_who = -1;
	char *name = "";
	unsigned long pcyc;
	int ticks;
	struct proc *pr = tp->p;

	printf("%5d ", mpr->mp_pid);
	euid = mpr->mp_effuid;
	name = mpr->mp_name;

	if(last_who != euid || !who) {
		who = getpwuid(euid);
		last_who = euid;
	}

	if(who && who->pw_name)
		printf("%-8s ", who->pw_name);
	else if(pr->p_nr >= 0)
		printf("%8d ", mpr->mp_effuid);
	else
		printf(" ");

	printf(" %2d ", pr->p_priority);
	if(pr->p_nr >= 0) {
		printf(" %3d ", mpr->mp_nice);
	} else
		printf(" ");
	printf("%5dK",
		((pr->p_memmap[T].mem_len +
		pr->p_memmap[D].mem_len) << CLICK_SHIFT) / 1024);
	printf("%6s", pr->p_rts_flags ? "" : "RUN");
	ticks = pr->p_user_time;
	printf(" %3d:%02d ",
		(ticks / system_hz / 60), (ticks / system_hz) % 60);

	pcyc = div64u(tp->ticks, SCALE);

	printf("%6.2f%% %s", 100.0 * pcyc / tcyc, name);
}
/*===========================================================================*
 *				action_pre_misdir			     *
 *===========================================================================*/
static void action_pre_misdir(struct fbd_rule *rule, iovec_t *UNUSED(iov),
	unsigned *UNUSED(count), size_t *UNUSED(size), u64_t *pos)
{
	/* Randomize the request position to fall within the range (and have
	 * the alignment) given by the rule.
	 */
	u32_t range, choice;

	/* Unfortunately, we cannot interpret an end of 0 as "up to the end
	 * of the disk" here, because we have no idea about the actual disk
	 * size, and the resulting address must of course be valid.
	 */
	range = div64u(add64u(sub64(rule->params.misdir.end,
		rule->params.misdir.start), 1), rule->params.misdir.align);

	if (range > 0)
		choice = get_rand(range - 1);
	else
		choice = 0;

	*pos = add64(rule->params.misdir.start,
		mul64u(choice, rule->params.misdir.align));
}
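/* A sketch of the misdirection arithmetic with hypothetical rule values,
 * assuming the random choice is drawn uniformly from 0..range-1 (as the
 * get_rand(range - 1) call above suggests). With start = 4096, end = 12287
 * and align = 4096, range = 2 and the result is always 4096 or 8192:
 * aligned, and inside [start, end]. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	uint64_t start = 4096, end = 12287, align = 4096; /* hypothetical rule */

	uint32_t range = (uint32_t)((end - start + 1) / align); /* = 2 */
	uint32_t choice = range > 0 ? (uint32_t)(rand() % range) : 0;

	/* position is always aligned and inside [start, end] */
	printf("pos = %llu\n",
	    (unsigned long long)(start + (uint64_t)choice * align));
	return 0;
}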
int startsim(void)
{
	message m;
	int i, j = 0, piReady = -1;
	u64_t cpuTimeDiff = make64(0, 0); /* zero until j > 0 computes a delta */
	FILE *fp;

	fp = fopen("/home/out", "w");

	for(i = 0; i < HISTORY; i++) {
		pInfoPtrs[i] = (struct pi *) &pInfo[i][0];
	}
	for(i = 0; i < HISTORY; i++) {
		pQhPtrs[i] = (struct qh *) &pQh[i][0];
	}

	/* Copy the pointer arrays so that you can go backward and forward
	 * through the history
	 */
	struct pi *pInfoPtrsCopy[HISTORY];
	struct qh *pQhPtrsCopy[HISTORY];
	for(i = 0; i < HISTORY; i++) {
		pInfoPtrsCopy[i] = pInfoPtrs[i];
		pQhPtrsCopy[i] = pQhPtrs[i];
	}

	m.m1_p1 = (char *) &pInfoPtrs;
	m.m1_p2 = (char *) &piReady;
	m.m1_i2 = SELF;
	m.m1_i3 = HISTORY;
	m.m2_p1 = (char *) &pQhPtrs;
	m.m3_p1 = (char *) &cpuFreq;

	int error = _syscall(PM_PROC_NR, STARTRECORD, &m);

	procs();

	for(; j < HISTORY; j++) {
		/* busy-wait until the next snapshot is ready */
		while(piReady < j) {
		}

		if(j == 0) {
			fprintf(fp, "CPU frequency is: %lu Mhz\n",
				div64u(cpuFreq, 1000000));
		}
		printf("Simulation is %d%% complete.\n", (j * 2) + 2);
		fprintf(fp, "Proc Table %d\n\n", j);
		fprintf(fp, "Queue heads: ");
		for(i = 0; i < NR_SCHED_QUEUES; i++) {
			if(pQhPtrsCopy[j]->p_endpoint != -1) {
				fprintf(fp, "Queue: %d %d ", i,
					pQhPtrsCopy[j]->p_endpoint);
			}
			pQhPtrsCopy[j]++;
		}
		fprintf(fp, "\n\n");
		pQhPtrsCopy[j] = pQhPtrs[j];

		/* Write out the runnable queues in order */
		for(i = 0; i < NR_SCHED_QUEUES; i++) {
			fprintf(fp, "Priority Queue %d: ", i);
			if(pQhPtrsCopy[j]->p_endpoint != -1) {
				printQ(pInfoPtrsCopy[j],
					pQhPtrsCopy[j]->p_endpoint, fp);
			} else {
				fprintf(fp, "\n");
			}
			pQhPtrsCopy[j]++;
		}
		pQhPtrsCopy[j] = pQhPtrs[j]; /* Reset the Qh pointers */

		for(i = 0; i < ALL_PROCS; i++) {
			if(!(pInfoPtrsCopy[j]->p_rts_flags == RTS_SLOT_FREE)) {
				if(j > 0) {
					cpuTimeDiff = sub64(
						pInfoPtrsCopy[j]->p_cycles,
						pInfoPtrsCopy[j-1]->p_cycles);
				}
				fprintf(fp, "Process: %s, Endpoint: %d, "
					"Enter queue: %lu%lu, "
					"Time in Queue: %lu%lu, "
					"Dequeues: %lu, IPC Sync: %lu, "
					"IPC Async: %lu, Preempted: %lu, "
					"RTS: %x\n\t\t, Priority: %d, "
					"Next: %s Endpoint: %d, "
					"User time: %d, Sys Time: %d, "
					"CPU Cycles Elaps: %lu%lu\n",
					pInfoPtrsCopy[j]->p_name,
					pInfoPtrsCopy[j]->p_endpoint,
					ex64hi(pInfoPtrsCopy[j]->p_times.enter_queue),
					ex64lo(pInfoPtrsCopy[j]->p_times.enter_queue),
					ex64hi(pInfoPtrsCopy[j]->p_times.time_in_queue),
					ex64lo(pInfoPtrsCopy[j]->p_times.time_in_queue),
					pInfoPtrsCopy[j]->p_times.dequeues,
					pInfoPtrsCopy[j]->p_times.ipc_sync,
					pInfoPtrsCopy[j]->p_times.ipc_async,
					pInfoPtrsCopy[j]->p_times.preempted,
					pInfoPtrsCopy[j]->p_rts_flags,
					pInfoPtrsCopy[j]->p_priority,
					pInfoPtrsCopy[j]->p_nextready,
					pInfoPtrsCopy[j]->p_nextready_endpoint,
					pInfoPtrsCopy[j]->p_user_time,
					pInfoPtrsCopy[j]->p_sys_time,
					ex64hi(cpuTimeDiff),
					ex64lo(cpuTimeDiff));
			}
			pInfoPtrsCopy[j]++;
			if(j > 0) {
				pInfoPtrsCopy[j-1]++;
			}
		}
		pInfoPtrsCopy[j] = pInfoPtrs[j];
	}

	m.m1_i3 = -1;
	_syscall(PM_PROC_NR, STARTRECORD, &m);

	return(0);
}
/*===========================================================================*
 *				driver_open				     *
 *===========================================================================*/
static int driver_open(int which)
{
	/* Perform an open or close operation on the driver. This is
	 * unfinished code: we should never be doing a blocking sendrec() to
	 * the driver.
	 */
	message msg;
	cp_grant_id_t gid;
	struct partition part;
	sector_t sectors;
	int r;

	memset(&msg, 0, sizeof(msg));
	msg.m_type = BDEV_OPEN;
	msg.BDEV_MINOR = driver[which].minor;
	msg.BDEV_ACCESS = R_BIT | W_BIT;
	msg.BDEV_ID = 0;
	r = sendrec(driver[which].endpt, &msg);

	if (r != OK) {
		/* Should we restart the driver now? */
		printf("Filter: driver_open: sendrec returned %d\n", r);

		return RET_REDO;
	}

	if(msg.m_type != BDEV_REPLY || msg.BDEV_STATUS != OK) {
		printf("Filter: driver_open: sendrec returned %d, %d\n",
			msg.m_type, msg.BDEV_STATUS);

		return RET_REDO;
	}

	/* Take the opportunity to retrieve the hard disk size. */
	gid = cpf_grant_direct(driver[which].endpt,
		(vir_bytes) &part, sizeof(part), CPF_WRITE);
	if(!GRANT_VALID(gid))
		panic("invalid grant: %d", gid);

	memset(&msg, 0, sizeof(msg));
	msg.m_type = BDEV_IOCTL;
	msg.BDEV_MINOR = driver[which].minor;
	msg.BDEV_REQUEST = DIOCGETP;
	msg.BDEV_GRANT = gid;
	msg.BDEV_ID = 0;

	r = sendrec(driver[which].endpt, &msg);

	cpf_revoke(gid);

	if (r != OK || msg.m_type != BDEV_REPLY || msg.BDEV_STATUS != OK) {
		/* Not sure what to do here, either. */
		printf("Filter: ioctl(DIOCGETP) returned (%d, %d)\n",
			r, msg.m_type);

		return RET_REDO;
	}

	if(!size_known) {
		disk_size = part.size;
		size_known = 1;
		sectors = div64u(disk_size, SECTOR_SIZE);
		if(cmp64(mul64u(sectors, SECTOR_SIZE), disk_size)) {
			printf("Filter: partition too large\n");

			return RET_REDO;
		}
#if DEBUG
		printf("Filter: partition size: 0x%s / %lu sectors\n",
			print64(disk_size), sectors);
#endif
	} else {
		if(cmp64(disk_size, part.size)) {
			printf("Filter: partition size mismatch (%s != %s)\n",
				print64(part.size), print64(disk_size));

			return RET_REDO;
		}
	}

	return OK;
}
PUBLIC unsigned cpu_time_2_ms(u64_t cpu_time)
{
	return div64u(cpu_time, tsc_per_ms[cpuid]);
}
PUBLIC int init_local_timer(unsigned freq)
{
#ifdef USE_APIC
	/* if we know the address, lapic is enabled and we should use it */
	if (lapic_addr) {
		unsigned cpu = cpuid;
		tsc_per_ms[cpu] = div64u(cpu_get_freq(cpu), 1000);
		lapic_set_timer_one_shot(1000000 / system_hz);
	} else {
		BOOT_VERBOSE(printf("Initiating legacy i8253 timer\n"));
#else
	{
#endif
		init_8253A_timer(freq);
		estimate_cpu_freq();
		/* always only 1 cpu in the system */
		tsc_per_ms[0] = div64u(cpu_get_freq(0), 1000);
	}

	return 0;
}

PUBLIC void stop_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_stop_timer();
		apic_eoi();
	} else
#endif
	{
		stop_8253A_timer();
	}
}

PUBLIC void restart_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_restart_timer();
	}
#endif
}

PUBLIC int register_local_timer_handler(const irq_handler_t handler)
{
#ifdef USE_APIC
	if (lapic_addr) {
		/* Using APIC, it is configured in apic_idt_init() */
		BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
	} else
#endif
	{
		/* Using PIC, initialize the CLOCK's interrupt hook. */
		pic_timer_hook.proc_nr_e = NONE;
		pic_timer_hook.irq = CLOCK_IRQ;

		put_irq_handler(&pic_timer_hook, CLOCK_IRQ, handler);
	}

	return 0;
}

PUBLIC void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	make_zero64(get_cpu_var(cpu, cpu_last_tsc));
	make_zero64(get_cpu_var(cpu, cpu_last_idle));
}
void print_procs(int maxlines,
	struct proc *proc1, struct proc *proc2,
	struct mproc *mproc)
{
	int p, nprocs, tot = 0;
	u64_t idleticks = cvu64(0);
	u64_t kernelticks = cvu64(0);
	u64_t systemticks = cvu64(0);
	u64_t userticks = cvu64(0);
	u64_t total_ticks = cvu64(0);
	unsigned long tcyc;
	unsigned long tmp;
	int blockedseen = 0;
	struct tp tick_procs[PROCS];

	for(p = nprocs = 0; p < PROCS; p++) {
		if(isemptyp(&proc2[p]))
			continue;
		tick_procs[nprocs].p = proc2 + p;
		if(proc1[p].p_endpoint == proc2[p].p_endpoint) {
			tick_procs[nprocs].ticks =
				sub64(proc2[p].p_cycles, proc1[p].p_cycles);
		} else {
			tick_procs[nprocs].ticks = proc2[p].p_cycles;
		}
		total_ticks = add64(total_ticks, tick_procs[nprocs].ticks);
		if(p - NR_TASKS == IDLE) {
			idleticks = tick_procs[nprocs].ticks;
			continue;
		}
		if(p - NR_TASKS == KERNEL) {
			kernelticks = tick_procs[nprocs].ticks;
			continue;
		}
		if(mproc[proc2[p].p_nr].mp_procgrp == 0)
			systemticks = add64(systemticks,
				tick_procs[nprocs].ticks);
		else if (p > NR_TASKS)
			userticks = add64(userticks, tick_procs[nprocs].ticks);

		nprocs++;
	}

	if (!cmp64u(total_ticks, 0))
		return;

	qsort(tick_procs, nprocs, sizeof(tick_procs[0]), cmp_ticks);

	tcyc = div64u(total_ticks, SCALE);

	tmp = div64u(userticks, SCALE);
	printf("CPU states: %6.2f%% user, ", 100.0 * tmp / tcyc);

	tmp = div64u(systemticks, SCALE);
	printf("%6.2f%% system, ", 100.0 * tmp / tcyc);

	tmp = div64u(kernelticks, SCALE);
	printf("%6.2f%% kernel, ", 100.0 * tmp / tcyc);

	tmp = div64u(idleticks, SCALE);
	printf("%6.2f%% idle", 100.0 * tmp / tcyc);

#define NEWLINE do { printf("\n"); if(--maxlines <= 0) { return; } } while(0)
	NEWLINE;
	NEWLINE;

	printf(" PID USERNAME PRI NICE SIZE STATE TIME CPU COMMAND");
	NEWLINE;
	for(p = 0; p < nprocs; p++) {
		struct proc *pr;
		int pnr;
		int level = 0;

		pnr = tick_procs[p].p->p_nr;

		if(pnr < 0) {
			/* skip old kernel tasks as they don't run anymore */
			continue;
		}

		pr = tick_procs[p].p;

		/* If we're in blocked verbose mode, indicate start of
		 * blocked processes.
		 */
		if(blockedverbose && pr->p_rts_flags && !blockedseen) {
			NEWLINE;
			printf("Blocked processes:");
			NEWLINE;
			blockedseen = 1;
		}

		print_proc(&tick_procs[p], &mproc[pnr], tcyc);
		NEWLINE;

		if(!blockedverbose)
			continue;

		/* Traverse dependency chain if blocked. */
		while(pr->p_rts_flags) {
			endpoint_t dep = NONE;
			struct tp *tpdep;
			level += 5;

			if((dep = P_BLOCKEDON(pr)) == NONE) {
				printf("not blocked on a process");
				NEWLINE;
				break;
			}

			if(dep == ANY)
				break;

			tpdep = lookup(dep, tick_procs, nprocs);
			pr = tpdep->p;
			printf("%*s> ", level, "");
			print_proc(tpdep, &mproc[pr->p_nr], tcyc);
			NEWLINE;
		}
	}
}
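/* A sketch of the SCALE trick used above, with hypothetical numbers: the
 * 64-bit cycle counts are divided by SCALE first so they fit an unsigned
 * long, and the percentage is then computed in floating point. SCALE here
 * is a made-up power of two, not the value top actually uses. */
#include <stdint.h>
#include <stdio.h>

#define SCALE (1 << 12) /* hypothetical; any power of two keeping values in range */

int main(void)
{
	uint64_t userticks = 3000000000ULL;     /* cycles spent in user processes */
	uint64_t total_ticks = 12000000000ULL;  /* all cycles in the interval */

	unsigned long tcyc = (unsigned long)(total_ticks / SCALE);
	unsigned long tmp = (unsigned long)(userticks / SCALE);

	printf("%6.2f%% user\n", 100.0 * tmp / tcyc); /* prints ~25.00% user */
	return 0;
}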