/*
 * Check if and when lwIP has its next timeout, and set or cancel our timer
 * accordingly.
 */
static void
set_lwip_timer(void)
{
    uint32_t next_timeout;
    clock_t ticks;

    /* Ask lwIP when the next alarm is supposed to go off, if any. */
    next_timeout = sys_timeouts_sleeptime();

    /*
     * Set or update the lwIP timer.  We rely on set_timer() asking the
     * kernel for an alarm only if the timeout is different from the one we
     * gave it last time (if at all).  However, due to conversions between
     * absolute and relative times, and the fact that we cannot guarantee
     * that the uptime itself does not change while executing these
     * routines, set_timer() will sometimes be issuing a kernel call even
     * if the alarm has not changed.  Not a huge deal, but fixing this will
     * require a different interface to lwIP and/or the timers library.
     */
    if (next_timeout != (uint32_t)-1) {
        /*
         * Round up the next timeout (which is in milliseconds) to the
         * number of clock ticks to add to the current time.  Avoid any
         * potential for overflows, no matter how unrealistic..
         */
        if (next_timeout > TMRDIFF_MAX / sys_hz())
            ticks = TMRDIFF_MAX;
        else
            ticks = (next_timeout * sys_hz() + 999) / 1000;

        set_timer(&lwip_timer, ticks, expire_lwip_timer, 0 /*unused*/);
    } else
        cancel_timer(&lwip_timer);    /* not really needed.. */
}
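/*
 * The rounding-and-clamping arithmetic above is reusable on its own.  Below
 * is a minimal sketch of the same conversion as a standalone helper; the
 * name ms_to_ticks_clamped() is hypothetical and not part of the service
 * above, but the rounding and the TMRDIFF_MAX ceiling match its logic.
 */
static clock_t
ms_to_ticks_clamped(uint32_t ms)
{
    uint32_t hz = sys_hz();

    if (ms > TMRDIFF_MAX / hz)
        return TMRDIFF_MAX;            /* clamp: ms * hz would overflow */
    return (ms * hz + 999) / 1000;     /* round up to the next whole tick */
}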
/*===========================================================================*
 *                            sef_cb_init_fresh                              *
 *===========================================================================*/
static int sef_cb_init_fresh(int type, sef_init_info_t *UNUSED(info))
{
/* Initialize the dp8390 driver. */
    dpeth_t *dep;
    long v;

    system_hz = sys_hz();

    if (env_argc < 1) {
        panic("A head which at this time has no name");
    }

    v = 0;
    (void) env_parse("instance", "d", 0, &v, 0, 255);
    de_instance = (int) v;

    dep = &de_state;

    strcpy(dep->de_name, "dp8390#0");
    dep->de_name[7] += de_instance;

    /* Announce we are up! */
    netdriver_announce();

    return(OK);
}
int spin_check(spin_t *s)
{
/* Check whether a timeout has taken place. Return TRUE if the caller
 * should continue spinning, and FALSE if a timeout has occurred. The
 * implementation assumes that it is okay to spin a little bit too long
 * (up to a full clock tick extra).
 */
    u64_t cur_tsc, tsc_delta;
    clock_t now, micro_delta;

    switch (s->s_state) {
    case STATE_INIT:
        s->s_state = STATE_BASE_TS;
        break;

    case STATE_BASE_TS:
        s->s_state = STATE_TS;
        read_tsc_64(&s->s_base_tsc);
        break;

    case STATE_TS:
        read_tsc_64(&cur_tsc);

        tsc_delta = sub64(cur_tsc, s->s_base_tsc);

        micro_delta = tsc_64_to_micros(tsc_delta);

        if (micro_delta >= s->s_usecs) {
            s->s_timeout = TRUE;
            return FALSE;
        }

        if (micro_delta >= TSC_SPIN) {
            s->s_usecs -= micro_delta;
            getticks(&s->s_base_uptime);
            s->s_state = STATE_UPTIME;
        }
        break;

    case STATE_UPTIME:
        getticks(&now);

        /* We assume that sys_hz() caches its return value. */
        micro_delta = ((now - s->s_base_uptime) * 1000 / sys_hz()) * 1000;

        if (micro_delta >= s->s_usecs) {
            s->s_timeout = TRUE;
            return FALSE;
        }
        break;

    default:
        panic("spin_check: invalid state %d", s->s_state);
    }

    return TRUE;
}
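/*
 * For context, spin_check() is normally driven from a polling loop.  The
 * sketch below shows the typical caller pattern, assuming the usual MINIX
 * spin_t interface in which spin_init() arms the structure with a timeout in
 * microseconds and spin_check() returns TRUE while the caller should keep
 * polling (check spin.h for the exact names and the SPIN_FOR convenience
 * macro).  hw_ready() is a hypothetical device predicate.
 */
#include <minix/spin.h>

static int wait_for_ready(void)
{
    spin_t spin;

    spin_init(&spin, 1000);           /* poll for up to ~1 ms */
    do {
        if (hw_ready())               /* hypothetical readiness check */
            return TRUE;
    } while (spin_check(&spin));

    return FALSE;                     /* timed out */
}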
void init_scheduling(void)
{
    printf("STARTED MLFQ scheduling\n");
    balance_timeout = BALANCE_TIMEOUT * sys_hz();
    init_timer(&sched_timer);
    set_timer(&sched_timer, balance_timeout, balance_queues, 0);
}
/*===========================================================================*
 *                                root_hz                                    *
 *===========================================================================*/
static void root_hz(void)
{
    /* Print the system clock frequency. */

    buf_printf("%lu\n", (unsigned long) sys_hz());
}
int micro_delay(u32_t micros)
{
    u64_t start, delta, delta_end;

    Hz = sys_hz();

    /* Start of delay. */
    read_frclock_64(&start);
    assert(_minix_kerninfo->minix_arm_frclock_hz);
    delta_end = (_minix_kerninfo->minix_arm_frclock_hz * micros) / MICROHZ;

    /* If we have to wait for at least one HZ tick, use the regular
     * tickdelay first. Round downwards on purpose, so the average
     * half-tick we wait short (depending on where in the current tick
     * we call tickdelay). We can correct for both overhead of tickdelay
     * itself and the short wait in the busywait later.
     */
    if (micros >= MICROSPERTICK(Hz))
        tickdelay(micros*Hz/MICROHZ);

    /* Wait (the rest) of the delay time using busywait. */
    do {
        read_frclock_64(&delta);
    } while (delta_frclock_64(start, delta) < delta_end);

    return 0;
}
void init_scheduling(void)
{
    balance_timeout = BALANCE_TIMEOUT * sys_hz();
    srandom(1000);
    init_timer(&sched_timer);
    set_timer(&sched_timer, balance_timeout, balance_queues, 0);
}
/*===========================================================================*
 *                               vbox_init                                   *
 *===========================================================================*/
static int vbox_init(int UNUSED(type), sef_init_info_t *UNUSED(info))
{
    /* Initialize the device. */
    int devind;
    u16_t vid, did;
    struct VMMDevReportGuestInfo *req;
    int r;

    interval = DEFAULT_INTERVAL;
    drift = DEFAULT_DRIFT;

    if (env_argc > 1)
        optset_parse(optset_table, env_argv[1]);

    pci_init();

    r = pci_first_dev(&devind, &vid, &did);

    for (;;) {
        if (r != 1)
            panic("backdoor device not found");

        if (vid == VMMDEV_PCI_VID && did == VMMDEV_PCI_DID)
            break;

        r = pci_next_dev(&devind, &vid, &did);
    }

    pci_reserve(devind);

    port = pci_attr_r32(devind, PCI_BAR) & PCI_BAR_IO_MASK;

    irq = pci_attr_r8(devind, PCI_ILR);
    hook_id = 0;

    if ((r = sys_irqsetpolicy(irq, 0 /* IRQ_REENABLE */, &hook_id)) != OK)
        panic("unable to register IRQ: %d", r);

    if ((r = sys_irqenable(&hook_id)) != OK)
        panic("unable to enable IRQ: %d", r);

    if ((vir_ptr = alloc_contig(VMMDEV_BUF_SIZE, 0, &phys_ptr)) == NULL)
        panic("unable to allocate memory");

    req = (struct VMMDevReportGuestInfo *) vir_ptr;
    req->add_version = VMMDEV_GUEST_VERSION;
    req->os_type = VMMDEV_GUEST_OS_OTHER;

    if ((r = vbox_request(&req->header, phys_ptr,
            VMMDEV_REQ_REPORTGUESTINFO, sizeof(*req))) != VMMDEV_ERR_OK)
        panic("backdoor device not functioning");

    ticks = sys_hz() * interval;

    sys_setalarm(ticks, 0);

    return OK;
}
/*
 * Fill the part of a LWP structure that is common between kernel tasks and
 * user processes.  Also return a CPU estimate in 'estcpu', because we generate
 * the value as a side effect here, and the LWP structure has no estcpu field.
 */
static void
fill_lwp_common(struct kinfo_lwp * l, int kslot, uint32_t * estcpu)
{
    struct proc *kp;
    struct timeval tv;
    clock_t uptime;
    uint32_t hz;

    kp = &proc_tab[kslot];

    uptime = getticks();
    hz = sys_hz();

    /*
     * We use the process endpoint as the LWP ID.  Not only does this allow
     * users to obtain process endpoints with "ps -s" (thus replacing the
     * MINIX3 ps(1)'s "ps -E"), but if we ever do implement kernel threads,
     * this is probably still going to be accurate.
     */
    l->l_lid = kp->p_endpoint;

    /*
     * The time during which the process has not been swapped in or out is
     * not applicable for us, and thus, we set it to the time the process
     * has been running (in seconds).  This value is relevant mostly for
     * ps(1)'s CPU usage correction for processes that have just started.
     */
    if (kslot >= NR_TASKS)
        l->l_swtime = uptime - mproc_tab[kslot - NR_TASKS].mp_started;
    else
        l->l_swtime = uptime;
    l->l_swtime /= hz;

    /*
     * Sleep (dequeue) times are not maintained for kernel tasks, so
     * pretend they are never asleep (which is pretty accurate).
     */
    if (kslot < NR_TASKS)
        l->l_slptime = 0;
    else
        l->l_slptime = (uptime - kp->p_dequeued) / hz;

    l->l_priority = kp->p_priority;
    l->l_usrpri = kp->p_priority;
    l->l_cpuid = kp->p_cpu;
    ticks_to_timeval(&tv, kp->p_user_time + kp->p_sys_time);
    l->l_rtime_sec = tv.tv_sec;
    l->l_rtime_usec = tv.tv_usec;

    /*
     * Obtain CPU usage percentages and estimates through library code
     * shared between the kernel and this service; see its source for
     * details.  We note that the produced estcpu value is rather different
     * from the one produced by NetBSD, but this should not be a problem.
     */
    l->l_pctcpu = cpuavg_getstats(&kp->p_cpuavg, &l->l_cpticks, estcpu,
        uptime, hz);
}
void init_scheduling(void)
{
    u64_t r;

    balance_timeout = BALANCE_TIMEOUT * sys_hz();
    init_timer(&sched_timer);
    set_timer(&sched_timer, balance_timeout, balance_queues, 0);
    read_tsc_64(&r);
    srandom((unsigned)r);
}
/*===========================================================================*
 *                           init_scheduling                                 *
 *===========================================================================*/
void init_scheduling(void)
{
    int r;

    balance_timeout = BALANCE_TIMEOUT * sys_hz();

    if ((r = sys_setalarm(balance_timeout, 0)) != OK)
        panic("sys_setalarm failed: %d", r);
}
/*
 * Return the system uptime in milliseconds.  Also remember that lwIP retrieved
 * the system uptime during this call, so that we know to check for timer
 * updates at the end of the current iteration of the message loop.
 */
uint32_t
sys_now(void)
{

    recheck_timer = TRUE;

    /* TODO: avoid 64-bit arithmetic if possible. */
    return (uint32_t)(((uint64_t)getticks() * 1000) / sys_hz());
}
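/*
 * One way to address the TODO above without 64-bit arithmetic is to split
 * the tick count into whole seconds and a sub-second remainder.  Unsigned
 * wrap-around keeps the result correct modulo 2^32, which is what lwIP
 * expects from sys_now().  This is an untested alternative sketch, not the
 * code the service actually uses; the name sys_now_32bit is hypothetical.
 */
uint32_t
sys_now_32bit(void)
{
    clock_t ticks = getticks();
    uint32_t hz = sys_hz();

    recheck_timer = TRUE;             /* mirror the side effect of sys_now() */

    return (uint32_t)(ticks / hz) * 1000 +
        ((uint32_t)(ticks % hz) * 1000) / hz;
}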
/*
 * Store the given number of clock ticks as a timeval structure.
 */
static void
ticks_to_timeval(struct timeval * tv, clock_t ticks)
{
    clock_t hz;

    hz = sys_hz();

    tv->tv_sec = ticks / hz;
    tv->tv_usec = (long)((ticks % hz) * 1000000ULL / hz);
}
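/*
 * The inverse conversion is occasionally needed when arming timers from
 * user-supplied timeval values.  The helper below is a hypothetical sketch
 * (not part of the original file): it rounds the microsecond part up so a
 * nonzero interval never collapses to zero ticks.
 */
static clock_t
timeval_to_ticks(const struct timeval * tv)
{
    clock_t hz = sys_hz();

    return (clock_t)tv->tv_sec * hz +
        (clock_t)(((uint64_t)tv->tv_usec * hz + 999999) / 1000000);
}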
u32_t micros_to_ticks(u32_t micros)
{
    u32_t ticks;

    ticks = div64u(mul64u(micros, sys_hz()), 1000000);
    if (ticks < 1) ticks = 1;

    return ticks;
}
void init_scheduling(void)
{
    int i;

    balance_timeout = BALANCE_TIMEOUT * sys_hz();
    init_timer(&sched_timer);
    set_timer(&sched_timer, balance_timeout, balance_queues, 0);

    for (i = 0; i < PROCNUM; i++) {
        sprintf((char *) &sjf[i].p_name, "proc%d", i + 1);
        sjf[i].predBurst = 0;
    }
}
/*
 * Print the current uptime.
 */
static void
root_uptime(void)
{
    clock_t ticks;
    ldiv_t division;

    if (getticks(&ticks) != OK)
        return;

    division = ldiv(100L * ticks / sys_hz(), 100L);

    buf_printf("%ld.%0.2ld\n", division.quot, division.rem);
}
u32_t sys_now(void)
{
    static u32_t hz;
    u32_t jiffs;

    if (!hz)
        hz = sys_hz();

    /* use ticks not realtime as sys_now() is used to calculate timers */
    jiffs = sys_jiffies();

    return jiffs * (1000 / hz);
}
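/*
 * Note that 1000 / hz truncates, so for tick rates that do not divide 1000
 * evenly the millisecond count drifts: at 60 Hz the version above reports
 * 16 ms per tick and runs about 4% slow.  The sketch below is a hedged
 * alternative (not the original code; the name sys_now_precise is
 * hypothetical) that scales the jiffy count first, assuming 64-bit
 * arithmetic is acceptable here.
 */
u32_t sys_now_precise(void)
{
    static u32_t hz;

    if (!hz)
        hz = sys_hz();

    return (u32_t)(((u64_t)sys_jiffies() * 1000) / hz);
}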
void init_scheduling(void)
{
    /* Lottery Scheduling */
    u64_t r;

    balance_timeout = BALANCE_TIMEOUT * sys_hz();
    init_timer(&sched_timer);
    set_timer(&sched_timer, balance_timeout, balance_queues, 0);

    /* Lottery Scheduling */
    read_tsc_64(&r);
    srandom((unsigned)r);
}
/*===========================================================================*
 *                              clock_time                                   *
 *===========================================================================*/
time_t clock_time()
{
/* This routine returns the time in seconds since 1.1.1970.  MINIX is an
 * astrophysically naive system that assumes the earth rotates at a constant
 * rate and that such things as leap seconds do not exist.
 */
    int r;
    clock_t uptime, boottime;

    if ((r = getuptime2(&uptime, &boottime)) != 0)
        panic(__FILE__, "clock_time: getuptime2 failed", r);

    return( (time_t) (boottime + (uptime/sys_hz())));
}
/*****************************************************************************
 *                           ddekit_init_timers                              *
 ****************************************************************************/
void ddekit_init_timers(void)
{
    static int first_time = 0;

    if (!first_time) {
        ddekit_lock_init(&lock);
        jiffies = get_current_clock();
        HZ = sys_hz();
        pending_timer_ints = ddekit_sem_init(0);
        th = ddekit_thread_create(ddekit_timer_thread, 0, "timer");
        first_time = 1;
        DDEBUG_MSG_INFO("DDEkit timer subsystem initialized");
    }
}
/*============================================================================*
 *                             lan8710a_init                                  *
 *============================================================================*/
static int lan8710a_init(unsigned int instance, netdriver_addr_t * addr,
    uint32_t * caps, unsigned int * ticks)
{
    /* Initialize the ethernet driver. */

    /* Clear state. */
    memset(&lan8710a_state, 0, sizeof(lan8710a_state));

    /* Initialize driver. */
    lan8710a_map_regs();

    lan8710a_init_hw(addr, instance);

    *caps = NDEV_CAP_MCAST | NDEV_CAP_BCAST;
    *ticks = sys_hz();    /* update statistics once a second */
    return OK;
}
/*===========================================================================*
 *                            sef_cb_init_fresh                              *
 *===========================================================================*/
static int sef_cb_init_fresh(int type, sef_init_info_t *UNUSED(info))
{
/* Initialize the rtl8169 driver. */
    long v;

    system_hz = sys_hz();

    v = 0;
    (void) env_parse("instance", "d", 0, &v, 0, 255);
    re_instance = (int) v;

    /* Claim buffer memory now. */
    rl_init_buf(&re_state);

    /* Announce we are up! */
    netdriver_announce();

    return(OK);
}
/*===========================================================================*
 *                                rl_init                                    *
 *===========================================================================*/
static int rl_init(unsigned int instance, netdriver_addr_t *addr,
    uint32_t *caps, unsigned int *ticks)
{
    /* Initialize the rtl8169 driver. */
    re_t *rep;

    /* Initialize driver state. */
    rep = &re_state;
    memset(rep, 0, sizeof(*rep));

    /* Try to find a matching device. */
    if (!rl_probe(rep, instance))
        return ENXIO;

    /* Claim buffer memory now. */
    rl_init_buf(&re_state);

    /* Initialize the device we found. */
    rl_init_hw(rep, addr, instance);

    *caps = NDEV_CAP_MCAST | NDEV_CAP_BCAST | NDEV_CAP_HWADDR;
    *ticks = sys_hz();
    return OK;
}
/*===========================================================================*
 *                            sef_cb_init_fresh                              *
 *===========================================================================*/
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *info)
{
/* Initialize the virtual file server. */
    int s, i;
    struct fproc *rfp;
    message mess;
    struct rprocpub rprocpub[NR_BOOT_PROCS];

    force_sync = 0;
    receive_from = ANY;
    self = NULL;
    verbose = 0;

    /* Initialize proc endpoints to NONE */
    for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
        rfp->fp_endpoint = NONE;
        rfp->fp_pid = PID_FREE;
    }

    /* Initialize the process table with help of the process manager messages.
     * Expect one message for each system process with its slot number and pid.
     * When no more processes follow, the magic process number NONE is sent.
     * Then, stop and synchronize with the PM.
     */
    do {
        if ((s = sef_receive(PM_PROC_NR, &mess)) != OK)
            panic("VFS: couldn't receive from PM: %d", s);

        if (mess.m_type != PM_INIT)
            panic("unexpected message from PM: %d", mess.m_type);

        if (NONE == mess.PM_PROC) break;

        rfp = &fproc[mess.PM_SLOT];
        rfp->fp_flags = FP_NOFLAGS;
        rfp->fp_pid = mess.PM_PID;
        rfp->fp_endpoint = mess.PM_PROC;
        rfp->fp_grant = GRANT_INVALID;
        rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
        rfp->fp_realuid = (uid_t) SYS_UID;
        rfp->fp_effuid = (uid_t) SYS_UID;
        rfp->fp_realgid = (gid_t) SYS_GID;
        rfp->fp_effgid = (gid_t) SYS_GID;
        rfp->fp_umask = ~0;
    } while (TRUE);                  /* continue until process NONE */
    mess.m_type = OK;                /* tell PM that we succeeded */
    s = send(PM_PROC_NR, &mess);     /* send synchronization message */

    /* All process table entries have been set. Continue with initialization. */
    fp = &fproc[_ENDPOINT_P(VFS_PROC_NR)]; /* During init all communication with
                                            * FSes is on behalf of myself */
    init_dmap();          /* Initialize device table. */
    system_hz = sys_hz();

    /* Map all the services in the boot image. */
    if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
            (vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
        panic("sys_safecopyfrom failed: %d", s);
    }
    for (i = 0; i < NR_BOOT_PROCS; i++) {
        if (rprocpub[i].in_use) {
            if ((s = map_service(&rprocpub[i])) != OK) {
                panic("VFS: unable to map service: %d", s);
            }
        }
    }

    /* Subscribe to block and character driver events. */
    s = ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL | DSF_OVERWRITE);
    if (s != OK) panic("VFS: can't subscribe to driver events (%d)", s);

    /* Initialize worker threads */
    for (i = 0; i < NR_WTHREADS; i++) {
        worker_init(&workers[i]);
    }
    worker_init(&sys_worker); /* exclusive system worker thread */
    worker_init(&dl_worker);  /* exclusive worker thread to resolve deadlocks */

    /* Initialize global locks */
    if (mthread_mutex_init(&pm_lock, NULL) != 0)
        panic("VFS: couldn't initialize pm lock mutex");
    if (mthread_mutex_init(&exec_lock, NULL) != 0)
        panic("VFS: couldn't initialize exec lock");
    if (mthread_mutex_init(&bsf_lock, NULL) != 0)
        panic("VFS: couldn't initialize block special file lock");

    /* Initialize event resources for boot procs and locks for all procs */
    for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
        if (mutex_init(&rfp->fp_lock, NULL) != 0)
            panic("unable to initialize fproc lock");
#if LOCK_DEBUG
        rfp->fp_vp_rdlocks = 0;
        rfp->fp_vmnt_rdlocks = 0;
#endif
    }

    init_vnodes();    /* init vnodes */
    init_vmnts();     /* init vmnt structures */
    init_select();    /* init select() structures */
    init_filps();     /* Init filp structures */
    mount_pfs();      /* mount Pipe File Server */
    worker_start(do_init_root); /* mount initial ramdisk as file system root */
    yield();          /* force do_init_root to start */
    self = NULL;

    return(OK);
}
void main(void)
{
    mq_t *mq;
    int r;
    int source, m_type, timerand, fd;
    u32_t tasknr;
    struct fssignon device;
    u8_t randbits[32];
    struct timeval tv;

#if DEBUG
    printk("Starting inet...\n");
    printk("%s\n", version);
#endif

#if HZ_DYNAMIC
    system_hz = sys_hz();
#endif

    /* Read configuration. */
    nw_conf();

    /* Get a random number */
    timerand = 1;
    fd = open(RANDOM_DEV_NAME, O_RDONLY | O_NONBLOCK);
    if (fd != -1)
    {
        r = read(fd, randbits, sizeof(randbits));
        if (r == sizeof(randbits))
            timerand = 0;
        else
        {
            printk("inet: unable to read random data from %s: %s\n",
                RANDOM_DEV_NAME, r == -1 ? strerror(errno) :
                r == 0 ? "EOF" : "not enough data");
        }
        close(fd);
    }
    else
    {
        printk("inet: unable to open random device %s: %s\n",
            RANDOM_DEV_NAME, strerror(errno));
    }
    if (timerand)
    {
        printk("inet: using current time for random-number seed\n");
        r = gettimeofday(&tv, NULL);
        if (r == -1)
        {
            printk("sysutime failed: %s\n", strerror(errno));
            exit(1);
        }
        memcpy(randbits, &tv, sizeof(tv));
    }
    init_rand256(randbits);

    /* Our new identity as a server. */
    r = ds_retrieve_u32("inet", &tasknr);
    if (r != 0)
        ip_panic(("inet: ds_retrieve_u32 failed for 'inet': %d", r));
    this_proc = tasknr;

    /* Register the device group. */
    device.dev = ip_dev;
    device.style = STYLE_CLONE;
    if (svrctl(FSSIGNON, (void *) &device) == -1)
    {
        printk("inet: error %d on registering ethernet devices\n", errno);
        pause();
    }

#ifdef BUF_CONSISTENCY_CHECK
    inet_buf_debug = (getenv("inetbufdebug") &&
        (strcmp(getenv("inetbufdebug"), "on") == 0));
    inet_buf_debug = 100;
    if (inet_buf_debug)
    {
        ip_warning(( "buffer consistency check enabled" ));
    }
#endif

    if (getenv("killerinet"))
    {
        ip_warning(( "killer inet active" ));
        killer_inet = 1;
    }

    nw_init();

    while (TRUE)
    {
#ifdef BUF_CONSISTENCY_CHECK
        if (inet_buf_debug)
        {
            static int buf_debug_count = 0;

            if (++buf_debug_count >= inet_buf_debug)
            {
                buf_debug_count = 0;
                if (!bf_consistency_check())
                    break;
            }
        }
#endif
        if (ev_head)
        {
            ev_process();
            continue;
        }
        if (clck_call_expire)
        {
            clck_expire_timers();
            continue;
        }

        mq = mq_get();
        if (!mq)
            ip_panic(("out of messages"));

        r = kipc_module_call(KIPC_RECEIVE, 0, ENDPT_ANY, &mq->mq_mess);
        if (r < 0)
        {
            ip_panic(("unable to receive: %d", r));
        }
        reset_time();
        source = mq->mq_mess.m_source;
        m_type = mq->mq_mess.m_type;
        if (source == VFS_PROC_NR)
        {
            sr_rec(mq);
        }
        else if (is_notify(m_type))
        {
            if (_ENDPOINT_P(source) == CLOCK)
            {
                clck_tick(&mq->mq_mess);
                mq_free(mq);
            }
            else if (_ENDPOINT_P(source) == PM_PROC_NR)
            {
                /* signaled */
                /* probably SIGTERM */
                mq_free(mq);
            }
            else
            {
                /* A driver is (re)started. */
                eth_check_drivers(&mq->mq_mess);
                mq_free(mq);
            }
        }
        else if (m_type == DL_CONF_REPLY || m_type == DL_TASK_REPLY ||
            m_type == DL_NAME_REPLY || m_type == DL_STAT_REPLY)
        {
            eth_rec(&mq->mq_mess);
            mq_free(mq);
        }
        else
        {
            printk("inet: got bad message type 0x%x from %d\n",
                mq->mq_mess.m_type, mq->mq_mess.m_source);
            mq_free(mq);
        }
    }
    ip_panic(("task is not allowed to terminate"));
}
/*===========================================================================*
 *                            sef_cb_init_fresh                              *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the virtual file server. */
    int s, i;
    register struct fproc *rfp;
    struct vmnt *vmp;
    struct vnode *root_vp;
    message mess;
    struct rprocpub rprocpub[NR_BOOT_PROCS];

    /* Clear endpoint field */
    last_login_fs_e = NONE;
    mount_m_in.m1_p3 = (char *) NONE;

    /* Initialize the process table with help of the process manager messages.
     * Expect one message for each system process with its slot number and pid.
     * When no more processes follow, the magic process number NONE is sent.
     * Then, stop and synchronize with the PM.
     */
    do {
        if (OK != (s = sef_receive(PM_PROC_NR, &mess)))
            panic("FS couldn't receive from PM: %d", s);

        if (mess.m_type != PM_INIT)
            panic("unexpected message from PM: %d", mess.m_type);

        if (NONE == mess.PM_PROC) break;

        rfp = &fproc[mess.PM_SLOT];
        rfp->fp_pid = mess.PM_PID;
        rfp->fp_endpoint = mess.PM_PROC;
        rfp->fp_openfd = 0;
        rfp->fp_realuid = (uid_t) SYS_UID;
        rfp->fp_effuid = (uid_t) SYS_UID;
        rfp->fp_realgid = (gid_t) SYS_GID;
        rfp->fp_effgid = (gid_t) SYS_GID;
        rfp->fp_umask = ~0;
        rfp->fp_grant = GRANT_INVALID;
        rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
        rfp->fp_revived = NOT_REVIVING;
        rfp->fp_fsizelim.rlim_cur = RLIM_FSIZE_DEFAULT;
        rfp->fp_fsizelim.rlim_max = RLIM_FSIZE_DEFAULT;
        rfp->fp_nofilelim.rlim_cur = RLIM_NOFILE_DEFAULT;
        rfp->fp_nofilelim.rlim_max = RLIM_NOFILE_DEFAULT;
    } while (TRUE);                  /* continue until process NONE */
    mess.m_type = OK;                /* tell PM that we succeeded */
    s = send(PM_PROC_NR, &mess);     /* send synchronization message */

    /* All process table entries have been set. Continue with initialization. */

    /* The following initializations are needed to let dev_opcl succeed .*/
    fp = (struct fproc *) NULL;
    who_e = who_p = VFS_PROC_NR;

    /* Initialize device table. */
    build_dmap();

    /* Map all the services in the boot image. */
    if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
            (vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
        panic("sys_safecopyfrom failed: %d", s);
    }
    for (i = 0; i < NR_BOOT_PROCS; i++) {
        if (rprocpub[i].in_use) {
            if ((s = map_service(&rprocpub[i])) != OK) {
                panic("unable to map service: %d", s);
            }
        }
    }

    init_root();      /* init root device and load super block */
    init_select();    /* init select() structures */

    vmp = &vmnt[0];   /* Should be the root filesystem */
    if (vmp->m_dev == NO_DEV)
        panic("vfs: no root filesystem");
    root_vp = vmp->m_root_node;

    /* The root device can now be accessed; set process directories. */
    for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
        FD_ZERO(&(rfp->fp_filp_inuse));
        if (rfp->fp_pid != PID_FREE) {
            dup_vnode(root_vp);
            rfp->fp_rd = root_vp;
            dup_vnode(root_vp);
            rfp->fp_wd = root_vp;
        } else
            rfp->fp_endpoint = NONE;
    }

    system_hz = sys_hz();

    /* Subscribe to driver events for VFS drivers. */
    s = ds_subscribe("drv\\.vfs\\..*", DSF_INITIAL | DSF_OVERWRITE);
    if (s != OK) {
        panic("vfs: can't subscribe to driver events");
    }

    SANITYCHECK;

#if DO_SANITYCHECKS
    FIXME("VFS: DO_SANITYCHECKS is on");
#endif

    return(OK);
}
/*===========================================================================*
 *                            sef_cb_init_fresh                              *
 *===========================================================================*/
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
{
/* Initialize the process manager.
 * Memory use info is collected from the boot monitor, the kernel, and
 * all processes compiled into the system image. Initially this information
 * is put into an array mem_chunks. Elements of mem_chunks are struct memory,
 * and hold base, size pairs in units of clicks. This array is small, there
 * should be no more than 8 chunks. After the array of chunks has been built
 * the contents are used to initialize the hole list. Space for the hole list
 * is reserved as an array with twice as many elements as the maximum number
 * of processes allowed. It is managed as a linked list, and elements of the
 * array are struct hole, which, in addition to storage for a base and size in
 * click units also contain space for a link, a pointer to another element.
 */
    int s;
    static struct boot_image image[NR_BOOT_PROCS];
    register struct boot_image *ip;
    static char core_sigs[] = { SIGQUIT, SIGILL, SIGTRAP, SIGABRT,
        SIGEMT, SIGFPE, SIGBUS, SIGSEGV };
    static char ign_sigs[] = { SIGCHLD, SIGWINCH, SIGCONT };
    static char noign_sigs[] = { SIGILL, SIGTRAP, SIGEMT,
        SIGFPE, SIGBUS, SIGSEGV };
    register struct mproc *rmp;
    register char *sig_ptr;
    message mess;

    /* Initialize process table, including timers. */
    for (rmp = &mproc[0]; rmp < &mproc[NR_PROCS]; rmp++) {
        init_timer(&rmp->mp_timer);
        rmp->mp_magic = MP_MAGIC;
    }

    /* Build the set of signals which cause core dumps, and the set of signals
     * that are by default ignored.
     */
    sigemptyset(&core_sset);
    for (sig_ptr = core_sigs; sig_ptr < core_sigs+sizeof(core_sigs); sig_ptr++)
        sigaddset(&core_sset, *sig_ptr);
    sigemptyset(&ign_sset);
    for (sig_ptr = ign_sigs; sig_ptr < ign_sigs+sizeof(ign_sigs); sig_ptr++)
        sigaddset(&ign_sset, *sig_ptr);
    sigemptyset(&noign_sset);
    for (sig_ptr = noign_sigs; sig_ptr < noign_sigs+sizeof(noign_sigs); sig_ptr++)
        sigaddset(&noign_sset, *sig_ptr);

    /* Obtain a copy of the boot monitor parameters and the kernel info struct.
     * Parse the list of free memory chunks. This list is what the boot monitor
     * reported, but it must be corrected for the kernel and system processes.
     */
    if ((s = sys_getmonparams(monitor_params, sizeof(monitor_params))) != OK)
        panic("get monitor params failed: %d", s);
    if ((s = sys_getkinfo(&kinfo)) != OK)
        panic("get kernel info failed: %d", s);

    /* Initialize PM's process table. Request a copy of the system image table
     * that is defined at the kernel level to see which slots to fill in.
     */
    if (OK != (s = sys_getimage(image)))
        panic("couldn't get image table: %d", s);
    procs_in_use = 0;                /* start populating table */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        if (ip->proc_nr >= 0) {      /* task have negative nrs */
            procs_in_use += 1;       /* found user process */

            /* Set process details found in the image table. */
            rmp = &mproc[ip->proc_nr];
            strlcpy(rmp->mp_name, ip->proc_name, PROC_NAME_LEN);
            (void) sigemptyset(&rmp->mp_ignore);
            (void) sigemptyset(&rmp->mp_sigmask);
            (void) sigemptyset(&rmp->mp_catch);
            if (ip->proc_nr == INIT_PROC_NR) {    /* user process */
                /* INIT is root, we make it father of itself. This is
                 * not really OK, INIT should have no father, i.e.
                 * a father with pid NO_PID. But PM currently assumes
                 * that mp_parent always points to a valid slot number.
                 */
                rmp->mp_parent = INIT_PROC_NR;
                rmp->mp_procgrp = rmp->mp_pid = INIT_PID;
                rmp->mp_flags |= IN_USE;

                /* Set scheduling info */
                rmp->mp_scheduler = KERNEL;
                rmp->mp_nice = get_nice_value(USR_Q);
            }
            else {                                /* system process */
                if (ip->proc_nr == RS_PROC_NR) {
                    rmp->mp_parent = INIT_PROC_NR;
                }
                else {
                    rmp->mp_parent = RS_PROC_NR;
                }
                rmp->mp_pid = get_free_pid();
                rmp->mp_flags |= IN_USE | PRIV_PROC;

                /* RS schedules this process */
                rmp->mp_scheduler = NONE;
                rmp->mp_nice = get_nice_value(SRV_Q);
            }

            /* Get kernel endpoint identifier. */
            rmp->mp_endpoint = ip->endpoint;

            /* Tell VFS about this system process. */
            mess.m_type = PM_INIT;
            mess.PM_SLOT = ip->proc_nr;
            mess.PM_PID = rmp->mp_pid;
            mess.PM_PROC = rmp->mp_endpoint;
            if (OK != (s = send(VFS_PROC_NR, &mess)))
                panic("can't sync up with VFS: %d", s);
        }
    }

    /* Tell VFS that no more system processes follow and synchronize. */
    mess.PR_ENDPT = NONE;
    if (sendrec(VFS_PROC_NR, &mess) != OK || mess.m_type != OK)
        panic("can't sync up with VFS");

#if defined(__i386__)
    uts_val.machine[0] = 'i';
    strcpy(uts_val.machine + 1, itoa(getprocessor()));
#elif defined(__arm__)
    strcpy(uts_val.machine, "arm");
#endif

    system_hz = sys_hz();

    /* Initialize user-space scheduling. */
    sched_init();

    return(OK);
}
/*===========================================================================*
 *                            sef_cb_init_fresh                              *
 *===========================================================================*/
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *info)
{
/* Initialize the virtual file server. */
    int s, i;
    struct fproc *rfp;
    message mess;
    struct rprocpub rprocpub[NR_BOOT_PROCS];

    self = NULL;
    verbose = 0;

    /* Initialize proc endpoints to NONE */
    for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
        rfp->fp_endpoint = NONE;
        rfp->fp_pid = PID_FREE;
    }

    /* Initialize the process table with help of the process manager messages.
     * Expect one message for each system process with its slot number and pid.
     * When no more processes follow, the magic process number NONE is sent.
     * Then, stop and synchronize with the PM.
     */
    do {
        if ((s = sef_receive(PM_PROC_NR, &mess)) != OK)
            panic("VFS: couldn't receive from PM: %d", s);

        if (mess.m_type != VFS_PM_INIT)
            panic("unexpected message from PM: %d", mess.m_type);

        if (NONE == mess.VFS_PM_ENDPT) break;

        rfp = &fproc[mess.VFS_PM_SLOT];
        rfp->fp_flags = FP_NOFLAGS;
        rfp->fp_pid = mess.VFS_PM_PID;
        rfp->fp_endpoint = mess.VFS_PM_ENDPT;
        rfp->fp_grant = GRANT_INVALID;
        rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
        rfp->fp_realuid = (uid_t) SYS_UID;
        rfp->fp_effuid = (uid_t) SYS_UID;
        rfp->fp_realgid = (gid_t) SYS_GID;
        rfp->fp_effgid = (gid_t) SYS_GID;
        rfp->fp_umask = ~0;
    } while (TRUE);                   /* continue until process NONE */
    mess.m_type = OK;                 /* tell PM that we succeeded */
    s = ipc_send(PM_PROC_NR, &mess);  /* send synchronization message */

    system_hz = sys_hz();

    /* Subscribe to block and character driver events. */
    s = ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL | DSF_OVERWRITE);
    if (s != OK) panic("VFS: can't subscribe to driver events (%d)", s);

    /* Initialize worker threads */
    worker_init();

    /* Initialize global locks */
    if (mthread_mutex_init(&bsf_lock, NULL) != 0)
        panic("VFS: couldn't initialize block special file lock");

    init_dmap();    /* Initialize device table. */

    /* Map all the services in the boot image. */
    if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
            (vir_bytes) rprocpub, sizeof(rprocpub))) != OK) {
        panic("sys_safecopyfrom failed: %d", s);
    }
    for (i = 0; i < NR_BOOT_PROCS; i++) {
        if (rprocpub[i].in_use) {
            if ((s = map_service(&rprocpub[i])) != OK) {
                panic("VFS: unable to map service: %d", s);
            }
        }
    }

    /* Initialize locks and initial values for all processes. */
    for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
        if (mutex_init(&rfp->fp_lock, NULL) != 0)
            panic("unable to initialize fproc lock");
        rfp->fp_worker = NULL;
#if LOCK_DEBUG
        rfp->fp_vp_rdlocks = 0;
        rfp->fp_vmnt_rdlocks = 0;
#endif

        /* Initialize process directories. mount_fs will set them to the
         * correct values.
         */
        for (i = 0; i < OPEN_MAX; i++)
            rfp->fp_filp[i] = NULL;
        rfp->fp_rd = NULL;
        rfp->fp_wd = NULL;
    }

    init_vnodes();    /* init vnodes */
    init_vmnts();     /* init vmnt structures */
    init_select();    /* init select() structures */
    init_filps();     /* Init filp structures */

    /* Mount PFS and initial file system root. */
    worker_start(fproc_addr(VFS_PROC_NR), do_init_root, &mess /*unused*/,
        FALSE /*use_spare*/);

    return(OK);
}
/*===========================================================================*
 *                            sef_cb_init_fresh                              *
 *===========================================================================*/
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
{
/* Initialize the process manager. */
    int s;
    static struct boot_image image[NR_BOOT_PROCS];
    register struct boot_image *ip;
    static char core_sigs[] = { SIGQUIT, SIGILL, SIGTRAP, SIGABRT,
        SIGEMT, SIGFPE, SIGBUS, SIGSEGV };
    static char ign_sigs[] = { SIGCHLD, SIGWINCH, SIGCONT, SIGINFO };
    static char noign_sigs[] = { SIGILL, SIGTRAP, SIGEMT,
        SIGFPE, SIGBUS, SIGSEGV };
    register struct mproc *rmp;
    register char *sig_ptr;
    message mess;

    /* Initialize process table, including timers. */
    for (rmp = &mproc[0]; rmp < &mproc[NR_PROCS]; rmp++) {
        init_timer(&rmp->mp_timer);
        rmp->mp_magic = MP_MAGIC;
        rmp->mp_sigact = mpsigact[rmp - mproc];
        rmp->mp_eventsub = NO_EVENTSUB;
    }

    /* Build the set of signals which cause core dumps, and the set of signals
     * that are by default ignored.
     */
    sigemptyset(&core_sset);
    for (sig_ptr = core_sigs; sig_ptr < core_sigs+sizeof(core_sigs); sig_ptr++)
        sigaddset(&core_sset, *sig_ptr);
    sigemptyset(&ign_sset);
    for (sig_ptr = ign_sigs; sig_ptr < ign_sigs+sizeof(ign_sigs); sig_ptr++)
        sigaddset(&ign_sset, *sig_ptr);
    sigemptyset(&noign_sset);
    for (sig_ptr = noign_sigs; sig_ptr < noign_sigs+sizeof(noign_sigs); sig_ptr++)
        sigaddset(&noign_sset, *sig_ptr);

    /* Obtain a copy of the boot monitor parameters. */
    if ((s = sys_getmonparams(monitor_params, sizeof(monitor_params))) != OK)
        panic("get monitor params failed: %d", s);

    /* Initialize PM's process table. Request a copy of the system image table
     * that is defined at the kernel level to see which slots to fill in.
     */
    if (OK != (s = sys_getimage(image)))
        panic("couldn't get image table: %d", s);
    procs_in_use = 0;                /* start populating table */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        if (ip->proc_nr >= 0) {      /* task have negative nrs */
            procs_in_use += 1;       /* found user process */

            /* Set process details found in the image table. */
            rmp = &mproc[ip->proc_nr];
            strlcpy(rmp->mp_name, ip->proc_name, PROC_NAME_LEN);
            (void) sigemptyset(&rmp->mp_ignore);
            (void) sigemptyset(&rmp->mp_sigmask);
            (void) sigemptyset(&rmp->mp_catch);

            if (ip->proc_nr == INIT_PROC_NR) {    /* user process */
                /* INIT is root, we make it father of itself. This is
                 * not really OK, INIT should have no father, i.e.
                 * a father with pid NO_PID. But PM currently assumes
                 * that mp_parent always points to a valid slot number.
                 */
                rmp->mp_parent = INIT_PROC_NR;
                rmp->mp_procgrp = rmp->mp_pid = INIT_PID;
                rmp->mp_flags |= IN_USE;

                /* Set scheduling info */
                rmp->mp_scheduler = KERNEL;
                rmp->mp_nice = get_nice_value(USR_Q);
            }
            else {                                /* system process */
                if (ip->proc_nr == RS_PROC_NR) {
                    rmp->mp_parent = INIT_PROC_NR;
                }
                else {
                    rmp->mp_parent = RS_PROC_NR;
                }
                rmp->mp_pid = get_free_pid();
                rmp->mp_flags |= IN_USE | PRIV_PROC;

                /* RS schedules this process */
                rmp->mp_scheduler = NONE;
                rmp->mp_nice = get_nice_value(SRV_Q);
            }

            /* Get kernel endpoint identifier. */
            rmp->mp_endpoint = ip->endpoint;

            /* Tell VFS about this system process. */
            memset(&mess, 0, sizeof(mess));
            mess.m_type = VFS_PM_INIT;
            mess.VFS_PM_SLOT = ip->proc_nr;
            mess.VFS_PM_PID = rmp->mp_pid;
            mess.VFS_PM_ENDPT = rmp->mp_endpoint;
            if (OK != (s = ipc_send(VFS_PROC_NR, &mess)))
                panic("can't sync up with VFS: %d", s);
        }
    }

    /* Tell VFS that no more system processes follow and synchronize. */
    memset(&mess, 0, sizeof(mess));
    mess.m_type = VFS_PM_INIT;
    mess.VFS_PM_ENDPT = NONE;
    if (ipc_sendrec(VFS_PROC_NR, &mess) != OK || mess.m_type != OK)
        panic("can't sync up with VFS");

    system_hz = sys_hz();

    /* Initialize user-space scheduling. */
    sched_init();

    return(OK);
}
/*===========================================================================*
 *                           parse_arguments                                 *
 *===========================================================================*/
static int parse_arguments(int argc, char *argv[])
{
    if (argc != 2)
        return EINVAL;

    optset_parse(optset_table, argv[1]);

    if (MAIN_LABEL[0] == 0 || MAIN_MINOR < 0 || MAIN_MINOR > 255)
        return EINVAL;
    if (USE_MIRROR && (BACKUP_LABEL[0] == 0 ||
            BACKUP_MINOR < 0 || BACKUP_MINOR > 255))
        return EINVAL;

    /* Checksumming implies a checksum layout. */
    if (USE_CHECKSUM)
        USE_SUM_LAYOUT = 1;

    /* Determine the checksum size for the chosen checksum type. */
    switch (SUM_TYPE) {
    case ST_NIL:
        SUM_SIZE = 4;    /* for the sector number */
        break;
    case ST_XOR:
        SUM_SIZE = 16;   /* compatibility */
        break;
    case ST_CRC:
        SUM_SIZE = 4;
        break;
    case ST_MD5:
        SUM_SIZE = 16;
        break;
    default:
        return EINVAL;
    }

    if (NR_SUM_SEC <= 0 || SUM_SIZE * NR_SUM_SEC > SECTOR_SIZE)
        return EINVAL;

#if DEBUG
    printf("Filter starting. Configuration:\n");
    printf("  USE_CHECKSUM :   %3s ", USE_CHECKSUM ? "yes" : "no");
    printf("  USE_MIRROR :     %3s\n", USE_MIRROR ? "yes" : "no");

    if (USE_CHECKSUM) {
        printf("  BAD_SUM_ERROR :  %3s ", BAD_SUM_ERROR ? "yes" : "no");
        printf("  NR_SUM_SEC :     %3d\n", NR_SUM_SEC);

        printf("  SUM_TYPE :       ");
        switch (SUM_TYPE) {
        case ST_NIL: printf("nil"); break;
        case ST_XOR: printf("xor"); break;
        case ST_CRC: printf("crc"); break;
        case ST_MD5: printf("md5"); break;
        }
        printf("  SUM_SIZE :       %3d\n", SUM_SIZE);
    }
    else printf("  USE_SUM_LAYOUT : %3s\n", USE_SUM_LAYOUT ? "yes" : "no");

    printf("  N : %3dx M : %3dx T : %3ds\n",
        NR_RETRIES, NR_RESTARTS, DRIVER_TIMEOUT);

    printf("  MAIN_LABEL / MAIN_MINOR : %19s / %d\n", MAIN_LABEL, MAIN_MINOR);
    if (USE_MIRROR) {
        printf("  BACKUP_LABEL / BACKUP_MINOR : %15s / %d\n",
            BACKUP_LABEL, BACKUP_MINOR);
    }
#endif

    /* Convert timeout seconds to ticks. */
    DRIVER_TIMEOUT *= sys_hz();

    return OK;
}