void schedule_init(void) { spin_lock(&sched_lock); /* init provisioning stuff */ all_pcores = kmalloc(sizeof(struct sched_pcore) * num_cores, 0); memset(all_pcores, 0, sizeof(struct sched_pcore) * num_cores); assert(!core_id()); /* want the alarm on core0 for now */ init_awaiter(&ksched_waiter, __ksched_tick); set_ksched_alarm(); /* init the idlecore list. if they turned off hyperthreading, give them the * odds from 1..max-1. otherwise, give them everything by 0 (default mgmt * core). TODO: (CG/LL) better LL/CG mgmt */ #ifndef CONFIG_DISABLE_SMT for (int i = 1; i < num_cores; i++) TAILQ_INSERT_TAIL(&idlecores, pcoreid2spc(i), alloc_next); #else assert(!(num_cores % 2)); for (int i = 1; i < num_cores; i += 2) TAILQ_INSERT_TAIL(&idlecores, pcoreid2spc(i), alloc_next); #endif /* CONFIG_DISABLE_SMT */ spin_unlock(&sched_lock); #ifdef CONFIG_ARSC_SERVER int arsc_coreid = get_any_idle_core(); assert(arsc_coreid >= 0); send_kernel_message(arsc_coreid, arsc_server, 0, 0, 0, KMSG_ROUTINE); printk("Using core %d for the ARSC server\n", arsc_coreid); #endif /* CONFIG_ARSC_SERVER */ }
void schedule_init(void) { spin_lock(&sched_lock); /* init provisioning stuff */ all_pcores = kmalloc(sizeof(struct sched_pcore) * num_cpus, 0); memset(all_pcores, 0, sizeof(struct sched_pcore) * num_cpus); assert(!core_id()); /* want the alarm on core0 for now */ init_awaiter(&ksched_waiter, __kalarm); set_ksched_alarm(); /* init the idlecore list. if they turned off hyperthreading, give them the * odds from 1..max-1. otherwise, give them everything by 0 (default mgmt * core). TODO: (CG/LL) better LL/CG mgmt */ #ifndef CONFIG_DISABLE_SMT for (int i = 1; i < num_cpus; i++) TAILQ_INSERT_TAIL(&idlecores, pcoreid2spc(i), alloc_next); #else assert(!(num_cpus % 2)); for (int i = 1; i < num_cpus; i += 2) TAILQ_INSERT_TAIL(&idlecores, pcoreid2spc(i), alloc_next); #endif /* CONFIG_DISABLE_SMT */ #ifdef CONFIG_ARSC_SERVER struct sched_pcore *a_core = TAILQ_FIRST(&idlecores); assert(a_core); TAILQ_REMOVE(&idlecores, a_core, alloc_next); send_kernel_message(spc2pcoreid(a_core), arsc_server, 0, 0, 0, KMSG_ROUTINE); warn("Using core %d for the ARSCs - there are probably issues with this.", spc2pcoreid(a_core)); #endif /* CONFIG_ARSC_SERVER */ spin_unlock(&sched_lock); return; }
void udelay_sched(uint64_t usec) { struct timer_chain *tchain = &per_cpu_info[core_id()].tchain; struct alarm_waiter a_waiter; init_awaiter(&a_waiter, 0); set_awaiter_rel(&a_waiter, usec); set_alarm(tchain, &a_waiter); sleep_on_awaiter(&a_waiter); }
/* Like sleep, but it will timeout in 'usec' microseconds.
 *
 * Sleeps on rv's CV until cond(arg) becomes true, the relative timeout
 * expires (detected via awaiter.on_tchain going false), or the enclosing
 * syscall is aborted (throws EINTR via error()).  Note the caller cannot
 * tell "condition became true" apart from "timed out" from this function
 * alone - recheck cond after return if that matters. */
void rendez_sleep_timeout(struct rendez *rv, int (*cond)(void*), void *arg,
                          uint64_t usec)
{
	int8_t irq_state = 0;
	struct alarm_waiter awaiter;
	struct cv_lookup_elm cle;
	struct timer_chain *pcpui_tchain = &per_cpu_info[core_id()].tchain;

	/* A zero-length timeout expires immediately: return without sleeping or
	 * even checking the condition. */
	if (!usec)
		return;
	/* Doing this cond check early, but then unlocking again.  Mostly just to
	 * avoid weird issues with the CV lock and the alarm tchain lock. */
	cv_lock_irqsave(&rv->cv, &irq_state);
	if (cond(arg)) {
		cv_unlock_irqsave(&rv->cv, &irq_state);
		return;
	}
	cv_unlock_irqsave(&rv->cv, &irq_state);
	/* The handler will call rendez_wake, but won't mess with the condition
	 * state.  It's enough to break us out of cv_wait() to see .on_tchain. */
	init_awaiter(&awaiter, rendez_alarm_handler);
	awaiter.data = rv;
	set_awaiter_rel(&awaiter, usec);
	/* Set our alarm on this cpu's tchain.  Note that when we sleep in cv_wait,
	 * we could be migrated, and later on we could be unsetting the alarm
	 * remotely. */
	set_alarm(pcpui_tchain, &awaiter);
	cv_lock_irqsave(&rv->cv, &irq_state);
	/* Register ourselves as abortable while we hold the CV lock, so an
	 * aborter can find this sleeper. */
	__reg_abortable_cv(&cle, &rv->cv);
	/* We could wake early for a few reasons.  Legit wakeups after a changed
	 * condition (and we should exit), other alarms with different timeouts (and
	 * we should go back to sleep), etc.  Note it is possible for our alarm to
	 * fire immediately upon setting it: before we even cv_lock. */
	while (!cond(arg) && awaiter.on_tchain) {
		if (should_abort(&cle)) {
			/* Tear down in reverse order (unlock, disarm, dereg),
			 * then throw EINTR to the caller. */
			cv_unlock_irqsave(&rv->cv, &irq_state);
			unset_alarm(pcpui_tchain, &awaiter);
			dereg_abortable_cv(&cle);
			error(EINTR, "syscall aborted");
		}
		cv_wait(&rv->cv);
		cpu_relax();
	}
	cv_unlock_irqsave(&rv->cv, &irq_state);
	dereg_abortable_cv(&cle);
	/* Turn off our alarm.  If it already fired, this is a no-op.  Note this
	 * could be cross-core. */
	unset_alarm(pcpui_tchain, &awaiter);
}
/**
 * Create a one-shot timer (aka timeout) that fires 'msecs' milliseconds from
 * now on the calling core's timer chain.
 *
 * @param msecs  delay in milliseconds until the timer expires
 * @param func   callback invoked when the time has elapsed
 * @param waiter caller-provided waiter to (re)arm, or NULL to allocate one
 * @return the armed waiter, or NULL if allocation failed.  If we allocated
 *         it (waiter == NULL on entry), ownership passes to the caller.
 *         XXX: when to destroy the waiter, handler's responsibility?
 */
struct alarm_waiter *sys_timeout(uint32_t msecs, sys_timeout_handler func,
                                 struct alarm_waiter *waiter)
{
	if (waiter == NULL) {
		waiter = kmalloc(sizeof(struct alarm_waiter), 0);
		if (waiter == NULL)
			return NULL;	/* don't init_awaiter() a NULL waiter */
	}
	struct timer_chain *tchain = &per_cpu_info[core_id()].tchain;

	/* initialize the waiter */
	init_awaiter(waiter, func);
	/* explicitly setting the waiter data to be null, since we do not need it
	 * in our handler */
	waiter->data = NULL;
	/* set waiting time.  Widen before multiplying: '1000 * msecs' in 32-bit
	 * arithmetic wraps for msecs > ~71.6 minutes. */
	set_awaiter_rel(waiter, (uint64_t)msecs * 1000);
	/* attach the waiter to a chain to wait */
	set_alarm(tchain, waiter);
	return waiter;
}
void rcvr(int fd, int msglen, int interval, int nmsg) { int i, n, munged; uint16_t x; int64_t now; uint8_t *buf = malloc(BUFSIZE); struct icmphdr *icmp; Req *r; struct alarm_waiter waiter; init_awaiter(&waiter, alarm_abort_sysc); waiter.data = current_uthread; sum = 0; while(lostmsgs+rcvdmsgs < nmsg){ /* arm to wake ourselves if the read doesn't connect in time */ set_awaiter_rel(&waiter, 1000 * ((nmsg - lostmsgs - rcvdmsgs) * interval + waittime)); set_alarm(&waiter); n = read(fd, buf, BUFSIZE); /* cancel immediately, so future syscalls don't get aborted */ unset_alarm(&waiter); now = read_tsc(); if(n <= 0){ /* read interrupted - time to go */ /* Faking time being a minute in the future, so clean marks our * message as lost. Note this will also end up cancelling any other * pending replies that would have expired by then. Whatever. */ clean(0, now + MINUTETSC, NULL); continue; } if(n < msglen){ printf("bad len %d/%d\n", n, msglen); continue; } icmp = geticmp(buf); munged = 0; for(i = proto->iphdrsz + ICMP_HDRSIZE; i < msglen; i++) if(buf[i] != (uint8_t)i) munged++; if(munged) printf("corrupted reply\n"); x = nhgets(icmp->seq); if(icmp->type != proto->echoreply || icmp->code != 0) { printf("bad type/code/sequence %d/%d/%d (want %d/%d/%d)\n", icmp->type, icmp->code, x, proto->echoreply, 0, x); continue; } clean(x, now, buf); } spin_pdr_lock(&listlock); for(r = first; r; r = r->next) if(r->replied == 0) lostmsgs++; spin_pdr_unlock(&listlock); if(!quiet && lostmsgs) printf("%d out of %d messages lost\n", lostmsgs, lostmsgs+rcvdmsgs); }