void schedule_init(void) { spin_lock(&sched_lock); /* init provisioning stuff */ all_pcores = kmalloc(sizeof(struct sched_pcore) * num_cores, 0); memset(all_pcores, 0, sizeof(struct sched_pcore) * num_cores); assert(!core_id()); /* want the alarm on core0 for now */ init_awaiter(&ksched_waiter, __ksched_tick); set_ksched_alarm(); /* init the idlecore list. if they turned off hyperthreading, give them the * odds from 1..max-1. otherwise, give them everything by 0 (default mgmt * core). TODO: (CG/LL) better LL/CG mgmt */ #ifndef CONFIG_DISABLE_SMT for (int i = 1; i < num_cores; i++) TAILQ_INSERT_TAIL(&idlecores, pcoreid2spc(i), alloc_next); #else assert(!(num_cores % 2)); for (int i = 1; i < num_cores; i += 2) TAILQ_INSERT_TAIL(&idlecores, pcoreid2spc(i), alloc_next); #endif /* CONFIG_DISABLE_SMT */ spin_unlock(&sched_lock); #ifdef CONFIG_ARSC_SERVER int arsc_coreid = get_any_idle_core(); assert(arsc_coreid >= 0); send_kernel_message(arsc_coreid, arsc_server, 0, 0, 0, KMSG_ROUTINE); printk("Using core %d for the ARSC server\n", arsc_coreid); #endif /* CONFIG_ARSC_SERVER */ }
void schedule_init(void) { spin_lock(&sched_lock); /* init provisioning stuff */ all_pcores = kmalloc(sizeof(struct sched_pcore) * num_cpus, 0); memset(all_pcores, 0, sizeof(struct sched_pcore) * num_cpus); assert(!core_id()); /* want the alarm on core0 for now */ init_awaiter(&ksched_waiter, __kalarm); set_ksched_alarm(); /* init the idlecore list. if they turned off hyperthreading, give them the * odds from 1..max-1. otherwise, give them everything by 0 (default mgmt * core). TODO: (CG/LL) better LL/CG mgmt */ #ifndef CONFIG_DISABLE_SMT for (int i = 1; i < num_cpus; i++) TAILQ_INSERT_TAIL(&idlecores, pcoreid2spc(i), alloc_next); #else assert(!(num_cpus % 2)); for (int i = 1; i < num_cpus; i += 2) TAILQ_INSERT_TAIL(&idlecores, pcoreid2spc(i), alloc_next); #endif /* CONFIG_DISABLE_SMT */ #ifdef CONFIG_ARSC_SERVER struct sched_pcore *a_core = TAILQ_FIRST(&idlecores); assert(a_core); TAILQ_REMOVE(&idlecores, a_core, alloc_next); send_kernel_message(spc2pcoreid(a_core), arsc_server, 0, 0, 0, KMSG_ROUTINE); warn("Using core %d for the ARSCs - there are probably issues with this.", spc2pcoreid(a_core)); #endif /* CONFIG_ARSC_SERVER */ spin_unlock(&sched_lock); return; }
/* Routes a console keypress: ctrl-G drops into the kernel monitor, anything
 * else is appended to the console buffer.  The work is punted to a routine
 * kernel message on this core so it runs outside interrupt context.
 *
 * Fix: the monitor check compared against the literal 'G' instead of the
 * control character ctrl-G (0x07), so typing a capital G stole the key from
 * the console and ^G did nothing. */
static void handle_keypress(char c)
{
	/* map a capital letter to its control-key code, e.g. 'G' -> 0x07 */
#define capchar2ctl(x) ((x) - '@')
	amr_t handler = c == capchar2ctl('G') ? __run_mon : __cons_add_char;

	send_kernel_message(core_id(), handler, (long)&cons_buf, (long)c, 0,
	                    KMSG_ROUTINE);
	cons_init();
}
static void handle_keypress(char c) { /* brho: not sure if this will work on riscv or not... */ #define capchar2ctl(x) ((x) - '@') amr_t handler = c == capchar2ctl('G') ? __run_mon : __cons_add_char; send_kernel_message(core_id(), handler, (long)&cons_buf, (long)c, 0, KMSG_ROUTINE); cons_init(); }
/* Kills the machine: spins until a __diediedie immediate message has been
 * delivered to every other core, then runs __diediedie locally as well. */
void appserver_die(uintptr_t code)
{
	for (int cpu = 0; cpu < num_cpus; cpu++) {
		if (cpu == core_id())
			continue;
		/* retry until the message queue accepts it */
		while (send_kernel_message(cpu, (amr_t)&__diediedie, code, 0, 0,
		                           KMSG_IMMEDIATE))
			;
	}
	// just in case.
	__diediedie(0, code, 0, 0);
}
/* TX completion IRQ handler for an mlx4 CQ.  On Linux this would schedule
 * NAPI from hard-irq context; the Akaros port instead punts the TX CQ poll
 * to a routine kernel message on the current core.
 * NOTE(review): the `else` branch appears truncated in this view of the
 * file — its body presumably continues on the following lines; confirm
 * against the full source before editing. */
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
#if 0 // AKAROS_PORT
		napi_schedule_irqoff(&cq->napi);
#else
		send_kernel_message(core_id(), mlx4_en_poll_tx_cq, (long)cq, 0,
		                    0, KMSG_ROUTINE);
#endif
	else
/* Runs `handler` (via smp_call_wrapper) on every core, including this one.
 * If `wait_wrapper` is non-NULL, a wrapper is allocated for the caller to
 * wait on; every core's wait_list slot is marked before any message is sent.
 * Returns 0 on success or -ENOMEM if the wrapper allocation fails. */
int smp_call_function_all(isr_t handler, void* data,
                          handler_wrapper_t** wait_wrapper)
{
	int8_t irq_state = 0;
	handler_wrapper_t* wrapper = 0;
	int cpu;

	if (wait_wrapper) {
		wrapper = *wait_wrapper = smp_make_wrapper();
		if (!wrapper)
			return -ENOMEM;
		for (cpu = 0; cpu < num_cores(); cpu++)
			wrapper->wait_list[cpu] = 1;
	}
	enable_irqsave(&irq_state);
	/* fire at every remote core first... */
	for (cpu = 0; cpu < num_cores(); cpu++) {
		if (cpu == core_id())
			continue;
		send_kernel_message(cpu, (amr_t)smp_call_wrapper, handler,
		                    wrapper, data, KMSG_IMMEDIATE);
	}
	/* ...then at ourselves */
	send_kernel_message(core_id(), (amr_t)smp_call_wrapper, handler,
	                    wrapper, data, KMSG_IMMEDIATE);
	cpu_relax();	// wait to get the interrupt
	disable_irqsave(&irq_state);
	return 0;
}
/* Runs `func(opaque)` on every core in `cset`: inline on the calling core,
 * via routine kernel message on the others.  Blocks until all remote cores
 * have completed their work. */
void smp_do_in_cores(const struct core_set *cset, void (*func)(void *),
                     void *opaque)
{
	int self = core_id();
	struct all_cpu_work acw;

	memset(&acw, 0, sizeof(acw));
	/* completion counts only the remote cores; our own call is inline */
	completion_init(&acw.comp, core_set_remote_count(cset));
	acw.func = func;
	acw.opaque = opaque;
	for (int i = 0; i < num_cores; i++) {
		if (!core_set_getcpu(cset, i))
			continue;
		if (i == self) {
			func(opaque);
			continue;
		}
		send_kernel_message(i, smp_do_core_work, (long)&acw, 0, 0,
		                    KMSG_ROUTINE);
	}
	completion_wait(&acw.comp);
}
/* Runs `handler` (via smp_call_wrapper) on core `dest` only.  If
 * `wait_wrapper` is non-NULL, allocates a wrapper the caller can wait on and
 * marks dest's wait_list slot before sending.  Returns 0 on success or
 * -ENOMEM if the wrapper allocation fails. */
int smp_call_function_single(uint32_t dest, isr_t handler, void* data,
                             handler_wrapper_t** wait_wrapper)
{
	int8_t irq_state = 0;
	handler_wrapper_t* wrapper = 0;

	if (wait_wrapper) {
		wrapper = *wait_wrapper = smp_make_wrapper();
		if (!wrapper)
			return -ENOMEM;
		wrapper->wait_list[dest] = 1;
	}
	enable_irqsave(&irq_state);
	send_kernel_message(dest, (amr_t)smp_call_wrapper, handler, wrapper,
	                    data, KMSG_IMMEDIATE);
	cpu_relax();	// wait to get the interrupt, if it's to this core
	disable_irqsave(&irq_state);
	return 0;
}
/* Interrupt/alarm handler: tells our core to run the scheduler (out of
 * interrupt context).  `waiter` is the ksched alarm waiter; it is unused
 * here beyond satisfying the alarm-handler signature. */
static void __kalarm(struct alarm_waiter *waiter)
{
	/* Not necessary when alarms are running in RKM context (check
	 * timer_interrupt()) — in that case __ksched_tick could be called
	 * directly instead of bouncing through a kernel message. */
	send_kernel_message(core_id(), __ksched_tick, 0, 0, 0, KMSG_ROUTINE);
}
/* Alarm callback: punts the scheduler tick to a routine kernel message on
 * the local core, so the ksched runs outside interrupt context. */
static void __kalarm(struct alarm_waiter *waiter)
{
	uint32_t pcoreid = core_id();

	send_kernel_message(pcoreid, __ksched_tick, 0, 0, 0, KMSG_ROUTINE);
}