void *kpage_zalloc_addr(void)
{
	void *retval = kpage_alloc_addr();

	if (retval)
		memset(retval, 0, PGSIZE);
	return retval;
}
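/* A minimal usage sketch, not part of this file: pcpu_alloc_zeroed_page() is a
 * hypothetical helper showing the intended calling pattern, mirroring how
 * trace_buf is allocated in smp_percpu_init() below.  kpage_zalloc_addr()
 * returns NULL when no pages are available, so callers that cannot tolerate
 * failure assert on the result. */
static void *pcpu_alloc_zeroed_page(void)
{
	void *page = kpage_zalloc_addr();

	/* On success the page comes back zero-filled (PGSIZE bytes). */
	assert(page);
	return page;
}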
/* Arch-independent per-cpu initialization.  This will call the arch-dependent
 * init first. */
void smp_percpu_init(void)
{
	uint32_t coreid = core_id();
	struct per_cpu_info *pcpui = &per_cpu_info[coreid];
	void *trace_buf;
	struct kthread *kthread;

	/* Don't initialize __ctx_depth here; it is already 1 (at least on x86),
	 * since this runs in irq context. */
	/* Do this first */
	__arch_pcpu_init(coreid);
	/* init our kthread (tracks our currently running context) */
	kthread = __kthread_zalloc();
	kthread->stacktop = get_stack_top();	/* assumes we're on the 1st page */
	pcpui->cur_kthread = kthread;
	/* Treat the startup threads as ktasks.  This will last until smp_idle
	 * when they clear it, either in anticipation of being a user-backing
	 * kthread or to handle an RKM. */
	kthread->flags = KTH_KTASK_FLAGS;
	per_cpu_info[coreid].spare = 0;
	/* Init relevant lists */
	spinlock_init_irqsave(&per_cpu_info[coreid].immed_amsg_lock);
	STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
	spinlock_init_irqsave(&per_cpu_info[coreid].routine_amsg_lock);
	STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
	/* Initialize the per-core timer chain */
	init_timer_chain(&per_cpu_info[coreid].tchain, set_pcpu_alarm_interrupt);
#ifdef CONFIG_KTHREAD_POISON
	*kstack_bottom_addr(kthread->stacktop) = 0xdeadbeef;
#endif /* CONFIG_KTHREAD_POISON */
	/* Init generic tracing ring */
	trace_buf = kpage_alloc_addr();
	assert(trace_buf);
	trace_ring_init(&pcpui->traces, trace_buf, PGSIZE,
	                sizeof(struct pcpu_trace_event));
	for (int i = 0; i < NR_CPU_STATES; i++)
		pcpui->state_ticks[i] = 0;
	pcpui->last_tick_cnt = read_tsc();
	/* Core 0 is in the KERNEL state, called from smp_boot.  The other cores
	 * are too, at least on x86, where we were called from asm (woken by
	 * POKE). */
	pcpui->cpu_state = CPU_STATE_KERNEL;
	/* Enable full lock debugging, after all pcpui work is done */
	pcpui->__lock_checking_enabled = 1;
}
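/* Hypothetical debugging aid, not part of this file: a sketch of how the
 * CONFIG_KTHREAD_POISON sentinel written in smp_percpu_init() could later be
 * checked to catch a kthread stack overflow.  kstack_bottom_addr(), core_id(),
 * and the 0xdeadbeef value are the same ones used above; the helper name and
 * the warning text are assumptions for illustration. */
#ifdef CONFIG_KTHREAD_POISON
static void assert_kstack_poison(struct kthread *kthread)
{
	/* If the running context ever dipped below the bottom of its stack
	 * page, the sentinel word will have been clobbered. */
	if (*kstack_bottom_addr(kthread->stacktop) != 0xdeadbeef)
		warn("Kthread stack poison clobbered on core %d", core_id());
}
#endif /* CONFIG_KTHREAD_POISON */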