Example #1
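From the FreeBSD sparc64 SMP code: ap_start() starts one application processor through the firmware, synchronizes its tick counter(s) with the boot processor, and then uses kmem_malloc() to allocate both the AP's per-CPU pages (placing the struct pcpu at the top of the region) and its dynamic per-CPU (dpcpu) data.
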
static void
ap_start(phandle_t node, u_int mid, u_int cpu_impl)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	register_t s;
	vm_offset_t va;
	u_int cpuid;
	uint32_t clock;

	if (cpuids > mp_maxid)
		return;

	if (OF_getprop(node, "clock-frequency", &clock, sizeof(clock)) <= 0)
		panic("%s: couldn't determine CPU frequency", __func__);
	if (clock != PCPU_GET(clock))
		tick_et_use_stick = 1;

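	/* Start the AP via the firmware and have it enter the MP trampoline. */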
	csa = &cpu_start_args;
	csa->csa_state = 0;
	sun4u_startcpu(node, (void *)mp_tramp, 0);
	s = intr_disable();
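	/* The BSP spins while the AP steps through the trampoline sync points. */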
	while (csa->csa_state != CPU_TICKSYNC)
		;
	membar(StoreLoad);
	csa->csa_tick = rd(tick);
	if (cpu_impl == CPU_IMPL_SPARC64V ||
	    cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
		while (csa->csa_state != CPU_STICKSYNC)
			;
		membar(StoreLoad);
		csa->csa_stick = rdstick();
	}
	while (csa->csa_state != CPU_INIT)
		;
	csa->csa_tick = csa->csa_stick = 0;
	intr_restore(s);

	cpuid = cpuids++;
	cpuid_to_mid[cpuid] = mid;
	cpu_identify(csa->csa_ver, clock, cpuid);

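	/*
	 * Allocate the per-CPU pages, placing the struct pcpu at the top
	 * of the region, and a separate dpcpu area.
	 */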
	va = kmem_malloc(kernel_arena, PCPU_PAGES * PAGE_SIZE,
	    M_WAITOK | M_ZERO);
	pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, cpuid, sizeof(*pc));
	dpcpu_init((void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
	    M_WAITOK | M_ZERO), cpuid);
	pc->pc_addr = va;
	pc->pc_clock = clock;
	pc->pc_impl = cpu_impl;
	pc->pc_mid = mid;
	pc->pc_node = node;

	cache_init(pc);

	CPU_SET(cpuid, &all_cpus);
	intr_add_cpu(cpuid);
}
Example #2
File: pv.c Project: fengsi/freebsd
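From the FreeBSD Xen PV port: start_xen_ap() allocates the AP's boot, double-fault, and NMI stacks plus its dpcpu region with kmem_malloc(), builds an initial vCPU context, and asks the hypervisor to bring the vCPU up. Since malloc(9) with M_WAITOK cannot return NULL, the NULL check after it is purely defensive.
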
static bool
start_xen_ap(int cpu)
{
	struct vcpu_guest_context *ctxt;
	int ms, cpus = mp_naps;
	const size_t stacksize = KSTACK_PAGES * PAGE_SIZE;

	/* allocate and set up an idle stack data page */
	bootstacks[cpu] =
	    (void *)kmem_malloc(kernel_arena, stacksize, M_WAITOK | M_ZERO);
	doublefault_stack =
	    (char *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
	nmi_stack =
	    (char *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
	dpcpu =
	    (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO);

	bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
	bootAP = cpu;

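	/* Describe the initial register state the hypervisor loads into the vCPU. */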
	ctxt = malloc(sizeof(*ctxt), M_TEMP, M_WAITOK | M_ZERO);
	if (ctxt == NULL)
		panic("unable to allocate memory");

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.rip = (unsigned long) init_secondary;
	ctxt->user_regs.rsp = (unsigned long) bootSTK;

	/* Set the AP to use the same page tables */
	ctxt->ctrlreg[3] = KPML4phys;

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		panic("unable to initialize AP#%d", cpu);

	free(ctxt, M_TEMP);

	/* Launch the vCPU */
	if (HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
		panic("unable to start AP#%d", cpu);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return (true);
		DELAY(1000);
	}

	return (false);
}
Example #3
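From an ARM port's cpu_mp_start(): the dpcpu region for each application processor is reserved up front with kmem_malloc(), the data cache is written back so the APs see the boot data, and the platform code then releases the APs.
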
/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
	int error, i;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* Reserve memory for application processors */
	for (i = 0; i < (mp_ncpus - 1); i++)
		dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
		    M_WAITOK | M_ZERO);

	dcache_wbinv_poc_all();

	/* Initialize boot code and start up processors */
	platform_mp_start_ap();

	/* Check whether the APs started properly */
	error = check_ap();
	if (error)
		printf("WARNING: Some AP's failed to start\n");
	else
		for (i = 1; i < mp_ncpus; i++)
			CPU_SET(i, &all_cpus);
}
Example #4
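From an older version of FreeBSD's MEMGUARD debugging allocator: requests are rounded up to whole pages and backed by kmem_malloc() from a dedicated memguard_map; once that map is exhausted, previously freed pages are recycled from a FIFO so they stay guarded for as long as possible.
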
/*
 * Allocate a single object of specified size with specified flags (either
 * M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long size, int flags)
{
	void *obj;
	struct memguard_entry *e = NULL;
	int numpgs;

	numpgs = size / PAGE_SIZE;
	if ((size % PAGE_SIZE) != 0)
		numpgs++;
	if (numpgs > MAX_PAGES_PER_ITEM)
		panic("MEMGUARD: You must increase MAX_PAGES_PER_ITEM " \
		    "in memguard.c (requested: %d pages)", numpgs);
	if (numpgs == 0)
		return NULL;

	/*
	 * If we haven't exhausted the memguard_map yet, allocate from
	 * it and grab a new page, even if we have recycled pages in our
	 * FIFO.  This is because we wish to allow recycled pages to live
	 * guarded in the FIFO for as long as possible in order to catch
	 * even very late tamper-after-frees, even though this wastes more
	 * memory; this is only a DEBUGGING allocator, after all.
	 */
	MEMGUARD_CRIT_SECTION_ENTER;
	if (memguard_mapused >= memguard_mapsize) {
		e = STAILQ_FIRST(&memguard_fifo_pool[numpgs - 1]);
		if (e != NULL) {
			STAILQ_REMOVE(&memguard_fifo_pool[numpgs - 1], e,
			    memguard_entry, entries);
			MEMGUARD_CRIT_SECTION_EXIT;
			obj = e->ptr;
			free(e, M_TEMP);
			memguard_unguard(obj, numpgs);
			if (flags & M_ZERO)
				bzero(obj, PAGE_SIZE * numpgs);
			return obj;
		}
		MEMGUARD_CRIT_SECTION_EXIT;
		if (flags & M_WAITOK)
			panic("MEMGUARD: Failed with M_WAITOK: " \
			    "memguard_map too small");
		return NULL;
	}
	memguard_mapused += (PAGE_SIZE * numpgs);
	MEMGUARD_CRIT_SECTION_EXIT;

	obj = (void *)kmem_malloc(memguard_map, PAGE_SIZE * numpgs, flags);
	if (obj != NULL) {
		vsetmgfifo((vm_offset_t)obj, &memguard_fifo_pool[numpgs - 1]);
		if (flags & M_ZERO)
			bzero(obj, PAGE_SIZE * numpgs);
	} else {
		MEMGUARD_CRIT_SECTION_ENTER;
		memguard_mapused -= (PAGE_SIZE * numpgs);
		MEMGUARD_CRIT_SECTION_EXIT;
	}
	return obj;
}
Example #5
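From the ARM UMA small-allocation backend: uma_small_alloc() first tries a pre-mapped page pool, falls back to kmem_malloc() for the L2 page-table zone when its pool is empty, and otherwise pulls wired pages straight from vm_page_alloc().
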
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *ret;
	struct arm_small_page *sp;
	TAILQ_HEAD(,arm_small_page) *head;
	vm_page_t m;

	*flags = UMA_SLAB_PRIV;
	/*
	 * For CPUs where we set up page tables as write-back, there's no
	 * need to maintain two separate pools.
	 */
	if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
		head = (void *)&pages_wt;
	else
		head = (void *)&pages_normal;

	mtx_lock(&smallalloc_mtx);
	sp = TAILQ_FIRST(head);

	if (!sp) {
		int pflags;

		mtx_unlock(&smallalloc_mtx);
		if (zone == l2zone &&
		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
			*flags = UMA_SLAB_KMEM;
			ret = ((void *)kmem_malloc(kmem_arena, bytes,
			    M_NOWAIT));
			return (ret);
		}
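		/* No pooled page available; take a wired page from the VM directly. */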
		pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
		for (;;) {
			m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
			if (m == NULL) {
				if (wait & M_NOWAIT)
					return (NULL);
				VM_WAIT;
			} else
				break;
		}
		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
	mtx_unlock(&smallalloc_mtx);
	if ((wait & M_ZERO))
		bzero(ret, bytes);
	return (ret);
}
Example #6
File: os.c Project: nf-mlo/open-vm-tools
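From open-vm-tools' FreeBSD guest backend: os_pmap_alloc() sizes a one-bit-per-page bitmap and allocates it with kmem_alloc() on releases before FreeBSD 10.0 and with kmem_malloc() afterwards, reflecting the KPI change that replaced kernel_map with kernel_arena.
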
static void
os_pmap_alloc(os_pmap *p) // IN
{
   /* one bit per page, rounded up to whole bytes */
   p->size = (cnt.v_page_count + 7) / 8;

   /*
    * expand to nearest word boundary
    * XXX: bitmap can be greater than total number of pages in system
    */
   p->size = (p->size + sizeof(unsigned long) - 1) &
                         ~(sizeof(unsigned long) - 1);

#if __FreeBSD_version < 1000000
   p->bitmap = (unsigned long *)kmem_alloc(kernel_map, p->size);
#else
   p->bitmap = (unsigned long *)kmem_malloc(kernel_arena, p->size, M_WAITOK | M_ZERO);
#endif
}
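
The pattern shared by all six examples reduces to a short sketch. The following is a minimal illustration, assuming the FreeBSD 10/11-era KPI used in most of the examples above, where kmem_malloc() takes an arena and returns an address and kmem_free() releases it; the function names here are hypothetical, not taken from any of the files above.

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

/*
 * Minimal sketch (not from the examples above): allocate one zeroed,
 * wired page of kernel memory and release it again.  With M_WAITOK the
 * call sleeps until memory is available and cannot return 0; with
 * M_NOWAIT it may fail and return 0.
 */
static void *
alloc_scratch_page(void)
{
	vm_offset_t va;

	va = kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
	return ((void *)va);
}

static void
free_scratch_page(void *p)
{

	kmem_free(kernel_arena, (vm_offset_t)p, PAGE_SIZE);
}

Note that this KPI has changed over time: releases before 10.0 passed a vm_map (as in example #6's kmem_alloc(kernel_map, ...) branch), and FreeBSD 12 later dropped the explicit arena argument, so consult kmem(9) for the exact signature of the release you target.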