Example #1
/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08lx\n",
			(long)CPUMASK_LOWMASK(smp_active_mask));
}
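
The function above relies on cpumask_t being a multi-word bitmap that is manipulated only through whole-mask macros. The following user-space sketch models the two accessors ap_finish() leans on, CPUMASK_NANDBIT and CPUMASK_CMPMASKNEQ, over a hypothetical 4-word mask; the demo_* names and macro bodies are illustrative assumptions, not the kernel's actual definitions from <machine/cpumask.h>.

/*
 * Minimal user-space model of the cpumask operations used in
 * ap_finish().  A hypothetical 4-word mask stands in for cpumask_t;
 * the real DragonFly macros differ in detail, this only mirrors
 * their intent.
 */
#include <stdint.h>
#include <stdio.h>

#define MASK_WORDS	4

typedef struct { uint64_t ary[MASK_WORDS]; } demo_cpumask_t;

/* clear one bit, like CPUMASK_NANDBIT() */
#define DEMO_NANDBIT(mask, bit) \
	((mask).ary[(bit) / 64] &= ~(1ULL << ((bit) % 64)))

/* non-zero if the masks differ, like CPUMASK_CMPMASKNEQ() */
static int
demo_cmpmaskneq(const demo_cpumask_t *a, const demo_cpumask_t *b)
{
	int i;

	for (i = 0; i < MASK_WORDS; ++i) {
		if (a->ary[i] != b->ary[i])
			return 1;
	}
	return 0;
}

int
main(void)
{
	demo_cpumask_t startup = { .ary = { 0xff, 0, 0, 0 } };	/* cpus 0-7 */
	demo_cpumask_t other = startup;

	/* build the 'other cpus' view for cpu 3, as ap_finish() does */
	DEMO_NANDBIT(other, 3);

	printf("differs: %d, low word: %016jx\n",
	       demo_cmpmaskneq(&other, &startup),
	       (uintmax_t)other.ary[0]);
	return 0;
}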
Example #2
static void
adj_perf(cpumask_t xcpu_used, cpumask_t xcpu_pwrdom_used)
{
	cpumask_t old_usched_used;
	int cpu, inc;

	/*
	 * Assign the cpus that require performance to the userland
	 * process scheduler.  Leave the rest of the cpus unmapped.
	 */
	old_usched_used = usched_cpu_used;
	usched_cpu_used = cpu_used;
	if (CPUMASK_TESTZERO(usched_cpu_used))
		CPUMASK_ORBIT(usched_cpu_used, 0);
	if (CPUMASK_CMPMASKNEQ(usched_cpu_used, old_usched_used))
		set_uschedcpus();

	/*
	 * Adjust per-cpu performance.
	 */
	CPUMASK_XORMASK(xcpu_used, cpu_used);
	while (CPUMASK_TESTNZERO(xcpu_used)) {
		cpu = BSFCPUMASK(xcpu_used);
		CPUMASK_NANDBIT(xcpu_used, cpu);

		if (CPUMASK_TESTBIT(cpu_used, cpu)) {
			/* Increase cpu performance */
			inc = 1;
		} else {
			/* Decrease cpu performance */
			inc = 0;
		}
		adj_cpu_perf(cpu, inc);
	}

	/*
	 * Adjust cpu power domain performance.  This could affect
	 * a set of cpus.
	 */
	CPUMASK_XORMASK(xcpu_pwrdom_used, cpu_pwrdom_used);
	while (CPUMASK_TESTNZERO(xcpu_pwrdom_used)) {
		int dom;

		dom = BSFCPUMASK(xcpu_pwrdom_used);
		CPUMASK_NANDBIT(xcpu_pwrdom_used, dom);

		if (CPUMASK_TESTBIT(cpu_pwrdom_used, dom)) {
			/* Increase cpu power domain performance */
			inc = 1;
		} else {
			/* Decrease cpu power domain performance */
			inc = 0;
		}
		adj_cpu_pwrdom(dom, inc);
	}
}
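
The core of adj_perf() is an xor-and-scan pattern: XOR the previous and current usage masks, then visit each changed bit with a find-first-set loop. The stand-alone sketch below restates that pattern with plain uint64_t in place of cpumask_t and __builtin_ctzll (a GCC/Clang builtin) in place of BSFCPUMASK; visit_changed_cpus() is a hypothetical name, not taken from the code above.

/*
 * Sketch of the xor-and-scan loop: every bit that differs between the
 * old and new usage masks is visited exactly once, and the new mask
 * decides whether that cpu's performance is raised or lowered.
 */
#include <stdint.h>
#include <stdio.h>

static void
visit_changed_cpus(uint64_t old_used, uint64_t new_used)
{
	uint64_t changed = old_used ^ new_used;

	while (changed != 0) {
		int cpu = __builtin_ctzll(changed);	/* lowest set bit */

		changed &= ~(1ULL << cpu);
		if (new_used & (1ULL << cpu))
			printf("cpu%d: raise performance\n", cpu);
		else
			printf("cpu%d: lower performance\n", cpu);
	}
}

int
main(void)
{
	/* cpus 0-3 were busy; now cpus 2-5 are busy */
	visit_changed_cpus(0x0f, 0x3c);
	return 0;
}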
Example #3
static
void
_checksigmask(pmap_inval_info_t *info, const char *file, int line)
{
    cpumask_t tmp;

    tmp = info->mask;
    CPUMASK_ANDMASK(tmp, info->sigmask);
    if (CPUMASK_CMPMASKNEQ(tmp, info->mask)) {
        kprintf("\"%s\" line %d: bad sig/mask %08jx %08jx\n",
                file, line, info->sigmask.ary[0], info->mask.ary[0]);
    }
}
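
Stripped of the cpumask_t machinery, _checksigmask() performs a subset test: AND the mask with sigmask, and the result only equals the original mask if every bit set in mask is also set in sigmask. The sketch below restates that check on single 64-bit words; mask_is_subset() is an illustrative helper, not a kernel function.

#include <assert.h>
#include <stdint.h>

/* non-zero if every set bit of mask is also set in superset */
static int
mask_is_subset(uint64_t mask, uint64_t superset)
{
	return (mask & superset) == mask;
}

int
main(void)
{
	assert(mask_is_subset(0x05, 0x0f));	/* cpus 0,2 within cpus 0-3 */
	assert(!mask_is_subset(0x15, 0x0f));	/* cpu 4 missing: bad sig/mask */
	return 0;
}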
/*
 *  kdb_trap - field a TRACE or BPT trap
 */
int
kdb_trap(int type, int code, struct x86_64_saved_state *regs)
{
	volatile int ddb_mode = !(boothowto & RB_GDB);

	/*
	 * XXX try to do nothing if the console is in graphics mode.
	 * Handle trace traps (and hardware breakpoints...) by ignoring
	 * them except for forgetting about them.  Return 0 for other
	 * traps to say that we haven't done anything.  The trap handler
	 * will usually panic.  We should handle breakpoint traps for
	 * our breakpoints by disarming our breakpoints and fixing up
	 * %rip.
	 */
	if (cons_unavail && ddb_mode) {
	    if (type == T_TRCTRAP) {
		regs->tf_rflags &= ~PSL_T;
		return (1);
	    }
	    return (0);
	}

	switch (type) {
	    case T_BPTFLT:	/* breakpoint */
	    case T_TRCTRAP:	/* debug exception */
		break;

	    default:
		/*
		 * XXX this is almost useless now.  In most cases,
		 * trap_fatal() has already printed a much more verbose
		 * message.  However, it is dangerous to print things in
		 * trap_fatal() - kprintf() might be reentered and trap.
		 * The debugger should be given control first.
		 */
		if (ddb_mode)
		    db_printf("kernel: type %d trap, code=%x\n", type, code);

		if (db_nofault) {
		    jmp_buf *no_fault = db_nofault;
		    db_nofault = NULL;
		    longjmp(*no_fault, 1);
		}
	}

	/*
	 * This handles unexpected traps in ddb commands, including calls to
	 * non-ddb functions.  db_nofault only applies to memory accesses by
	 * internal ddb commands.
	 */
	if (db_global_jmpbuf_valid)
	    longjmp(db_global_jmpbuf, 1);

	/*
	 * XXX We really should switch to a local stack here.
	 */
	ddb_regs = *regs;

	crit_enter();
	db_printf("\nCPU%d stopping CPUs: 0x%016jx\n",
	    mycpu->gd_cpuid, (uintmax_t)CPUMASK_LOWMASK(mycpu->gd_other_cpus));

	/* We stop all CPUs except ourselves (obviously) */
	stop_cpus(mycpu->gd_other_cpus);

	db_printf(" stopped\n");

	setjmp(db_global_jmpbuf);
	db_global_jmpbuf_valid = TRUE;
	db_active++;
	vcons_set_mode(1);
	if (ddb_mode) {
	    cndbctl(TRUE);
	    db_trap(type, code);
	    cndbctl(FALSE);
	} else
	    gdb_handle_exception(&ddb_regs, type, code);
	db_active--;
	vcons_set_mode(0);
	db_global_jmpbuf_valid = FALSE;

	db_printf("\nCPU%d restarting CPUs: 0x%016jx\n",
	    mycpu->gd_cpuid, (uintmax_t)CPUMASK_LOWMASK(stopped_cpus));

	/* Restart all the CPUs we previously stopped */
	if (CPUMASK_CMPMASKNEQ(stopped_cpus, mycpu->gd_other_cpus)) {
		db_printf("whoa, other_cpus: 0x%016jx, "
			  "stopped_cpus: 0x%016jx\n",
			  (uintmax_t)CPUMASK_LOWMASK(mycpu->gd_other_cpus),
			  (uintmax_t)CPUMASK_LOWMASK(stopped_cpus));
		panic("stop_cpus() failed");
	}
	restart_cpus(stopped_cpus);

	db_printf(" restarted\n");
	crit_exit();

	regs->tf_rip    = ddb_regs.tf_rip;
	regs->tf_rflags = ddb_regs.tf_rflags;
	regs->tf_rax    = ddb_regs.tf_rax;
	regs->tf_rcx    = ddb_regs.tf_rcx;
	regs->tf_rdx    = ddb_regs.tf_rdx;
	regs->tf_rbx    = ddb_regs.tf_rbx;

	regs->tf_rsp    = ddb_regs.tf_rsp;
	regs->tf_ss     = ddb_regs.tf_ss & 0xffff;

	regs->tf_rbp    = ddb_regs.tf_rbp;
	regs->tf_rsi    = ddb_regs.tf_rsi;
	regs->tf_rdi    = ddb_regs.tf_rdi;

	regs->tf_r8     = ddb_regs.tf_r8;
	regs->tf_r9     = ddb_regs.tf_r9;
	regs->tf_r10    = ddb_regs.tf_r10;
	regs->tf_r11    = ddb_regs.tf_r11;
	regs->tf_r12    = ddb_regs.tf_r12;
	regs->tf_r13    = ddb_regs.tf_r13;
	regs->tf_r14    = ddb_regs.tf_r14;
	regs->tf_r15    = ddb_regs.tf_r15;

	/* regs->tf_es     = ddb_regs.tf_es & 0xffff; */
	/* regs->tf_fs     = ddb_regs.tf_fs & 0xffff; */
	/* regs->tf_gs     = ddb_regs.tf_gs & 0xffff; */
	regs->tf_cs     = ddb_regs.tf_cs & 0xffff;
	/* regs->tf_ds     = ddb_regs.tf_ds & 0xffff; */
	return (1);
}
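
kdb_trap() recovers from faults taken inside debugger commands by arming a jmp_buf (db_global_jmpbuf) before running the command and longjmp()ing back to it when a later trap re-enters the function. The user-space sketch below models only that control-flow pattern; the trap is faked with an explicit call, and all demo_* names are hypothetical.

/*
 * Sketch of setjmp()-based recovery: the "debugger" arms a jump
 * buffer, runs a command, and a simulated fault jumps back to the
 * point where the buffer was armed instead of aborting.
 */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf demo_global_jmpbuf;
static int demo_jmpbuf_valid;

static void
demo_fault(void)
{
	/* stands in for the re-entry into kdb_trap() on a bad access */
	if (demo_jmpbuf_valid)
		longjmp(demo_global_jmpbuf, 1);
}

static void
demo_run_command(void)
{
	printf("running debugger command\n");
	demo_fault();			/* command touches bad memory */
	printf("never reached\n");
}

int
main(void)
{
	if (setjmp(demo_global_jmpbuf) == 0) {
		demo_jmpbuf_valid = 1;
		demo_run_command();
	} else {
		printf("recovered from fault, back at the debugger prompt\n");
	}
	demo_jmpbuf_valid = 0;
	return 0;
}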
Example #5
/*
 * Called with a critical section held and interrupts enabled.
 */
int
pmap_inval_intr(cpumask_t *cpumaskp, int toolong)
{
    globaldata_t gd = mycpu;
    pmap_inval_info_t *info;
    int loopme = 0;
    int cpu;
    cpumask_t cpumask;

    /*
     * Check all cpus for invalidations we may need to service.
     */
    cpu_ccfence();
    cpu = gd->gd_cpuid;
    cpumask = *cpumaskp;

    while (CPUMASK_TESTNZERO(cpumask)) {
        int n = BSFCPUMASK(cpumask);

#ifdef LOOPRECOVER
        KKASSERT(n >= 0 && n < MAXCPU);
#endif

        CPUMASK_NANDBIT(cpumask, n);
        info = &invinfo[n];

        /*
         * Due to interrupts/races we can catch a new operation
         * in an older interrupt.  A fence is needed once we detect
         * the (not) done bit.
         */
        if (!CPUMASK_TESTBIT(info->done, cpu))
            continue;
        cpu_lfence();
#ifdef LOOPRECOVER
        if (toolong) {
            kprintf("pminvl %d->%d %08jx %08jx mode=%d\n",
                    cpu, n, info->done.ary[0], info->mask.ary[0],
                    info->mode);
        }
#endif

        /*
         * info->mask and info->done always contain the originating
         * cpu until the originator is done.  Targets may still be
         * present in info->done after the originator is done (they
         * will be finishing up their loops).
         *
         * Clear info->mask bits on other cpus to indicate that they
         * have quiesced (entered the loop).  Once the other mask bits
         * are clear we can execute the operation on the original,
         * then clear the mask and done bits on the originator.  The
         * targets will then finish up their side and clear their
         * done bits.
         *
         * The command is considered 100% done when all done bits have
         * been cleared.
         */
        if (n != cpu) {
            /*
             * Command state machine for 'other' cpus.
             */
            if (CPUMASK_TESTBIT(info->mask, cpu)) {
                /*
                 * The other cpu indicates to the originator that
                 * it has quiesced.
                 */
                ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
                loopme = 1;
            } else if (info->ptep &&
                       CPUMASK_TESTBIT(info->mask, n)) {
                /*
                 * Other cpu must wait for the originator (n)
                 * to complete its command if ptep is not NULL.
                 */
                loopme = 1;
            } else {
                /*
                 * Other cpu detects that the originator has
                 * completed its command, or there was no
                 * command.
                 *
                 * Now that the page table entry has changed,
                 * we can follow up with our own invalidation.
                 */
                vm_offset_t va = info->va;
                int npgs;

                if (va == (vm_offset_t)-1 ||
                        info->npgs > MAX_INVAL_PAGES) {
                    cpu_invltlb();
                } else {
                    for (npgs = info->npgs; npgs; --npgs) {
                        cpu_invlpg((void *)va);
                        va += PAGE_SIZE;
                    }
                }
                ATOMIC_CPUMASK_NANDBIT(info->done, cpu);
                /* info invalid now */
                /* loopme left alone */
            }
        } else if (CPUMASK_TESTBIT(info->mask, cpu)) {
            /*
             * Originator is waiting for other cpus
             */
            if (CPUMASK_CMPMASKNEQ(info->mask, gd->gd_cpumask)) {
                /*
                 * Originator waits for other cpus to enter
                 * their loop (aka quiesce).
                 *
                 * If this bugs out the IPI may have been lost,
                 * try to reissue by resetting our own
                 * reentrancy bit and clearing the smurf mask
                 * for the cpus that did not respond, then
                 * reissuing the IPI.
                 */
                loopme = 1;
#ifdef LOOPRECOVER
                if (loopwdog(info)) {
                    info->failed = 1;
                    loopdebug("C", info);
                    /* XXX recover from possible bug */
                    mdcpu->gd_xinvaltlb = 0;
                    ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
                                            info->mask);
                    cpu_disable_intr();
                    smp_invlpg(&smp_active_mask);

                    /*
                     * Force outer-loop retest of Xinvltlb
                     * requests (see mp_machdep.c).
                     */
                    mdcpu->gd_xinvaltlb = 2;
                    cpu_enable_intr();
                }
#endif
            } else {
                /*
                 * Originator executes operation and clears
                 * mask to allow other cpus to finish.
                 */
                KKASSERT(info->mode != INVDONE);
                if (info->mode == INVSTORE) {
                    if (info->ptep)
                        info->opte = atomic_swap_long(info->ptep, info->npte);
                    CHECKSIGMASK(info);
                    ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
                    CHECKSIGMASK(info);
                } else {
                    if (atomic_cmpset_long(info->ptep,
                                           info->opte, info->npte)) {
                        info->success = 1;
                    } else {
                        info->success = 0;
                    }
                    CHECKSIGMASK(info);
                    ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
                    CHECKSIGMASK(info);
                }
                loopme = 1;
            }
        } else {
            /*
             * Originator does not have to wait for the other
             * cpus to finish.  It clears its done bit.  A new
             * command will not be initiated by the originator
             * until the other cpus have cleared their done bits
             * (asynchronously).
             */
            vm_offset_t va = info->va;
            int npgs;

            if (va == (vm_offset_t)-1 ||
                    info->npgs > MAX_INVAL_PAGES) {
                cpu_invltlb();
            } else {
                for (npgs = info->npgs; npgs; --npgs) {
                    cpu_invlpg((void *)va);
                    va += PAGE_SIZE;
                }
            }

            /* leave loopme alone */
            /* other cpus may still be finishing up */
            /* can't race originator since that's us */
            info->mode = INVDONE;
            ATOMIC_CPUMASK_NANDBIT(info->done, cpu);
        }
    }
    return loopme;
}
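
The long comment in the middle of the loop describes a handshake between the originating cpu and its targets: both sides start with their bits set in info->mask and info->done, the targets clear their mask bits to signal that they have quiesced, the originator then executes the operation and clears its own mask and done bits, and the targets finish their local invalidations and clear their done bits. The single-threaded sketch below replays that bookkeeping on plain 64-bit masks; it drops the atomics, fences and IPIs entirely, so it models the protocol's accounting, not its concurrency.

/*
 * Single-threaded walk-through of the mask/done handshake.  Each
 * step mirrors one branch of the state machine in pmap_inval_intr().
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(c)	(1ULL << (c))

int
main(void)
{
	int originator = 0;
	uint64_t participants = BIT(0) | BIT(1) | BIT(2);
	uint64_t mask = participants;	/* cpus that still owe a step */
	uint64_t done = participants;	/* cpus still inside the protocol */

	/* targets quiesce: each clears its own mask bit */
	mask &= ~BIT(1);
	mask &= ~BIT(2);

	/* originator sees only itself left in mask and runs the operation */
	if (mask == BIT(originator)) {
		printf("originator executes the pte update / invalidation\n");
		mask &= ~BIT(originator);
		done &= ~BIT(originator);
	}

	/* targets invalidate locally and clear their done bits */
	done &= ~BIT(1);
	done &= ~BIT(2);

	printf("command 100%% done: %s\n", done == 0 ? "yes" : "no");
	return 0;
}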