Example #1
static void
adj_perf(cpumask_t xcpu_used, cpumask_t xcpu_pwrdom_used)
{
	cpumask_t old_usched_used;
	int cpu, inc;

	/*
	 * Set cpus requiring performance to the userland process
	 * scheduler.  Leave the rest of the cpus unmapped.
	 */
	old_usched_used = usched_cpu_used;
	usched_cpu_used = cpu_used;
	if (CPUMASK_TESTZERO(usched_cpu_used))
		CPUMASK_ORBIT(usched_cpu_used, 0);
	if (CPUMASK_CMPMASKNEQ(usched_cpu_used, old_usched_used))
		set_uschedcpus();

	/*
	 * Adjust per-cpu performance.
	 */
	CPUMASK_XORMASK(xcpu_used, cpu_used);
	while (CPUMASK_TESTNZERO(xcpu_used)) {
		cpu = BSFCPUMASK(xcpu_used);
		CPUMASK_NANDBIT(xcpu_used, cpu);

		if (CPUMASK_TESTBIT(cpu_used, cpu)) {
			/* Increase cpu performance */
			inc = 1;
		} else {
			/* Decrease cpu performance */
			inc = 0;
		}
		adj_cpu_perf(cpu, inc);
	}

	/*
	 * Adjust cpu power domain performance.  This could affect
	 * a set of cpus.
	 */
	CPUMASK_XORMASK(xcpu_pwrdom_used, cpu_pwrdom_used);
	while (CPUMASK_TESTNZERO(xcpu_pwrdom_used)) {
		int dom;

		dom = BSFCPUMASK(xcpu_pwrdom_used);
		CPUMASK_NANDBIT(xcpu_pwrdom_used, dom);

		if (CPUMASK_TESTBIT(cpu_pwrdom_used, dom)) {
			/* Increase cpu power domain performance */
			inc = 1;
		} else {
			/* Decrease cpu power domain performance */
			inc = 0;
		}
		adj_cpu_pwrdom(dom, inc);
	}
}
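
Both loops above use the same cpumask idiom: XOR the new mask against the old one to get the cpus whose state changed, then repeatedly extract the lowest set bit with BSFCPUMASK() and clear it with CPUMASK_NANDBIT() until the mask is empty. Below is a minimal userland sketch of that walk; it assumes a single 64-bit word in place of the kernel's cpumask_t and uses the gcc/clang __builtin_ctzll() where the kernel uses BSFCPUMASK() (the function and variable names are illustrative, not kernel interfaces).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the adj_perf() walk, single 64-bit mask only. */
static void
walk_changed_cpus(uint64_t new_mask, uint64_t old_mask)
{
	uint64_t xmask = new_mask ^ old_mask;	/* cpus whose state changed */

	while (xmask != 0) {
		int cpu = __builtin_ctzll(xmask);	/* lowest set bit */

		xmask &= ~(1ULL << cpu);	/* clear it, like CPUMASK_NANDBIT */
		if (new_mask & (1ULL << cpu))
			printf("cpu%d: raise performance\n", cpu);
		else
			printf("cpu%d: lower performance\n", cpu);
	}
}

int
main(void)
{
	walk_changed_cpus(0x0f, 0x03);	/* cpus 2 and 3 just became busy */
	return 0;
}
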
Example #2

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08lx\n",
			(long)CPUMASK_LOWMASK(smp_active_mask));
}
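
The wait loop in ap_finish() simply polls until the active mask catches up with the startup mask. A rough userland analogue of that rendezvous, in which worker threads OR their bit into an active mask while the main thread polls, is sketched below; the 64-bit mask, the sleep intervals, and all names are assumptions for illustration, not kernel interfaces.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Worker threads OR their bit in; the main thread polls until all are set. */
static _Atomic uint64_t active_mask;

static void *
worker(void *arg)
{
	int id = (int)(intptr_t)arg;

	usleep(1000 * (id + 1));		/* pretend per-cpu startup work */
	atomic_fetch_or(&active_mask, 1ULL << id);
	return NULL;
}

int
main(void)
{
	pthread_t tid[4];
	uint64_t startup_mask = 0;
	int i;

	for (i = 0; i < 4; ++i) {
		startup_mask |= 1ULL << i;
		pthread_create(&tid[i], NULL, worker, (void *)(intptr_t)i);
	}
	/* Poll-and-sleep, like the DELAY()/cpu_lfence() loop in ap_finish(). */
	while (atomic_load(&active_mask) != startup_mask)
		usleep(1000);
	printf("Active mask: %08llx\n",
	    (unsigned long long)atomic_load(&active_mask));
	for (i = 0; i < 4; ++i)
		pthread_join(tid[i], NULL);
	return 0;
}
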
Example #3
static void
add_spare_cpus(const cpumask_t ocpu_used, int ncpu)
{
	cpumask_t saved_pwrdom, xcpu_used;
	int done = 0, cpu;

	/*
	 * Find more cpus in the previous cpu set.
	 */
	xcpu_used = cpu_used;
	CPUMASK_XORMASK(xcpu_used, ocpu_used);
	while (CPUMASK_TESTNZERO(xcpu_used)) {
		cpu = BSFCPUMASK(xcpu_used);
		CPUMASK_NANDBIT(xcpu_used, cpu);

		if (CPUMASK_TESTBIT(ocpu_used, cpu)) {
			CPUMASK_ORBIT(cpu_pwrdom_used, cpu2pwrdom[cpu]);
			CPUMASK_ORBIT(cpu_used, cpu);
			--ncpu;
			if (ncpu == 0)
				return;
		}
	}

	/*
	 * Find more cpus in the used cpu power domains.
	 */
	saved_pwrdom = cpu_pwrdom_used;
again:
	while (CPUMASK_TESTNZERO(saved_pwrdom)) {
		cpumask_t unused_cpumask;
		int dom;

		dom = BSFCPUMASK(saved_pwrdom);
		CPUMASK_NANDBIT(saved_pwrdom, dom);

		unused_cpumask = cpu_pwrdomain[dom]->dom_cpumask;
		CPUMASK_NANDMASK(unused_cpumask, cpu_used);

		while (CPUMASK_TESTNZERO(unused_cpumask)) {
			cpu = BSFCPUMASK(unused_cpumask);
			CPUMASK_NANDBIT(unused_cpumask, cpu);

			CPUMASK_ORBIT(cpu_pwrdom_used, dom);
			CPUMASK_ORBIT(cpu_used, cpu);
			--ncpu;
			if (ncpu == 0)
				return;
		}
	}
	if (!done) {
		done = 1;
		/*
		 * Find more cpus in unused cpu power domains
		 */
		saved_pwrdom = cpu_pwrdom_mask;
		CPUMASK_NANDMASK(saved_pwrdom, cpu_pwrdom_used);
		goto again;
	}
	if (DebugOpt)
		printf("%d cpus not found\n", ncpu);
}
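
add_spare_cpus() fills its quota in order of preference: cpus that were in the previous set first, then cpus in power domains that are already active, and only then cpus in idle domains (the goto again pass). The sketch below shows the same "preferred pool first, then the rest" selection over plain 64-bit masks; pick_spares() and its arguments are hypothetical names used only to illustrate the pattern.

#include <stdint.h>
#include <stdio.h>

/*
 * Pick up to 'want' bits from 'avail', preferring bits that are also set
 * in 'preferred' before falling back to the remaining available bits.
 */
static uint64_t
pick_spares(uint64_t avail, uint64_t preferred, int want)
{
	uint64_t picked = 0;
	uint64_t pool = avail & preferred;	/* pass 1: preferred cpus */
	int pass;

	for (pass = 0; pass < 2 && want > 0; ++pass) {
		while (pool != 0 && want > 0) {
			int cpu = __builtin_ctzll(pool);

			pool &= ~(1ULL << cpu);
			picked |= 1ULL << cpu;
			--want;
		}
		pool = avail & ~preferred;	/* pass 2: everything else */
	}
	return picked;
}

int
main(void)
{
	/* cpus 0-7 available, cpus 4 and 5 were used last round */
	printf("picked: %#llx\n",
	    (unsigned long long)pick_spares(0xff, 0x30, 4));
	return 0;
}
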
Example #4
/*
 * Figure out the cpu power domains.
 */
static int
acpi_get_cpupwrdom(void)
{
	struct cpu_pwrdom *dom;
	cpumask_t pwrdom_mask;
	char buf[64];
	char members[1024];
	char *str;
	size_t msize;
	int n, i, ncpu = 0, dom_id;

	memset(cpu2pwrdom, 0, sizeof(cpu2pwrdom));
	memset(cpu_pwrdomain, 0, sizeof(cpu_pwrdomain));
	CPUMASK_ASSZERO(cpu_pwrdom_mask);

	for (i = 0; i < MAXDOM; ++i) {
		snprintf(buf, sizeof(buf),
			 "hw.acpi.cpu.px_dom%d.available", i);
		if (sysctlbyname(buf, NULL, NULL, NULL, 0) < 0)
			continue;

		dom = calloc(1, sizeof(*dom));
		dom->dom_id = i;

		if (cpu_pwrdomain[i] != NULL) {
			fprintf(stderr, "cpu power domain %d exists\n", i);
			exit(1);
		}
		cpu_pwrdomain[i] = dom;
		CPUMASK_ORBIT(cpu_pwrdom_mask, i);
	}
	pwrdom_mask = cpu_pwrdom_mask;

	while (CPUMASK_TESTNZERO(pwrdom_mask)) {
		dom_id = BSFCPUMASK(pwrdom_mask);
		CPUMASK_NANDBIT(pwrdom_mask, dom_id);
		dom = cpu_pwrdomain[dom_id];

		CPUMASK_ASSZERO(dom->dom_cpumask);

		snprintf(buf, sizeof(buf),
			 "hw.acpi.cpu.px_dom%d.members", dom->dom_id);
		msize = sizeof(members);
		if (sysctlbyname(buf, members, &msize, NULL, 0) < 0) {
			cpu_pwrdomain[dom_id] = NULL;
			free(dom);
			continue;
		}

		members[msize] = 0;
		for (str = strtok(members, " "); str; str = strtok(NULL, " ")) {
			n = -1;
			sscanf(str, "cpu%d", &n);
			if (n >= 0) {
				++ncpu;
				++dom->dom_ncpus;
				CPUMASK_ORBIT(dom->dom_cpumask, n);
				cpu2pwrdom[n] = dom->dom_id;
			}
		}
		if (dom->dom_ncpus == 0) {
			cpu_pwrdomain[dom_id] = NULL;
			free(dom);
			continue;
		}
		if (DebugOpt) {
			printf("dom%d cpumask: ", dom->dom_id);
			for (i = 0; i < (int)NELEM(dom->dom_cpumask.ary); ++i) {
				printf("%jx ",
				    (uintmax_t)dom->dom_cpumask.ary[i]);
			}
			printf("\n");
		}
	}

	if (ncpu != NCpus) {
		if (DebugOpt)
			printf("Found %d cpus, expecting %d\n", ncpu, NCpus);

		pwrdom_mask = cpu_pwrdom_mask;
		while (CPUMASK_TESTNZERO(pwrdom_mask)) {
			dom_id = BSFCPUMASK(pwrdom_mask);
			CPUMASK_NANDBIT(pwrdom_mask, dom_id);
			dom = cpu_pwrdomain[dom_id];
			if (dom != NULL)
				free(dom);
		}
		return 0;
	}
	return 1;
}
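
The core of acpi_get_cpupwrdom() is turning a hw.acpi.cpu.px_dom%d.members string such as "cpu0 cpu2 cpu5" into a cpu mask for the domain. A standalone sketch of just that parsing step follows, with a fixed string standing in for the sysctlbyname() result and a plain 64-bit mask instead of cpumask_t (both are assumptions made for illustration).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char members[] = "cpu0 cpu2 cpu5";	/* example px_dom members string */
	uint64_t dom_cpumask = 0;
	int ncpus = 0;
	char *str;
	int n;

	for (str = strtok(members, " "); str != NULL; str = strtok(NULL, " ")) {
		n = -1;
		sscanf(str, "cpu%d", &n);
		if (n >= 0 && n < 64) {
			dom_cpumask |= 1ULL << n;
			++ncpus;
		}
	}
	printf("%d cpus, mask %#llx\n", ncpus, (unsigned long long)dom_cpumask);
	return 0;
}
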
Example #5
int
main(int ac, char **av)
{
	int ch;
	int res;
	char *sched = NULL;
	char *cpustr = NULL;
	char *sched_cpustr = NULL;
	char *p = NULL;
	cpumask_t cpumask;
	int cpuid;
	pid_t pid = getpid();  /* See usched_set(2) - BUGS */

	CPUMASK_ASSZERO(cpumask);

	while ((ch = getopt(ac, av, "d")) != -1) {
		switch (ch) {
		case 'd':
			DebugOpt = 1;
			break;
		default:
			usage();
			/* NOTREACHED */
		}
	}
	ac -= optind;
	av += optind;

	if (ac < 2) {
		usage();
		/* NOTREACHED */
	}
	sched_cpustr = strdup(av[0]);
	sched = strsep(&sched_cpustr, ":");
	if (strcmp(sched, "default") == 0)
		fprintf(stderr, "Ignoring scheduler == \"default\": not implemented\n");
	cpustr = strsep(&sched_cpustr, "");
	if (strlen(sched) == 0 && cpustr == NULL) {
		usage();
		/* NOTREACHED */
	}

	/*
	 * XXX needs expanded support for > 64 cpus
	 */
	if (cpustr != NULL) {
		uint64_t v;

		v = (uint64_t)strtoull(cpustr, NULL, 0);
		for (cpuid = 0; cpuid < (int)sizeof(v) * 8; ++cpuid) {
			if (v & (1LU << cpuid))
				CPUMASK_ORBIT(cpumask, cpuid);
		}
	}

	if (strlen(sched) != 0) {
		if (DebugOpt)
			fprintf(stderr, "DEBUG: USCHED_SET_SCHEDULER: scheduler: %s\n", sched);
		res = usched_set(pid, USCHED_SET_SCHEDULER, sched, strlen(sched));
		if (res != 0) {
			asprintf(&p, "usched_set(%d, USCHED_SET_SCHEDULER, \"%s\", %d)",
				pid, sched, (int)strlen(sched));
			perror(p);
			exit(1);
		}
	}
	if (CPUMASK_TESTNZERO(cpumask)) {
		for (cpuid = 0; cpuid < (int)sizeof(cpumask) * 8; ++cpuid) {
			if (CPUMASK_TESTBIT(cpumask, cpuid))
				break;
		}
		if (DebugOpt) {
			fprintf(stderr, "DEBUG: USCHED_SET_CPU: cpuid: %d\n",
				cpuid);
		}
		res = usched_set(pid, USCHED_SET_CPU, &cpuid, sizeof(int));
		if (res != 0) {
			asprintf(&p, "usched_set(%d, USCHED_SET_CPU, &%d, %d)",
				pid, cpuid, (int)sizeof(int));
			perror(p);
			exit(1);
		}
		CPUMASK_NANDBIT(cpumask, cpuid);
		while (CPUMASK_TESTNZERO(cpumask)) {
			++cpuid;
			if (CPUMASK_TESTBIT(cpumask, cpuid) == 0)
				continue;
			CPUMASK_NANDBIT(cpumask, cpuid);
			if (DebugOpt) {
				fprintf(stderr,
					"DEBUG: USCHED_ADD_CPU: cpuid: %d\n",
					cpuid);
			}
			res = usched_set(pid, USCHED_ADD_CPU, &cpuid, sizeof(int));
			if (res != 0) {
				asprintf(&p, "usched_set(%d, USCHED_ADD_CPU, &%d, %d)",
					pid, cpuid, (int)sizeof(int));
				perror(p);
				exit(1);
			}
		}
	}
	execvp(av[1], av + 1);
	exit(1);
}
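
For context, the tool built around this main() takes scheduler[:cpumask] or :cpumask as its first argument and runs the remaining command line under that policy. As illustrative invocations only (mask values arbitrary; DragonFly's stock userland schedulers are typically dfly and bsd4): usched dfly:0xc make -j2 runs make on cpus 2 and 3 under the dfly scheduler, while usched :0x1 ./worker keeps the current scheduler and pins the process to cpu 0.
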
Example #6
/*
 * Called with a critical section held and interrupts enabled.
 */
int
pmap_inval_intr(cpumask_t *cpumaskp, int toolong)
{
    globaldata_t gd = mycpu;
    pmap_inval_info_t *info;
    int loopme = 0;
    int cpu;
    cpumask_t cpumask;

    /*
     * Check all cpus for invalidations we may need to service.
     */
    cpu_ccfence();
    cpu = gd->gd_cpuid;
    cpumask = *cpumaskp;

    while (CPUMASK_TESTNZERO(cpumask)) {
        int n = BSFCPUMASK(cpumask);

#ifdef LOOPRECOVER
        KKASSERT(n >= 0 && n < MAXCPU);
#endif

        CPUMASK_NANDBIT(cpumask, n);
        info = &invinfo[n];

        /*
         * Due to interrupts/races we can catch a new operation
         * in an older interrupt.  A fence is needed once we detect
         * the (not) done bit.
         */
        if (!CPUMASK_TESTBIT(info->done, cpu))
            continue;
        cpu_lfence();
#ifdef LOOPRECOVER
        if (toolong) {
            kprintf("pminvl %d->%d %08jx %08jx mode=%d\n",
                    cpu, n, info->done.ary[0], info->mask.ary[0],
                    info->mode);
        }
#endif

        /*
         * info->mask and info->done always contain the originating
         * cpu until the originator is done.  Targets may still be
         * present in info->done after the originator is done (they
         * will be finishing up their loops).
         *
         * Clear info->mask bits on other cpus to indicate that they
         * have quiesced (entered the loop).  Once the other mask bits
         * are clear we can execute the operation on the original,
         * then clear the mask and done bits on the originator.  The
         * targets will then finish up their side and clear their
         * done bits.
         *
         * The command is considered 100% done when all done bits have
         * been cleared.
         */
        if (n != cpu) {
            /*
             * Command state machine for 'other' cpus.
             */
            if (CPUMASK_TESTBIT(info->mask, cpu)) {
                /*
                 * Other cpu indicates to the originator that it
                 * has quiesced.
                 */
                ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
                loopme = 1;
            } else if (info->ptep &&
                       CPUMASK_TESTBIT(info->mask, n)) {
                /*
                 * Other cpu must wait for the originator (n)
                 * to complete its command if ptep is not NULL.
                 */
                loopme = 1;
            } else {
                /*
                 * Other cpu detects that the originator has
                 * completed its command, or there was no
                 * command.
                 *
                 * Now that the page table entry has changed,
                 * we can follow up with our own invalidation.
                 */
                vm_offset_t va = info->va;
                int npgs;

                if (va == (vm_offset_t)-1 ||
                        info->npgs > MAX_INVAL_PAGES) {
                    cpu_invltlb();
                } else {
                    for (npgs = info->npgs; npgs; --npgs) {
                        cpu_invlpg((void *)va);
                        va += PAGE_SIZE;
                    }
                }
                ATOMIC_CPUMASK_NANDBIT(info->done, cpu);
                /* info invalid now */
                /* loopme left alone */
            }
        } else if (CPUMASK_TESTBIT(info->mask, cpu)) {
            /*
             * Originator is waiting for other cpus
             */
            if (CPUMASK_CMPMASKNEQ(info->mask, gd->gd_cpumask)) {
                /*
                 * Originator waits for other cpus to enter
                 * their loop (aka quiesce).
                 *
                 * If this bugs out the IPI may have been lost,
                 * try to reissue by resetting our own
                 * reentrancy bit and clearing the smurf mask
                 * for the cpus that did not respond, then
                 * reissuing the IPI.
                 */
                loopme = 1;
#ifdef LOOPRECOVER
                if (loopwdog(info)) {
                    info->failed = 1;
                    loopdebug("C", info);
                    /* XXX recover from possible bug */
                    mdcpu->gd_xinvaltlb = 0;
                    ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
                                            info->mask);
                    cpu_disable_intr();
                    smp_invlpg(&smp_active_mask);

                    /*
                     * Force outer-loop retest of Xinvltlb
                     * requests (see mp_machdep.c).
                     */
                    mdcpu->gd_xinvaltlb = 2;
                    cpu_enable_intr();
                }
#endif
            } else {
                /*
                 * Originator executes operation and clears
                 * mask to allow other cpus to finish.
                 */
                KKASSERT(info->mode != INVDONE);
                if (info->mode == INVSTORE) {
                    if (info->ptep)
                        info->opte = atomic_swap_long(info->ptep, info->npte);
                    CHECKSIGMASK(info);
                    ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
                    CHECKSIGMASK(info);
                } else {
                    if (atomic_cmpset_long(info->ptep,
                                           info->opte, info->npte)) {
                        info->success = 1;
                    } else {
                        info->success = 0;
                    }
                    CHECKSIGMASK(info);
                    ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
                    CHECKSIGMASK(info);
                }
                loopme = 1;
            }
        } else {
            /*
             * Originator does not have to wait for the other
             * cpus to finish.  It clears its done bit.  A new
             * command will not be initiated by the originator
             * until the other cpus have cleared their done bits
             * (asynchronously).
             */
            vm_offset_t va = info->va;
            int npgs;

            if (va == (vm_offset_t)-1 ||
                    info->npgs > MAX_INVAL_PAGES) {
                cpu_invltlb();
            } else {
                for (npgs = info->npgs; npgs; --npgs) {
                    cpu_invlpg((void *)va);
                    va += PAGE_SIZE;
                }
            }

            /* leave loopme alone */
            /* other cpus may still be finishing up */
            /* can't race originator since that's us */
            info->mode = INVDONE;
            ATOMIC_CPUMASK_NANDBIT(info->done, cpu);
        }
    }
    return loopme;
}
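
The mask/done protocol described in the large comment above can be mimicked in userland with two atomic bit masks: targets clear their mask bit to report quiescence, the originator runs the operation once only its own mask bit remains, then clears its own mask and done bits, and the command is fully complete once every done bit is gone. The thread-based sketch below works under those assumptions; the single 64-bit masks, the busy-wait loops, and all names are illustrative, not the kernel's pmap_inval_info machinery.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NTHREADS	4	/* bit 0 plays the originator (main thread) */

static _Atomic uint64_t mask;	/* originator + targets not yet quiesced */
static _Atomic uint64_t done;	/* originator + targets not yet finished */

static void *
target(void *arg)
{
	int id = (int)(intptr_t)arg;
	uint64_t bit = 1ULL << id;

	/* Phase 1: tell the originator we have quiesced. */
	atomic_fetch_and(&mask, ~bit);

	/* Phase 2: wait for the originator to run the operation ... */
	while (atomic_load(&mask) & 1ULL)
		;
	/* ... then do our local follow-up work and clear our done bit. */
	atomic_fetch_and(&done, ~bit);
	return NULL;
}

int
main(void)
{
	pthread_t tid[NTHREADS];
	uint64_t all = 0;
	int i;

	for (i = 0; i < NTHREADS; ++i)
		all |= 1ULL << i;
	atomic_store(&mask, all);
	atomic_store(&done, all);

	for (i = 1; i < NTHREADS; ++i)
		pthread_create(&tid[i], NULL, target, (void *)(intptr_t)i);

	/* Originator: wait until only its own mask bit is left. */
	while (atomic_load(&mask) != 1ULL)
		;
	printf("all targets quiesced, running the operation\n");

	/* Operation complete: release the targets and clear our own bits. */
	atomic_fetch_and(&mask, ~1ULL);
	atomic_fetch_and(&done, ~1ULL);

	/* The command is 100%% done once every done bit has been cleared. */
	while (atomic_load(&done) != 0)
		;
	for (i = 1; i < NTHREADS; ++i)
		pthread_join(tid[i], NULL);
	return 0;
}
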