Example #1
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                                      struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct amd_nb *nb = cpuc->amd_nb;
    int i;

    /*
     * only care about NB events
     */
    if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
        return;

    /*
     * need to scan whole list because event may not have
     * been assigned during scheduling
     *
     * no race condition possible because event can only
     * be removed on one CPU at a time AND PMU is disabled
     * when we come here
     */
    for (i = 0; i < x86_pmu.num_counters; i++) {
        if (nb->owners[i] == event) {
            cmpxchg(nb->owners+i, event, NULL);
            break;
        }
    }
}
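
The release path above only has to find the one slot the event may own and clear it. As a rough standalone illustration of that scan-and-clear pattern (user space, C11 atomics, not kernel code), the sketch below re-creates the owner table with made-up names; NB_SLOTS, struct fake_event, nb_owners and nb_release_slot() are assumptions for this example only and do not exist in the kernel.

#include <stdatomic.h>
#include <stdio.h>

#define NB_SLOTS 4                      /* stand-in for x86_pmu.num_counters */

struct fake_event { int id; };          /* stand-in for struct perf_event */

/* stand-in for nb->owners[]: one owner pointer per shared NB counter */
static _Atomic(struct fake_event *) nb_owners[NB_SLOTS];

/*
 * Scan every slot, because the event may own any one of them (or none),
 * and clear the slot it owns. The compare-and-swap mirrors
 * cmpxchg(nb->owners + i, event, NULL) in amd_put_event_constraints().
 */
static void nb_release_slot(struct fake_event *ev)
{
    for (int i = 0; i < NB_SLOTS; i++) {
        struct fake_event *expected = ev;

        if (atomic_compare_exchange_strong(&nb_owners[i], &expected, NULL))
            break;
    }
}

int main(void)
{
    struct fake_event a = { .id = 1 };

    atomic_store(&nb_owners[2], &a);    /* pretend slot 2 was claimed earlier */
    nb_release_slot(&a);
    printf("slot 2 owner after release: %p\n",
           (void *)atomic_load(&nb_owners[2]));
    return 0;
}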
Example #4
/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache and HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge, which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When an NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events; this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated via cmpxchg, they must
 * eventually be freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non-NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct amd_nb *nb = cpuc->amd_nb;
    struct perf_event *old = NULL;
    int max = x86_pmu.num_counters;
    int i, j, k = -1;

    /*
     * if not NB event or no NB, then no constraints
     */
    if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
        return &unconstrained;

    /*
     * detect if already present, if so reuse
     *
     * cannot merge with actual allocation
     * because of possible holes
     *
     * event can already be present yet not assigned (in hwc->idx)
     * because of successive calls to x86_schedule_events() from
     * hw_perf_group_sched_in() without hw_perf_enable()
     */
    for (i = 0; i < max; i++) {
        /*
         * keep track of first free slot
         */
        if (k == -1 && !nb->owners[i])
            k = i;

        /* already present, reuse */
        if (nb->owners[i] == event)
            goto done;
    }
    /*
     * not present, so grab a new slot
     * starting either at:
     */
    if (hwc->idx != -1) {
        /* previous assignment */
        i = hwc->idx;
    } else if (k != -1) {
        /* start from free slot found */
        i = k;
    } else {
        /*
         * event not found, no slot found in
         * first pass, try again from the
         * beginning
         */
        i = 0;
    }
    j = i;
    do {
        old = cmpxchg(nb->owners+i, NULL, event);
        if (!old)
            break;
        if (++i == max)
            i = 0;
    } while (i != j);
done:
    if (!old)
        return &nb->event_constraints[i];

    return &emptyconstraint;
}
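
For the allocation side, here is a similarly hedged user-space sketch of the claim loop: start at a hinted slot, walk the owner table once, trying a compare-and-swap per slot, and report failure when the table wraps around with nothing free (the "empty constraint" case above). Again, NB_SLOTS, struct fake_event, nb_owners and nb_claim_slot() are invented names for illustration, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>

#define NB_SLOTS 4                      /* stand-in for x86_pmu.num_counters */

struct fake_event { int id; };          /* stand-in for struct perf_event */

/* stand-in for nb->owners[]: one owner pointer per shared NB counter */
static _Atomic(struct fake_event *) nb_owners[NB_SLOTS];

/*
 * Claim a slot for ev, starting at a hinted index and wrapping around
 * once. Each attempt mirrors cmpxchg(nb->owners + i, NULL, event):
 * only the caller whose compare-and-swap sees NULL wins the slot.
 * Returns the slot index, or -1 when every slot is taken (the
 * "empty constraint" outcome in amd_get_event_constraints()).
 */
static int nb_claim_slot(struct fake_event *ev, int hint)
{
    int i = (hint >= 0 && hint < NB_SLOTS) ? hint : 0;
    int start = i;

    do {
        struct fake_event *expected = NULL;

        if (atomic_compare_exchange_strong(&nb_owners[i], &expected, ev))
            return i;                   /* slot i now belongs to ev */

        if (++i == NB_SLOTS)
            i = 0;
    } while (i != start);

    return -1;                          /* no free slot on this NB */
}

int main(void)
{
    struct fake_event ev[NB_SLOTS + 1] = { { 0 }, { 1 }, { 2 }, { 3 }, { 4 } };

    /* the first NB_SLOTS claims succeed; the last one finds no free slot */
    for (int n = 0; n <= NB_SLOTS; n++)
        printf("event %d -> slot %d\n", n, nb_claim_slot(&ev[n], -1));
    return 0;
}

Only one contender's compare-and-swap can observe NULL in a given slot, which is the whole synchronization mechanism here: losers simply move on to the next slot or, after a full wrap-around, give up and fall back to the empty constraint.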