Example #1
const S390CPUDef *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga,
                                    S390FeatBitmap features)
{
    const S390CPUDef *last_compatible = NULL;
    const S390CPUDef *matching_cpu_type = NULL;
    int i;

    if (!gen) {
        ec_ga = 0;
    }
    if (!gen && type) {
        gen = s390_get_gen_for_cpu_type(type);
    }

    for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
        const S390CPUDef *def = &s390_cpu_defs[i];
        S390FeatBitmap missing;

        /* don't even try newer generations if we know the generation */
        if (gen) {
            if (def->gen > gen) {
                break;
            } else if (def->gen == gen && ec_ga && def->ec_ga > ec_ga) {
                break;
            }
        }

        if (features) {
            /* see if the model satisfies the minimum features */
            bitmap_andnot(missing, def->base_feat, features, S390_FEAT_MAX);
            /*
             * Ignore certain features that are in the base model, but not
             * relevant for the search (esp. MSA subfunctions).
             */
            bitmap_andnot(missing, missing, ignored_base_feat, S390_FEAT_MAX);
            if (!bitmap_empty(missing, S390_FEAT_MAX)) {
                break;
            }
        }

        /* stop the search if we found the exact model */
        if (def->type == type && def->ec_ga == ec_ga) {
            return def;
        }
        /* remember if we've at least seen one with the same cpu type */
        if (def->type == type) {
            matching_cpu_type = def;
        }
        last_compatible = def;
    }
    /* prefer the model with the same cpu type, esp. don't take the BC for EC */
    if (matching_cpu_type) {
        return matching_cpu_type;
    }
    return last_compatible;
}
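All of the call sites collected here rely on the same primitive: bitmap_andnot(dst, src1, src2, nbits) computes dst = src1 & ~src2 over nbits bits, i.e. the bits set in src1 but not in src2. Below is a minimal, self-contained sketch of the "which required bits are missing" check from Example #1, reduced to a single unsigned long instead of the QEMU/Linux bitmap helpers; the function name and the example values are made up for illustration.

#include <stdio.h>

/* Hypothetical single-word stand-in for bitmap_andnot():
 * missing = required & ~available. */
static unsigned long mask_andnot(unsigned long required, unsigned long available)
{
    return required & ~available;
}

int main(void)
{
    /* Assumed example values: bits 0-3 are required, bits 0-2 are available. */
    unsigned long required  = 0x0ful;
    unsigned long available = 0x07ul;
    unsigned long missing   = mask_andnot(required, available);

    if (missing) {
        /* Same emptiness test the examples perform with bitmap_empty(). */
        printf("missing bits: 0x%lx\n", missing);
        return 1;
    }
    printf("all required bits present\n");
    return 0;
}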
Example #2
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;
	mutex_lock(&feature->mutex);
	bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_andnot(feature->allowed, feature->allowed,
		      (unsigned long *)unallowed_feature_mask,
		      feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}
Example #3
static void check_compatibility(const S390CPUModel *max_model,
                                const S390CPUModel *model, Error **errp)
{
    S390FeatBitmap missing;

    if (model->def->gen > max_model->def->gen) {
        error_setg(errp, "Selected CPU generation is too new. Maximum "
                   "supported model in the configuration: \'%s\'",
                   max_model->def->name);
        return;
    } else if (model->def->gen == max_model->def->gen &&
               model->def->ec_ga > max_model->def->ec_ga) {
        error_setg(errp, "Selected CPU GA level is too new. Maximum "
                   "supported model in the configuration: \'%s\'",
                   max_model->def->name);
        return;
    }

    /* detect the missing features to properly report them */
    bitmap_andnot(missing, model->features, max_model->features, S390_FEAT_MAX);
    if (bitmap_empty(missing, S390_FEAT_MAX)) {
        return;
    }

    error_setg(errp, " ");
    s390_feat_bitmap_to_ascii(missing, errp, error_prepend_missing_feat);
    error_prepend(errp, "Some features requested in the CPU model are not "
                  "available in the configuration: ");
}
Example #4
void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque,
                               void (*fn)(const char *name, void *opaque))
{
    S390FeatBitmap bitmap, tmp;
    S390FeatGroup group;
    S390Feat feat;

    bitmap_copy(bitmap, features, S390_FEAT_MAX);

    /* process whole groups first */
    for (group = 0; group < S390_FEAT_GROUP_MAX; group++) {
        const S390FeatGroupDef *def = s390_feat_group_def(group);

        bitmap_and(tmp, bitmap, def->feat, S390_FEAT_MAX);
        if (bitmap_equal(tmp, def->feat, S390_FEAT_MAX)) {
            bitmap_andnot(bitmap, bitmap, def->feat, S390_FEAT_MAX);
            fn(def->name, opaque);
        }
    }

    /* report leftovers as separate features */
    feat = find_first_bit(bitmap, S390_FEAT_MAX);
    while (feat < S390_FEAT_MAX) {
        fn(s390_feat_def(feat)->name, opaque);
        feat = find_next_bit(bitmap, S390_FEAT_MAX, feat + 1);
    }
}
Example #5
File: matrix.c Project: Lyude/linux
/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs
 *
 * This removes one not-yet-allocated managed interrupt per CPU in the
 * map. It does not matter which one, because managed interrupts free
 * their allocation when they shut down. If that is not the case, the
 * accounting is broken, but all that can be done at this point is to
 * warn about it.
 */
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit, end = m->alloc_end;

		if (WARN_ON_ONCE(!cm->managed))
			continue;

		/* Get managed bits which are not allocated */
		bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

		bit = find_first_bit(m->scratch_map, end);
		if (WARN_ON_ONCE(bit >= end))
			continue;

		clear_bit(bit, cm->managed_map);

		cm->managed--;
		if (cm->online) {
			cm->available++;
			m->global_available++;
		}
		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
	}
}
Example #6
static int phylink_is_empty_linkmode(const unsigned long *linkmode)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, };

	phylink_set_port_modes(tmp);
	phylink_set(tmp, Autoneg);
	phylink_set(tmp, Pause);
	phylink_set(tmp, Asym_Pause);

	bitmap_andnot(tmp, linkmode, tmp, __ETHTOOL_LINK_MODE_MASK_NBITS);

	return linkmode_empty(tmp);
}
Example #7
/* convert the S390CPUModel into a static CpuModelInfo */
static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model,
                                bool delta_changes)
{
    QDict *qdict = qdict_new();
    S390FeatBitmap bitmap;

    /* always fall back to the static base model */
    info->name = g_strdup_printf("%s-base", model->def->name);

    if (delta_changes) {
        /* features deleted from the base feature set */
        bitmap_andnot(bitmap, model->def->base_feat, model->features,
                      S390_FEAT_MAX);
        if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
            s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
        }

        /* features added to the base feature set */
        bitmap_andnot(bitmap, model->features, model->def->base_feat,
                      S390_FEAT_MAX);
        if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
            s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_enabled_feat);
        }
    } else {
        /* expand all features */
        s390_feat_bitmap_to_ascii(model->features, qdict,
                                  qdict_add_enabled_feat);
        bitmap_complement(bitmap, model->features, S390_FEAT_MAX);
        s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
    }

    if (!qdict_size(qdict)) {
        QDECREF(qdict);
    } else {
        info->props = QOBJECT(qdict);
        info->has_props = true;
    }
}
Example #8
/* Check whether the current CPU supports all VQs in the committed set */
int sve_verify_vq_map(void)
{
	int ret = 0;

	sve_probe_vqs(sve_secondary_vq_map);
	bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
		      SVE_VQ_MAX);
	if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
			smp_processor_id());
		ret = -EINVAL;
	}

	return ret;
}
Example #9
File: matrix.c Project: Lyude/linux
/**
 * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @cpu:	On which CPU the interrupt should be allocated
 */
int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
{
	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
	unsigned int bit, end = m->alloc_end;

	/* Get managed bits which are not allocated */
	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
	bit = find_first_bit(m->scratch_map, end);
	if (bit >= end)
		return -ENOSPC;
	set_bit(bit, cm->alloc_map);
	cm->allocated++;
	m->total_allocated++;
	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
	return bit;
}
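Examples #5 and #9 both build a scratch set of managed-but-unallocated bits with bitmap_andnot() and then take the first set bit with find_first_bit(). Here is a single-word sketch of that selection, using the GCC/Clang builtin __builtin_ctzl() in place of find_first_bit(); the function name and the example masks are hypothetical.

#include <stdio.h>

/* Hypothetical single-word version of the scratch_map computation:
 * candidates = managed & ~allocated, then take the lowest set bit.
 * Returns -1 if no managed bit is free. */
static int first_free_managed(unsigned long managed, unsigned long allocated)
{
    unsigned long candidates = managed & ~allocated;

    if (!candidates)
        return -1;
    return __builtin_ctzl(candidates); /* index of the lowest set bit */
}

int main(void)
{
    /* Assumed example: bits 2, 5 and 7 are managed; bit 2 is already allocated. */
    unsigned long managed   = (1ul << 2) | (1ul << 5) | (1ul << 7);
    unsigned long allocated = (1ul << 2);

    printf("next free managed bit: %d\n", first_free_managed(managed, allocated));
    return 0;
}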
Example #10
const S390CPUDef *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga,
                                    S390FeatBitmap features)
{
    const S390CPUDef *last_compatible = NULL;
    int i;

    if (!gen) {
        ec_ga = 0;
    }
    if (!gen && type) {
        gen = s390_get_gen_for_cpu_type(type);
    }

    for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
        const S390CPUDef *def = &s390_cpu_defs[i];
        S390FeatBitmap missing;

        /* don't even try newer generations if we know the generation */
        if (gen) {
            if (def->gen > gen) {
                break;
            } else if (def->gen == gen && ec_ga && def->ec_ga > ec_ga) {
                break;
            }
        }

        if (features) {
            /* see if the model satisfies the minimum features */
            bitmap_andnot(missing, def->base_feat, features, S390_FEAT_MAX);
            if (!bitmap_empty(missing, S390_FEAT_MAX)) {
                break;
            }
        }

        /* stop the search if we found the exact model */
        if (def->type == type && def->ec_ga == ec_ga) {
            return def;
        }
        last_compatible = def;
    }
    return last_compatible;
}
Example #11
static void check_unavailable_features(const S390CPUModel *max_model,
                                       const S390CPUModel *model,
                                       strList **unavailable)
{
    S390FeatBitmap missing;

    /* check general model compatibility */
    if (max_model->def->gen < model->def->gen ||
        (max_model->def->gen == model->def->gen &&
         max_model->def->ec_ga < model->def->ec_ga)) {
        list_add_feat("type", unavailable);
    }

    /* detect missing features if any to properly report them */
    bitmap_andnot(missing, model->features, max_model->features,
                  S390_FEAT_MAX);
    if (!bitmap_empty(missing, S390_FEAT_MAX)) {
        s390_feat_bitmap_to_ascii(missing, unavailable, list_add_feat);
    }
}
Example #12
static void set_feature_group(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    S390FeatGroup group = (S390FeatGroup) opaque;
    const S390FeatGroupDef *def = s390_feat_group_def(group);
    DeviceState *dev = DEVICE(obj);
    S390CPU *cpu = S390_CPU(obj);
    bool value;

    if (dev->realized) {
        error_setg(errp, "Attempt to set property '%s' on '%s' after "
                   "it was realized", name, object_get_typename(obj));
        return;
    } else if (!cpu->model) {
        error_setg(errp, "Details about the host CPU model are not available, "
                         "features cannot be changed.");
        return;
    }

    visit_type_bool(v, name, &value, errp);
    if (*errp) {
        return;
    }
    if (value) {
        /* groups are added in one shot, so an intersect is sufficient */
        if (!bitmap_intersects(def->feat, cpu->model->def->full_feat,
                               S390_FEAT_MAX)) {
            error_setg(errp, "Group '%s' is not available for CPU model '%s',"
                       " it was introduced with later models.",
                       name, cpu->model->def->name);
            return;
        }
        bitmap_or(cpu->model->features, cpu->model->features, def->feat,
                  S390_FEAT_MAX);
    } else {
        bitmap_andnot(cpu->model->features, cpu->model->features, def->feat,
                      S390_FEAT_MAX);
    }
}
Example #13
/*H:205
 * Virtual Interrupts.
 *
 * interrupt_pending() returns the first pending interrupt which isn't blocked
 * by the Guest.  It is called before every entry to the Guest, and just before
 * we go to sleep when the Guest has halted itself.
 */
unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
{
	unsigned int irq;
	DECLARE_BITMAP(blk, LGUEST_IRQS);

	/* If the Guest hasn't even initialized yet, we can do nothing. */
	if (!cpu->lg->lguest_data)
		return LGUEST_IRQS;

	/*
	 * Take our "irqs_pending" array and remove any interrupts the Guest
	 * wants blocked: the result ends up in "blk".
	 */
	if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
			   sizeof(blk)))
		return LGUEST_IRQS;
	bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);

	/* Find the first interrupt. */
	irq = find_first_bit(blk, LGUEST_IRQS);
	*more = find_next_bit(blk, LGUEST_IRQS, irq+1);

	return irq;
}
Example #14
CpuModelCompareInfo *arch_query_cpu_model_comparison(CpuModelInfo *infoa,
                                                     CpuModelInfo *infob,
                                                     Error **errp)
{
    CpuModelCompareResult feat_result, gen_result;
    CpuModelCompareInfo *compare_info;
    S390FeatBitmap missing, added;
    S390CPUModel modela, modelb;

    /* convert both models to our internal representation */
    cpu_model_from_info(&modela, infoa, errp);
    if (*errp) {
        return NULL;
    }
    cpu_model_from_info(&modelb, infob, errp);
    if (*errp) {
        return NULL;
    }
    compare_info = g_malloc0(sizeof(*compare_info));

    /* check the cpu generation and ga level */
    if (modela.def->gen == modelb.def->gen) {
        if (modela.def->ec_ga == modelb.def->ec_ga) {
            /* ec and corresponding bc are identical */
            gen_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL;
        } else if (modela.def->ec_ga < modelb.def->ec_ga) {
            gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
        } else {
            gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
        }
    } else if (modela.def->gen < modelb.def->gen) {
        gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
    } else {
        gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
    }
    if (gen_result != CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
        /* both models cannot be made identical */
        list_add_feat("type", &compare_info->responsible_properties);
    }

    /* check the feature set */
    if (bitmap_equal(modela.features, modelb.features, S390_FEAT_MAX)) {
        feat_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL;
    } else {
        bitmap_andnot(missing, modela.features, modelb.features, S390_FEAT_MAX);
        s390_feat_bitmap_to_ascii(missing,
                                  &compare_info->responsible_properties,
                                  list_add_feat);
        bitmap_andnot(added, modelb.features, modela.features, S390_FEAT_MAX);
        s390_feat_bitmap_to_ascii(added, &compare_info->responsible_properties,
                                  list_add_feat);
        if (bitmap_empty(missing, S390_FEAT_MAX)) {
            feat_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
        } else if (bitmap_empty(added, S390_FEAT_MAX)) {
            feat_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
        } else {
            feat_result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE;
        }
    }

    /* combine the results */
    if (gen_result == feat_result) {
        compare_info->result = gen_result;
    } else if (feat_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
        compare_info->result = gen_result;
    } else if (gen_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
        compare_info->result = feat_result;
    } else {
        compare_info->result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE;
    }
    return compare_info;
}
Example #15
/*H:205
 * Virtual Interrupts.
 *
 * maybe_do_interrupt() gets called before every entry to the Guest, to see if
 * we should divert the Guest to running an interrupt handler. */
void maybe_do_interrupt(struct lg_cpu *cpu)
{
	unsigned int irq;
	DECLARE_BITMAP(blk, LGUEST_IRQS);
	struct desc_struct *idt;

	/* If the Guest hasn't even initialized yet, we can do nothing. */
	if (!cpu->lg->lguest_data)
		return;

	/* Take our "irqs_pending" array and remove any interrupts the Guest
	 * wants blocked: the result ends up in "blk". */
	if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
			   sizeof(blk)))
		return;
	bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);

	/* Find the first interrupt. */
	irq = find_first_bit(blk, LGUEST_IRQS);
	/* None?  Nothing to do */
	if (irq >= LGUEST_IRQS)
		return;

	/* They may be in the middle of an iret, where they asked us never to
	 * deliver interrupts. */
	if (cpu->regs->eip >= cpu->lg->noirq_start &&
	   (cpu->regs->eip < cpu->lg->noirq_end))
		return;

	/* If they're halted, interrupts restart them. */
	if (cpu->halted) {
		/* Re-enable interrupts. */
		if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
			kill_guest(cpu, "Re-enabling interrupts");
		cpu->halted = 0;
	} else {
		/* Otherwise we check if they have interrupts disabled. */
		u32 irq_enabled;
		if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
			irq_enabled = 0;
		if (!irq_enabled)
			return;
	}

	/* Look at the IDT entry the Guest gave us for this interrupt.  The
	 * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
	 * over them. */
	idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
	/* If they don't have a handler (yet?), we just ignore it */
	if (idt_present(idt->a, idt->b)) {
		/* OK, mark it no longer pending and deliver it. */
		clear_bit(irq, cpu->irqs_pending);
		/* set_guest_interrupt() takes the interrupt descriptor and a
		 * flag to say whether this interrupt pushes an error code onto
		 * the stack as well: virtual interrupts never do. */
		set_guest_interrupt(cpu, idt->a, idt->b, 0);
	}

	/* Every time we deliver an interrupt, we update the timestamp in the
	 * Guest's lguest_data struct.  It would be better for the Guest if we
	 * did this more often, but it can actually be quite slow: doing it
	 * here is a compromise which means at least it gets updated every
	 * timer interrupt. */
	write_timestamp(cpu);
}