Example #1
0
/*
 * Register this cpu's dispatch trace log buffer with the hypervisor and
 * turn on event logging.  Returns 0 on success, -EIO if the hcall fails.
 */
static int dtl_start(struct dtl *dtl)
{
	int hw_id, rc;

	/*
	 * The HV reads the buffer size out of the second 32-bit word of
	 * the buffer itself, so stash it there before registering.
	 */
	((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;

	hw_id = get_hard_smp_processor_id(dtl->cpu);
	rc = register_dtl(hw_id, __pa(dtl->buf));
	if (rc) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hw_id, rc);
		return -EIO;
	}

	/* reset the buffer index before logging starts */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/*
	 * The lppaca stores above must be visible before the enable mask
	 * below lets the HV start writing entries.
	 */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}
Example #2
0
/*
 * Register cpu's per-partition areas with the hypervisor: the VPA
 * (lppaca), the SLB shadow buffer and, if one has been allocated, the
 * dispatch trace log.  Registration failures are reported but not fatal.
 */
void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);
	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPAR support SLB shadow buffer.
	 */
	addr = __pa(&slb_shadow[cpu]);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			/*
			 * Report the cpu being brought up, consistent with
			 * the other messages above; the caller may not be
			 * running on that cpu.
			 */
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", cpu, hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}
Example #3
0
/*
 * Disable dispatch trace logging for this cpu and unregister the buffer
 * from the hypervisor.
 */
static void dtl_stop(struct dtl *dtl)
{
	/* clear the enable mask first so no further entries are logged */
	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(get_hard_smp_processor_id(dtl->cpu));
}
Example #4
0
/*
 * Attach this cpu's buffer to its dtl ring and enable event logging.
 * Always returns 0.
 */
static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *ring = &per_cpu(dtl_rings, dtl->cpu);

	/* describe the buffer to the ring before it is published */
	ring->buf = dtl->buf;
	ring->buf_end = dtl->buf + dtl->buf_entries;
	ring->write_index = 0;

	/*
	 * A non-NULL write_ptr is what turns logging into our buffer on,
	 * so the fields above must be visible first.
	 */
	smp_wmb();
	ring->write_ptr = dtl->buf;

	/* enable event logging, remembering the previous mask */
	ring->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}
/*
 * Release secondary processor nr so it can enter the kernel proper.
 */
static void smp_iSeries_kick_cpu(int nr)
{
	BUG_ON((nr < 0) || (nr >= NR_CPUS));

	/* Verify that our partition has a processor nr */
	/* NOTE(review): dyn_proc_status >= 2 is treated as "not available
	 * to this partition" — confirm the status encoding against the
	 * iSeries HV documentation. */
	if (lppaca_of(nr).dyn_proc_status >= 2)
		return;

	/* The processor is currently spinning, waiting
	 * for the cpu_start field to become non-zero
	 * After we set cpu_start, the processor will
	 * continue on to secondary_start in iSeries_head.S
	 */
	paca[nr].cpu_start = 1;
}
Example #6
0
/*
 * Detach this cpu's buffer from its dtl ring, restore the previous
 * enable mask, and drop our reference on the global consumer.
 */
static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *ring = &per_cpu(dtl_rings, dtl->cpu);

	/* a NULL write_ptr stops logging into our buffer... */
	ring->write_ptr = NULL;
	smp_wmb();

	/* ...so the buffer may only be torn down after the barrier */
	ring->buf = NULL;

	/* restore dtl_enable_mask saved at dtl_start() time */
	lppaca_of(dtl->cpu).dtl_enable_mask = ring->saved_dtl_mask;

	/* last user gone: drop the consumer hook */
	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}
Example #7
0
/*
 * Confer our virtual-processor cycles to the holder of @lock instead of
 * spinning while it is preempted by the hypervisor.
 *
 * NOTE(review): the low 16 bits of the lock word are assumed to encode
 * the holder's logical cpu number (from the 0xffff mask below) — confirm
 * against the arch_spin_lock implementation.
 */
void __spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;		/* lock is free, nothing to yield to */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	/* yield_count is big-endian in the lppaca; odd means preempted */
	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	/* re-check after the barrier: only confer if the same holder
	 * still owns the lock */
	if (lock->slock != lock_value)
		return;		/* something has changed */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
}
Example #8
0
/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 *
 * Confers our virtual-processor cycles to the write-lock holder via
 * H_CONFER rather than spinning while it is preempted.
 */
void __rw_yield(arch_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	/* NOTE(review): low 16 bits assumed to hold the writer's logical
	 * cpu number — confirm against the arch_rwlock encoding */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	/* yield_count is big-endian in the lppaca; odd means preempted */
	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	/* re-check after the barrier: only confer if the same writer
	 * still holds the lock */
	if (rw->lock != lock_value)
		return;		/* something has changed */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
}
Example #9
0
/*
 * Register cpu's per-partition areas with the hypervisor: the VPA
 * (lppaca), the SLB shadow buffer (hash MMU only) and, if one has been
 * allocated, the dispatch trace log.  Must be called on @cpu itself.
 * Registration failures are reported but not fatal.
 */
void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPAR support SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = paca_ptrs[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			/*
			 * Report the cpu being set up, consistent with the
			 * other messages in this function (equal to
			 * smp_processor_id() in the normal, WARN-free case).
			 */
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", cpu, hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}
Example #10
0
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#include <asm/smp.h>
#include <asm/firmware.h>

/*
 * Confer our virtual-processor cycles to the holder of @lock instead of
 * spinning while it is preempted by the hypervisor.  On iSeries this
 * goes through HvCall2(); on SPLPAR through the H_CONFER hcall.
 */
void __spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;		/* lock is free, nothing to yield to */
	/* low 16 bits of the lock word encode the holder's cpu number */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	/*
	 * Merge conflict resolved in favour of the lppaca_of() accessor
	 * (HEAD side), matching its use throughout this file.
	 */
	yield_count = lppaca_of(holder_cpu).yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	/* re-check after the barrier: only yield to the same holder */
	if (lock->slock != lock_value)
		return;		/* something has changed */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
			((u64)holder_cpu << 32) | yield_count);
#ifdef CONFIG_PPC_SPLPAR
	else
		plpar_hcall_norets(H_CONFER,
			get_hard_smp_processor_id(holder_cpu), yield_count);
#endif
}
Example #11
0
/* Current index into this cpu's dispatch trace log, read from the
 * lppaca.  NOTE(review): dtl_idx appears to be advanced by the
 * hypervisor once logging is enabled — confirm against PAPR. */
static u64 dtl_current_index(struct dtl *dtl)
{
	return lppaca_of(dtl->cpu).dtl_idx;
}