Example #1
/*
 * Spurious interrupts should _never_ happen with our APIC/SMP architecture.
 */
fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs)
{
    unsigned long v;
    struct cpu_user_regs *old_regs = set_irq_regs(regs);

    irq_enter();

    /*
     * Check if this is a vectored interrupt (most likely, as this is probably
     * a request to dump local CPU state). Vectored interrupts are ACKed;
     * spurious interrupts are not.
     */
    v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
    if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) {
        ack_APIC_irq();
        if (this_cpu(state_dump_pending)) {
            this_cpu(state_dump_pending) = 0;
            dump_execstate(regs);
            goto out;
        }
    }

    /* see sw-dev-man vol 3, chapter 7.4.13.5 */
    printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should "
           "never happen.\n", smp_processor_id());

 out:
    irq_exit();
    set_irq_regs(old_regs);
}
Example #2
static inline s_time_t avg_intr_interval_us(void)
{
    struct menu_device *data = &__get_cpu_var(menu_devices);
    s_time_t    duration, now;
    s_time_t    avg_interval;
    unsigned int irq_sum;

    now = NOW();
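    /* Decayed running averages: each new sample is weighted by
     * (DECAY - 1) and folded into the accumulated totals below. */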
    duration = (data->pf.duration + (now - data->pf.time_stamp)
            * (DECAY - 1)) / DECAY;

    irq_sum = (data->pf.irq_sum + (this_cpu(irq_count) - data->pf.irq_count_stamp)
            * (DECAY - 1)) / DECAY;

    if (irq_sum == 0)
        /* no irq recently, so return a big enough interval: 1 sec */
        avg_interval = 1000000;
    else
        avg_interval = duration / irq_sum / 1000; /* in us */

    if (duration >= SAMPLING_PERIOD) {
        data->pf.time_stamp = now;
        data->pf.duration = duration;
        data->pf.irq_count_stamp = this_cpu(irq_count);
        data->pf.irq_sum = irq_sum;
    }

    return avg_interval;
}
Example #3
void trace_add(union trace *trace, u8 type, u16 len)
{
	struct trace_info *ti = this_cpu()->trace;
	unsigned int tsz;

	trace->hdr.type = type;
	trace->hdr.len_div_8 = (len + 7) >> 3;

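	/* tsz is len rounded up to the next multiple of 8 bytes. */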
	tsz = trace->hdr.len_div_8 << 3;

#ifdef DEBUG_TRACES
	assert(tsz >= sizeof(trace->hdr));
	assert(tsz <= sizeof(*trace));
	assert(trace->hdr.type != TRACE_REPEAT);
	assert(trace->hdr.type != TRACE_OVERFLOW);
#endif
	/* Skip traces not enabled in the debug descriptor */
	if (!((1ul << trace->hdr.type) & debug_descriptor.trace_mask))
		return;

	trace->hdr.timestamp = cpu_to_be64(mftb());
	trace->hdr.cpu = cpu_to_be16(this_cpu()->server_no);

	lock(&ti->lock);

	/* Throw away old entries before we overwrite them. */
	while ((be64_to_cpu(ti->tb.start) + be64_to_cpu(ti->tb.mask) + 1)
	       < (be64_to_cpu(ti->tb.end) + tsz)) {
		struct trace_hdr *hdr;

		hdr = (void *)ti->tb.buf +
			be64_to_cpu(ti->tb.start & ti->tb.mask);
		ti->tb.start = cpu_to_be64(be64_to_cpu(ti->tb.start) +
					   (hdr->len_div_8 << 3));
	}

	/* Must update ->start before we rewrite new entries. */
	lwsync(); /* write barrier */

	/* Check for duplicates... */
	if (!handle_repeat(&ti->tb, trace)) {
		/* This may go off end, and that's why ti->tb.buf is oversize */
		memcpy(ti->tb.buf + be64_to_cpu(ti->tb.end & ti->tb.mask),
		       trace, tsz);
		ti->tb.last = ti->tb.end;
		lwsync(); /* write barrier: write entry before exposing */
		ti->tb.end = cpu_to_be64(be64_to_cpu(ti->tb.end) + tsz);
	}
	unlock(&ti->lock);
}
Example #4
fastcall void smp_error_interrupt(struct cpu_user_regs *regs)
{
    unsigned long v, v1;
    struct cpu_user_regs *old_regs = set_irq_regs(regs);

    this_cpu(irq_count)++;
    irq_enter();
    /* First tickle the hardware, only then report what went on. -- REW */
    v = apic_read(APIC_ESR);
    apic_write(APIC_ESR, 0);
    v1 = apic_read(APIC_ESR);
    ack_APIC_irq();

    /* Here is what the APIC error bits mean:
       0: Send CS error
       1: Receive CS error
       2: Send accept error
       3: Receive accept error
       4: Reserved
       5: Send illegal vector
       6: Received illegal vector
       7: Illegal register address
    */
    printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
           smp_processor_id(), v, v1);
    irq_exit();
    set_irq_regs(old_regs);
}
Example #5
File: intel.c Project: Chong-Li/xen
void set_cpuid_faulting(bool_t enable)
{
	uint32_t hi, lo;

	if (!cpu_has_cpuid_faulting ||
	    this_cpu(cpuid_faulting_enabled) == enable )
		return;

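	/* Read-modify-write the CPUID-faulting bit in the MSR. */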
	rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
	lo &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
	if (enable)
		lo |= MSR_MISC_FEATURES_CPUID_FAULTING;
	wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);

	this_cpu(cpuid_faulting_enabled) = enable;
}
Example #6
void vmm_scheduler_yield(void)
{
	irq_flags_t flags;
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	arch_cpu_irq_save(flags);

	if (schedp->irq_context) {
		vmm_panic("%s: Cannot yield in IRQ context\n", __func__);
	}

	if (!schedp->current_vcpu) {
		vmm_panic("%s: NULL VCPU pointer\n", __func__);
	}

	if (schedp->current_vcpu->is_normal) {
		/* For a Normal VCPU, just enable yield on exit; the rest
		 * will be taken care of by vmm_scheduler_irq_exit().
		 */
		if (vmm_manager_vcpu_get_state(schedp->current_vcpu) == 
						VMM_VCPU_STATE_RUNNING) {
			schedp->yield_on_irq_exit = TRUE;
		}
	} else {
		/* For an Orphan VCPU, forcefully expire the yield. */
		arch_vcpu_preempt_orphan();
	}

	arch_cpu_irq_restore(flags);
}
Example #7
int __cpuinit vmm_scheduler_init(void)
{
	int rc;
	char vcpu_name[VMM_FIELD_NAME_SIZE];
	u32 cpu = vmm_smp_processor_id();
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	/* Reset the scheduler control structure */
	memset(schedp, 0, sizeof(struct vmm_scheduler_ctrl));

	/* Create ready queue (Per Host CPU) */
	schedp->rq = vmm_schedalgo_rq_create();
	if (!schedp->rq) {
		return VMM_EFAIL;
	}
	INIT_SPIN_LOCK(&schedp->rq_lock);

	/* Initialize current VCPU. (Per Host CPU) */
	schedp->current_vcpu = NULL;

	/* Initialize IRQ state (Per Host CPU) */
	schedp->irq_context = FALSE;
	schedp->irq_regs = NULL;

	/* Initialize yield on exit (Per Host CPU) */
	schedp->yield_on_irq_exit = FALSE;

	/* Create timer event and start it. (Per Host CPU) */
	INIT_TIMER_EVENT(&schedp->ev, &vmm_scheduler_timer_event, schedp);

	/* Create idle orphan vcpu with default time slice. (Per Host CPU) */
	vmm_snprintf(vcpu_name, sizeof(vcpu_name), "idle/%d", cpu);
	schedp->idle_vcpu = vmm_manager_vcpu_orphan_create(vcpu_name,
						(virtual_addr_t)&idle_orphan,
						IDLE_VCPU_STACK_SZ,
						IDLE_VCPU_PRIORITY, 
						IDLE_VCPU_TIMESLICE);
	if (!schedp->idle_vcpu) {
		return VMM_EFAIL;
	}

	/* The idle vcpu needs to stay on this cpu */
	if ((rc = vmm_manager_vcpu_set_affinity(schedp->idle_vcpu,
						vmm_cpumask_of(cpu)))) {
		return rc;
	}

	/* Kick idle orphan vcpu */
	if ((rc = vmm_manager_vcpu_kick(schedp->idle_vcpu))) {
		return rc;
	}

	/* Start scheduler timer event */
	vmm_timer_event_start(&schedp->ev, 0);

	/* Mark this CPU online */
	vmm_set_cpu_online(cpu, TRUE);

	return VMM_OK;
}
Example #8
void vmx_vmcs_enter(struct vcpu *v)
{
    struct foreign_vmcs *fv;

    /*
     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
     * vmx_vmcs_enter/exit critical regions.
     */
    if ( likely(v == current) )
        return;

    fv = &this_cpu(foreign_vmcs);

    if ( fv->v == v )
    {
        BUG_ON(fv->count == 0);
    }
    else
    {
        BUG_ON(fv->v != NULL);
        BUG_ON(fv->count != 0);

        vcpu_pause(v);
        spin_lock(&v->arch.hvm_vmx.vmcs_lock);

        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);

        fv->v = v;
    }

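    /* Nested enters on the same foreign VMCS are reference-counted;
     * the matching exit path drops the count. */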
    fv->count++;
}
Example #9
irqstate_t enter_critical_section(void)
{
  FAR struct tcb_s *rtcb;

  /* Do nothing if called from an interrupt handler */

  if (up_interrupt_context())
    {
      /* The value returned does not matter.  We assume only that it is a
       * scalar here.
       */

      return (irqstate_t)0;
    }

  /* Do we already have interrupts disabled? */

  rtcb = this_task();
  DEBUGASSERT(rtcb != NULL);

  if (rtcb->irqcount > 0)
    {
      /* Yes... make sure that the spinlock is set and increment the IRQ
       * lock count.
       */

      DEBUGASSERT(g_cpu_irqlock == SP_LOCKED && rtcb->irqcount < INT16_MAX);
      rtcb->irqcount++;
    }
  else
    {
      /* NO.. Take the spinlock to get exclusive access and set the lock
       * count to 1.
       *
       * We must avoid the case where a context switch occurs between
       * taking the g_cpu_irqlock and disabling interrupts.  Also,
       * interrupt disables must follow a stacked order; we cannot allow
       * other context switches to re-order the enabling/disabling of
       * interrupts.
       *
       * The scheduler accomplishes this by treating the irqcount like
       * lockcount:  Both will disable pre-emption.
       */

      spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
                  &g_cpu_irqlock);
      rtcb->irqcount = 1;

#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
      /* Note that we have entered the critical section */

      sched_note_csection(rtcb, true);
#endif
    }

  /* Then disable interrupts (they may already be disabled, but we need to
   * return a valid interrupt status in any event).
   */

  return up_irq_save();
}
Example #10
static void xscom_reset(uint32_t gcid, bool need_delay)
{
	u64 hmer;
	uint32_t recv_status_reg, log_reg, err_reg;
	struct timespec ts;

	/* Clear errors in HMER */
	mtspr(SPR_HMER, HMER_CLR_MASK);

	/* Setup local and target scom addresses */
	if (proc_gen == proc_gen_p9) {
		recv_status_reg = 0x00090018;
		log_reg = 0x0090012;
		err_reg = 0x0090013;
	} else {
		recv_status_reg = 0x202000f;
		log_reg = 0x2020007;
		err_reg = 0x2020009;
	}

	/* First we need to write 0 to a register on our chip */
	out_be64(xscom_addr(this_cpu()->chip_id, recv_status_reg), 0);
	hmer = xscom_wait_done();
	if (hmer & SPR_HMER_XSCOM_FAIL)
		goto fail;

	/* Then we need to clear those two other registers on the target */
	out_be64(xscom_addr(gcid, log_reg), 0);
	hmer = xscom_wait_done();
	if (hmer & SPR_HMER_XSCOM_FAIL)
		goto fail;
	out_be64(xscom_addr(gcid, err_reg), 0);
	hmer = xscom_wait_done();
	if (hmer & SPR_HMER_XSCOM_FAIL)
		goto fail;

	if (need_delay) {
		/*
		 * It's observed that an immediate retry of an XSCOM
		 * operation sometimes returns wrong data, so add a
		 * delay for the XSCOM reset to take effect. A delay
		 * of 10 ms was found experimentally to work fine.
		 * FIXME: Replace the 10ms delay with the exact delay
		 * needed, or some other method of confirming XSCOM
		 * reset completion, after checking with HW folks.
		 */
		ts.tv_sec = 0;
		ts.tv_nsec = 10 * 1000 * 1000;	/* 10 ms, per the comment above */
		nanosleep_nopoll(&ts, NULL);
	}
	return;
 fail:
	/* Fatal error resetting XSCOM */
	log_simple_error(&e_info(OPAL_RC_XSCOM_RESET),
		"XSCOM: Fatal error resetting engine after failed access !\n");

	/* XXX Generate error log ? attn ? panic ?
	 * If we decide to panic, change the above severity to PANIC
	 */
}
Example #11
void vmm_scheduler_irq_exit(arch_regs_t *regs)
{
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);
	struct vmm_vcpu *vcpu = NULL;

	/* Determine current vcpu */
	vcpu = schedp->current_vcpu;
	if (!vcpu) {
		return;
	}

	/* If the current VCPU is not RUNNING, or yield on exit is set,
	 * then do a context switch.
	 */
	if ((vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_RUNNING) ||
	    schedp->yield_on_irq_exit) {
		vmm_scheduler_next(schedp, &schedp->ev, schedp->irq_regs);
		schedp->yield_on_irq_exit = FALSE;
	}

	/* VCPU irq processing */
	vmm_vcpu_irq_process(vcpu, regs);

	/* Indicate that we have exited IRQ */
	schedp->irq_context = FALSE;

	/* Clear pointer to IRQ registers */
	schedp->irq_regs = NULL;
}
Example #12
fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs)
{
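    /* Ack the local APIC, account the interrupt on this CPU, then let
     * the HVM code handle the PMU event. */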
    struct cpu_user_regs *old_regs = set_irq_regs(regs);
    ack_APIC_irq();
    this_cpu(irq_count)++;
    hvm_do_pmu_interrupt(regs);
    set_irq_regs(old_regs);
}
Example #13
static void vmm_scheduler_timer_event(struct vmm_timer_event *ev)
{
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

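	/* Only reschedule when called from IRQ context; irq_regs is set
	 * by vmm_scheduler_irq_enter() and cleared on IRQ exit. */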
	if (schedp->irq_regs) {
		vmm_scheduler_switch(schedp, schedp->irq_regs);
	}
}
Example #14
static void ns16550_poll(void *data)
{
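    /* Stash the port for __ns16550_poll() in a per-CPU variable, then
     * poll either via a synthesised exception frame (when available)
     * or directly with the current guest register state. */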
    this_cpu(poll_port) = data;
#ifdef run_in_exception_handler
    run_in_exception_handler(__ns16550_poll);
#else
    __ns16550_poll(guest_cpu_user_regs());
#endif
}
Example #15
fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
{
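    /* Ack the APIC timer tick, account it, and defer the real work to
     * the TIMER_SOFTIRQ handler. */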
    struct cpu_user_regs *old_regs = set_irq_regs(regs);
    ack_APIC_irq();
    perfc_incr(apic_timer);
    this_cpu(irq_count)++;
    raise_softirq(TIMER_SOFTIRQ);
    set_irq_regs(old_regs);
}
Example #16
static unsigned int get_sleep_length_us(void)
{
    s_time_t us = (this_cpu(timer_deadline) - NOW()) / 1000;
    /*
     * If us < 0 or us > (u32)-1, return a large u32. We choose
     * (unsigned int)-2000 to avoid wrapping when the exit latency is
     * added, since that latency should not be larger than 2ms.
     */
    return (us >> 32) ? (unsigned int)-2000 : (unsigned int)us;
}
Example #17
static void vmx_load_vmcs(struct vcpu *v)
{
    unsigned long flags;

    local_irq_save(flags);

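    /* First activation on this CPU: link the VMCS into this CPU's
     * active list and record the owning CPU. */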
    if ( v->arch.hvm_vmx.active_cpu == -1 )
    {
        list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list));
        v->arch.hvm_vmx.active_cpu = smp_processor_id();
    }

    ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id());

    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;

    local_irq_restore(flags);
}
Example #18
static void decode_malfunction(struct OpalHMIEvent *hmi_evt, uint64_t *out_flags)
{
	int i;
	uint64_t malf_alert, flags;

	flags = 0;

	if (!setup_scom_addresses()) {
		prerror("Failed to setup scom addresses\n");
		/* Send an unknown HMI event. */
		hmi_evt->u.xstop_error.xstop_type = CHECKSTOP_TYPE_UNKNOWN;
		hmi_evt->u.xstop_error.xstop_reason = 0;
		queue_hmi_event(hmi_evt, false, out_flags);
		return;
	}

	xscom_read(this_cpu()->chip_id, malf_alert_scom, &malf_alert);

	if (!malf_alert)
		return;

	for (i = 0; i < 64; i++) {
		if (malf_alert & PPC_BIT(i)) {
			xscom_write(this_cpu()->chip_id, malf_alert_scom,
								~PPC_BIT(i));
			find_capp_checkstop_reason(i, hmi_evt, &flags);
			find_nx_checkstop_reason(i, hmi_evt, &flags);
			find_npu_checkstop_reason(i, hmi_evt, &flags);
		}
	}

	find_core_checkstop_reason(hmi_evt, &flags);

	/*
	 * If we fail to find a checkstop reason, send an unknown HMI event.
	 */
	if (!(flags & OPAL_HMI_FLAGS_NEW_EVENT)) {
		hmi_evt->u.xstop_error.xstop_type = CHECKSTOP_TYPE_UNKNOWN;
		hmi_evt->u.xstop_error.xstop_reason = 0;
		queue_hmi_event(hmi_evt, false, &flags);
	}
	*out_flags |= flags;
}
Example #19
void *sched_ap_idle_thread (void *notused)
    {    
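    /* AP idle loop: bring up the local APIC with interrupts masked,
     * then enter the per-CPU heart-beat loop (assumed not to return). */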
    interrupts_disable();
    
    lapic_common_init();

    interrupts_enable();

    thread_create_test();
    
    cpu_heart_beat(this_cpu());
    }
Example #20
int __cpuinit twd_clockchip_init(virtual_addr_t base, 
				virtual_addr_t ref_counter_addr,
				u32 ref_counter_freq,
				u32 ppi_hirq)
{
	int rc;
	u32 cpu = vmm_smp_processor_id();
	struct twd_clockchip *cc = &this_cpu(twd_cc);

	memset(cc, 0, sizeof(struct twd_clockchip));

	twd_caliberate_freq(base, ref_counter_addr, ref_counter_freq);

	vmm_sprintf(cc->name, "twd/%d", cpu);

	cc->base = base;
	cc->clkchip.name = cc->name;
	cc->clkchip.hirq = ppi_hirq;
	cc->clkchip.rating = 350;
	cc->clkchip.cpumask = vmm_cpumask_of(cpu);
	cc->clkchip.features = 
		VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
	cc->clkchip.shift = 20;
	cc->clkchip.mult = vmm_clockchip_hz2mult(twd_freq_hz, cc->clkchip.shift);
	cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
	cc->clkchip.max_delta_ns = 
			vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
	cc->clkchip.set_mode = &twd_clockchip_set_mode;
	cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
	cc->clkchip.expire = &twd_clockchip_expire;
	cc->clkchip.priv = cc;

	if (!cpu) {
		/* Register interrupt handler */
		if ((rc = vmm_host_irq_register(ppi_hirq, "twd",
						&twd_clockchip_irq_handler, 
						cc))) {
			return rc;
		}

		/* Mark interrupt as per-cpu */
		if ((rc = vmm_host_irq_mark_per_cpu(ppi_hirq))) {
			return rc;
		}
	}

	/* Explicitly enable the local timer PPI in the GIC.
	 * Note: the local timer requires PPI support, hence requires a GIC.
	 */
	gic_enable_ppi(ppi_hirq);

	return vmm_clockchip_register(&cc->clkchip);
}
Example #21
static vmm_irq_return_t twd_clockchip_irq_handler(int irq_no, void *dev)
{
	struct twd_clockchip *tcc = &this_cpu(twd_cc);

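	/* Clear the TWD interrupt status (ack), then run the registered
	 * clockchip event handler. */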
	if (vmm_readl((void *)(twd_base + TWD_TIMER_INTSTAT))) {
		vmm_writel(1, (void *)(twd_base + TWD_TIMER_INTSTAT));
	}

	tcc->clkchip.event_handler(&tcc->clkchip);

	return VMM_IRQ_HANDLED;
}
Example #22
static void idle_orphan(void)
{
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

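	/* Idle VCPU body: wait for an interrupt whenever the ready queue
	 * is empty, then yield so runnable VCPUs can be scheduled. */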
	while (1) {
		if (rq_length(schedp, IDLE_VCPU_PRIORITY) == 0) {
			arch_cpu_wait_for_irq();
		}

		vmm_scheduler_yield();
	}
}
Example #23
static void __vmx_clear_vmcs(void *info)
{
    struct vcpu *v = info;
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    /* Otherwise we can nest (vmx_cpu_down() vs. vmx_clear_vmcs()). */
    ASSERT(!local_irq_is_enabled());

    if ( arch_vmx->active_cpu == smp_processor_id() )
    {
        __vmpclear(virt_to_maddr(arch_vmx->vmcs));

        arch_vmx->active_cpu = -1;
        arch_vmx->launched   = 0;

        list_del(&arch_vmx->active_list);

        if ( arch_vmx->vmcs == this_cpu(current_vmcs) )
            this_cpu(current_vmcs) = NULL;
    }
}
Example #24
static inline struct t_rec *
next_record(struct t_buf *buf)
{
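    /* buf->prod may run past the end of the data area by less than one
     * buffer length, so a single wrap-around subtraction suffices. */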
    int x = buf->prod;
    if ( x >= data_size )
        x -= data_size;

    ASSERT(x >= 0);
    ASSERT(x < data_size);

    return (struct t_rec *)&this_cpu(t_data)[x];
}
Example #25
void vmm_scheduler_irq_enter(arch_regs_t *regs, bool vcpu_context)
{
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	/* Record whether we entered from IRQ (rather than VCPU) context */
	schedp->irq_context = (vcpu_context) ? FALSE : TRUE;

	/* Save pointer to IRQ registers */
	schedp->irq_regs = regs;

	/* Ensure that yield on exit is disabled */
	schedp->yield_on_irq_exit = FALSE;
}
Example #26
/*
 * Spurious interrupts should _never_ happen with our APIC/SMP architecture.
 */
void spurious_interrupt(struct cpu_user_regs *regs)
{
    /*
     * Check if this is a vectored interrupt (most likely, as this is probably
     * a request to dump local CPU state). Vectored interrupts are ACKed;
     * spurious interrupts are not.
     */
    if (apic_isr_read(SPURIOUS_APIC_VECTOR)) {
        ack_APIC_irq();
        if (this_cpu(state_dump_pending)) {
            this_cpu(state_dump_pending) = 0;
            dump_execstate(regs);
            goto out;
        }
    }

    /* see sw-dev-man vol 3, chapter 7.4.13.5 */
    printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should "
           "never happen.\n", smp_processor_id());

out: ;
}
Example #27
static void hmi_print_debug(const uint8_t *msg, uint64_t hmer)
{
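	/* Identify the reporting thread: location code, chip, core and
	 * thread of the CPU handling the HMI. */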
	const char *loc;
	uint32_t core_id, thread_index;

	core_id = pir_to_core_id(this_cpu()->pir);
	thread_index = cpu_get_thread_index(this_cpu());

	loc = chip_loc_code(this_cpu()->chip_id);
	if (!loc)
		loc = "Not Available";

	if (hmer & (SPR_HMER_TFAC_ERROR | SPR_HMER_TFMR_PARITY_ERROR)) {
		prlog(PR_DEBUG, "[Loc: %s]: P:%d C:%d T:%d: TFMR(%016lx) %s\n",
			loc, this_cpu()->chip_id, core_id, thread_index,
			mfspr(SPR_TFMR), msg);
	} else {
		prlog(PR_DEBUG, "[Loc: %s]: P:%d C:%d T:%d: %s\n",
			loc, this_cpu()->chip_id, core_id, thread_index,
			msg);
	}
}
Example #28
int up_cpu_pause(int cpu)
{
  int ret;

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the pause event */

  sched_note_cpu_pause(this_task(), cpu);
#endif

  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

  /* Take both spinlocks.  The g_cpu_wait spinlock will prevent the SGI2
   * handler from returning until up_cpu_resume() is called; g_cpu_paused
   * is a handshake that will prevent this function from returning until
   * the CPU is actually paused.
   */

  DEBUGASSERT(!spin_islocked(&g_cpu_wait[cpu]) &&
              !spin_islocked(&g_cpu_paused[cpu]));

  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);

  /* Execute SGI2 */

  ret = xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE);
  if (ret < 0)
    {
      /* What happened?  Unlock the g_cpu_wait spinlock */

      spin_unlock(&g_cpu_wait[cpu]);
    }
  else
    {
      /* Wait for the other CPU to unlock g_cpu_paused, meaning that
       * it is fully paused and ready for up_cpu_resume().
       */

      spin_lock(&g_cpu_paused[cpu]);
    }

  spin_unlock(&g_cpu_paused[cpu]);

  /* On successful return, g_cpu_wait will be locked, the other CPU will
   * be spinning on g_cpu_wait and will not continue until up_cpu_resume()
   * is called.  g_cpu_paused will be unlocked in any case.
   */

  return ret;
}
Example #29
void vmx_do_resume(struct vcpu *v)
{
    bool_t debug_state;

    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        if ( v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) )
            vmx_load_vmcs(v);
    }
    else
    {
        /*
         * For pass-through domain, guest PCI-E device driver may leverage the
         * "Non-Snoop" I/O, and explicitly WBINVD or CLFLUSH to a RAM space.
         * Since migration may occur before WBINVD or CLFLUSH, we need to
         * maintain data consistency either by:
         *  1: flushing cache (wbinvd) when the guest is scheduled out if
         *     there is no wbinvd exit, or
         *  2: execute wbinvd on all dirty pCPUs when guest wbinvd exits.
         */
        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
             !cpu_has_wbinvd_exiting )
        {
            int cpu = v->arch.hvm_vmx.active_cpu;
            if ( cpu != -1 )
                on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
        }

        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        hvm_migrate_timers(v);
        vmx_set_host_env(v);
    }

    debug_state = v->domain->debugger_attached;
    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        unsigned long intercepts = __vmread(EXCEPTION_BITMAP);
        unsigned long mask = (1U << TRAP_debug) | (1U << TRAP_int3);
        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        if ( debug_state )
            intercepts |= mask;
        else
            intercepts &= ~mask;
        __vmwrite(EXCEPTION_BITMAP, intercepts);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}
Example #30
int sched_cpu_pause(FAR struct tcb_s *tcb)
{
  int cpu;
  int ret;

  DEBUGASSERT(tcb != NULL);

  /* If the task is not running at all then our job is easy */

  cpu = tcb->cpu;
  if (tcb->task_state != TSTATE_TASK_RUNNING)
    {
      return -ESRCH;
    }

  /* Check the CPU that the task is running on */

  DEBUGASSERT(cpu != this_cpu() && (unsigned int)cpu < CONFIG_SMP_NCPUS);
  if (cpu == this_cpu())
    {
      /* We can't pause ourselves */

      return -EACCES;
    }

  /* Pause the CPU that the task is running on */

  ret = up_cpu_pause(cpu);
  if (ret < 0)
    {
      return ret;
    }

  /* Return the CPU that the task is running on */

  return cpu;
}