// IRQ on monitor
void MT_trace_irq_on(void)
{
    struct sched_stop_event *e;

    e = &__raw_get_cpu_var(IRQ_disable_mon);
    e->last_ts = e->cur_ts;
    e->cur_ts = 0;
    e->last_te = sched_clock();
}
// SoftIRQ monitor
void mt_trace_SoftIRQ_start(int sq_num)
{
    struct sched_block_event *b;

    b = &__raw_get_cpu_var(SoftIRQ_mon);
    b->cur_ts = sched_clock();
    b->cur_event = (unsigned long)sq_num;
}
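All of these monitor hooks fetch per-CPU state with __raw_get_cpu_var(). The variables themselves are declared elsewhere; a minimal sketch of what those declarations could look like, with the struct layout inferred from the fields the hooks touch (treat it as an assumption, not the original definition):

#include <linux/percpu.h>

/* Assumed layout, inferred from the fields used by the hooks above. */
struct sched_block_event {
	unsigned long      cur_event, last_event;
	unsigned long long cur_ts, last_ts, last_te;   /* sched_clock() timestamps */
	unsigned long      cur_count;
};

static DEFINE_PER_CPU(struct sched_block_event, SoftIRQ_mon);
static DEFINE_PER_CPU(struct sched_block_event, ISR_mon);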
Example #3
static void do_init_timer(struct timer_list *timer, unsigned int flags,
						  const char *name)
{
	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);

	timer->entry.next = NULL;
	timer->base = (void *)((unsigned long)base | flags);
	timer->slack = -1;
}
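do_init_timer() is the internal helper reached from the timer setup macros. For context, a small usage sketch of the classic (pre-timer_setup) timer API that ends up here; my_timer, my_timer_fn and start_example_timer are hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;               /* hypothetical example timer */

static void my_timer_fn(unsigned long data)
{
	/* Runs in softirq context when the timer expires. */
}

static void start_example_timer(void)
{
	setup_timer(&my_timer, my_timer_fn, 0);  /* goes through the init path shown above */
	mod_timer(&my_timer, jiffies + HZ);      /* fire in roughly one second */
}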
Example #4
static void at91_unmute_pic(void)
{
	struct at91_gpio_chip *prev, *chip = NULL;
	unsigned long muted;
	unsigned i;

	at91_sys_write(AT91_AIC_IECR, __raw_get_cpu_var(__ipipe_muted_irqs)[0]);
	for (i = 0; i < gpio_banks; i++) {
		prev = chip;
		chip = &gpio_chip[i];
		if (!(*chip->nonroot_gpios))
			continue;

		muted = __raw_get_cpu_var(__ipipe_muted_irqs)
			[i + PIN_BASE / 32];
		__raw_writel(muted, chip->regbase + PIO_IER);
	}
}
Example #5
/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
    struct hrtimer_cpu_base *cpu_base;

    cpu_base = &__raw_get_cpu_var(hrtimer_bases);
    *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

    return 0;
}
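A trivial illustrative caller (the helper name is hypothetical); with high-resolution timers active the reported resolution is typically 1 ns, otherwise one jiffy:

#include <linux/hrtimer.h>
#include <linux/printk.h>

static void report_hrtimer_resolution(void)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	pr_info("hrtimer resolution: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
}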
Example #6
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}
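watchdog_disable() above and watchdog_enable() further down switch the watchdog thread's scheduling class via watchdog_set_prio(). A plausible sketch of that helper, shown as an assumption rather than the exact source:

#include <linux/sched.h>

/* Assumed helper: change the current (watchdog) thread's policy/priority. */
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}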
// SoftTimer monitor
void mt_trace_sft_start(void *func)
{
    struct sched_block_event *b;

    b = &__raw_get_cpu_var(sft_mon);
    b->cur_ts = sched_clock();
    b->cur_event = (unsigned long)func;
    b->cur_count++;
}
// IRQ off monitor
void MT_trace_irq_off(void)
{
    struct sched_stop_event *e;
    struct stack_trace *trace;

    e = &__raw_get_cpu_var(IRQ_disable_mon);
    e->cur_ts = sched_clock();

    /* save timestamp */
    __raw_get_cpu_var(TS_irq_off) = sched_clock();

    /* save backtrace */
    trace = &__raw_get_cpu_var(MT_stack_trace);
    trace->nr_entries  = 0;
    trace->max_entries = MAX_STACK_TRACE_DEPTH; /* 32 */
    trace->skip        = 0;
    save_stack_trace_tsk(current, trace);
}
void mt_trace_ISR_end(int irq)
{
    struct sched_block_event *b;

    b = &__raw_get_cpu_var(ISR_mon);
    WARN_ON(b->cur_event != irq);
    b->last_event = b->cur_event;
    b->last_ts = b->cur_ts;
    b->last_te = sched_clock();
    b->cur_event = 0;
    b->cur_ts = 0;
    event_duration_check(b);
    aee_rr_rec_last_irq_exit(smp_processor_id(), irq, b->last_te);

    // reset HRTimer function counter
    b = &__raw_get_cpu_var(hrt_mon);
    reset_event_count(b);
}
// ISR monitor
void mt_trace_ISR_start(int irq)
{
    struct sched_block_event *b;

    b = &__raw_get_cpu_var(ISR_mon);
    b->cur_ts = sched_clock();
    b->cur_event = (unsigned long)irq;
    aee_rr_rec_last_irq_enter(smp_processor_id(), irq, b->cur_ts);
}
Example #11
static inline unsigned int get_and_clear_irq_fired(void)
{
	/* This is potentially not atomic  since we might migrate if
	 * preemptions are not disabled. As a tradeoff between
	 * accuracy and tracing overheads, this seems acceptable.
	 * If it proves to be a problem, then one could add a callback
	 * from the migration code to invalidate irq_fired_count.
	 */
	return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0);
}
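The per-CPU counter that is read and cleared here needs a producer on the interrupt path. A hedged sketch of that side, reusing the counter name from the example; the hook point and helper name are assumptions:

#include <linux/percpu.h>
#include <linux/atomic.h>

static DEFINE_PER_CPU(atomic_t, irq_fired_count);

/* Hypothetical hook, assumed to be called from the IRQ entry path. */
static inline void mark_irq_fired(void)
{
	atomic_inc(&__raw_get_cpu_var(irq_fired_count));
}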
void mt_trace_SoftIRQ_end(int sq_num)
{
    struct sched_block_event *b;

    b = &__raw_get_cpu_var(SoftIRQ_mon);
    WARN_ON(b->cur_event != sq_num);
    b->last_event = b->cur_event;
    b->last_ts = b->cur_ts;
    b->last_te = sched_clock();
    b->cur_event = 0;
    b->cur_ts = 0;
    event_duration_check(b);

    // reset soft timer function counter
    b = &__raw_get_cpu_var(sft_mon);
    reset_event_count(b);

    // reset tasklet function counter
    b = &__raw_get_cpu_var(tasklet_mon);
    reset_event_count(b);
}
Example #13
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	__raw_get_cpu_var(watchdog_nmi_touch) = true;
	touch_softlockup_watchdog();
}
Example #14
static irqreturn_t timer_handler(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
//#ifdef CONFIG_MT_SCHED_MONITOR
#if 0
    // add timer event tracer for wdt debug
    __raw_get_cpu_var(local_timer_ts) = sched_clock();
	if (generic_timer_ack()) {
		evt->event_handler(evt);
        __raw_get_cpu_var(local_timer_te) = sched_clock();
		return IRQ_HANDLED;
	}
    __raw_get_cpu_var(local_timer_te) = sched_clock();
	return IRQ_NONE;
#else
	if (generic_timer_ack()) {
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
#endif
}
void mt_trace_sft_end(void *func)
{
    struct sched_block_event *b;

    b = &__raw_get_cpu_var(sft_mon);
    WARN_ON(b->cur_event != (unsigned long)func);
    b->last_event = b->cur_event;
    b->last_ts = b->cur_ts;
    b->last_te = sched_clock();
    b->cur_event = 0;
    b->cur_ts = 0;
    event_duration_check(b);
}
Example #16
void __sched io_schedule(void)
{
#ifndef DDE_LINUX
  struct rq *rq = &__raw_get_cpu_var(runqueues);

  delayacct_blkio_start();
  atomic_inc(&rq->nr_iowait);
#endif
  schedule();
#ifndef DDE_LINUX
  atomic_dec(&rq->nr_iowait);
  delayacct_blkio_end();
#endif
}
Example #17
static void at91_mute_pic(void)
{
	struct at91_gpio_chip *prev, *chip = NULL;
	unsigned long unmasked, muted;
	unsigned i;

	for (i = 0; i < gpio_banks; i++) {
		prev = chip;
		chip = &gpio_chip[i];
		if (!(*chip->nonroot_gpios))
			continue;

		unmasked = __raw_readl(chip->regbase + PIO_IMR);
		muted = unmasked & __ipipe_irqbits[i + 1];
		__raw_get_cpu_var(__ipipe_muted_irqs)
			[i + PIN_BASE / 32] = muted;
		__raw_writel(muted, chip->regbase + PIO_IDR);
	}

	unmasked = at91_sys_read(AT91_AIC_IMR);
	muted = unmasked & __ipipe_irqbits[0];
	__raw_get_cpu_var(__ipipe_muted_irqs)[0] = muted;
	at91_sys_write(AT91_AIC_IDCR, muted);
}
static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
				HRTIMER_MODE_REL_PINNED);
}
Example #19
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	struct cpumask *cpus_in_cluster_ptr;
	struct cpumask *ipi_mask_ptr;
	unsigned int cpu, this_cpu;
	unsigned long flags;
	u32 dest;

	x2apic_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();

	/*
	 * We are to modify mask, so we need an own copy
	 * and be sure it's manipulated with irq off.
	 */
	ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
	cpumask_copy(ipi_mask_ptr, mask);

	/*
	 * The idea is to send one IPI per cluster.
	 */
	for_each_cpu(cpu, ipi_mask_ptr) {
		unsigned long i;

		cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
		dest = 0;

		/* Collect cpus in cluster. */
		for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
			if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
				dest |= per_cpu(x86_cpu_to_logical_apicid, i);
		}

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/*
		 * Cluster sibling cpus should be discarded now so
		 * we would not send an IPI to them a second time.
		 */
		cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
	}

	local_irq_restore(flags);
}
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}
Example #21
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
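__hrtimer_init() is normally reached through the public hrtimer_init(). A self-contained usage sketch of that API; demo_timer, demo_timer_fn and start_demo_timer are hypothetical names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;                /* hypothetical example timer */

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;                /* one-shot */
}

static void start_demo_timer(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	/* Expire 10 ms from now, relative to the current time. */
	hrtimer_start(&demo_timer, ktime_set(0, 10 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
}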
void MT_trace_hardirqs_off(void)
{
    if (unlikely(__raw_get_cpu_var(mtsched_mon_enabled) & 0x2)) {
        if (current->pid == 0) /* Ignore the swapper/idle thread */
            return;
        if (__raw_get_cpu_var(MT_trace_in_sched))
            return;
        if (__raw_get_cpu_var(MT_trace_in_resume_console))
            return;
        if (__raw_get_cpu_var(MT_tracing_cpu) == 0) {
            MT_trace_irq_off();
            __raw_get_cpu_var(t_irq_off) = sched_clock();
        }
        __raw_get_cpu_var(MT_tracing_cpu) = 1;
    }
}
Example #23
void wake_up_klogd(void)
{
	if (waitqueue_active(&log_wait))
		__raw_get_cpu_var(printk_pending) = 1;
}
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain.	 Hw
 * interrupts are off on entry.
 */
int __ipipe_handle_irq(struct pt_regs *regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	int irq, vector = regs->orig_ax;
	struct list_head *head, *pos;
	struct pt_regs *tick_regs;
	int m_ack;

	if (vector < 0) {
		irq = __get_cpu_var(vector_irq)[~vector];
		BUG_ON(irq < 0);
		m_ack = 0;
	} else { /* This is a self-triggered one. */
		irq = vector;
		m_ack = 1;
	}

	this_domain = ipipe_current_domain;

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			__ipipe_dispatch_wired(next_domain, irq);
			goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto finalize_nosync;

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline.
	 */

	__ipipe_walk_pipeline(head);

finalize_nosync:

	/*
	 * Given our deferred dispatching model for regular IRQs, we
	 * only record CPU regs for the last timer interrupt, so that
	 * the timer handler charges CPU times properly. It is assumed
	 * that other interrupt handlers don't actually care for such
	 * information.
	 */

	if (irq == __ipipe_hrtimer_irq) {
		tick_regs = &__raw_get_cpu_var(__ipipe_tick_regs);
		tick_regs->flags = regs->flags;
		tick_regs->cs = regs->cs;
		tick_regs->ip = regs->ip;
		tick_regs->bp = regs->bp;
#ifdef CONFIG_X86_64
		tick_regs->ss = regs->ss;
		tick_regs->sp = regs->sp;
#endif
		if (!__ipipe_root_domain_p)
			tick_regs->flags &= ~X86_EFLAGS_IF;
	}

	if (user_mode(regs) && (current->ipipe_flags & PF_EVTRET) != 0) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!__ipipe_root_domain_p ||
	    test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		return 0;

	return 1;
}
Example #25
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{ 
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (should_fail_iommu(dev))
		return DMA_ERROR_CODE;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << tbl->it_page_shift);
	else
		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     boundary_size >> tbl->it_page_shift, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
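The pool index above comes from a per-CPU hash so CPUs spread across pools without disabling preemption. A sketch of how that hash could be seeded at boot; IOMMU_POOL_HASHBITS and the init hook are assumptions here:

#include <linux/percpu.h>
#include <linux/hash.h>

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

static void __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	/* Spread CPUs across the pools; nr_pools is a power of two. */
	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}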
Example #26
void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}
Example #27
static inline void clear_irq_fired(void)
{
	atomic_set(&__raw_get_cpu_var(irq_fired_count), 0);
}
Example #28
void touch_softlockup_watchdog(void)
{
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}