Example #1
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Queue work to do the following on all processors:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. Each CPU announces that it has started the rendezvous handler by
 * decrementing the count; once it hits 0 we reset data.count and set the
 * data.gate flag, allowing all the CPUs to proceed with the work. As each
 * CPU disables interrupts, it decrements data.count once. We wait until it
 * hits 0 and proceed. We clear the data.gate flag and reset data.count.
 * Meanwhile, the other CPUs are waiting for that flag to be cleared. Once
 * it is cleared, each CPU goes through the transition of updating the
 * MTRRs. The CPU vendors may each do it differently, so we call the
 * mtrr_if->set() callback and let them take care of it. When they are done,
 * they again decrement data.count and wait for data.gate to be set. When we
 * finish, we wait for data.count to hit 0 and toggle the data.gate flag.
 * Everyone then enables interrupts and we all continue on.
 *
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;
	int cpu;

#ifdef CONFIG_SMP
	/*
	 * If this cpu is not yet active, we are in the cpu online path. There
	 * can be no stop_machine() in parallel, as stop machine ensures this
	 * by using get_online_cpus(). We can skip taking the stop_cpus_mutex,
	 * as we don't need it and also we can't afford to block while waiting
	 * for the mutex.
	 *
	 * If this cpu is active, we need to prevent stop_machine() happening
	 * in parallel by taking the stop cpus mutex.
	 *
	 * Also, this is called in the context of cpu online path or in the
	 * context where cpu hotplug is prevented. So checking the active status
	 * of the raw_smp_processor_id() is safe.
	 */
	if (cpu_active(raw_smp_processor_id()))
		mutex_lock(&stop_cpus_mutex);
#endif

	preempt_disable();

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);

	/* Make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	for_each_online_cpu(cpu) {
		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);

		if (cpu == smp_processor_id())
			continue;

		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
	}


	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Do our MTRR business */

	/*
	 * HACK!
	 *
	 * We use this same function to initialize the mtrrs during boot,
	 * resume, runtime cpu online and on an explicit request to set a
	 * specific MTRR.
	 *
	 * During boot or suspend, the state of the boot cpu's mtrrs has been
	 * saved, and we want to replicate that across all the cpus that come
	 * online (either at the end of boot or resume or during a runtime cpu
	 * online). If we're doing that, @reg is set to something special and on
	 * this cpu we still do mtrr_if->set_all(). During boot/resume, this
	 * is unnecessary if at this point we are still on the cpu that started
	 * the boot/resume sequence. But there is no guarantee that we are still
	 * on the same cpu. So we do mtrr_if->set_all() on this cpu as well to be
	 * sure that we are in sync with everyone else.
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);
	else
		mtrr_if->set_all();

	/* Wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/*
	 * Wait here for everyone to have seen the gate change
	 * So we're the last ones to touch 'data'
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
	preempt_enable();
#ifdef CONFIG_SMP
	if (cpu_active(raw_smp_processor_id()))
		mutex_unlock(&stop_cpus_mutex);
#endif
}
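
The count/gate handshake described in the comment above is a repeated two-phase rendezvous between the CPU driving the update and every other CPU. As a rough, hypothetical sketch (using C11 atomics in place of the kernel's atomic_t, smp_wmb() and cpu_relax(), and leaving out the interrupt and cache-mode handling), one phase of that handshake looks like this:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int  count;   /* CPUs still expected to arrive in this phase */
static atomic_bool gate;    /* flag the other CPUs spin on                 */

/* Master side: wait for all workers to check in, re-arm the counter,
 * then flip the gate so the workers can move on to the next phase. */
static void master_advance_phase(int nr_other_cpus, bool open)
{
	while (atomic_load(&count))
		;                            /* the kernel code uses cpu_relax() here */
	atomic_store(&count, nr_other_cpus);
	atomic_store(&gate, open);
}

/* Worker side: announce arrival, then spin until the gate is flipped. */
static void worker_rendezvous(bool wanted_gate)
{
	atomic_fetch_sub(&count, 1);
	while (atomic_load(&gate) != wanted_gate)
		;
}

In the code above, set_mtrr() plays the master role; the mtrr_work_handler() callback queued through stop_one_cpu_nowait() plays the worker role once per gate toggle.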
Example #2
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
static void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	if (apic->wait_for_init_deassert)
		apic->wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();
	cpuid = smp_processor_id();
	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Keep the APIC bus
	 * completely silent for 1 second; this overestimates the time
	 * the boot CPU spends sending the up to 2 STARTUP IPIs by a
	 * factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	pr_debug("CALLIN, before setup_local_APIC().\n");
	if (apic->smp_callin_clear_local_apic)
		apic->smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();

	/*
	 * Need to setup vector mappings before we enable interrupts.
	 */
	setup_vector_irq(smp_processor_id());

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}
Example #3
/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
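
A typical use (illustrative only; my_timer is a placeholder struct timer_list, the rest is the standard timer API) is re-arming a coarse housekeeping timer so that it never fires early yet still coalesces with other second-aligned timers:

/* Re-arm roughly 5 seconds from now, rounded up to a full second so the
 * timer can batch with others without ever firing early. */
mod_timer(&my_timer, round_jiffies_up(jiffies + 5 * HZ));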
Example #4
static void cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
Example #6
static u64 nps_clksrc_read(struct clocksource *clksrc)
{
	int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET;

	return (u64)ioread32be(nps_msu_reg_low_addr[cluster]);
}
Example #7
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, HARDBARRIER);
	what |= MASK_TC_BIT(rw, SYNC);
	what |= MASK_TC_BIT(rw, RAHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
Example #8
static int wait_slt_scu_state_sync(unsigned long state, int wait)
{

    int ret = 0, i;
    unsigned long retry = 0;
    static volatile int get_exit = 0;
    static volatile int cpu_num = 0;
    unsigned long all_cpu_mask = 0;

    int cpu = raw_smp_processor_id();

    //printk("wait_slt_scu_state_sync, cpu%d wait state=%d\n", cpu, state);

    if(cpu_num & (0x1 << cpu))
    {
        //printk(KERN_ERR, "cpu%d already waitting\n", cpu);
        return 0;
    }

    while(cpu_num && get_exit)
    {
        //printk(KERN_INFO, "wait other cpu to finish waiting loop\n");
        mdelay(10);
    }

    spin_lock(&scu_wait_sync_lock);
    cpu_num  |= 0x1 << cpu;
    get_exit = 0;
    __cpuc_flush_dcache_area(&get_exit, sizeof(int));
    __cpuc_flush_dcache_area(&cpu_num, sizeof(int));
    spin_unlock(&scu_wait_sync_lock);

    for(i = 0; i < NR_CPUS; i++)
    {
        all_cpu_mask |= (0x1 << i);
    }

    /* wait all cpu in sync loop */
    while(cpu_num != all_cpu_mask)
    {
        retry++;

        if(retry > 0x10000)
        {
            //printk(KERN_INFO, "scu wait sync state (%d) timeout\n", state);
            goto wait_sync_out;
        }

        if(get_exit)
            break;

        //printk(KERN_INFO, "\n\nretry=0x%08x wait state = %d\n", retry, state);
        //slt_scu_print_state();
        mdelay(1);
    }

    spin_lock(&scu_wait_sync_lock);
    get_exit |= 0x1 << cpu;
    __cpuc_flush_dcache_area(&get_exit, sizeof(int));
    spin_unlock(&scu_wait_sync_lock);


    ret = is_slt_scu_state_sync(state);

    /* Make sure all CPUs exit the wait-sync loop.
     * The cpu_num check covers the case where the retry above timed out.
     */
    while(1)
    {
        //printk(KERN_INFO, "wait exit retry\n");
        if(!get_exit ||
           get_exit == all_cpu_mask ||
           cpu_num != all_cpu_mask)
        {
            break;
        }
        mdelay(1);
    }

wait_sync_out:
    spin_lock(&scu_wait_sync_lock);
    cpu_num &= ~(0x01 << cpu);
    __cpuc_flush_dcache_area(&cpu_num, sizeof(int));
    spin_unlock(&scu_wait_sync_lock);

    //printk("cpu%d exit fun, ret=%s\n", cpu, ret ? "pass" : "fail");
    return ret;
}
Example #9
static int slt_scu_test_func(void *data)
{
    int ret = 0, loop, pass;
    int cpu = raw_smp_processor_id();
    unsigned long irq_flag;
    int cpu_cnt;

    unsigned long buf;
    unsigned long *mem_buf = (unsigned long *)data;
    unsigned long retry;

    //spin_lock(&scu_thread_lock[cpu]);
    //local_irq_save(irq_flag);
#if 0
    if(cpu == 0)
    {
        mtk_wdt_enable(WK_WDT_DIS);
    }
#endif

    if(!mem_buf)
    {
        printk(KERN_ERR, "allocate memory fail for cpu scu test\n");
        g_iCPU_PassFail = -1;
        goto scu_thread_out;
    }

    printk("\n>>slt_scu_test_func -- cpu id = %d, mem_buf = 0x%08x <<\n", cpu, mem_buf);

    msleep(50);

    if(!wait_slt_scu_state_sync(SCU_STATE_START, 1))
    {
        printk("cpu%d wait SCU_STATE_START timeout\n", cpu);

        goto scu_thread_out;
    }
    g_iCPU_PassFail = 0;
    g_iSCU_PassFail[cpu] = 1;

    for (loop = 0; loop < g_iScuLoopCount; loop++) {

        slt_scu_write_state(cpu, SCU_STATE_EXECUTE);
        spin_lock_irqsave(&scu_thread_irq_lock[cpu], irq_flag);
        if(!wait_slt_scu_state_sync(SCU_STATE_EXECUTE, 1))
        {
            spin_unlock_irqrestore(&scu_thread_irq_lock[cpu], irq_flag);
            printk("cpu%d wait SCU_STATE_EXECUTE timeout\n", cpu);
            goto scu_thread_out;
        }

        g_iSCU_PassFail[cpu] = fp6_scu_start(mem_buf);
        spin_unlock_irqrestore(&scu_thread_irq_lock[cpu], irq_flag);

        __cpuc_flush_dcache_area(g_iSCU_PassFail, 2*sizeof(int));

        printk("\n>>cpu%d scu : fp6_scu_start %s ret=0x%x<<\n", cpu, g_iSCU_PassFail[cpu] != 0xA? "fail" : "pass", g_iSCU_PassFail[cpu]);

        slt_scu_write_state(cpu, SCU_STATE_EXEEND);

        if(!wait_slt_scu_state_sync(SCU_STATE_EXEEND, 1))
        {
            printk("cpu%d wait SCU_STATE_EXEEND timeout\n", cpu);
            goto scu_thread_out;

        }

        if(cpu == 0)
        {
            pass = 1;
            for(cpu_cnt = 0; cpu_cnt < NR_CPUS; cpu_cnt++)
            {
                if(g_iSCU_PassFail[cpu_cnt] != 0xA)
                {
                    pass = 0;
                }
            }

            if(pass)
            {
                g_iCPU_PassFail += 1;
            }
        }
    }

scu_thread_out:

    slt_scu_write_state(cpu, SCU_STATE_IDEL);

    if(cpu == 0)
    {
        if (g_iCPU_PassFail == g_iScuLoopCount) {
            printk("\n>> CPU scu test pass <<\n\n");
        }else {
            printk("\n>> CPU scu test fail (loop count = %d)<<\n\n", g_iCPU_PassFail);
        }
        //mtk_wdt_enable(WK_WDT_EN);
    }

    wait_slt_scu_state_sync(SCU_STATE_IDEL, 1);

    printk("cpu%d scu thread out\n", cpu);

    //local_irq_restore(irq_flag);

    //spin_unlock(&scu_thread_lock[cpu]);
    return 0;

}
Example #10
/*---------------------------------------------------------------------------*/
int priv_ev_loop_run(void *loop_hndl)
{
	struct xio_ev_loop	*loop = loop_hndl;
	struct xio_ev_data	*tev;
	struct llist_node	*node;
	int cpu;

	clear_bit(XIO_EV_LOOP_STOP, &loop->states);

	switch (loop->flags) {
	case XIO_LOOP_GIVEN_THREAD:
		if (loop->ctx->worker != (uint64_t) get_current()) {
			ERROR_LOG("worker kthread(%p) is not current(%p).\n",
				  (void *) loop->ctx->worker, get_current());
			goto cleanup0;
		}
		/* no need to disable preemption */
		cpu = raw_smp_processor_id();
		if (loop->ctx->cpuid != cpu) {
			TRACE_LOG("worker on core(%d) scheduled to(%d).\n",
				  cpu, loop->ctx->cpuid);
			set_cpus_allowed_ptr(get_current(),
					     cpumask_of(loop->ctx->cpuid));
		}
		break;
	case XIO_LOOP_TASKLET:
		/* were events added to list while in STOP state ? */
		if (!llist_empty(&loop->ev_llist))
			priv_kick_tasklet(loop_hndl);
		return 0;
	case XIO_LOOP_WORKQUEUE:
		/* were events added to list while in STOP state ? */
		while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
			node = llist_reverse_order(node);
			while (node) {
				tev = llist_entry(node, struct xio_ev_data,
						  ev_llist);
				node = llist_next(node);
				tev->work.func = priv_ev_loop_run_work;
				queue_work_on(loop->ctx->cpuid, loop->workqueue,
					      &tev->work);
			}
		}
		return 0;
	default:
		/* undo */
		set_bit(XIO_EV_LOOP_STOP, &loop->states);
		return -1;
	}

retry_wait:
	wait_event_interruptible(loop->wait,
				 test_bit(XIO_EV_LOOP_WAKE, &loop->states));

retry_dont_wait:

	while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
		node = llist_reverse_order(node);
		while (node) {
			tev = llist_entry(node, struct xio_ev_data, ev_llist);
			node = llist_next(node);
			tev->handler(tev->data);
		}
	}

	/* "race point" */
	clear_bit(XIO_EV_LOOP_WAKE, &loop->states);

	if (unlikely(test_bit(XIO_EV_LOOP_STOP, &loop->states)))
		return 0;

	/* if a new entry was added while we were at the "race point",
	 * then wait_event might block forever as the condition is false */
	if (llist_empty(&loop->ev_llist))
		goto retry_wait;

	/* race detected */
	if (!test_and_set_bit(XIO_EV_LOOP_WAKE, &loop->states))
		goto retry_dont_wait;

	/* a wakeup was already called */
	goto retry_wait;

cleanup0:
	set_bit(XIO_EV_LOOP_STOP, &loop->states);
	return -1;
}
Example #11
static struct kvm_para_state *kvm_para_state(void)
{
	return &per_cpu(para_state, raw_smp_processor_id());
}
Example #12
static int cpu_request_microcode(int cpu, const void *buf, size_t bufsize)
{
    struct microcode_amd *mc_amd, *mc_old;
    size_t offset = bufsize;
    size_t last_offset, applied_offset = 0;
    int error = 0;
    struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);

    /* We should bind the task to the CPU */
    BUG_ON(cpu != raw_smp_processor_id());

    if ( *(const uint32_t *)buf != UCODE_MAGIC )
    {
        printk(KERN_ERR "microcode: Wrong microcode patch file magic\n");
        error = -EINVAL;
        goto out;
    }

    mc_amd = xmalloc(struct microcode_amd);
    if ( !mc_amd )
    {
        printk(KERN_ERR "microcode: Cannot allocate memory for microcode patch\n");
        error = -ENOMEM;
        goto out;
    }

    error = install_equiv_cpu_table(mc_amd, buf, &offset);
    if ( error )
    {
        xfree(mc_amd);
        printk(KERN_ERR "microcode: installing equivalent cpu table failed\n");
        error = -EINVAL;
        goto out;
    }

    mc_old = uci->mc.mc_amd;
    /* implicitly validates uci->mc.mc_valid */
    uci->mc.mc_amd = mc_amd;

    /*
     * It's possible the data file has multiple matching ucode entries,
     * so keep searching until we find the latest version.
     */
    mc_amd->mpb = NULL;
    mc_amd->mpb_size = 0;
    last_offset = offset;
    while ( (error = get_ucode_from_buffer_amd(mc_amd, buf, bufsize,
                                               &offset)) == 0 )
    {
        if ( microcode_fits(mc_amd, cpu) )
        {
            error = apply_microcode(cpu);
            if ( error )
                break;
            applied_offset = last_offset;
        }

        last_offset = offset;

        if ( offset >= bufsize )
            break;
    }

    /* On success, keep the microcode patch so it can be
     * re-applied on resume.
     */
    if ( applied_offset )
    {
        int ret = get_ucode_from_buffer_amd(mc_amd, buf, bufsize,
                                            &applied_offset);
        if ( ret == 0 )
            xfree(mc_old);
        else
            error = ret;
    }

    if ( !applied_offset || error )
    {
        xfree(mc_amd);
        uci->mc.mc_amd = mc_old;
    }

  out:
    svm_host_osvw_init();

    /*
     * In some cases we may return an error even if processor's microcode has
     * been updated. For example, the first patch in a container file is loaded
     * successfully but subsequent container file processing encounters a
     * failure.
     */
    return error;
}
Example #13
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
Example #14
void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags, i = 0, fault = 0;
	char buf[150];
	unsigned short *addr;
	unsigned int cpu = raw_smp_processor_id();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	int j, index;
#endif

	trace_buffer_save(tflags);

	pr_notice("Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif

	if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
		for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice("%4i Target : %s\n", i, buf);
			if (!fault && addr == ((unsigned short *)evt_ivhw)) {
				addr = (unsigned short *)bfin_read_TBUF();
				decode_address(buf, (unsigned long)addr);
				pr_notice("      FAULT : %s ", buf);
				decode_instruction(addr);
				pr_cont("\n");
				fault = 1;
				continue;
			}
			if (!fault && addr == (unsigned short *)trap &&
				(cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) {
				decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
				pr_notice("      FAULT : %s ", buf);
				decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr);
				pr_cont("\n");
				fault = 1;
			}
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice("     Source : %s ", buf);
			decode_instruction(addr);
			pr_cont("\n");
		}
	}

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	if (trace_buff_offset)
		index = trace_buff_offset / 4;
	else
		index = EXPAND_LEN;

	j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
	while (j) {
		decode_address(buf, software_trace_buff[index]);
		pr_notice("%4i Target : %s\n", i, buf);
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		decode_address(buf, software_trace_buff[index]);
		pr_notice("     Source : %s ", buf);
		decode_instruction((unsigned short *)software_trace_buff[index]);
		pr_cont("\n");
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		j--;
		i++;
	}
#endif

	trace_buffer_restore(tflags);
#endif
}
Example #15
inline void __const_udelay(unsigned long xloops)
{
	__delay(xloops * (HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy));
}
Example #16
void nft_meta_get_eval(const struct nft_expr *expr,
		       struct nft_regs *regs,
		       const struct nft_pktinfo *pkt)
{
	const struct nft_meta *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	const struct net_device *in = pkt->in, *out = pkt->out;
	u32 *dest = &regs->data[priv->dreg];

	switch (priv->key) {
	case NFT_META_LEN:
		*dest = skb->len;
		break;
	case NFT_META_PROTOCOL:
		*dest = 0;
		*(__be16 *)dest = skb->protocol;
		break;
	case NFT_META_NFPROTO:
		*dest = pkt->ops->pf;
		break;
	case NFT_META_L4PROTO:
		*dest = pkt->tprot;
		break;
	case NFT_META_PRIORITY:
		*dest = skb->priority;
		break;
	case NFT_META_MARK:
		*dest = skb->mark;
		break;
	case NFT_META_IIF:
		if (in == NULL)
			goto err;
		*dest = in->ifindex;
		break;
	case NFT_META_OIF:
		if (out == NULL)
			goto err;
		*dest = out->ifindex;
		break;
	case NFT_META_IIFNAME:
		if (in == NULL)
			goto err;
		strncpy((char *)dest, in->name, IFNAMSIZ);
		break;
	case NFT_META_OIFNAME:
		if (out == NULL)
			goto err;
		strncpy((char *)dest, out->name, IFNAMSIZ);
		break;
	case NFT_META_IIFTYPE:
		if (in == NULL)
			goto err;
		*dest = 0;
		*(u16 *)dest = in->type;
		break;
	case NFT_META_OIFTYPE:
		if (out == NULL)
			goto err;
		*dest = 0;
		*(u16 *)dest = out->type;
		break;
	case NFT_META_SKUID:
		if (skb->sk == NULL || !sk_fullsock(skb->sk))
			goto err;

		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket == NULL ||
		    skb->sk->sk_socket->file == NULL) {
			read_unlock_bh(&skb->sk->sk_callback_lock);
			goto err;
		}

		*dest =	from_kuid_munged(&init_user_ns,
				skb->sk->sk_socket->file->f_cred->fsuid);
		read_unlock_bh(&skb->sk->sk_callback_lock);
		break;
	case NFT_META_SKGID:
		if (skb->sk == NULL || !sk_fullsock(skb->sk))
			goto err;

		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket == NULL ||
		    skb->sk->sk_socket->file == NULL) {
			read_unlock_bh(&skb->sk->sk_callback_lock);
			goto err;
		}
		*dest =	from_kgid_munged(&init_user_ns,
				 skb->sk->sk_socket->file->f_cred->fsgid);
		read_unlock_bh(&skb->sk->sk_callback_lock);
		break;
#ifdef CONFIG_IP_ROUTE_CLASSID
	case NFT_META_RTCLASSID: {
		const struct dst_entry *dst = skb_dst(skb);

		if (dst == NULL)
			goto err;
		*dest = dst->tclassid;
		break;
	}
#endif
#ifdef CONFIG_NETWORK_SECMARK
	case NFT_META_SECMARK:
		*dest = skb->secmark;
		break;
#endif
	case NFT_META_PKTTYPE:
		if (skb->pkt_type != PACKET_LOOPBACK) {
			*dest = skb->pkt_type;
			break;
		}

		switch (pkt->ops->pf) {
		case NFPROTO_IPV4:
			if (ipv4_is_multicast(ip_hdr(skb)->daddr))
				*dest = PACKET_MULTICAST;
			else
				*dest = PACKET_BROADCAST;
			break;
		case NFPROTO_IPV6:
			if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
				*dest = PACKET_MULTICAST;
			else
				*dest = PACKET_BROADCAST;
			break;
		default:
			WARN_ON(1);
			goto err;
		}
		break;
	case NFT_META_CPU:
		*dest = raw_smp_processor_id();
		break;
	case NFT_META_IIFGROUP:
		if (in == NULL)
			goto err;
		*dest = in->group;
		break;
	case NFT_META_OIFGROUP:
		if (out == NULL)
			goto err;
		*dest = out->group;
		break;
#ifdef CONFIG_CGROUP_NET_CLASSID
	case NFT_META_CGROUP:
		if (skb->sk == NULL || !sk_fullsock(skb->sk))
			goto err;
		*dest = skb->sk->sk_classid;
		break;
#endif
	default:
		WARN_ON(1);
		goto err;
	}
	return;

err:
	regs->verdict.code = NFT_BREAK;
}
Example #17
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long address)
{
	struct vm_area_struct * vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	const int field = sizeof(unsigned long) * 2;
	siginfo_t info;

#if 0
	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", smp_processor_id(),
	       current->comm, current->pid, field, address, write,
	       field, regs->cp0_epc);
#endif

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
        MARK(kernel_trap_entry, "%d struct pt_regs %p",
                CAUSE_EXCCODE(regs->cp0_cause), regs);
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_MINOR:
		MARK(kernel_trap_exit, MARK_NOARGS);
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		MARK(kernel_trap_exit, MARK_NOARGS);
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		MARK(kernel_trap_exit, MARK_NOARGS);
		goto do_sigbus;
	case VM_FAULT_OOM:
		MARK(kernel_trap_exit, MARK_NOARGS);
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
#if 0
		printk("do_page_fault() #2: sending SIGSEGV to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       smp_processor_id(), field, address, field, regs->cp0_epc,
	       field,  regs->regs[31]);
	die("Oops", regs);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (tsk->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
		printk("do_page_fault() #3: sending SIGBUS to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);

	return;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}
Example #18
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;
	unsigned int req_freq;

	while (1) {
		wait_event(s->sync_wq, s->pending || kthread_should_stop());
#ifdef CONFIG_IRLED_GPIO
		if (unlikely(gir_boost_disable)) {
			pr_debug("[GPIO_IR][%s] continue~!(cpu:%d)\n", 
				__func__, raw_smp_processor_id());
			continue;
		}
#endif

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (s->task_load < migration_load_threshold)
			continue;

		req_freq = load_based_syncs ?
			(dest_policy.max * s->task_load) / 100 : src_policy.cur;

		if (req_freq <= dest_policy.cpuinfo.min_freq) {
			pr_debug("No sync. Sync Freq:%u\n", req_freq);
			continue;
		}

		if (sync_threshold)
			req_freq = min(sync_threshold, req_freq);

		cancel_delayed_work_sync(&s->boost_rem);

#ifdef CONFIG_CPUFREQ_HARDLIMIT
		s->boost_min = check_cpufreq_hardlimit(req_freq);
#else
#ifdef CONFIG_CPUFREQ_LIMIT
		s->boost_min = check_cpufreq_limit(req_freq);
#else
		s->boost_min = req_freq;
#endif
#endif

		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(src_cpu))
			/*
			 * Send an unchanged policy update to the source
			 * CPU. Even though the policy isn't changed from
			 * its existing boosted or non-boosted state
			 * notifying the source CPU will let the governor
			 * know a boost happened on another CPU and that it
			 * should re-evaluate the frequency at the next timer
			 * event without interference from a min sample time.
			 */
			cpufreq_update_policy(src_cpu);
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}
Example #20
File: kgdb.c Project: 7L/pi_plus
static int kgdb_call_nmi_hook(struct pt_regs *regs)
{
	kgdb_nmicallback(raw_smp_processor_id(), regs);
	return 0;
}
Example #21
asmlinkage notrace void trap_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int j;
#endif
#ifdef CONFIG_BFIN_PSEUDODBG_INSNS
	int opcode;
#endif
	unsigned int cpu = raw_smp_processor_id();
	const char *strerror = NULL;
	int sig = 0;
	siginfo_t info;
	unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;

	trace_buffer_save(j);
#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
	last_seqstat = (u32)fp->seqstat;
#endif

	/* Important - be very careful dereferencing pointers - it will lead to
	 * double faults if the stack has become corrupt
	 */

	/* trap_c() will be called for exceptions. During exception
	 * processing, the pc value should be set to the retx value.
	 * With this change we can clean up some code in signal.c - TODO
	 */
	fp->orig_pc = fp->retx;
	/* printk("exception: 0x%x, ipend=%x, reti=%x, retx=%x\n",
		trapnr, fp->ipend, fp->pc, fp->retx); */

	/* send the appropriate signal to the user program */
	switch (trapnr) {

	/* This table works in conjunction with the one in ./mach-common/entry.S
	 * Some exceptions are handled there (in assembly, in exception space)
	 * Some are handled here, (in C, in interrupt space)
	 * Some, like CPLB, are handled in both, where the normal path is
	 * handled in assembly/exception space, and the error path is handled
	 * here
	 */

	/* 0x00 - Linux Syscall, getting here is an error */
	/* 0x01 - userspace gdb breakpoint, handled here */
	case VEC_EXCPT01:
		info.si_code = TRAP_ILLTRAP;
		sig = SIGTRAP;
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a breakpoint in kernel space */
		if (kernel_mode_regs(fp))
			goto traps_done;
		else
			break;
	/* 0x03 - User Defined, userspace stack overflow */
	case VEC_EXCPT03:
		info.si_code = SEGV_STACKFLOW;
		sig = SIGSEGV;
		strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x02 - KGDB initial connection and break signal trap */
	case VEC_EXCPT02:
#ifdef CONFIG_KGDB
		info.si_code = TRAP_ILLTRAP;
		sig = SIGTRAP;
		CHK_DEBUGGER_TRAP();
		goto traps_done;
#endif
	/* 0x04 - User Defined */
	/* 0x05 - User Defined */
	/* 0x06 - User Defined */
	/* 0x07 - User Defined */
	/* 0x08 - User Defined */
	/* 0x09 - User Defined */
	/* 0x0A - User Defined */
	/* 0x0B - User Defined */
	/* 0x0C - User Defined */
	/* 0x0D - User Defined */
	/* 0x0E - User Defined */
	/* 0x0F - User Defined */
	/* If we got here, it is most likely that someone was trying to use a
	 * custom exception handler, and it is not actually installed properly
	 */
	case VEC_EXCPT04 ... VEC_EXCPT15:
		info.si_code = ILL_ILLPARAOP;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x10 HW Single step, handled here */
	case VEC_STEP:
		info.si_code = TRAP_STEP;
		sig = SIGTRAP;
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a single step in kernel space */
		if (kernel_mode_regs(fp))
			goto traps_done;
		else
			break;
	/* 0x11 - Trace Buffer Full, handled here */
	case VEC_OVFLOW:
		info.si_code = TRAP_TRACEFLOW;
		sig = SIGTRAP;
		strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x12 - Reserved, Caught by default */
	/* 0x13 - Reserved, Caught by default */
	/* 0x14 - Reserved, Caught by default */
	/* 0x15 - Reserved, Caught by default */
	/* 0x16 - Reserved, Caught by default */
	/* 0x17 - Reserved, Caught by default */
	/* 0x18 - Reserved, Caught by default */
	/* 0x19 - Reserved, Caught by default */
	/* 0x1A - Reserved, Caught by default */
	/* 0x1B - Reserved, Caught by default */
	/* 0x1C - Reserved, Caught by default */
	/* 0x1D - Reserved, Caught by default */
	/* 0x1E - Reserved, Caught by default */
	/* 0x1F - Reserved, Caught by default */
	/* 0x20 - Reserved, Caught by default */
	/* 0x21 - Undefined Instruction, handled here */
	case VEC_UNDEF_I:
#ifdef CONFIG_BUG
		if (kernel_mode_regs(fp)) {
			switch (report_bug(fp->pc, fp)) {
			case BUG_TRAP_TYPE_NONE:
				break;
			case BUG_TRAP_TYPE_WARN:
				dump_bfin_trace_buffer();
				fp->pc += 2;
				goto traps_done;
			case BUG_TRAP_TYPE_BUG:
				/* call to panic() will dump trace, and it is
				 * off at this point, so it won't be clobbered
				 */
				panic("BUG()");
			}
		}
#endif
#ifdef CONFIG_BFIN_PSEUDODBG_INSNS
		/*
		 * Support for the fake instructions, if the instruction fails,
		 * then just execute a illegal opcode failure (like normal).
		 * Don't support these instructions inside the kernel
		 */
		if (!kernel_mode_regs(fp) && get_instruction(&opcode, (unsigned short *)fp->pc)) {
			if (execute_pseudodbg_assert(fp, opcode))
				goto traps_done;
			if (execute_pseudodbg(fp, opcode))
				goto traps_done;
		}
#endif
		info.si_code = ILL_ILLOPC;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x22 - Illegal Instruction Combination, handled here */
	case VEC_ILGAL_I:
		info.si_code = ILL_ILLPARAOP;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x23 - Data CPLB protection violation, handled here */
	case VEC_CPLB_VL:
		info.si_code = ILL_CPLB_VI;
		sig = SIGSEGV;
		strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x24 - Data access misaligned, handled here */
	case VEC_MISALI_D:
		info.si_code = BUS_ADRALN;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x25 - Unrecoverable Event, handled here */
	case VEC_UNCOV:
		info.si_code = ILL_ILLEXCPT;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
		error case is handled here */
	case VEC_CPLB_M:
		info.si_code = BUS_ADRALN;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);
		break;
	/* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
	case VEC_CPLB_MHIT:
		info.si_code = ILL_CPLB_MULHIT;
		sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
		if (
Example #22
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return the length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to land before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			if (nla->nla_len > A - skb->len)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
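
For context, sk_run_filter() above executes the kernel-internal, already-validated opcodes (BPF_S_*). A classic filter as userspace would attach it, which sk_chk_filter() checks and translates into those opcodes before sk_run_filter() ever runs it, might look like this sketch (illustrative only; it assumes an AF_PACKET socket so the data starts at the Ethernet header):

#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

/* Classic BPF sketch: keep IPv4 frames, drop everything else. */
static struct sock_filter code[] = {
	BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),            /* A = EtherType                 */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),  /* IPv4? fall through : skip one */
	BPF_STMT(BPF_RET | BPF_K, 0xffff),                    /* accept (up to 64 KiB)         */
	BPF_STMT(BPF_RET | BPF_K, 0),                         /* drop                          */
};

static struct sock_fprog prog = {
	.len    = sizeof(code) / sizeof(code[0]),
	.filter = code,
};

/* Attached with: setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)); */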
Example #23
static int gameport_measure_speed(struct gameport *gameport)
{
#if defined(__i386__)

	unsigned int i, t, t1, t2, t3, tx;
	unsigned long flags;

	if (gameport_open(gameport, NULL, GAMEPORT_MODE_RAW))
		return 0;

	tx = 1 << 30;

	for(i = 0; i < 50; i++) {
		local_irq_save(flags);
		GET_TIME(t1);
		for (t = 0; t < 50; t++) gameport_read(gameport);
		GET_TIME(t2);
		GET_TIME(t3);
		local_irq_restore(flags);
		udelay(i * 10);
		if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
	}

	gameport_close(gameport);
	return 59659 / (tx < 1 ? 1 : tx);

#elif defined (__x86_64__)

	unsigned int i, t;
	unsigned long tx, t1, t2, flags;

	if (gameport_open(gameport, NULL, GAMEPORT_MODE_RAW))
		return 0;

	tx = 1 << 30;

	for(i = 0; i < 50; i++) {
		local_irq_save(flags);
		rdtscl(t1);
		for (t = 0; t < 50; t++) gameport_read(gameport);
		rdtscl(t2);
		local_irq_restore(flags);
		udelay(i * 10);
		if (t2 - t1 < tx) tx = t2 - t1;
	}

	gameport_close(gameport);
	return (cpu_data(raw_smp_processor_id()).loops_per_jiffy *
		(unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);

#else

	unsigned int j, t = 0;

	if (gameport_open(gameport, NULL, GAMEPORT_MODE_RAW))
		return 0;

	j = jiffies; while (j == jiffies);
	j = jiffies; while (j == jiffies) { t++; gameport_read(gameport); }

	gameport_close(gameport);
	return t * HZ / 1000;

#endif
}
Example #24
static void watchdog_workfunc(struct work_struct *work)
{
	writel(watchdog_reset, S3C2410_WTCNT);
	watchdog_iTime = cpu_clock(raw_smp_processor_id());
	queue_delayed_work(watchdog_wq, &watchdog_work, watchdog_pet);
}
Example #25
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;
	unsigned int load_avg;

	load_avg = pct_task_load(p);

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------\n");
#define __P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
	__P(load_avg);
#ifdef CONFIG_SCHED_HMP
	P(ravg.demand);
	P(se.avg.runnable_avg_sum_scaled);
#endif
#endif

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-35s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}
}
Example #26
static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
Example #27
/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return __round_jiffies(j, raw_smp_processor_id());
}
void sec_debug_task_sched_log_short_msg(char *msg)
{
	__sec_debug_task_sched_log(raw_smp_processor_id(), NULL, msg);
}
Example #29
/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
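
The relative variant fits delayed work, where the argument is a delay rather than an absolute time. A minimal illustrative use (my_wq and my_work are placeholders; queue_delayed_work() is the standard workqueue API):

/* Schedule periodic cleanup roughly 10 seconds from now; rounding the delay
 * up lets it coalesce with other second-aligned timers without firing early. */
queue_delayed_work(my_wq, &my_work, round_jiffies_up_relative(10 * HZ));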
Example #30
//void dc4_log(unsigned x) { if (dc4) writel((x), __io_address(ST_BASE+10 + raw_smp_processor_id()*4)); }
void dc4_log_dead(unsigned x)
{
	if (dc4)
		writel((readl(__io_address(ST_BASE + 0x10 + raw_smp_processor_id() * 4)) & 0xffff) | ((x) << 16),
		       __io_address(ST_BASE + 0x10 + raw_smp_processor_id() * 4));
}