Example #1
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
  		per_cpu(ksoftirqd, hotcpu) = p;
 		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
 	}
	return NOTIFY_OK;
}
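The callback above only creates, binds and wakes the per-CPU thread; the CPU-hotplug core is what invokes it. A minimal sketch, assuming the pre-4.10 notifier API, of how such a callback is typically registered at init time (the kernel's own softirq.c does essentially this):

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback,
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();

	/* hand-create and wake the thread for the boot CPU ... */
	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	/* ... then let the notifier handle every later hotplug transition */
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);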
Example #2
void factory_cpus_idle_test(void)
{
    int cpu = 0;
    int i = 0;
    unsigned char name[10] = {'\0'};
    struct task_struct *thread[nr_cpu_ids];
#ifdef CONFIG_SMP
    int ret = 0;
#endif

    spin_lock(&factory_lock);
    cpu = smp_processor_id();
    spin_unlock(&factory_lock);
    dcm_info("[%s]: it's cpu%d, num_online_cpus=%d\n", __func__, cpu, num_online_cpus());

#ifdef CONFIG_SMP
    mutex_lock(&ftm_cpu_prepare);
    disable_hotplug_policy(true, nr_cpu_ids);
    for (i = 1; i < nr_cpu_ids; i++) {
        ret = cpu_up(i);
        dcm_info("[%s]cpu_up(cpu%d) return %d, cpu1_killed=%u\n", __func__, i, ret, cpu1_killed);
    }
    mutex_unlock(&ftm_cpu_prepare);
#endif

    mtk_wdt_disable(); // disable the watchdog

    // turn off backlight
#if defined(CONFIG_MTK_LEDS)
    mt65xx_leds_brightness_set(MT65XX_LED_TYPE_LCD, 0);
#endif

    for (i = nr_cpu_ids-1; i >= 0; i--) {
        cpuid[i] = i;
        init_completion(&each_thread_done[i]);
        sprintf(name, "idle-%d", i);
        thread[i] = kthread_create(cpu_enter_wfi[i], &cpuid[i], name);
        if (IS_ERR(thread[i])) {
            int ret = PTR_ERR(thread[i]);
            thread[i] = NULL;
            dcm_info("[%s]: kthread_create %s fail(%d)\n", __func__, name, ret);
            return;
        }
        dcm_info("[%s]: kthread_create %s done\n", __func__, name);
        kthread_bind(thread[i], i);
        dcm_info("[%s]: kthread_bind %s done\n", __func__, name);
        wake_up_process(thread[i]);
        dcm_info("[%s]: wake_up_process %s done\n", __func__, name);
        wait_for_completion(&each_thread_done[i]);
    }
    dcm_info("[%s]: cpu%d starts to complete_all all_threads_done\n", __func__, cpu);
    complete_all(&all_threads_done);
}
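The handshake in the loop above only works if each worker signals its own each_thread_done entry once it is running on its CPU and then blocks on all_threads_done. A hypothetical sketch of such a worker (cpu_enter_wfi_sketch and the final low-power loop are illustrative, not the MTK implementation):

static int cpu_enter_wfi_sketch(void *data)
{
	int cpu = *(int *)data;

	/* tell factory_cpus_idle_test() this thread is up on its CPU */
	complete(&each_thread_done[cpu]);

	/* wait until every per-CPU thread has been created and bound */
	wait_for_completion(&all_threads_done);

	/* ... enter the WFI / low-power measurement loop here ... */
	return 0;
}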
Example #3
File: srcu.c  Project: cgvarela/fvm
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(vmmr0_rcu_sync_thread, hcpu,
				   "vmmr0srcusync/%d", hotcpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "vmmr0: vmmr0srcsync for %d failed\n",
			       hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		sched_setscheduler(p, SCHED_FIFO, &sync_thread_param);
		per_cpu(sync_thread, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(sync_thread, hotcpu));
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(sync_thread, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(sync_thread, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		p = per_cpu(sync_thread, hotcpu);
		per_cpu(sync_thread, hotcpu) = NULL;
		kthread_stop(p);
		break;
	}
	return NOTIFY_OK;
}
static int kwdt_thread_test(void *arg)
{
	struct sched_param param = {.sched_priority = 99 };
	int cpu;
	unsigned long flags;

	sched_setscheduler(current, SCHED_FIFO, &param);

	set_current_state(TASK_INTERRUPTIBLE);
	for (;;) {
		spin_lock(&wdt_test_lock0);
		cpu = smp_processor_id();
		spin_unlock(&wdt_test_lock0);
		pr_debug("wd_test debug start, cpu:%d\n", cpu);

		if (test_case == (cpu * 10 + 1)) {	/* preempt disable on this cpu */
			pr_debug("CPU:%d, Preempt disable\n", cpu);
			spin_lock(&wdt_test_lock1);
		}
		if (test_case == (cpu * 10 + 2)) {	/* preempt & irq disable on this cpu */
			pr_debug("CPU:%d, irq & Preempt disable\n", cpu);
			spin_lock_irqsave(&wdt_test_lock1, flags);
		}
		msleep(5 * 1000);	/* 5s */
		wdt_dump_reg();
		pr_debug("wd_test debug end, cpu:%d\n", cpu);
	}
	return 0;
}

static int start_kicker(void)
{

	int i;
	unsigned char name[64] = { 0 };


	for (i = 0; i < nr_cpu_ids; i++) {
		sprintf(name, "wdtk-test-%d", i);
		pr_debug("[WDK]:thread name: %s\n", name);
		wk_tsk[i] = kthread_create(kwdt_thread_test, &data, name);
		if (IS_ERR(wk_tsk[i])) {
			int ret = PTR_ERR(wk_tsk[i]);

			wk_tsk[i] = NULL;
			return ret;
		}
		kthread_bind(wk_tsk[i], i);
		wake_up_process(wk_tsk[i]);
	}
	return 0;
}
void wk_start_kick_cpu(int cpu)
{
	if(IS_ERR(wk_tsk[cpu]))
	{
		printk("[wdk]wk_task[%d] is NULL\n",cpu);
	}
	else
	{
		kthread_bind(wk_tsk[cpu], cpu);
		printk("[wdk]bind thread[%d] to cpu[%d]\n",wk_tsk[cpu]->pid,cpu);
		wake_up_process(wk_tsk[cpu]);
	}
}
Example #6
static int kthread_create_on_cpu(int (*f)(void *arg),
				 void *arg,
				 const char *name,
				 int cpu)
{
	struct task_struct *p;
	p = kthread_create(f, arg, name);
	if (IS_ERR(p))
		return PTR_ERR(p);
	kthread_bind(p, cpu);
	wake_up_process(p);
	return 0;
}
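A hypothetical caller of the helper above, spawning one bound worker per online CPU (my_worker is made up for illustration). Note that the helper discards the task_struct pointer, so these threads cannot be stopped with kthread_stop() later:

static int my_worker(void *arg)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int __init bound_workers_init(void)
{
	int cpu, err;

	for_each_online_cpu(cpu) {
		err = kthread_create_on_cpu(my_worker, NULL, "my_worker", cpu);
		if (err)
			return err;
	}
	return 0;
}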
Example #7
/*
 * Start the sync_unplug_thread on the target cpu and wait for it to
 * complete.
 */
static int cpu_unplug_begin(unsigned int cpu)
{
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
	struct task_struct *tsk;

	init_completion(&hp->synced);
	tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
	if (IS_ERR(tsk))
		return (PTR_ERR(tsk));
	kthread_bind(tsk, cpu);
	wake_up_process(tsk);
	wait_for_completion(&hp->synced);
	return 0;
}
void create_dma_threads(void)
{
	struct task_struct *p[MAX_THREADS];
	int t_count;

	for (t_count = 0; t_count < MAX_THREADS; t_count++) {
		p[t_count] = kthread_create(dma_chain_thread_entry, &cht[t_count],
					    "dma_chain_thread");
		if (!IS_ERR(p[t_count]))
			kthread_bind(p[t_count], t_count);
	}

	/* Start all the threads at once */
	for (t_count = 0; t_count < MAX_THREADS; t_count++) {
		if (!IS_ERR(p[t_count]))
			wake_up_process(p[t_count]);
	}
}
Example #9
void wk_start_kick_cpu(int cpu)
{
	if(IS_ERR(wk_tsk[cpu]))
	{
		printk("[wdk]wk_task[%d] is NULL\n",cpu);
	}
	else
	{
		/* Need to be asleep *before* we do a kthread_bind */
		__set_task_state(wk_tsk[cpu], TASK_UNINTERRUPTIBLE);
		set_tsk_need_resched(wk_tsk[cpu]);

		kthread_bind(wk_tsk[cpu], cpu);
	//	printk("[wdk]bind thread[%d] to cpu[%d]\n",wk_tsk[cpu]->pid,cpu);
		wake_up_process(wk_tsk[cpu]);
	}
}
/* begin: add by wufan w00163571 for use kernel thread kick watchdog 20121201 */
static int k3_wdt_kick_start_oncpu(int cpu)
{
	int err = 0;
	struct task_struct *p = per_cpu(k3wdt_kick_watchdog_task, cpu);
	if (!p) {
		p = kthread_create(k3wdt_kick_threadfunc, (void *)(unsigned long)cpu, "k3wdt_kicktask/%d", cpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
			err = PTR_ERR(p);
			goto out;
		}
		kthread_bind(p, cpu);
		per_cpu(k3wdt_kick_watchdog_task, cpu) = p;
		wake_up_process(p);
	}
out:
	return err;
}
Example #11
File: edf-wm.c  Project: Aand1/ROSCH
static void migration_thread_init(void)
{
	int cpu;
	struct sched_param sp = { .sched_priority = RESCH_PRIO_KTHREAD };

	for (cpu = 0; cpu < NR_RT_CPUS; cpu++) {
		INIT_LIST_HEAD(&kthread[cpu].list);
		kthread[cpu].task = kthread_create((void*)migration_thread,
										   (void*)(long)cpu,
										   "edf-wm-kthread");
		if (!IS_ERR(kthread[cpu].task)) {
			kthread_bind(kthread[cpu].task, cpu);
			sched_setscheduler(kthread[cpu].task, SCHED_FIFO, &sp);
			wake_up_process(kthread[cpu].task);
		}
		else {
			kthread[cpu].task = NULL;
		}
	}
}	
static int start_kicker(void)
{

	int i;
	unsigned char name[10] = {0};
	

	for (i = 0; i < nr_cpu_ids; i++) {
		sprintf(name, "wdtk-%d", i);
		printk("[WDK]:thread name: %s\n", name);
		wk_tsk[i] = kthread_create(kwdt_thread_test, &data, name);
		if (IS_ERR(wk_tsk[i])) {
			int ret = PTR_ERR(wk_tsk[i]);
			wk_tsk[i] = NULL;
			return ret;
		}
		kthread_bind(wk_tsk[i], i);
		wake_up_process(wk_tsk[i]);
	}
	return 0;
}
static int spawn_init(void)
{

	int index = 0;

	printk(KERN_DEBUG "spawn_init\n" );

	/* -- clear all task_struct pointers -- */
	for( index=0; index<4; index++)
	{
		memcpy( threads[index].name, "framework/", 10 );
		threads[index].name[10] = 48 + index;
		threads[index].name[11] = 0; 
		threads[index].pThread = NULL;
	}


	for( index=0; index<4; index++ )
	{

		/* -- create kernel thread in system -- */
		threads[index].pThread = kthread_create(&threadEntryPoint, (void*)index, threads[index].name );
		if (IS_ERR(threads[index].pThread))
		{
			printk( KERN_ALERT "multi_thread_framework; kthread_create failed; index=0x%08x\n", index );
			threads[index].pThread = NULL;
			continue;
		}

		/* -- bind kernel thread to specific cpu -- */
		kthread_bind( threads[index].pThread , index );

		/* -- wake up the process -- */
		wake_up_process( threads[index].pThread );
		
	}
	
	/* http://lxr.free-electrons.com/source/kernel/trace/ring_buffer.c#L4883 */

	return 0;
}
Example #14
static int timeout_enable(int cpu)
{
	int err = 0;
	int warning_limit;

	/*
	 * Create an uptime worker thread. This thread is required since the
	 * safe version of kernel restart cannot be called from a
	 * non-interruptible context. Which means we cannot call it directly
	 * from a timer callback.  So we arrange for the timer expiration to
	 * wakeup a thread, which performs the action.
	 */
	uptime_worker_task = kthread_create(uptime_worker,
					    (void *)(unsigned long)cpu,
					    "uptime_worker/%d", cpu);
	if (IS_ERR(uptime_worker_task)) {
		printk(KERN_ERR "Uptime: task for cpu %i failed\n", cpu);
		err = PTR_ERR(uptime_worker_task);
		goto out;
	}
	/* bind to cpu0 to avoid migration and hot plug nastiness */
	kthread_bind(uptime_worker_task, cpu);
	wake_up_process(uptime_worker_task);

	/* Create the timer that will wake the uptime thread at expiration */
	init_timer(&timelimit_timer);
	timelimit_timer.function = timelimit_expire;
	/*
	 * Fire two timers. One warning timeout and the final timer
	 * which will carry out the expiration action. The warning timer will
	 * expire at the minimum of half the original time or ten minutes.
	 */
	warning_limit = MIN(UPTIME_LIMIT_IN_SECONDS/2, TEN_MINUTES_IN_SECONDS);
	timelimit_timer.expires = jiffies + warning_limit * HZ;
	timelimit_timer.data = UPTIME_LIMIT_IN_SECONDS - warning_limit;

	add_timer_on(&timelimit_timer, cpumask_first(cpu_online_mask));
out:
	return err;
}
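The comment above implies the timer callback itself does nothing but wake the worker, since the safe restart must run in process context. A hypothetical sketch of such a callback, matching the timer setup above (the warning text and re-arm policy are illustrative only):

static void timelimit_expire(unsigned long data)
{
	/* data holds the seconds left until the final expiration */
	if (data) {
		pr_warn("Uptime: limit warning, %lu seconds remaining\n", data);
		timelimit_timer.expires = jiffies + data * HZ;
		timelimit_timer.data = 0;
		add_timer_on(&timelimit_timer, cpumask_first(cpu_online_mask));
	} else {
		/* final expiration: just wake the worker, it performs the action */
		wake_up_process(uptime_worker_task);
	}
}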
Example #15
int ehca_create_comp_pool(void)
{
    int cpu;
    struct task_struct *task;

    if (!ehca_scaling_code)
        return 0;

    pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
    if (pool == NULL)
        return -ENOMEM;

    spin_lock_init(&pool->last_cpu_lock);
    pool->last_cpu = any_online_cpu(cpu_online_map);

    pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
    if (pool->cpu_comp_tasks == NULL) {
        kfree(pool);
        return -EINVAL;
    }

    for_each_online_cpu(cpu) {
        task = create_comp_task(pool, cpu);
        if (task) {
            kthread_bind(task, cpu);
            wake_up_process(task);
        }
    }

#ifdef CONFIG_HOTPLUG_CPU
    comp_pool_callback_nb.notifier_call = comp_pool_callback;
    comp_pool_callback_nb.priority =0;
    register_cpu_notifier(&comp_pool_callback_nb);
#endif

    printk(KERN_INFO "eHCA scaling code enabled\n");

    return 0;
}
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
//		watchdog_prepare_cpu(hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (hotcpu < nr_cpu_ids) {
			kthread_bind(wk_tsk[hotcpu], hotcpu);
			wake_up_process(wk_tsk[hotcpu]);
			printk("[WDK-test]cpu %d plug on\n", hotcpu);
		}
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		printk("[WDK-test]:start Stop CPU:%d\n", hotcpu);
	
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}

	/*
	 * hardlockup and softlockup are not important enough
	 * to block cpu bring up.  Just always succeed and
	 * rely on printk output to flag problems.
	 */
	return NOTIFY_OK;
}
Example #17
int
pfq_start_all_tx_threads(void)
{
	int err = 0;

	if (tx_thread_nr)
	{
		int n;
		printk(KERN_INFO "[PFQ] starting %d Tx thread(s)...\n", tx_thread_nr);

		for(n = 0; n < tx_thread_nr; n++)
		{
			struct pfq_thread_tx_data *data = &pfq_thread_tx_pool[n];

			data->id = n;
			data->cpu = tx_affinity[n];
			data->node = cpu_online(tx_affinity[n]) ? cpu_to_node(tx_affinity[n]) : NUMA_NO_NODE;
			data->task = kthread_create_on_node(pfq_tx_thread,
							    data, data->node,
							    "kpfq/%d:%d", n, data->cpu);
			if (IS_ERR(data->task)) {
				printk(KERN_INFO "[PFQ] kernel_thread: create failed on cpu %d!\n",
				       data->cpu);
				err = PTR_ERR(data->task);
				data->task = NULL;
				return err;
			}

			kthread_bind(data->task, data->cpu);

			pr_devel("[PFQ] created Tx[%d] kthread on cpu %d...\n", data->id, data->cpu);

			wake_up_process(data->task);
		}
	}

	return err;
}
static int spawn_init(void)
{

	int index = 0;

	atomic_set(&race, 0);

	/* -- clear all task_struct pointers -- */
	for( index=0; index<4; index++)
	{
		memcpy( threads[index].name, "framework/", 10 );
		threads[index].name[10] = 48 + index;
		threads[index].name[11] = 0; 
		threads[index].pThread = NULL;
	}


	for( index=0; index<4; index++ )
	{

		/* -- create kernel thread in system -- */
		threads[index].pThread = kthread_create(&threadEntryPoint, (void*)index, threads[index].name );
		if (IS_ERR(threads[index].pThread))
		{
			printk( KERN_ALERT "multi_thread_framework; kthread_create failed; index=0x%08x\n", index );
			threads[index].pThread = NULL;
			continue;
		}

		/* -- bind kernel thread to specific cpu -- */
		kthread_bind( threads[index].pThread , index );

		/* -- wake up the process -- */
		wake_up_process( threads[index].pThread );
		
	}
	
	return 0;
}
Example #19
/*
 * Start the sync_unplug_thread on the target cpu and wait for it to
 * complete.
 */
static int cpu_unplug_begin(unsigned int cpu)
{
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
	int err;

	/* Protected by cpu_hotplug.lock */
	if (!hp->mutex_init) {
#ifdef CONFIG_PREEMPT_RT_FULL
		spin_lock_init(&hp->lock);
#else
		mutex_init(&hp->mutex);
#endif
		hp->mutex_init = 1;
	}

	/* Inform the scheduler to migrate tasks off this CPU */
	tell_sched_cpu_down_begin(cpu);

	init_completion(&hp->synced);
	init_completion(&hp->unplug_wait);

	hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
	if (IS_ERR(hp->sync_tsk)) {
		err = PTR_ERR(hp->sync_tsk);
		hp->sync_tsk = NULL;
		return err;
	}
	kthread_bind(hp->sync_tsk, cpu);

	/*
	 * Wait for tasks to get out of the pinned sections,
	 * it's still OK if new tasks enter. Some CPU notifiers will
	 * wait for tasks that are going to enter these sections and
	 * we must not have them block.
	 */
	wake_up_process(hp->sync_tsk);
	return 0;
}
Example #20
static int __init fairness_init(void)
{
	unsigned long long i;
	printk("init_module() called\n");
	spin_lock_init(&lock);
	spin_lock_init(&thread_cnt_lock);
	spin_lock_init(&finish_lock);
	naive_lock = 0;

	thread_num = num_online_cpus() <= 8 ? num_online_cpus() : 8;
	lock_num = 100000 * thread_num;

	printk("[fairness] created %d kthreads on %d cores\n", thread_num, thread_num);

	for (i = 0; i < thread_num; i++) {
		tasks[i] = kthread_create(measure_lock, (void *)i, "KTHREAD %lld", i);
		if (!IS_ERR(tasks[i]))
			kthread_bind(tasks[i], i % thread_num);
	}

	do_gettimeofday(&start);
	for(i = 0; i < thread_num; i++)
		if (!IS_ERR(tasks[i])) wake_up_process(tasks[i]);
	return 0;
}
Example #21
void wk_start_kick_cpu(int cpu)
{
#if defined(LGE_USE_WDOG_SPINLOCK_IRQ)
	spin_lock_irqsave(&lock, lock_flag);
#else
	spin_lock(&lock);
#endif
	cpus_kick_bit |= (1<<cpu); 
	kick_bit = 0;
#if defined(LGE_USE_WDOG_SPINLOCK_IRQ)
	spin_unlock_irqrestore(&lock, lock_flag);
#else	
	spin_unlock(&lock);
#endif
	if (IS_ERR(wk_tsk[cpu]))
	{
		printk("[wdk]wk_task[%d] is NULL\n", cpu);
	}
	else
	{
		kthread_bind(wk_tsk[cpu], cpu);
		wake_up_process(wk_tsk[cpu]);
	}
}
Example #22
static void gpio_test(void)
{
    int loop;
    struct task_struct *p1, *p2, *p3, *p4;
    int x;

    switch (test) {

    case 1: /* Reserve and free GPIO Line */
        gpio_test_request();
        if (request_flag)
            gpio_test_free();
        break;

    case 2: /* Set GPIO input/output direction */
        gpio_test_request();
        if (request_flag) {
            gpio_test_direction_input();
            gpio_test_direction_output();
            gpio_test_free();
        }
        break;

    case 3: /* GPIO read */
        gpio_test_request();
        if (request_flag) {
            gpio_test_direction_input();
            if (input_direction_flag)
                gpio_test_read();
            gpio_test_free();
        }
        break;

    case 4: /* GPIO write */
        gpio_test_request();
        if (request_flag) {
            gpio_test_direction_output();
            if (output_direction_flag)
                gpio_test_write();
            gpio_test_free();
        }
        break;

    case 5: /* configure the interrupt edge sensitivity (rising, falling) */
        gpio_test_request();
        if (request_flag) {
            gpio_test_irq();
            gpio_test_free();
        }
        break;

#ifdef CONFIG_ARCH_OMAP4
    case 6: /* GPIO read */
        for (loop = 0; loop < iterations; loop++) {
            gpio_test_request();
            if (request_flag) {
                printk(KERN_INFO "Running on %d ",smp_processor_id() );
                gpio_test_direction_input();
                if (input_direction_flag)
                    gpio_test_read();
                gpio_test_free();
            }
        }
        break;

    case 7: /* GPIO write */
        for (loop = 0; loop < iterations; loop++) {
            gpio_test_request();
            if (request_flag) {
                printk(KERN_INFO "Running on %d ",smp_processor_id() );
                gpio_test_direction_output();
                if (output_direction_flag)
                    gpio_test_write();
                gpio_test_free();
            }
        }
        break;

    case 8: /* thread */
        p1 = kthread_create(gpio_keep_reading, NULL , "gpiotest/0");
        p2 = kthread_create(gpio_keep_reading, NULL , "gpiotest/1");
        kthread_bind(p1, 0);
        kthread_bind(p2, 1);
        x = wake_up_process(p1);
        x = wake_up_process(p2);
        break;
#endif
    case 9: /* Verify if GPIO module disable happens if all GPIOs in the module are inactive */
        gpio_test7();
        break;

    case 10: /* Request for same GPIO twice and free the GPIO */
        gpio_test_request();
        if (request_flag) {
            request_flag = 0;
            printk(KERN_INFO "Requesting same GPIO again");
            gpio_test_request();
            if (request_flag)
                test_passed = 0;
            gpio_test_free();
        }
        break;
    case 11:
        for (loop = 0; loop < 500; loop++) {
            gpio_test_request();
            if (request_flag) {

                gpio_test_direction_output();
                if (output_direction_flag)
                    gpio_test_write();

                gpio_test_direction_input();
                if (input_direction_flag)
                    gpio_test_read();
                gpio_test_free();
            } else
                break;
        }
        break;
    case 13: /* thread */
        p1 = kthread_create(gpio_keep_reading, NULL ,
                            "gpiotest/0");
        p2 = kthread_create(gpio_keep_reading, NULL ,
                            "gpiotest/1");
        p3 = kthread_create(gpio_keep_reading, NULL ,
                            "gpiotest/3");
        p4 = kthread_create(gpio_keep_reading, NULL ,
                            "gpiotest/4");
        x = wake_up_process(p1);
        x = wake_up_process(p2);
        x = wake_up_process(p3);
        x = wake_up_process(p4);
        break;
    case 12:
        break;
    default:
        printk(KERN_INFO "Test option not available.\n");
    }

    /* On failure of a testcase, one of the three error flags is set to 0.
     * If a gpio line request fails it is not, by itself, considered a failure;
     * test_passed is set to 0 to mark a failure.
     */
    if (!(error_flag_1 && error_flag_2 && error_flag_3))
        test_passed = 0;
}
Example #23
int pfq_setsockopt(struct socket *sock,
                int level, int optname,
                char __user * optval,
#if(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
                unsigned
#endif
                int optlen)
{
        struct pfq_sock *so = pfq_sk(sock->sk);

        bool found = true;

        if (so == NULL)
                return -EINVAL;

        switch(optname)
        {
        case Q_SO_ENABLE:
	{
		unsigned long addr;
		int err = 0;

                if (optlen != sizeof(addr))
                        return -EINVAL;

                if (copy_from_user(&addr, optval, optlen))
                        return -EFAULT;

                err = pfq_shared_queue_enable(so, addr);
                if (err < 0) {
                        printk(KERN_INFO "[PFQ|%d] enable error!\n", so->id.value);
                        return err;
                }

		return 0;

	} break;

	case Q_SO_DISABLE:
	{
		int err = 0;
                size_t n;

		for(n = 0; n < so->tx_opt.num_queues; n++)
		{
			if (so->tx_opt.queue[n].task) {
				pr_devel("[PFQ|%d] stopping Tx[%zu] thread@%p\n", so->id.value, n, so->tx_opt.queue[n].task);
				kthread_stop(so->tx_opt.queue[n].task);
				so->tx_opt.queue[n].task = NULL;
			}
		}

                err = pfq_shared_queue_disable(so);
                if (err < 0) {
                        printk(KERN_INFO "[PFQ|%d] disable error!\n", so->id.value);
                        return err;
                }

	} break;

        case Q_SO_GROUP_BIND:
        {
                struct pfq_binding bind;
		pfq_gid_t gid;

                if (optlen != sizeof(struct pfq_binding))
                        return -EINVAL;

                if (copy_from_user(&bind, optval, optlen))
                        return -EFAULT;

		gid.value = bind.gid;

                if (!pfq_has_joined_group(gid, so->id)) {
                        printk(KERN_INFO "[PFQ|%d] add bind: gid=%d not joined!\n", so->id.value, bind.gid);
                	return -EACCES;
		}

                rcu_read_lock();
                if (!dev_get_by_index_rcu(sock_net(&so->sk), bind.if_index)) {
                        rcu_read_unlock();
                        printk(KERN_INFO "[PFQ|%d] bind: invalid if_index=%d!\n", so->id.value, bind.if_index);
                        return -EACCES;
                }
                rcu_read_unlock();

                pfq_devmap_update(map_set, bind.if_index, bind.hw_queue, gid);

        } break;

        case Q_SO_GROUP_UNBIND:
        {
                struct pfq_binding bind;
		pfq_gid_t gid;

                if (optlen != sizeof(struct pfq_binding))
                        return -EINVAL;

                if (copy_from_user(&bind, optval, optlen))
                        return -EFAULT;

		gid.value = bind.gid;

		if (!pfq_has_joined_group(gid, so->id)) {
                        printk(KERN_INFO "[PFQ|%d] remove bind: gid=%d not joined!\n", so->id.value, bind.gid);
			return -EACCES;
		}

                rcu_read_lock();
                if (!dev_get_by_index_rcu(sock_net(&so->sk), bind.if_index)) {
                        rcu_read_unlock();
                        printk(KERN_INFO "[PFQ|%d] unbind: invalid if_index=%d\n", so->id.value, bind.if_index);
                        return -EPERM;
                }
                rcu_read_unlock();

                pfq_devmap_update(map_reset, bind.if_index, bind.hw_queue, gid);

        } break;

        case Q_SO_EGRESS_BIND:
        {
                struct pfq_binding info;

                if (optlen != sizeof(info))
                        return -EINVAL;
                if (copy_from_user(&info, optval, optlen))
                        return -EFAULT;

                rcu_read_lock();
                if (!dev_get_by_index_rcu(sock_net(&so->sk), info.if_index)) {
                        rcu_read_unlock();
                        printk(KERN_INFO "[PFQ|%d] egress bind: invalid if_index=%d\n", so->id.value, info.if_index);
                        return -EPERM;
                }
                rcu_read_unlock();

                if (info.hw_queue < -1) {
                        printk(KERN_INFO "[PFQ|%d] egress bind: invalid queue=%d\n", so->id.value, info.hw_queue);
                        return -EPERM;
                }

		so->egress_type  = pfq_endpoint_device;
                so->egress_index = info.if_index;
                so->egress_queue = info.hw_queue;

                pr_devel("[PFQ|%d] egress bind: device if_index=%d hw_queue=%d\n", so->id.value, so->egress_index, so->egress_queue);

        } break;

        case Q_SO_EGRESS_UNBIND:
        {
		so->egress_type  = pfq_endpoint_socket;
                so->egress_index = 0;
                so->egress_queue = 0;
                pr_devel("[PFQ|%d] egress unbind.\n", so->id.value);

        } break;

        case Q_SO_SET_RX_TSTAMP:
        {
                int tstamp;
                if (optlen != sizeof(so->rx_opt.tstamp))
                        return -EINVAL;

                if (copy_from_user(&tstamp, optval, optlen))
                        return -EFAULT;

                tstamp = tstamp ? 1 : 0;
                so->rx_opt.tstamp = tstamp;

                pr_devel("[PFQ|%d] timestamp enabled.\n", so->id.value);
        } break;

        case Q_SO_SET_RX_CAPLEN:
        {
                typeof(so->rx_opt.caplen) caplen;

                if (optlen != sizeof(caplen))
                        return -EINVAL;
                if (copy_from_user(&caplen, optval, optlen))
                        return -EFAULT;

                if (caplen > (size_t)cap_len) {
                        printk(KERN_INFO "[PFQ|%d] invalid caplen=%zu (max %d)\n", so->id.value, caplen, cap_len);
                        return -EPERM;
                }

                so->rx_opt.caplen = caplen;
                so->rx_opt.slot_size = Q_MPDB_QUEUE_SLOT_SIZE(so->rx_opt.caplen);

                pr_devel("[PFQ|%d] caplen=%zu, slot_size=%zu\n",
                                so->id.value, so->rx_opt.caplen, so->rx_opt.slot_size);
        } break;

        case Q_SO_SET_RX_SLOTS:
        {
                typeof(so->rx_opt.queue_size) slots;

                if (optlen != sizeof(slots))
                        return -EINVAL;

                if (copy_from_user(&slots, optval, optlen))
                        return -EFAULT;

                if (slots > (size_t)max_queue_slots) {
                        printk(KERN_INFO "[PFQ|%d] invalid Rx slots=%zu (max %d)\n", so->id.value, slots, max_queue_slots);
                        return -EPERM;
                }

                so->rx_opt.queue_size = slots;

                pr_devel("[PFQ|%d] rx_queue slots=%zu\n", so->id.value, so->rx_opt.queue_size);
        } break;

        case Q_SO_SET_TX_SLOTS:
        {
                typeof (so->tx_opt.queue_size) slots;

                if (optlen != sizeof(slots))
                        return -EINVAL;
                if (copy_from_user(&slots, optval, optlen))
                        return -EFAULT;

                if (slots > (size_t)max_queue_slots) {
                        printk(KERN_INFO "[PFQ|%d] invalid Tx slots=%zu (max %d)\n", so->id.value, slots, max_queue_slots);
                        return -EPERM;
                }

                so->tx_opt.queue_size = slots;

                pr_devel("[PFQ|%d] tx_queue slots=%zu\n", so->id.value, so->tx_opt.queue_size);
        } break;

        case Q_SO_GROUP_LEAVE:
        {
                pfq_gid_t gid;

                if (optlen != sizeof(gid.value))
                        return -EINVAL;

                if (copy_from_user(&gid.value, optval, optlen))
                        return -EFAULT;

                if (pfq_leave_group(gid, so->id) < 0)
                        return -EFAULT;

                pr_devel("[PFQ|%d] leave: gid=%d\n", so->id.value, gid.value);

        } break;

        case Q_SO_GROUP_FPROG:
        {
                struct pfq_fprog fprog;
		pfq_gid_t gid;

                if (optlen != sizeof(fprog))
                        return -EINVAL;

                if (copy_from_user(&fprog, optval, optlen))
                        return -EFAULT;

		gid.value = fprog.gid;

		if (!pfq_has_joined_group(gid, so->id)) {
			/* don't set the filter, just return */
                	return 0;
		}

                if (fprog.fcode.len > 0) {  /* set the filter */

                        struct sk_filter *filter;

			if (fprog.fcode.len == 1) { /* check for dummy BPF_CLASS == BPF_RET */

				if (BPF_CLASS(fprog.fcode.filter[0].code) == BPF_RET) {
					pr_devel("[PFQ|%d] fprog: BPF_RET optimized out!\n", so->id.value);
					return 0;
				}
			}

                        filter = pfq_alloc_sk_filter(&fprog.fcode);
                        if (filter == NULL) {
                                printk(KERN_INFO "[PFQ|%d] fprog error: alloc_sk_filter for gid=%d\n", so->id.value, fprog.gid);
                                return -EINVAL;
                        }

                        pfq_set_group_filter(gid, filter);

                        pr_devel("[PFQ|%d] fprog: gid=%d (fprog len %d bytes)\n", so->id.value, fprog.gid, fprog.fcode.len);
                }
                else { 	/* reset the filter */

                        pfq_set_group_filter(gid, NULL);
                        pr_devel("[PFQ|%d] fprog: gid=%d (resetting filter)\n", so->id.value, fprog.gid);
                }

        } break;

        case Q_SO_GROUP_VLAN_FILT_TOGGLE:
        {
                struct pfq_vlan_toggle vlan;
                pfq_gid_t gid;

                if (optlen != sizeof(vlan))
                        return -EINVAL;

                if (copy_from_user(&vlan, optval, optlen))
                        return -EFAULT;

		gid.value = vlan.gid;

		if (!pfq_has_joined_group(gid, so->id)) {
                        printk(KERN_INFO "[PFQ|%d] vlan filter toggle: gid=%d not joined!\n", so->id.value, vlan.gid);
			return -EACCES;
		}

                pfq_toggle_group_vlan_filters(gid, vlan.toggle);
                pr_devel("[PFQ|%d] vlan filters %s for gid=%d\n", so->id.value, (vlan.toggle ? "enabled" : "disabled"), vlan.gid);

        } break;

        case Q_SO_GROUP_VLAN_FILT:
        {
                struct pfq_vlan_toggle filt;
                pfq_gid_t gid;

                if (optlen != sizeof(filt))
                        return -EINVAL;

                if (copy_from_user(&filt, optval, optlen))
                        return -EFAULT;

		gid.value = filt.gid;

		if (!pfq_has_joined_group(gid, so->id)) {
                        printk(KERN_INFO "[PFQ|%d] vlan filter: gid=%d not joined!\n", so->id.value, filt.gid);
			return -EACCES;
		}

                if (filt.vid < -1 || filt.vid > 4094) {
                        printk(KERN_INFO "[PFQ|%d] vlan error: invalid vid=%d for gid=%d!\n", so->id.value, filt.vid, filt.gid);
                        return -EINVAL;
                }

                if (!pfq_vlan_filters_enabled(gid)) {
                        printk(KERN_INFO "[PFQ|%d] vlan error: vlan filters disabled for gid=%d!\n", so->id.value, filt.gid);
                        return -EPERM;
                }

                if (filt.vid  == -1) { /* any */
                        int i;
                        for(i = 1; i < 4095; i++)
			{
                                pfq_set_group_vlan_filter(gid, filt.toggle, i);
			}
                }
                else  {
                        pfq_set_group_vlan_filter(gid, filt.toggle, filt.vid);
		}

                pr_devel("[PFQ|%d] vlan filter vid %d set for gid=%d\n", so->id.value, filt.vid, filt.gid);
        } break;

        case Q_SO_TX_BIND:
        {
                struct pfq_binding info;
                size_t i;

                if (optlen != sizeof(info))
                        return -EINVAL;

                if (copy_from_user(&info, optval, optlen))
                        return -EFAULT;

		if (so->tx_opt.num_queues >= Q_MAX_TX_QUEUES) {
                        printk(KERN_INFO "[PFQ|%d] Tx bind: max number of queues exceeded!\n", so->id.value);
			return -EPERM;
		}

                rcu_read_lock();
                if (!dev_get_by_index_rcu(sock_net(&so->sk), info.if_index)) {
                        rcu_read_unlock();
                        printk(KERN_INFO "[PFQ|%d] Tx bind: invalid if_index=%d\n", so->id.value, info.if_index);
                        return -EPERM;
                }
                rcu_read_unlock();

                if (info.hw_queue < -1) {
                        printk(KERN_INFO "[PFQ|%d] Tx bind: invalid queue=%d\n", so->id.value, info.hw_queue);
                        return -EPERM;
                }

                i = so->tx_opt.num_queues;

		if (info.cpu < -1) {
			printk(KERN_INFO "[PFQ|%d] Tx[%zu] thread: invalid cpu (%d)!\n", so->id.value, i, info.cpu);
			return -EPERM;
		}

                so->tx_opt.queue[i].if_index = info.if_index;
                so->tx_opt.queue[i].hw_queue = info.hw_queue;
                so->tx_opt.queue[i].cpu      = info.cpu;

		so->tx_opt.num_queues++;

                pr_devel("[PFQ|%d] Tx[%zu] bind: if_index=%d hw_queue=%d cpu=%d\n", so->id.value, i,
                		so->tx_opt.queue[i].if_index, so->tx_opt.queue[i].hw_queue, info.cpu);

        } break;

	case Q_SO_TX_UNBIND:
        {
        	size_t n;

         	for(n = 0; n < Q_MAX_TX_QUEUES; ++n)
		{
			so->tx_opt.queue[n].if_index = -1;
			so->tx_opt.queue[n].hw_queue = -1;
			so->tx_opt.queue[n].cpu      = -1;
		}

        } break;

        case Q_SO_TX_FLUSH:
        {
		int queue, err = 0;
                size_t n;

        	if (optlen != sizeof(queue))
        		return -EINVAL;

        	if (copy_from_user(&queue, optval, optlen))
        		return -EFAULT;

		if (pfq_get_tx_queue(&so->tx_opt, 0) == NULL) {
			printk(KERN_INFO "[PFQ|%d] Tx queue flush: socket not enabled!\n", so->id.value);
			return -EPERM;
		}

		if (queue < -1 || (queue > 0 && queue >= so->tx_opt.num_queues)) {
			printk(KERN_INFO "[PFQ|%d] Tx queue flush: bad queue %d (num_queue=%zu)!\n", so->id.value, queue, so->tx_opt.num_queues);
			return -EPERM;
		}

		if (queue != -1) {
			pr_devel("[PFQ|%d] flushing Tx queue %d...\n", so->id.value, queue);
			return pfq_queue_flush(so, queue);
		}

		for(n = 0; n < so->tx_opt.num_queues; n++)
		{
			if (pfq_queue_flush(so, n) != 0) {
				printk(KERN_INFO "[PFQ|%d] Tx[%zu] queue flush: flush error (if_index=%d)!\n", so->id.value, n, so->tx_opt.queue[n].if_index);
				err = -EPERM;
			}
		}

		if (err)
			return err;
        } break;

        case Q_SO_TX_ASYNC:
        {
                int toggle, err = 0;
                size_t n;

        	if (optlen != sizeof(toggle))
        		return -EINVAL;

        	if (copy_from_user(&toggle, optval, optlen))
        		return -EFAULT;

		if (toggle) {

			size_t started = 0;

			if (pfq_get_tx_queue(&so->tx_opt, 0) == NULL) {
				printk(KERN_INFO "[PFQ|%d] Tx queue flush: socket not enabled!\n", so->id.value);
				return -EPERM;
			}

			/* start Tx kernel threads */

			for(n = 0; n < Q_MAX_TX_QUEUES; n++)
			{
				struct pfq_thread_data *data;
				int node;

				if (so->tx_opt.queue[n].if_index == -1)
					break;

				if (so->tx_opt.queue[n].cpu == Q_NO_KTHREAD)
					continue;

				if (so->tx_opt.queue[n].task) {
					printk(KERN_INFO "[PFQ|%d] kernel_thread: Tx[%zu] thread already running!\n", so->id.value, n);
					continue;
				}

				data = kmalloc(sizeof(struct pfq_thread_data), GFP_KERNEL);
				if (!data) {
					printk(KERN_INFO "[PFQ|%d] kernel_thread: could not allocate thread_data! Failed starting thread on cpu %d!\n",
							so->id.value, so->tx_opt.queue[n].cpu);
					err = -EPERM;
					continue;
				}

				data->so = so;
				data->id = n;
				node     = cpu_online(so->tx_opt.queue[n].cpu) ? cpu_to_node(so->tx_opt.queue[n].cpu) : NUMA_NO_NODE;

				pr_devel("[PFQ|%d] creating Tx[%zu] thread on cpu %d: if_index=%d hw_queue=%d\n",
						so->id.value, n, so->tx_opt.queue[n].cpu, so->tx_opt.queue[n].if_index, so->tx_opt.queue[n].hw_queue);

				so->tx_opt.queue[n].task = kthread_create_on_node(pfq_tx_thread, data, node, "pfq_tx_%d#%zu", so->id.value, n);

				if (IS_ERR(so->tx_opt.queue[n].task)) {
					printk(KERN_INFO "[PFQ|%d] kernel_thread: create failed on cpu %d!\n", so->id.value, so->tx_opt.queue[n].cpu);
					err = PTR_ERR(so->tx_opt.queue[n].task);
					so->tx_opt.queue[n].task = NULL;
					kfree (data);
					continue;
				}

				/* bind the thread */

				kthread_bind(so->tx_opt.queue[n].task, so->tx_opt.queue[n].cpu);

				/* start it */

				wake_up_process(so->tx_opt.queue[n].task);

				started++;
			}

			if (started == 0) {
				printk(KERN_INFO "[PFQ|%d] no kernel thread started!\n", so->id.value);
				err = -EPERM;
			}
		}
		else {
                	/* stop running threads */

			for(n = 0; n < so->tx_opt.num_queues; n++)
			{
				if (so->tx_opt.queue[n].task) {
					pr_devel("[PFQ|%d] stopping Tx[%zu] kernel thread@%p\n", so->id.value, n, so->tx_opt.queue[n].task);
					kthread_stop(so->tx_opt.queue[n].task);
					so->tx_opt.queue[n].task = NULL;
				}
			}
		}

		return err;

        } break;

        case Q_SO_GROUP_FUNCTION:
        {
                struct pfq_computation_descr *descr = NULL;
                struct pfq_computation_tree *comp = NULL;
                struct pfq_group_computation tmp;
                size_t psize, ucsize;
                void *context = NULL;
                pfq_gid_t gid;
                int err = 0;

                if (optlen != sizeof(tmp))
                        return -EINVAL;

                if (copy_from_user(&tmp, optval, optlen))
                        return -EFAULT;

		gid.value = tmp.gid;

		if (!pfq_has_joined_group(gid, so->id)) {
                        printk(KERN_INFO "[PFQ|%d] group computation: gid=%d not joined!\n", so->id.value, tmp.gid);
			return -EACCES;
		}

                if (copy_from_user(&psize, tmp.prog, sizeof(size_t)))
                        return -EFAULT;

                pr_devel("[PFQ|%d] computation size: %zu\n", so->id.value, psize);

                ucsize = sizeof(size_t) * 2 + psize * sizeof(struct pfq_functional_descr);

                descr = kmalloc(ucsize, GFP_KERNEL);
                if (descr == NULL) {
                        printk(KERN_INFO "[PFQ|%d] computation: out of memory!\n", so->id.value);
                        return -ENOMEM;
                }

                if (copy_from_user(descr, tmp.prog, ucsize)) {
                        printk(KERN_INFO "[PFQ|%d] computation: copy_from_user error!\n", so->id.value);
                        err = -EFAULT;
                        goto error;
                }

                /* print user computation */

                pr_devel_computation_descr(descr);

		/* check the correctness of computation */

		if (pfq_check_computation_descr(descr) < 0) {
                        printk(KERN_INFO "[PFQ|%d] invalid expression!\n", so->id.value);
                        err = -EFAULT;
                        goto error;
		}

                /* allocate context */

                context = pfq_context_alloc(descr);
                if (context == NULL) {
                        printk(KERN_INFO "[PFQ|%d] context: alloc error!\n", so->id.value);
                        err = -EFAULT;
                        goto error;
                }

                /* allocate a pfq_computation_tree */

                comp = pfq_computation_alloc(descr);
                if (comp == NULL) {
                        printk(KERN_INFO "[PFQ|%d] computation: alloc error!\n", so->id.value);
                        err = -EFAULT;
                        goto error;
                }

                /* link functions of computation */

                if (pfq_computation_rtlink(descr, comp, context) < 0) {
                        printk(KERN_INFO "[PFQ|%d] computation aborted!", so->id.value);
                        err = -EPERM;
                        goto error;
                }

		/* print executable tree data structure */

		pr_devel_computation_tree(comp);

		/* run init functions */

		if (pfq_computation_init(comp) < 0) {
                        printk(KERN_INFO "[PFQ|%d] initialization of computation aborted!", so->id.value);
                        pfq_computation_fini(comp);
                        err = -EPERM;
                        goto error;
		}

                /* enable functional program */

                if (pfq_set_group_prog(gid, comp, context) < 0) {
                        printk(KERN_INFO "[PFQ|%d] set group program error!\n", so->id.value);
                        err = -EPERM;
                        goto error;
                }

		kfree(descr);
                return 0;

	error:  kfree(comp);
		kfree(context);
		kfree(descr);
		return err;

        } break;

        default:
        {
                found = false;
        } break;

        }

        return found ? 0 : sock_setsockopt(sock, level, optname, optval, optlen);
}
Example #24
static int clamp_thread(void *arg)
{
	int cpunr = (unsigned long)arg;
	DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0);
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	unsigned int count = 0;
	unsigned int target_ratio;

	set_bit(cpunr, cpu_clamping_mask);
	set_freezable();
	init_timer_on_stack(&wakeup_timer);
	sched_setscheduler(current, SCHED_FIFO, &param);

	while (true == clamping && !kthread_should_stop() &&
		cpu_online(cpunr)) {
		int sleeptime;
		unsigned long target_jiffies;
		unsigned int guard;
		unsigned int compensated_ratio;
		int interval; /* jiffies to sleep for each attempt */
		unsigned int duration_jiffies = msecs_to_jiffies(duration);
		unsigned int window_size_now;

		try_to_freeze();
		/*
		 * make sure user selected ratio does not take effect until
		 * the next round. adjust target_ratio if user has changed
		 * target such that we can converge quickly.
		 */
		target_ratio = set_target_ratio;
		guard = 1 + target_ratio/20;
		window_size_now = window_size;
		count++;

		/*
		 * systems may have different ability to enter package level
		 * c-states, thus we need to compensate the injected idle ratio
		 * to achieve the actual target reported by the HW.
		 */
		compensated_ratio = target_ratio +
			get_compensation(target_ratio);
		if (compensated_ratio <= 0)
			compensated_ratio = 1;
		interval = duration_jiffies * 100 / compensated_ratio;

		/* align idle time */
		target_jiffies = roundup(jiffies, interval);
		sleeptime = target_jiffies - jiffies;
		if (sleeptime <= 0)
			sleeptime = 1;
		schedule_timeout_interruptible(sleeptime);
		/*
		 * only elected controlling cpu can collect stats and update
		 * control parameters.
		 */
		if (cpunr == control_cpu && !(count%window_size_now)) {
			should_skip =
				powerclamp_adjust_controls(target_ratio,
							guard, window_size_now);
			smp_mb();
		}

		if (should_skip)
			continue;

		target_jiffies = jiffies + duration_jiffies;
		mod_timer(&wakeup_timer, target_jiffies);
		if (unlikely(local_softirq_pending()))
			continue;
		/*
		 * stop tick sched during idle time, interrupts are still
		 * allowed. thus jiffies are updated properly.
		 */
		preempt_disable();
		/* mwait until target jiffies is reached */
		while (time_before(jiffies, target_jiffies)) {
			unsigned long ecx = 1;
			unsigned long eax = target_mwait;

			/*
			 * REVISIT: may call enter_idle() to notify drivers who
			 * can save power during cpu idle. same for exit_idle()
			 */
			local_touch_nmi();
			stop_critical_timings();
			mwait_idle_with_hints(eax, ecx);
			start_critical_timings();
			atomic_inc(&idle_wakeup_counter);
		}
		preempt_enable();
	}
	del_timer_sync(&wakeup_timer);
	clear_bit(cpunr, cpu_clamping_mask);

	return 0;
}

/*
 * 1 HZ polling while clamping is active, useful for userspace
 * to monitor actual idle ratio.
 */
static void poll_pkg_cstate(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
static void poll_pkg_cstate(struct work_struct *dummy)
{
	static u64 msr_last;
	static u64 tsc_last;
	static unsigned long jiffies_last;

	u64 msr_now;
	unsigned long jiffies_now;
	u64 tsc_now;
	u64 val64;

	msr_now = pkg_state_counter();
	tsc_now = rdtsc();
	jiffies_now = jiffies;

	/* calculate pkg cstate vs tsc ratio */
	if (!msr_last || !tsc_last)
		pkg_cstate_ratio_cur = 1;
	else {
		if (tsc_now - tsc_last) {
			val64 = 100 * (msr_now - msr_last);
			do_div(val64, (tsc_now - tsc_last));
			pkg_cstate_ratio_cur = val64;
		}
	}

	/* update record */
	msr_last = msr_now;
	jiffies_last = jiffies_now;
	tsc_last = tsc_now;

	if (true == clamping)
		schedule_delayed_work(&poll_pkg_cstate_work, HZ);
}

static int start_power_clamp(void)
{
	unsigned long cpu;
	struct task_struct *thread;

	set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
	/* prevent cpu hotplug */
	get_online_cpus();

	/* prefer BSP */
	control_cpu = 0;
	if (!cpu_online(control_cpu))
		control_cpu = smp_processor_id();

	clamping = true;
	schedule_delayed_work(&poll_pkg_cstate_work, 0);

	/* start one thread per online cpu */
	for_each_online_cpu(cpu) {
		struct task_struct **p =
			per_cpu_ptr(powerclamp_thread, cpu);

		thread = kthread_create_on_node(clamp_thread,
						(void *) cpu,
						cpu_to_node(cpu),
						"kidle_inject/%ld", cpu);
		/* bind to cpu here */
		if (likely(!IS_ERR(thread))) {
			kthread_bind(thread, cpu);
			wake_up_process(thread);
			*p = thread;
		}

	}
	put_online_cpus();

	return 0;
}
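For symmetry, a minimal sketch of the matching teardown path, assuming the same clamping flag, cpu_clamping_mask bitmap and powerclamp_thread per-cpu variable used above (the upstream driver's end_power_clamp() is very close to this):

static void end_power_clamp_sketch(void)
{
	int i;
	struct task_struct *thread;

	clamping = false;
	/* make the flag visible and give the per-cpu threads time to exit */
	smp_mb();
	msleep(20);

	for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
		pr_debug("clamping thread for cpu %d alive, stopping it\n", i);
		thread = *per_cpu_ptr(powerclamp_thread, i);
		kthread_stop(thread);
	}
}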
Example #25
int pfq_setsockopt(struct socket *sock,
                int level, int optname,
                char __user * optval,
#if(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
                unsigned
#endif
                int optlen)
{
        struct pfq_sock *so = pfq_sk(sock->sk);
        struct pfq_rx_opt * ro;
        struct pfq_tx_opt * to;

        bool found = true;

        if (so == NULL)
                return -EINVAL;

        ro = &so->rx_opt;
        to = &so->tx_opt;

        switch(optname)
        {
        case Q_SO_TOGGLE_QUEUE:
        {
                int active;
                if (optlen != sizeof(active))
                        return -EINVAL;
                if (copy_from_user(&active, optval, optlen))
                        return -EFAULT;

                if (active)
                {
                        if (!so->mem_addr)
                        {
                                struct pfq_queue_hdr * queue;

                                /* alloc queue memory */

                                if (pfq_shared_queue_alloc(so, pfq_queue_total_mem(so)) < 0)
                                {
                                        return -ENOMEM;
                                }

                                /* so->mem_addr and so->mem_size are correctly configured */

                                /* initialize queues headers */

                                queue = (struct pfq_queue_hdr *)so->mem_addr;

                                /* initialize rx queue header */

                                queue->rx.data              = (1L << 24);
                                queue->rx.poll_wait         = 0;
                                queue->rx.size              = so->rx_opt.size;
                                queue->rx.slot_size         = so->rx_opt.slot_size;

                                queue->tx.producer.index    = 0;
                                queue->tx.producer.cache    = 0;
                                queue->tx.consumer.index    = 0;
                                queue->tx.consumer.cache    = 0;

                                queue->tx.size_mask         = so->tx_opt.size - 1;
                                queue->tx.max_len           = so->tx_opt.maxlen;
                                queue->tx.size              = so->tx_opt.size;
                                queue->tx.slot_size         = so->tx_opt.slot_size;

                                /* update the queues base_addr */

                                so->rx_opt.base_addr = so->mem_addr + sizeof(struct pfq_queue_hdr);
                                so->tx_opt.base_addr = so->mem_addr + sizeof(struct pfq_queue_hdr) + pfq_queue_mpdb_mem(so);

                                /* commit both the queues */

                                smp_wmb();

                                so->rx_opt.queue_ptr = &queue->rx;
                                so->tx_opt.queue_ptr = &queue->tx;

                                pr_devel("[PFQ|%d] queue: rx_size:%d rx_slot_size:%d tx_size:%d tx_slot_size:%d\n", so->id, queue->rx.size,
                                                queue->rx.slot_size,
                                                queue->tx.size,
                                                queue->tx.slot_size);
                        }
                }
                else
                {
                        if (so->tx_opt.thread)
                        {
                                pr_devel("[PFQ|%d] stopping TX thread...\n", so->id);
                                kthread_stop(so->tx_opt.thread);
                                so->tx_opt.thread = NULL;
                        }

                        msleep(Q_GRACE_PERIOD);

                        pfq_shared_queue_free(so);
                }

        } break;

        case Q_SO_GROUP_BIND:
        {
                struct pfq_binding bind;
                if (optlen != sizeof(struct pfq_binding))
                        return -EINVAL;

                if (copy_from_user(&bind, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, bind.gid, "add binding");

                pfq_devmap_update(map_set, bind.if_index, bind.hw_queue, bind.gid);
        } break;

        case Q_SO_GROUP_UNBIND:
        {
                struct pfq_binding bind;
                if (optlen != sizeof(struct pfq_binding))
                        return -EINVAL;

                if (copy_from_user(&bind, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, bind.gid, "remove binding");

                pfq_devmap_update(map_reset, bind.if_index, bind.hw_queue, bind.gid);
        } break;

        case Q_SO_EGRESS_BIND:
        {
                struct pfq_binding info;

                if (optlen != sizeof(info))
                        return -EINVAL;
                if (copy_from_user(&info, optval, optlen))
                        return -EFAULT;

                rcu_read_lock();
                if (!dev_get_by_index_rcu(sock_net(&so->sk), info.if_index))
                {
                        rcu_read_unlock();
                        pr_devel("[PFQ|%d] TX bind: invalid if_index:%d\n", so->id, info.if_index);
                        return -EPERM;
                }
                rcu_read_unlock();

                if (info.hw_queue < -1)
                {
                        pr_devel("[PFQ|%d] TX bind: invalid queue:%d\n", so->id, info.hw_queue);
                        return -EPERM;
                }

                so->egress_index = info.if_index;
                so->egress_queue = info.hw_queue;

                pr_devel("[PFQ|%d] egress bind: if_index:%d hw_queue:%d\n", so->id, so->egress_index, so->egress_queue);

        } break;

        case Q_SO_EGRESS_UNBIND:
        {
                so->egress_index = 0;
                so->egress_queue = 0;
                pr_devel("[PFQ|%d] egress unbind.\n", so->id);

        } break;

        case Q_SO_SET_RX_TSTAMP:
        {
                int tstamp;
                if (optlen != sizeof(so->rx_opt.tstamp))
                        return -EINVAL;

                if (copy_from_user(&tstamp, optval, optlen))
                        return -EFAULT;

                tstamp = tstamp ? 1 : 0;

                /* update the timestamp_enabled counter */

                atomic_add(tstamp - so->rx_opt.tstamp, &timestamp_enabled);
                so->rx_opt.tstamp = tstamp;

                pr_devel("[PFQ|%d] timestamp_enabled counter: %d\n", so->id, atomic_read(&timestamp_enabled));
        } break;

        case Q_SO_SET_RX_CAPLEN:
        {
                typeof(so->rx_opt.caplen) caplen;

                if (optlen != sizeof(caplen))
                        return -EINVAL;
                if (copy_from_user(&caplen, optval, optlen))
                        return -EFAULT;

                if (caplen > (size_t)cap_len) {
                        pr_devel("[PFQ|%d] invalid caplen:%zu (max: %d)\n", so->id, caplen, cap_len);
                        return -EPERM;
                }

                so->rx_opt.caplen = caplen;

                so->rx_opt.slot_size = MPDB_QUEUE_SLOT_SIZE(so->rx_opt.caplen);

                pr_devel("[PFQ|%d] caplen:%zu -> slot_size:%zu\n",
                                so->id, so->rx_opt.caplen, so->rx_opt.slot_size);
        } break;

        case Q_SO_SET_RX_SLOTS:
        {
                typeof(so->rx_opt.size) slots;

                if (optlen != sizeof(slots))
                        return -EINVAL;
                if (copy_from_user(&slots, optval, optlen))
                        return -EFAULT;

                if (slots > (size_t)rx_queue_slots) {
                        pr_devel("[PFQ|%d] invalid rx slots:%zu (max: %d)\n", so->id, slots, rx_queue_slots);
                        return -EPERM;
                }

                so->rx_opt.size = slots;

                pr_devel("[PFQ|%d] rx_queue_slots:%zu\n", so->id, so->rx_opt.size);
        } break;

        case Q_SO_SET_TX_MAXLEN:
        {
                typeof (so->tx_opt.maxlen) maxlen;
                if (optlen != sizeof(maxlen))
                        return -EINVAL;
                if (copy_from_user(&maxlen, optval, optlen))
                        return -EFAULT;

                if (maxlen > (size_t)max_len) {
                        pr_devel("[PFQ|%d] invalid maxlen:%zu (max: %d)\n", so->id, maxlen, max_len);
                        return -EPERM;
                }

                so->tx_opt.maxlen = maxlen;

                so->tx_opt.slot_size = SPSC_QUEUE_SLOT_SIZE(so->tx_opt.maxlen); /* max_len: max length */

                pr_devel("[PFQ|%d] tx_slot_size:%zu\n", so->id, so->rx_opt.slot_size);
        } break;

        case Q_SO_SET_TX_SLOTS:
        {
                typeof (so->tx_opt.size) slots;

                if (optlen != sizeof(slots))
                        return -EINVAL;
                if (copy_from_user(&slots, optval, optlen))
                        return -EFAULT;

                if (slots & (slots-1))
                {
                        pr_devel("[PFQ|%d] tx slots must be a power of two.\n", so->id);
                        return -EINVAL;
                }

                if (slots > (size_t)tx_queue_slots) {
                        pr_devel("[PFQ|%d] invalid tx slots:%zu (max: %d)\n", so->id, slots, tx_queue_slots);
                        return -EPERM;
                }

                so->tx_opt.size = slots;

                pr_devel("[PFQ|%d] tx_queue_slots:%zu\n", so->id, so->tx_opt.size);
        } break;

        case Q_SO_GROUP_LEAVE:
        {
                int gid;
                if (optlen != sizeof(gid))
                        return -EINVAL;
                if (copy_from_user(&gid, optval, optlen))
                        return -EFAULT;

                if (pfq_leave_group(gid, so->id) < 0) {
                        return -EFAULT;
                }

                pr_devel("[PFQ|%d] leave: gid:%d\n", so->id, gid);
        } break;

        case Q_SO_GROUP_FPROG:
        {
                struct pfq_fprog fprog;
                if (optlen != sizeof(fprog))
                        return -EINVAL;

                if (copy_from_user(&fprog, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, fprog.gid, "group fprog");

                if (fprog.fcode.len > 0)  /* set the filter */
                {
                        struct sk_filter *filter = pfq_alloc_sk_filter(&fprog.fcode);
                        if (filter == NULL)
                        {
                                pr_devel("[PFQ|%d] fprog error: alloc_sk_filter for gid:%d\n", so->id, fprog.gid);
                                return -EINVAL;
                        }

                        __pfq_set_group_filter(fprog.gid, filter);

                        pr_devel("[PFQ|%d] fprog: gid:%d (fprog len %d bytes)\n", so->id, fprog.gid, fprog.fcode.len);
                }
                else 	/* reset the filter */
                {
                        __pfq_set_group_filter(fprog.gid, NULL);

                        pr_devel("[PFQ|%d] fprog: gid:%d (resetting filter)\n", so->id, fprog.gid);
                }

        } break;

        case Q_SO_GROUP_VLAN_FILT_TOGGLE:
        {
                struct pfq_vlan_toggle vlan;

                if (optlen != sizeof(vlan))
                        return -EINVAL;
                if (copy_from_user(&vlan, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, vlan.gid, "group vlan filt toggle");

                __pfq_toggle_group_vlan_filters(vlan.gid, vlan.toggle);

                pr_devel("[PFQ|%d] vlan filters %s for gid:%d\n", so->id, (vlan.toggle ? "enabled" : "disabled"), vlan.gid);
        } break;

        case Q_SO_GROUP_VLAN_FILT:
        {
                struct pfq_vlan_toggle filt;

                if (optlen != sizeof(filt))
                        return -EINVAL;

                if (copy_from_user(&filt, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, filt.gid, "group vlan filt");

                if (filt.vid < -1 || filt.vid > 4094) {
                        pr_devel("[PFQ|%d] vlan_set error: gid:%d invalid vid:%d!\n", so->id, filt.gid, filt.vid);
                        return -EINVAL;
                }

                if (!__pfq_vlan_filters_enabled(filt.gid)) {
                        pr_devel("[PFQ|%d] vlan_set error: vlan filters disabled for gid:%d!\n", so->id, filt.gid);
                        return -EPERM;
                }

                if (filt.vid  == -1) /* any */
                {
                        int i;
                        for(i = 1; i < 4095; i++)
                                __pfq_set_group_vlan_filter(filt.gid, filt.toggle, i);
                }
                else
                {
                        __pfq_set_group_vlan_filter(filt.gid, filt.toggle, filt.vid);
                }

                pr_devel("[PFQ|%d] vlan_set filter vid %d for gid:%d\n", so->id, filt.vid, filt.gid);
        } break;

        case Q_SO_TX_THREAD_BIND:
        {
                struct pfq_binding info;

                if (optlen != sizeof(info))
                        return -EINVAL;
                if (copy_from_user(&info, optval, optlen))
                        return -EFAULT;

                rcu_read_lock();
                if (!dev_get_by_index_rcu(sock_net(&so->sk), info.if_index))
                {
                        rcu_read_unlock();
                        pr_devel("[PFQ|%d] TX bind: invalid if_index:%d\n", so->id, info.if_index);
                        return -EPERM;
                }
                rcu_read_unlock();

                if (info.hw_queue < -1)
                {
                        pr_devel("[PFQ|%d] TX bind: invalid queue:%d\n", so->id, info.hw_queue);
                        return -EPERM;
                }

                to->if_index = info.if_index;
                to->hw_queue = info.hw_queue;

                pr_devel("[PFQ|%d] TX bind: if_index:%d hw_queue:%d\n", so->id, to->if_index, to->hw_queue);

        } break;

        case Q_SO_TX_THREAD_START:
        {
                int cpu;

                if (to->thread)
                {
                        pr_devel("[PFQ|%d] TX thread already created on cpu %d!\n", so->id, to->cpu);
                        return -EPERM;
                }
                if (to->if_index == -1)
                {
                        pr_devel("[PFQ|%d] socket TX not bound to any device!\n", so->id);
                        return -EPERM;
                }
                if (to->queue_ptr == NULL)
                {
                        pr_devel("[PFQ|%d] socket not enabled!\n", so->id);
                        return -EPERM;
                }

                if (optlen != sizeof(cpu))
                        return -EINVAL;

                if (copy_from_user(&cpu, optval, optlen))
                        return -EFAULT;

                if (cpu < -1 || (cpu > -1  && !cpu_online(cpu)))
                {
                        pr_devel("[PFQ|%d] invalid cpu (%d)!\n", so->id, cpu);
                        return -EPERM;
                }

                to->cpu = cpu;

                pr_devel("[PFQ|%d] creating TX thread on cpu %d -> if_index:%d hw_queue:%d\n", so->id, to->cpu, to->if_index, to->hw_queue);

                to->thread = kthread_create_on_node(pfq_tx_thread,
                                so,
                                to->cpu == -1 ? -1 : cpu_to_node(to->cpu),
                                "pfq_tx_%d", so->id);

                if (IS_ERR(to->thread)) {
                        printk(KERN_INFO "[PFQ] kernel_thread() create failed on cpu %d!\n", to->cpu);
                        return PTR_ERR(to->thread);
                }

                if (to->cpu != -1)
                        kthread_bind(to->thread, to->cpu);

        } break;

        case Q_SO_TX_THREAD_STOP:
        {
                pr_devel("[PFQ|%d] stopping TX thread...\n", so->id);

                if (!to->thread)
                {
                        pr_devel("[PFQ|%d] TX thread not running!\n", so->id);
                        return -EPERM;
                }

                kthread_stop(to->thread);
                to->thread = NULL;

                pr_devel("[PFQ|%d] stop TX thread: done.\n", so->id);

        } break;

        case Q_SO_TX_THREAD_WAKEUP:
        {
                if (to->if_index == -1)
                {
                        pr_devel("[PFQ|%d] socket TX not bound to any device!\n", so->id);
                        return -EPERM;
                }
                if (!to->thread)
                {
                        pr_devel("[PFQ|%d] TX thread not running!\n", so->id);
                        return -EPERM;
                }

                wake_up_process(to->thread);
        } break;

        case Q_SO_TX_QUEUE_FLUSH:
        {
                struct net_device *dev;

                if (to->if_index == -1)
                {
                        pr_devel("[PFQ|%d] socket TX not bound to any device!\n", so->id);
                        return -EPERM;
                }

                if (to->thread && to->thread->state == TASK_RUNNING)
                {
                        pr_devel("[PFQ|%d] TX thread is running!\n", so->id);
                        return -EPERM;
                }

                if (to->queue_ptr == NULL)
                {
                        pr_devel("[PFQ|%d] socket not enabled!\n", so->id);
                        return -EPERM;
                }

                dev = dev_get_by_index(sock_net(&so->sk), to->if_index);
                if (!dev)
                {
                        pr_devel("[PFQ|%d] No such device (if_index = %d)\n", so->id, to->if_index);
                        return -EPERM;
                }

                pfq_tx_queue_flush(to, dev, get_cpu(), NUMA_NO_NODE);
                put_cpu();

                dev_put(dev);
        } break;

        case Q_SO_GROUP_FUNCTION:
        {
                struct pfq_group_computation tmp;
                struct pfq_computation_descr *descr;
                size_t psize, ucsize;

                struct pfq_computation_tree *comp;
                void *context;

                if (optlen != sizeof(tmp))
                        return -EINVAL;
                if (copy_from_user(&tmp, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, tmp.gid, "group computation");

                if (copy_from_user(&psize, tmp.prog, sizeof(size_t)))
                        return -EFAULT;

                pr_devel("[PFQ|%d] computation size: %zu\n", so->id, psize);

                ucsize = sizeof(size_t) * 2 + psize * sizeof(struct pfq_functional_descr);

                descr = kmalloc(ucsize, GFP_KERNEL);
                if (descr == NULL) {
                        pr_devel("[PFQ|%d] computation: out of memory!\n", so->id);
                        return -ENOMEM;
                }

                if (copy_from_user(descr, tmp.prog, ucsize)) {
                        pr_devel("[PFQ|%d] computation: copy_from_user error!\n", so->id);
                        kfree(descr);
                        return -EFAULT;
                }

                /* print user computation */

                pr_devel_computation_descr(descr);

		/* ensure the correctness of the specified functional computation */

		if (pfq_validate_computation_descr(descr) < 0) {
                        pr_devel("[PFQ|%d] invalid expression!\n", so->id);
                        kfree(descr);
                        return -EFAULT;
		}

                /* allocate context */

                context = pfq_context_alloc(descr);
                if (context == NULL) {
                        pr_devel("[PFQ|%d] context: alloc error!\n", so->id);
                        kfree(descr);
                        return -EFAULT;
                }

                /* allocate struct pfq_computation_tree */

                comp = pfq_computation_alloc(descr);
                if (comp == NULL) {
                        pr_devel("[PFQ|%d] computation: alloc error!\n", so->id);
                        kfree(context);
                        kfree(descr);
                        return -EFAULT;
                }

                /* link the functional computation */

                if (pfq_computation_rtlink(descr, comp, context) < 0) {
                        pr_devel("[PFQ|%d] computation aborted!", so->id);
			kfree(context);
			kfree(descr);
			kfree(comp);
                        return -EPERM;
                }

		/* print executable tree data structure */

		pr_devel_computation_tree(comp);

		/* exec init functions */

		if (pfq_computation_init(comp) < 0) {
                        pr_devel("[PFQ|%d] computation initialization aborted!", so->id);
                        kfree(context);
                        kfree(descr);
                        kfree(comp);
                        return -EPERM;
		}

                /* set the new program */

                if (pfq_set_group_prog(tmp.gid, comp, context) < 0) {
                        pr_devel("[PFQ|%d] set group program error!\n", so->id);
                        kfree(context);
                        kfree(descr);
                        kfree(comp);
                        return -EPERM;
                }

		kfree(descr);
                return 0;

        } break;

        default:
        {
                found = false;
        } break;

        }

        return found ? 0 : sock_setsockopt(sock, level, optname, optval, optlen);
}
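The setsockopt cases above are driven from user space through the PFQ socket. Below is a minimal, hypothetical sketch of binding and starting the TX thread; PF_Q (used here as the option level), the Q_SO_* constants and the pfq_binding layout are assumptions taken from the PFQ user-space headers, not definitions made in this document.

#include <sys/socket.h>

/* illustrative declaration only; the real definition comes from the PFQ headers */
struct pfq_binding {
        int gid;
        int if_index;
        int hw_queue;
};

static int pfq_tx_start(int fd, int if_index, int hw_queue, int cpu)
{
        struct pfq_binding b = { .gid = 0, .if_index = if_index, .hw_queue = hw_queue };

        /* bind the TX path to a device/queue; the kernel returns -EPERM
           if if_index does not name an existing device */
        if (setsockopt(fd, PF_Q, Q_SO_TX_THREAD_BIND, &b, sizeof(b)) < 0)
                return -1;

        /* the socket must already be enabled (TX queue mapped), otherwise
           Q_SO_TX_THREAD_START is rejected with -EPERM; cpu == -1 leaves
           the kernel thread unbound */
        return setsockopt(fd, PF_Q, Q_SO_TX_THREAD_START, &cpu, sizeof(cpu));
}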
示例#26
int __init mod_TZ_DRV_Init(void)
{
    int s32Ret;
    dev_t dev;

    TZ_DPRINTK("TZ support Ver.1\n");

    smc = kmalloc(sizeof(struct smc_struct), GFP_KERNEL);
    if(!smc)
    {
        TZ_DPRINTK("smc malloc fail!!\n");
        return -1;
    }

    init_rwsem(&smc->smc_lock);

    tz_class = class_create(THIS_MODULE, "trustzone");

    if (IS_ERR(tz_class))
    {
        return PTR_ERR(tz_class);
    }

    if (TZDev.s32TZMajor)
    {
        dev = MKDEV(TZDev.s32TZMajor, TZDev.s32TZMinor);
        s32Ret = register_chrdev_region(dev, MOD_TZ_DEVICE_COUNT, MOD_TZ_NAME);
    }
    else
    {
        s32Ret = alloc_chrdev_region(&dev, TZDev.s32TZMinor, MOD_TZ_DEVICE_COUNT, MOD_TZ_NAME);
        TZDev.s32TZMajor = MAJOR(dev);
    }

    if ( 0 > s32Ret)
    {
        TZ_DPRINTK("Unable to get major %d\n", TZDev.s32TZMajor);
        class_destroy(tz_class);
        return s32Ret;
    }

    cdev_init(&TZDev.cDevice, &TZDev.TZFop);
    if (0!= (s32Ret= cdev_add(&TZDev.cDevice, dev, MOD_TZ_DEVICE_COUNT)))
    {
        TZ_DPRINTK("Unable add a character device\n");
        unregister_chrdev_region(dev, MOD_TZ_DEVICE_COUNT);
        class_destroy(tz_class);
        return s32Ret;
    }

    device_create(tz_class, NULL, dev, NULL, MOD_TZ_NAME);

    down_write(&smc->smc_lock);
    smc->smc_flag = 1;
    smc->cmd1 = 0xdeadbee1;
    smc->cmd2 = 0xdeadbee2;
    up_write(&smc->smc_lock);

    tz_task = kthread_create(__smc_thread, NULL, "SMC handle");
    if (IS_ERR(tz_task))
    {
        TZ_DPRINTK("smc thread creation failed!!\n");
        return PTR_ERR(tz_task);
    }

    kthread_bind(tz_task, 1); /* bind the SMC handler thread to CPU 1 before waking it */
    wake_up_process(tz_task);

    return 0;
}
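mod_TZ_DRV_Init leaves the class, the char-device region, the cdev, the device node, the smc buffer and the SMC thread alive. A minimal sketch of the matching exit path is given below, assuming the same globals (tz_class, TZDev, smc, tz_task) and that the registered dev_t can be rebuilt from TZDev.s32TZMajor/s32TZMinor; kthread_stop() additionally assumes __smc_thread polls kthread_should_stop().

void __exit mod_TZ_DRV_Exit(void)
{
    dev_t dev = MKDEV(TZDev.s32TZMajor, TZDev.s32TZMinor);

    if (tz_task)
        kthread_stop(tz_task);   /* returns only once __smc_thread exits */

    device_destroy(tz_class, dev);
    cdev_del(&TZDev.cDevice);
    unregister_chrdev_region(dev, MOD_TZ_DEVICE_COUNT);
    class_destroy(tz_class);
    kfree(smc);
}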
示例#27
int __init test_mcspi_init(void)
{
	struct task_struct *p1, *p2;
	int x;

#define MODCTRL		0xd809a028
#define SYSTST		0xd809a024

	int status;
	int count = 10;
	int val;

	/* Required only if kernel does not configure
	 * SPI2 Mux settings
	 */

	if (slave_mode) {

		printk(KERN_INFO "configuring slave mode\n");

//		omap_writew(0x1700, spi2_clk);
//		omap_writew(0x1700, spi2_simo);
//		omap_writew(0x1700, spi2_somi);
//		omap_writew(0x1708, spi2_cs0);

	} else {

		printk(KERN_INFO "configuring master mode \n");

//		omap_writew(0x1700, spi2_clk);
//		omap_writew(0x1700, spi2_simo);
//		omap_writew(0x1700, spi2_somi);
//		omap_writew(0x1708, spi2_cs0);

	}
	create_proc_file_entries();

	if (test_mcspi_smp) {
		spitst_trans(0);
		p1 = kthread_create((void *)(omap2_mcspi_test1), NULL, "mcspitest/0");
		p2 = kthread_create((void *)(omap2_mcspi_test2), NULL, "mcspitest/1");

		kthread_bind(p1, 0);
		kthread_bind(p2, 1);

		x = wake_up_process(p1);
		x = wake_up_process(p2);
	}
	if (systst_mode == 1 && !test_mcspi_smp) {

		/* SPI clocks need to be always enabled for this to work */
		__raw_writel(0x8, MODCTRL);
		printk(KERN_INFO "MODCTRL %x\n", __raw_readl(MODCTRL));

		if (slave_mode == 0) /* Master */
			__raw_writel(0x100, SYSTST);
		else
			__raw_writel(0x600, SYSTST);

		printk(KERN_INFO "SYSTST Mode setting %x\n",
				__raw_readl(SYSTST));

		while (count--) {
			if (slave_mode == 0) {
				val = ((count & 0x1) << 6) | 0x100;
				val = ((count & 0x1) << 5) | val;
				val = ((count & 0x1) << 0) | val;

				__raw_writel(val, SYSTST);
			} else {
				val = ((count & 0x1) << 4) | 0x600;
				__raw_writel(val, SYSTST);
			}
			printk(KERN_INFO "SYSTST %x val %x\n",
					__raw_readl(SYSTST)&0xff1, val);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(100);
		}
	}

	if (systst_mode == 0 && !test_mcspi_smp) {
		status = spi_register_driver(&spitst_spi);
		if (status < 0)
			printk(KERN_ERR "spi_register_driver failed, status %d\n",
					status);
		else
			printk(KERN_INFO "spi_register_driver successful\n");
		return status;
	}
	return 0;
}
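kthread_create() expects a thread function with the signature int (*)(void *), which is why omap2_mcspi_test1/2 are cast above. A minimal, hypothetical thread body with the expected prototype is sketched below (it is not the real test routine); it relies on <linux/kthread.h> and <linux/sched.h>.

static int mcspi_test_thread(void *data)
{
	/* run until kthread_stop() is called on this task */
	while (!kthread_should_stop()) {
		/* one SPI transfer iteration would go here */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);	/* ~100 ms between iterations */
	}
	return 0;
}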