void fpsimd_thread_switch(struct task_struct *next)
{
	/*
	 * Save the current FPSIMD state to memory, but only if whatever is in
	 * the registers is in fact the most recent userland FPSIMD state of
	 * 'current'.
	 */
	if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
		fpsimd_save_state(&current->thread.fpsimd_state);

	if (next->mm) {
		/*
		 * If we are switching to a task whose most recent userland
		 * FPSIMD state is already in the registers of *this* cpu,
		 * we can skip loading the state from memory. Otherwise, set
		 * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
		 * upon the next return to userland.
		 */
		struct fpsimd_state *st = &next->thread.fpsimd_state;

		if (__this_cpu_read(fpsimd_last_state) == st
		    && st->cpu == smp_processor_id())
			clear_ti_thread_flag(task_thread_info(next),
					     TIF_FOREIGN_FPSTATE);
		else
			set_ti_thread_flag(task_thread_info(next),
					   TIF_FOREIGN_FPSTATE);
	}
}
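The two checks in the comment above (the per-CPU "last state" pointer and the CPU number recorded in the state itself) are the core of the lazy-restore handshake. Below is a minimal, self-contained sketch of the same idea in plain C; the struct, array, and function names are invented for the illustration and are not kernel APIs.

#include <stdbool.h>

#define NR_CPUS 8

struct fp_state {
	int cpu;		/* CPU that last loaded this state into registers */
	double regs[32];	/* stand-in for the real FPSIMD register file */
};

struct task {
	bool foreign_fpstate;	/* stand-in for TIF_FOREIGN_FPSTATE */
	struct fp_state fpsimd;
};

/* One slot per CPU: whose state currently lives in that CPU's registers. */
static struct fp_state *last_state[NR_CPUS];

/*
 * On a switch to 'next' on 'cpu', the registers still hold next's state
 * only if this CPU was the last one to load it *and* the state remembers
 * being loaded here (the task may have run elsewhere in between).
 */
static void mark_fpstate(struct task *next, int cpu)
{
	struct fp_state *st = &next->fpsimd;

	next->foreign_fpstate = !(last_state[cpu] == st && st->cpu == cpu);
}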
Example #2
unsigned fcse_flush_all_start(void)
{
	if (!cache_is_vivt())
		return 0;

#ifndef CONFIG_ARM_FCSE_PREEMPT_FLUSH
	preempt_disable();
#endif /* CONFIG_ARM_FCSE_PREEMPT_FLUSH */

#if defined(CONFIG_IPIPE)
	clear_ti_thread_flag(current_thread_info(), TIF_SWITCHED);
#elif defined(CONFIG_ARM_FCSE_PREEMPT_FLUSH)
	return nr_context_switches();
#endif /* CONFIG_ARM_FCSE_PREEMPT_FLUSH */

	return 0;
}
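When CONFIG_ARM_FCSE_PREEMPT_FLUSH is enabled, the example above does not disable preemption; instead it returns a snapshot of nr_context_switches() so the caller can later tell whether it was scheduled out mid-flush. A rough sketch of that snapshot-and-compare pattern follows; the counter and helper names are made up for the sketch and are not part of the FCSE code.

#include <stdbool.h>

/* Stand-in for the kernel's global context-switch counter. */
static unsigned long context_switch_count;

static unsigned long read_context_switches(void)
{
	return context_switch_count;
}

static unsigned long flush_start(void)
{
	/* Remember how many switches had happened when the flush began. */
	return read_context_switches();
}

static bool flush_was_preempted(unsigned long start_count)
{
	/* Any increase means another task ran while we were flushing. */
	return read_context_switches() != start_count;
}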
Example #3
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	struct task_struct *p;
	struct thread_info *ti;
	uid_t euid;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		put_online_cpus();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held.  We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	euid = current_euid();
	retval = -EPERM;
	if (euid != p->cred->euid && euid != p->cred->uid &&
	    !capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	retval = security_task_setscheduler(p, 0, NULL);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, &effective_mask);
	} else {
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, &new_mask);
	}

out_unlock:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
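The comment about tasklist_lock in the example above states a general rule: pin the object with a reference count before dropping the lock you found it under, do the work that may sleep, then drop the reference. Here is a compact sketch of that pattern with a generic refcounted object; the pthread lock and the extern helpers are illustrative stand-ins, not kernel primitives.

#include <pthread.h>
#include <stdatomic.h>

struct obj {
	atomic_int refcount;
	/* payload */
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

extern struct obj *lookup_locked(int id);	/* caller holds table_lock */
extern void release(struct obj *o);		/* drops a reference */
extern int do_sleeping_work(struct obj *o);	/* must not hold table_lock */

int update_obj(int id)
{
	struct obj *o;
	int ret;

	pthread_rwlock_rdlock(&table_lock);
	o = lookup_locked(id);
	if (!o) {
		pthread_rwlock_unlock(&table_lock);
		return -1;
	}
	/* Pin the object so it outlives the lock we found it under. */
	atomic_fetch_add(&o->refcount, 1);
	pthread_rwlock_unlock(&table_lock);

	ret = do_sleeping_work(o);	/* may block; safe without the lock */

	release(o);
	return ret;
}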
Example #4
asmlinkage struct pt_regs *do_debug(struct pt_regs *regs)
{
    struct thread_info	*ti;
    unsigned long		trampoline_addr;
    u32			status;
    u32			ctrl;
    int			code;

    status = ocd_read(DS);
    ti = current_thread_info();
    code = TRAP_BRKPT;

    pr_debug("do_debug: status=0x%08x PC=0x%08lx SR=0x%08lx tif=0x%08lx\n",
             status, regs->pc, regs->sr, ti->flags);

    if (!user_mode(regs)) {
        unsigned long	die_val = DIE_BREAKPOINT;

        if (status & (1 << OCD_DS_SSS_BIT))
            die_val = DIE_SSTEP;

        if (notify_die(die_val, "ptrace", regs, 0, 0, SIGTRAP)
                == NOTIFY_STOP)
            return regs;

        if ((status & (1 << OCD_DS_SWB_BIT))
                && test_and_clear_ti_thread_flag(
                    ti, TIF_BREAKPOINT)) {
            /*
             * Explicit breakpoint from trampoline or
             * exception/syscall/interrupt handler.
             *
             * The real saved regs are on the stack right
             * after the ones we saved on entry.
             */
            regs++;
            pr_debug("  -> TIF_BREAKPOINT done, adjusted regs:"
                     "PC=0x%08lx SR=0x%08lx\n",
                     regs->pc, regs->sr);
            BUG_ON(!user_mode(regs));

            if (test_thread_flag(TIF_SINGLE_STEP)) {
                pr_debug("Going to do single step...\n");
                return regs;
            }

            /*
             * No TIF_SINGLE_STEP means we're done
             * stepping over a syscall. Do the trap now.
             */
            code = TRAP_TRACE;
        } else if ((status & (1 << OCD_DS_SSS_BIT))
                   && test_ti_thread_flag(ti, TIF_SINGLE_STEP)) {

            pr_debug("Stepped into something, "
                     "setting TIF_BREAKPOINT...\n");
            set_ti_thread_flag(ti, TIF_BREAKPOINT);

            /*
             * We stepped into an exception, interrupt or
             * syscall handler. Some exception handlers
             * don't check for pending work, so we need to
             * set up a trampoline just in case.
             *
             * The exception entry code will undo the
             * trampoline stuff if it does a full context
             * save (which also means that it'll check for
             * pending work later.)
             */
            if ((regs->sr & MODE_MASK) == MODE_EXCEPTION) {
                trampoline_addr
                    = (unsigned long)&debug_trampoline;

                pr_debug("Setting up trampoline...\n");
                ti->rar_saved = sysreg_read(RAR_EX);
                ti->rsr_saved = sysreg_read(RSR_EX);
                sysreg_write(RAR_EX, trampoline_addr);
                sysreg_write(RSR_EX, (MODE_EXCEPTION
                                      | SR_EM | SR_GM));
                BUG_ON(ti->rsr_saved & MODE_MASK);
            }

            /*
             * If we stepped into a system call, we
             * shouldn't do a single step after we return
             * since the return address is right after the
             * "scall" instruction we were told to step
             * over.
             */
            if ((regs->sr & MODE_MASK) == MODE_SUPERVISOR) {
                pr_debug("Supervisor; no single step\n");
                clear_ti_thread_flag(ti, TIF_SINGLE_STEP);
            }

            ctrl = ocd_read(DC);
            ctrl &= ~(1 << OCD_DC_SS_BIT);
            ocd_write(DC, ctrl);

            return regs;
        } else {
            printk(KERN_ERR "Unexpected OCD_DS value: 0x%08x\n",
                   status);
            printk(KERN_ERR "Thread flags: 0x%08lx\n", ti->flags);
            die("Unhandled debug trap in kernel mode",
                regs, SIGTRAP);
        }
    } else if (status & (1 << OCD_DS_SSS_BIT)) {
        /* Single step in user mode */
        code = TRAP_TRACE;

        ctrl = ocd_read(DC);
        ctrl &= ~(1 << OCD_DC_SS_BIT);
        ocd_write(DC, ctrl);
    }

    pr_debug("Sending SIGTRAP: code=%d PC=0x%08lx SR=0x%08lx\n",
             code, regs->pc, regs->sr);

    clear_thread_flag(TIF_SINGLE_STEP);
    _exception(SIGTRAP, regs, code, instruction_pointer(regs));

    return regs;
}
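The kernel-mode branch of the example above is essentially a small state machine over two thread flags: a software breakpoint with TIF_BREAKPOINT set means the trampoline (or a handler's explicit breakpoint) fired, while a single-step status with TIF_SINGLE_STEP set means we stepped into a handler and must arm the trampoline. The stripped-down sketch below captures only that decision; the enum, struct, and field names are invented stand-ins for the AVR32 OCD status bits and thread flags.

#include <stdbool.h>

enum debug_action {
	ACTION_RESUME,		/* adjusted regs, let the task continue */
	ACTION_ARM_TRAMPOLINE,	/* stepped into a handler; set up trampoline */
	ACTION_DIE,		/* unexpected status in kernel mode */
};

struct dbg_status {
	bool sw_breakpoint;	/* stand-in for OCD_DS_SWB_BIT */
	bool single_step;	/* stand-in for OCD_DS_SSS_BIT */
};

struct dbg_flags {
	bool tif_breakpoint;
	bool tif_single_step;
};

static enum debug_action classify_kernel_debug(struct dbg_status s,
					       struct dbg_flags *f)
{
	if (s.sw_breakpoint && f->tif_breakpoint) {
		/* Trampoline or handler hit the explicit breakpoint. */
		f->tif_breakpoint = false;
		return ACTION_RESUME;
	}
	if (s.single_step && f->tif_single_step) {
		/* Stepped into an exception/syscall; arm the trampoline. */
		f->tif_breakpoint = true;
		return ACTION_ARM_TRAMPOLINE;
	}
	return ACTION_DIE;
}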
Example #5
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_var_t cpus_allowed, new_mask, effective_mask;
	struct thread_info *ti;
	struct task_struct *p;
	int retval;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_new_mask;
	}
	retval = -EPERM;
	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);

 again:
	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, effective_mask);
	} else {
		cpumask_copy(effective_mask, new_mask);
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, new_mask);
	}

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(effective_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(effective_mask);
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
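The again: label in the example above is an optimistic-retry loop: apply the new mask, then re-read the (possibly concurrently changed) cpuset constraint and start over if the mask you just applied no longer fits. A generic sketch of that retry shape follows; the bitmask type and the extern helpers are invented for the illustration.

typedef unsigned long maskset_t;	/* toy stand-in for a cpumask */

extern int apply_mask(maskset_t m);	/* like set_cpus_allowed_ptr() */
extern maskset_t read_constraint(void);	/* like cpuset_cpus_allowed() */

static int apply_with_retry(maskset_t requested)
{
	maskset_t effective = requested;
	int ret;

	for (;;) {
		ret = apply_mask(effective);
		if (ret)
			return ret;

		maskset_t allowed = read_constraint();
		if ((effective & ~allowed) == 0)
			return 0;	/* still within the constraint */

		/* Raced with a constraint update; clamp and try again. */
		effective = allowed;
	}
}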
Example #6
static void __init do_boot_cpu (int apicid)
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout, cpu;
	unsigned long start_rip;

	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	x86_cpu_to_apicid[cpu] = apicid;

	cpu_pda[cpu].pcurrent = idle;

	start_rip = setup_trampoline();

	init_rsp = idle->thread.rsp; 
	per_cpu(init_tss,cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_ti_thread_flag(idle->thread_info, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid, 
	       start_rip, init_rsp);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	CMOS_WRITE(0xa, 0xf);
	local_flush_tlb();
	Dprintk("1.\n");
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	Dprintk("2.\n");
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
	Dprintk("3.\n");

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip); 

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
#if APIC_DEBUG
			inquire_remote_apic(apicid);
#endif
		}
	}
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		cpucount--;
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
	}
}
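The callin wait in the example above is a plain bounded-poll loop: check a flag that the secondary CPU sets, delay a fixed interval, and give up after a total budget (50000 iterations of 100 µs, roughly 5 s, matching the "Wait 5s total" comment). The same shape in portable C, using a POSIX sleep in place of udelay(); the flag and function names are for the sketch only.

#include <stdbool.h>
#include <unistd.h>

extern volatile bool cpu_called_in;	/* set by the secondary CPU */

/* Poll for up to 'timeout_us' microseconds, checking every 'step_us'. */
static bool wait_for_callin(unsigned long timeout_us, unsigned long step_us)
{
	unsigned long waited;

	for (waited = 0; waited < timeout_us; waited += step_us) {
		if (cpu_called_in)
			return true;	/* the AP has booted */
		usleep(step_us);
	}
	return cpu_called_in;
}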