Example No. 1
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	mmgrab(mm);
	mmget(mm);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
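
The back-to-back mmgrab()/mmget() calls above pin both reference counts of
init_mm: mmgrab() takes a reference on struct mm_struct itself (mm->mm_count),
while mmget() additionally pins the address space (mm->mm_users). As a hedged
illustration of that pairing outside the boot path (borrow_mm_example() is a
hypothetical helper, not a kernel function), a caller holding a bare mm
pointer could do:

/*
 * Sketch only: borrowing an mm from kernel context.
 * mmgrab()/mmdrop() pin struct mm_struct itself (mm->mm_count);
 * mmget_not_zero()/mmput() additionally pin the address space (mm->mm_users).
 */
static void borrow_mm_example(struct mm_struct *mm)
{
	mmgrab(mm);			/* mm_struct cannot be freed now */
	if (mmget_not_zero(mm)) {	/* address space still alive? */
		/* ... safe to operate on the address space here ... */
		mmput(mm);
	}
	mmdrop(mm);
}

The sketch uses mmget_not_zero() rather than a bare mmget() because, unlike
the boot path above where init_mm is always live, a general mm may already
have lost its last user.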
Example No. 2
/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after the OOM killer
 * has already been disabled.
 *
 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
 * (either by holding task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because otherwise the OOM killer wouldn't be
	 * able to free any memory and would livelock. freezing_slow_path()
	 * will tell the freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}
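
The cmpxchg() above is a publish-once idiom: cmpxchg() returns the previous
value of tsk->signal->oom_mm, so a NULL return means this caller installed mm
and owns the follow-up mmgrab() that ties oom_mm to the lifetime of the signal
struct. A minimal sketch of the idiom in isolation (oom_mm_slot and
publish_mm_once() are illustrative names, not kernel symbols):

/* Sketch only: install a pointer exactly once across racing callers. */
static struct mm_struct *oom_mm_slot;

static void publish_mm_once(struct mm_struct *mm)
{
	/* cmpxchg() returns the old value; NULL means we won the race. */
	if (!cmpxchg(&oom_mm_slot, NULL, mm))
		mmgrab(mm);	/* the winner takes the long-lived reference */
}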
Example No. 3
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			__func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		__func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */

	secondary_trap_init();

	/* All kernel threads share the same mm context. */

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
Example No. 4
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}
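
The comment at the top of the function is the key design note: mmput() may
trigger exit_mmap() and re-enter the driver while struct_mutex is held, so the
final mm release has to be deferred to process context outside the lock. A
hedged sketch of that general pattern, using a kref release that punts the
drop to a workqueue (struct deferred_mm and its helpers are illustrative, not
the actual i915 code):

/* Sketch only: drop an mm reference from a worker, outside the caller's lock. */
struct deferred_mm {
	struct kref kref;
	struct mm_struct *mm;
	struct work_struct work;
};

static void deferred_mm_release_worker(struct work_struct *work)
{
	struct deferred_mm *dmm = container_of(work, struct deferred_mm, work);

	mmdrop(dmm->mm);	/* safe here: no driver locks are held */
	kfree(dmm);
}

static void deferred_mm_release(struct kref *kref)
{
	struct deferred_mm *dmm = container_of(kref, struct deferred_mm, kref);

	/* Called with the owner's mutex held, so defer the heavy part. */
	INIT_WORK(&dmm->work, deferred_mm_release_worker);
	schedule_work(&dmm->work);
}

With this shape, kref_put(&dmm->kref, deferred_mm_release) is safe to call
while the mutex is held, because the potentially recursive work happens later
in the worker.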
Example No. 5
void __init trap_init(void)
{
	int i;

	pgd_current = (unsigned long)init_mm.pgd;
	/* DEBUG EXCEPTION */
	memcpy((void *)DEBUG_VECTOR_BASE_ADDR,
			&debug_exception_vector, DEBUG_VECTOR_SIZE);
	/* NMI EXCEPTION */
	memcpy((void *)GENERAL_VECTOR_BASE_ADDR,
			&general_exception_vector, GENERAL_VECTOR_SIZE);

	/*
	 * Initialise exception handlers
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	set_except_vector(1, handle_nmi);
	set_except_vector(2, handle_adelinsn);
	set_except_vector(3, handle_tlb_refill);
	set_except_vector(4, handle_tlb_invaild);
	set_except_vector(5, handle_ibe);
	set_except_vector(6, handle_pel);
	set_except_vector(7, handle_sys);
	set_except_vector(8, handle_ccu);
	set_except_vector(9, handle_ri);
	set_except_vector(10, handle_tr);
	set_except_vector(11, handle_adedata);
	set_except_vector(12, handle_adedata);
	set_except_vector(13, handle_tlb_refill);
	set_except_vector(14, handle_tlb_invaild);
	set_except_vector(15, handle_mod);
	set_except_vector(16, handle_cee);
	set_except_vector(17, handle_cpe);
	set_except_vector(18, handle_dbe);
	flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	cpu_cache_init();
}
Example No. 6
static void do_exit_flush_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;
	unsigned long pid = mm->context.id;

	if (current->mm == mm)
		return; /* Local CPU */

	if (current->active_mm == mm) {
		/*
		 * Must be a kernel thread, because the sender is single-threaded.
		 */
		BUG_ON(current->mm);
		mmgrab(&init_mm);
		switch_mm(mm, &init_mm, current);
		current->active_mm = &init_mm;
		mmdrop(mm);
	}
	_tlbiel_pid(pid, RIC_FLUSH_ALL);
}
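
do_exit_flush_lazy_tlb() is written as an IPI callback: on each CPU it hands
any lazy user of mm over to init_mm (taking an init_mm reference with
mmgrab() and dropping the old one with mmdrop()) before flushing the PID from
the TLB. The sender side would typically fan it out to the CPUs that have used
the mm, roughly as in this sketch (mirroring how the powerpc radix code
invokes such callbacks):

	/*
	 * Sketch of the sender: run the callback on every CPU in the mm's
	 * cpumask and wait for completion before tearing the context down.
	 */
	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
			       (void *)mm, 1);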
Example No. 7
/*
 * Activate a secondary processor.  Very minimal; don't add anything
 * to this path without knowing what you're doing, since SMP booting
 * is pretty fragile.
 */
static void start_secondary(void)
{
	int cpuid;

	preempt_disable();

	cpuid = smp_processor_id();

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[cpuid]);

	/*
	 * In large machines even this will slow us down, since we
	 * will be contending for the printk spinlock.
	 */
	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */

	/* Initialize the current asid for our first page table. */
	__this_cpu_write(current_asid, min_asid);

	/* Set up this thread as another owner of the init_mm */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	/* Allow hypervisor messages to be received */
	init_messaging();
	local_irq_enable();

	/* Indicate that we're ready to come up. */
	/* Must not do this before we're ready to receive messages */
	if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
		pr_warn("CPU#%d already started!\n", cpuid);
		for (;;)
			local_irq_enable();
	}

	smp_nap();
}
Example No. 8
static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		message, task_pid_nr(victim), victim->comm,
		K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from userspace, so we are
		 * OK to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
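
The early mmgrab() is what allows the function to keep dereferencing mm after
task_unlock(victim): the victim may exit and drop its own references, but the
elevated mm_count keeps struct mm_struct valid until the final mmdrop().
Compressed into a standalone sketch (grab_task_mm() is an illustrative helper,
not a kernel function):

/* Sketch only: keep a task's mm_struct stable beyond task_unlock(). */
static struct mm_struct *grab_task_mm(struct task_struct *tsk)
{
	struct mm_struct *mm;

	task_lock(tsk);
	mm = tsk->mm;		/* only stable while task_lock is held */
	if (mm)
		mmgrab(mm);	/* mm_struct stays valid after unlock */
	task_unlock(tsk);
	return mm;		/* caller pairs a non-NULL result with mmdrop() */
}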