Example #1
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
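	/* delta = time since the previous irq enter/exit event on this CPU. */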
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with a
	 * special task that does not consume any time but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
Example #2
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;

	u64_stats_update_begin(&irqtime->sync);
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with a
	 * special task that does not consume any time but still wants to run.
	 */
	if (hardirq_count())
		irqtime->hardirq_time += delta;
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		irqtime->softirq_time += delta;

	u64_stats_update_end(&irqtime->sync);
}
Example #3
/* Dump the current preemption/interrupt context and basic task info. */
static inline void procfile_context_print(void)
{
    printk_d("preemptible 0x%x\n", preemptible());
    printk_d("in_atomic_preempt_off 0x%x\n", in_atomic_preempt_off());
    printk_d("in_atomic 0x%x\n", in_atomic());
    printk_d("in_nmi 0x%lx\n", in_nmi());
    printk_d("in_serving_softirq 0x%lx\n", in_serving_softirq());
    printk_d("in_interrupt 0x%lx\n", in_interrupt());
    printk_d("in_softirq 0x%lx\n", in_softirq());
    printk_d("in_irq 0x%lx\n", in_irq());
    printk_d("preempt_count 0x%x\n", preempt_count());
    printk_d("irqs_disabled 0x%x\n", irqs_disabled());
    if (current) {
        printk_d("task->comm %s\n", current->comm);
        printk_d("task->flags 0x%x\n", current->flags);
        printk_d("task->state %lu\n", current->state);
        printk_d("task->usage %d\n", atomic_read(&(current->usage)));
        printk_d("task->prio %d\n", current->prio);
        printk_d("task->static_prio %d\n", current->static_prio);
        printk_d("task->normal_prio %d\n", current->normal_prio);
        printk_d("task->rt_priority %d\n", current->rt_priority);
        printk_d("task->policy %d\n", current->policy);
        printk_d("task->pid %d\n", current->pid);
        printk_d("task->tgid %d\n", current->tgid);
    }
    else
        printk_d("task pointer NULL\n");
}
Example #4
void vtime_account_system(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 stime = vtime_delta(tsk);

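	/*
	 * Charge the elapsed time to the most specific context first:
	 * guest time for a vCPU task outside any interrupt, then
	 * hardirq time, then softirq time, else plain system time.
	 */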
	if ((tsk->flags & PF_VCPU) && !irq_count())
		ti->gtime += stime;
	else if (hardirq_count())
		ti->hardirq_time += stime;
	else if (in_serving_softirq())
		ti->softirq_time += stime;
	else
		ti->stime += stime;
}
Example #5
static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
{
	/*
	 * In case of a threaded ISR, in_irq() does not return an
	 * appropriate value on RT kernels, so use in_serving_softirq()
	 * to distinguish between softirq and irq contexts.
	 */
	if (unlikely(in_irq() || !in_serving_softirq())) {
		/* Disable QMan IRQ source and invoke NAPI */
		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
		np->p = p;
		napi_schedule(&np->irqtask);
		return 1;
	}
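	/* Already running in softirq (NAPI poll) context; nothing to do. */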
	return 0;
}
Example #6
/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
	int index;

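	/*
	 * irq_count() - hardirq_offset == 0 means we are not in interrupt
	 * context once the caller's hardirq offset is discounted, so a
	 * vCPU task's time is accounted as guest time.
	 */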
	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	account_system_index_time(p, cputime, index);
}
Example #7
static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
{
	/*
	 * In case of a threaded ISR on an RT-enabled kernel, in_irq()
	 * does not return an appropriate value, so use in_serving_softirq()
	 * to distinguish between softirq and irq context.
	 */
	if (unlikely(in_irq() || !in_serving_softirq())) {
		/* Disable QMan IRQ source and invoke NAPI */
		int ret = qman_p_irqsource_remove(p, QM_PIRQ_DQRI);

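		/* Schedule NAPI only if the IRQ source was actually removed. */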
		if (likely(!ret)) {
			np->p = p;
			napi_schedule(&np->irqtask);
			return 1;
		}
	}
	return 0;
}
Example #8
void gr_handle_kernel_exploit(void)
{
#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
	const struct cred *cred;
	struct task_struct *tsk, *tsk2;
	struct user_struct *user;
	uid_t uid;

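	/* In interrupt context the crash cannot be tied to a user task; halt. */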
	if (in_irq() || in_serving_softirq() || in_nmi())
		panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");

	uid = current_uid();

	if (uid == 0)
		panic("grsec: halting the system due to suspicious kernel crash caused by root");
	else {
		/* kill all the processes of this user, hold a reference
		   to their creds struct, and prevent them from creating
		   another process until system reset
		*/
		printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
		/* we intentionally leak this ref */
		user = get_uid(current->cred->user);
		if (user) {
			user->banned = 1;
			user->ban_expires = ~0UL;
		}

		read_lock(&tasklist_lock);
		do_each_thread(tsk2, tsk) {
			cred = __task_cred(tsk);
			if (cred->uid == uid)
				gr_fake_force_sig(SIGKILL, tsk);
		} while_each_thread(tsk2, tsk);
		read_unlock(&tasklist_lock);
	}
#endif
}