Example #1
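
This first example appears to be free_fdtable_rcu() from fs/file.c of an older (pre-3.10) kernel with the -rt patch applied. The deferred-free path touches the per-CPU fdtable_defer_list, so it pins the task to a CPU with get_cpu_light()/put_cpu_light() rather than get_cpu()/put_cpu(): on PREEMPT_RT this disables only migration, leaving the section preemptible so that the spin_lock() inside it (a sleeping lock on RT) remains legal.
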
void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
	if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &per_cpu(fdtable_defer_list, get_cpu_light());
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_light();
	}
}
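
Example #2

The 32-bit x86 stack dumper, apparently dump_trace() from arch/x86/kernel/dumpstack_32.c. Here the CPU number returned by get_cpu_light() is only used to locate the per-CPU hardirq and softirq stacks, so preventing migration, rather than preemption, is all the walk requires.
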
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu_light();
	int graph = 0;
	u32 *prev_esp;

	if (!task)
		task = current;

	if (!stack) {
		unsigned long dummy;

		stack = &dummy;
		if (task != current)
			stack = (unsigned long *)task->thread.sp;
	}

	if (!bp)
		bp = stack_frame(task, regs);

	for (;;) {
		struct thread_info *context;
		void *end_stack;

		end_stack = is_hardirq_stack(stack, cpu);
		if (!end_stack)
			end_stack = is_softirq_stack(stack, cpu);

		context = task_thread_info(task);
		bp = ops->walk_stack(context, stack, bp, ops, data,
				     end_stack, &graph);

		/* Stop if not on irq stack */
		if (!end_stack)
			break;

		/* The previous esp is saved on the bottom of the stack */
		prev_esp = (u32 *)(end_stack - THREAD_SIZE);
		stack = (unsigned long *)*prev_esp;
		if (!stack)
			break;

		if (ops->stack(data, "IRQ") < 0)
			break;
		touch_nmi_watchdog();
	}
	put_cpu_light();
}
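
Example #3

The 64-bit counterpart, apparently dump_trace() from arch/x86/kernel/dumpstack_64.c, where the CPU number selects the per-CPU irq_stack_ptr before the nested exception and IRQ stacks are walked; again only cross-CPU migration has to be excluded for the duration.
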
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu_light();
	struct thread_info *tinfo;
	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned long dummy;
	unsigned used = 0;
	int graph = 0;
	int done = 0;

	if (!task)
		task = current;

	if (!stack) {
		if (regs)
			stack = (unsigned long *)regs->sp;
		else if (task != current)
			stack = (unsigned long *)task->thread.sp;
		else
			stack = &dummy;
	}

	if (!bp)
		bp = stack_frame(task, regs);
	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, each stack is walked in turn and linked to the
	 * next one.
	 */
	tinfo = task_thread_info(task);
	while (!done) {
		unsigned long *stack_end;
		enum stack_type stype;
		char *id;

		stype = analyze_stack(cpu, task, stack, &stack_end,
				      irq_stack, &used, &id);

		/* Default finish unless specified to continue */
		done = 1;

		switch (stype) {

		/* Break out early if we are on the thread stack */
		case STACK_IS_NORMAL:
			break;

		case STACK_IS_EXCEPTION:

			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(tinfo, stack, bp, ops,
					     data, stack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) stack_end[-2];
			done = 0;
			break;

		case STACK_IS_IRQ:

			if (ops->stack(data, "IRQ") < 0)
				break;
			bp = ops->walk_stack(tinfo, stack, bp,
				     ops, data, stack_end, &graph);
			/*
			 * We link to the next stack (which would be
			 * the process stack normally) the last
			 * pointer (index -1 to end) in the IRQ stack:
			 */
			stack = (unsigned long *) (stack_end[-1]);
			irq_stack = NULL;
			ops->stack(data, "EOI");
			done = 0;
			break;

		case STACK_IS_UNKNOWN:
			ops->stack(data, "UNK");
			break;
		}
	}

	/*
	 * This handles the process stack:
	 */
	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu_light();
}
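
All three call sites follow the same conversion pattern. Below is a minimal sketch of that pattern, assuming the -rt definitions of get_cpu_light()/put_cpu_light() (roughly migrate_disable() + smp_processor_id() and migrate_enable() on PREEMPT_RT, plain get_cpu()/put_cpu() otherwise); the my_defer structure and my_defer_add() helper are hypothetical illustrations, not code from the examples above.

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

/*
 * Hypothetical per-CPU deferral list, modelled on fdtable_defer_list
 * from Example #1 (lock/list initialisation omitted for brevity).
 */
struct my_defer {
	spinlock_t lock;		/* a sleeping lock on PREEMPT_RT */
	struct list_head head;
};
static DEFINE_PER_CPU(struct my_defer, my_defer_list);

static void my_defer_add(struct list_head *item)
{
	/*
	 * get_cpu_light() pins the task to its current CPU (no
	 * migration) but, unlike get_cpu(), leaves it preemptible,
	 * so taking the RT-sleeping spinlock below stays valid.
	 */
	struct my_defer *d = &per_cpu(my_defer_list, get_cpu_light());

	spin_lock(&d->lock);
	list_add(item, &d->head);
	spin_unlock(&d->lock);
	put_cpu_light();	/* migrate_enable() on PREEMPT_RT */
}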