Example No. 1
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
    int i, ret;
    unsigned long __user *p = (void __user *)(unsigned long)data;

    switch (request) {
    /* when I and D space are separate, these will need to be fixed. */
    case PTRACE_PEEKTEXT: /* read word at location addr. */
    case PTRACE_PEEKDATA: {
        unsigned long tmp;
        int copied;

        ret = -EIO;
        copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
            break;
        ret = put_user(tmp, p);
        break;
    }

    /* read the word at location addr in the USER area. */
    case PTRACE_PEEKUSR:
        ret = peek_user(child, addr, data);
        break;

    /* when I and D space are separate, this will have to be fixed. */
    case PTRACE_POKETEXT: /* write the word at location addr. */
    case PTRACE_POKEDATA:
        ret = -EIO;
        if (access_process_vm(child, addr, &data, sizeof(data),
                              1) != sizeof(data))
            break;
        ret = 0;
        break;

    case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
        ret = poke_user(child, addr, data);
        break;

    case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
    case PTRACE_CONT: { /* restart after signal. */
        ret = -EIO;
        if (!valid_signal(data))
            break;

        set_singlestepping(child, 0);
        if (request == PTRACE_SYSCALL) {
            set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        }
        else {
            clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        }
        child->exit_code = data;
        wake_up_process(child);
        ret = 0;
        break;
    }

    /*
     * make the child exit.  Best I can do is send it a sigkill.
     * perhaps it should be put in the status that it wants to
     * exit.
     */
    case PTRACE_KILL: {
        ret = 0;
        if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
            break;

        set_singlestepping(child, 0);
        child->exit_code = SIGKILL;
        wake_up_process(child);
        break;
    }

    case PTRACE_SINGLESTEP: {  /* set the trap flag. */
        ret = -EIO;
        if (!valid_signal(data))
            break;
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        set_singlestepping(child, 1);
        child->exit_code = data;
        /* give it a chance to run. */
        wake_up_process(child);
        ret = 0;
        break;
    }

    case PTRACE_DETACH:
        /* detach a process that was attached. */
        ret = ptrace_detach(child, data);
        break;

#ifdef PTRACE_GETREGS
    case PTRACE_GETREGS: { /* Get all gp regs from the child. */
        if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) {
            ret = -EIO;
            break;
        }
        for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
            __put_user(getreg(child, i), p);
            p++;
        }
        ret = 0;
        break;
    }
#endif
#ifdef PTRACE_SETREGS
    case PTRACE_SETREGS: { /* Set all gp regs in the child. */
        unsigned long tmp = 0;
        if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) {
            ret = -EIO;
            break;
        }
        for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
            __get_user(tmp, p);
            putreg(child, i, tmp);
            p++;
        }
        ret = 0;
        break;
    }
#endif
#ifdef PTRACE_GETFPREGS
    case PTRACE_GETFPREGS: /* Get the child FPU state. */
        ret = get_fpregs(data, child);
        break;
#endif
#ifdef PTRACE_SETFPREGS
    case PTRACE_SETFPREGS: /* Set the child FPU state. */
        ret = set_fpregs(data, child);
        break;
#endif
#ifdef PTRACE_GETFPXREGS
    case PTRACE_GETFPXREGS: /* Get the child FPU state. */
        ret = get_fpxregs(data, child);
        break;
#endif
#ifdef PTRACE_SETFPXREGS
    case PTRACE_SETFPXREGS: /* Set the child FPU state. */
        ret = set_fpxregs(data, child);
        break;
#endif
    case PTRACE_GET_THREAD_AREA:
        ret = ptrace_get_thread_area(child, addr,
                                     (struct user_desc __user *) data);
        break;

    case PTRACE_SET_THREAD_AREA:
        ret = ptrace_set_thread_area(child, addr,
                                     (struct user_desc __user *) data);
        break;

    case PTRACE_FAULTINFO: {
        /* Take the info from thread->arch->faultinfo,
         * but transfer max. sizeof(struct ptrace_faultinfo).
         * On i386, ptrace_faultinfo is smaller!
         */
        ret = copy_to_user(p, &child->thread.arch.faultinfo,
                           sizeof(struct ptrace_faultinfo));
        if(ret)
            break;
        break;
    }

#ifdef PTRACE_LDT
    case PTRACE_LDT: {
        struct ptrace_ldt ldt;

        if(copy_from_user(&ldt, p, sizeof(ldt))) {
            ret = -EIO;
            break;
        }

        /* This one is confusing, so just punt and return -EIO for
         * now
         */
        ret = -EIO;
        break;
    }
#endif
#ifdef CONFIG_PROC_MM
    case PTRACE_SWITCH_MM: {
        struct mm_struct *old = child->mm;
        struct mm_struct *new = proc_mm_get_mm(data);

        if(IS_ERR(new)) {
            ret = PTR_ERR(new);
            break;
        }

        atomic_inc(&new->mm_users);
        child->mm = new;
        child->active_mm = new;
        mmput(old);
        ret = 0;
        break;
    }
#endif
    default:
        ret = ptrace_request(child, request, addr, data);
        break;
    }

    return ret;
}
Example No. 2
static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
	/*
	 * Wake up the handler thread for this action. In case the
	 * thread crashed and was killed we just pretend that we
	 * handled the interrupt. The hardirq handler has disabled the
	 * device interrupt, so no irq storm is lurking. If the
	 * RUNTHREAD bit is already set, nothing to do.
	 */
	if (test_bit(IRQTF_DIED, &action->thread_flags) ||
	    test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;

	/*
	 * It's safe to OR the mask lockless here. We have only two
	 * places which write to threads_oneshot: This code and the
	 * irq thread.
	 *
	 * This code is the hard irq context and can never run on two
	 * cpus in parallel. If it ever does we have more serious
	 * problems than this bitmask.
	 *
	 * The irq threads of this irq which clear their "running" bit
	 * in threads_oneshot are serialized via desc->lock against
	 * each other and they are serialized against this code by
	 * IRQS_INPROGRESS.
	 *
	 * Hard irq handler:
	 *
	 *	spin_lock(desc->lock);
	 *	desc->state |= IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *	set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
	 *	desc->threads_oneshot |= mask;
	 *	spin_lock(desc->lock);
	 *	desc->state &= ~IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *
	 * irq thread:
	 *
	 * again:
	 *	spin_lock(desc->lock);
	 *	if (desc->state & IRQS_INPROGRESS) {
	 *		spin_unlock(desc->lock);
	 *		while(desc->state & IRQS_INPROGRESS)
	 *			cpu_relax();
	 *		goto again;
	 *	}
	 *	if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
	 *		desc->threads_oneshot &= ~mask;
	 *	spin_unlock(desc->lock);
	 *
	 * So either the thread waits for us to clear IRQS_INPROGRESS
	 * or we are waiting in the flow handler for desc->lock to be
	 * released before we reach this point. The thread also checks
	 * IRQTF_RUNTHREAD under desc->lock. If set it leaves
	 * threads_oneshot untouched and runs the thread another time.
	 */
	desc->threads_oneshot |= action->thread_mask;
	wake_up_process(action->thread);
}
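
For context: irq_wake_thread() runs when a hard-IRQ handler registered with request_threaded_irq() returns IRQ_WAKE_THREAD. Below is a minimal, hypothetical driver-side sketch of that pairing; my_hardirq and my_thread_fn are illustrative names, not taken from the example's source.

#include <linux/interrupt.h>

static irqreturn_t my_hardirq(int irq, void *dev_id)
{
	/* Ack/quiesce the device in hard-IRQ context, then defer the work. */
	return IRQ_WAKE_THREAD;	/* the core then calls irq_wake_thread() */
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	/* The heavy lifting runs here, in the woken handler thread. */
	return IRQ_HANDLED;
}

/* Registration, e.g. in probe():
 *	ret = request_threaded_irq(irq, my_hardirq, my_thread_fn, 0,
 *				   "my-dev", dev);
 */
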
Example No. 3
long sys_ptrace(long request, long pid, long addr, long data)
{
    struct task_struct *child;
    int i, ret;

    lock_kernel();
    ret = -EPERM;
    if (request == PTRACE_TRACEME) {
        /* are we already being traced? */
        if (current->ptrace & PT_PTRACED)
            goto out;

        ret = security_ptrace(current->parent, current);
        if (ret)
            goto out;

        /* set the ptrace bit in the process flags. */
        current->ptrace |= PT_PTRACED;
        ret = 0;
        goto out;
    }
    ret = -ESRCH;
    read_lock(&tasklist_lock);
    child = find_task_by_pid(pid);
    if (child)
        get_task_struct(child);
    read_unlock(&tasklist_lock);
    if (!child)
        goto out;

    ret = -EPERM;
    if (pid == 1)		/* you may not mess with init */
        goto out_tsk;

    if (request == PTRACE_ATTACH) {
        ret = ptrace_attach(child);
        goto out_tsk;
    }

#ifdef SUBARCH_PTRACE_SPECIAL
    SUBARCH_PTRACE_SPECIAL(child, request, addr, data);
#endif

    ret = ptrace_check_attach(child, request == PTRACE_KILL);
    if (ret < 0)
        goto out_tsk;

    switch (request) {
    /* when I and D space are separate, these will need to be fixed. */
    case PTRACE_PEEKTEXT: /* read word at location addr. */
    case PTRACE_PEEKDATA: {
        unsigned long tmp;
        int copied;

        ret = -EIO;
        copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
            break;
        ret = put_user(tmp, (unsigned long __user *) data);
        break;
    }

    /* read the word at location addr in the USER area. */
    case PTRACE_PEEKUSR:
        ret = peek_user(child, addr, data);
        break;

    /* when I and D space are separate, this will have to be fixed. */
    case PTRACE_POKETEXT: /* write the word at location addr. */
    case PTRACE_POKEDATA:
        ret = -EIO;
        if (access_process_vm(child, addr, &data, sizeof(data),
                              1) != sizeof(data))
            break;
        ret = 0;
        break;

    case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
        ret = poke_user(child, addr, data);
        break;

    case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
    case PTRACE_CONT: { /* restart after signal. */
        ret = -EIO;
        if (!valid_signal(data))
            break;

        set_singlestepping(child, 0);
        if (request == PTRACE_SYSCALL) {
            set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        }
        else {
            clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        }
        child->exit_code = data;
        wake_up_process(child);
        ret = 0;
        break;
    }

    /*
     * make the child exit.  Best I can do is send it a sigkill.
     * perhaps it should be put in the status that it wants to
     * exit.
     */
    case PTRACE_KILL: {
        ret = 0;
        if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
            break;

        set_singlestepping(child, 0);
        child->exit_code = SIGKILL;
        wake_up_process(child);
        break;
    }

    case PTRACE_SINGLESTEP: {  /* set the trap flag. */
        ret = -EIO;
        if (!valid_signal(data))
            break;
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        set_singlestepping(child, 1);
        child->exit_code = data;
        /* give it a chance to run. */
        wake_up_process(child);
        ret = 0;
        break;
    }

    case PTRACE_DETACH:
        /* detach a process that was attached. */
        ret = ptrace_detach(child, data);
        break;

#ifdef PTRACE_GETREGS
    case PTRACE_GETREGS: { /* Get all gp regs from the child. */
        if (!access_ok(VERIFY_WRITE, (unsigned long *)data,
                       MAX_REG_OFFSET)) {
            ret = -EIO;
            break;
        }
        for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
            __put_user(getreg(child, i),
                       (unsigned long __user *) data);
            data += sizeof(long);
        }
        ret = 0;
        break;
    }
#endif
#ifdef PTRACE_SETREGS
    case PTRACE_SETREGS: { /* Set all gp regs in the child. */
        unsigned long tmp = 0;
        if (!access_ok(VERIFY_READ, (unsigned *)data,
                       MAX_REG_OFFSET)) {
            ret = -EIO;
            break;
        }
        for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
            __get_user(tmp, (unsigned long __user *) data);
            putreg(child, i, tmp);
            data += sizeof(long);
        }
        ret = 0;
        break;
    }
#endif
#ifdef PTRACE_GETFPREGS
    case PTRACE_GETFPREGS: /* Get the child FPU state. */
        ret = get_fpregs(data, child);
        break;
#endif
#ifdef PTRACE_SETFPREGS
    case PTRACE_SETFPREGS: /* Set the child FPU state. */
        ret = set_fpregs(data, child);
        break;
#endif
#ifdef PTRACE_GETFPXREGS
    case PTRACE_GETFPXREGS: /* Get the child FPU state. */
        ret = get_fpxregs(data, child);
        break;
#endif
#ifdef PTRACE_SETFPXREGS
    case PTRACE_SETFPXREGS: /* Set the child FPU state. */
        ret = set_fpxregs(data, child);
        break;
#endif
    case PTRACE_FAULTINFO: {
        /* Take the info from thread->arch->faultinfo,
         * but transfer max. sizeof(struct ptrace_faultinfo).
         * On i386, ptrace_faultinfo is smaller!
         */
        ret = copy_to_user((unsigned long __user *) data,
                           &child->thread.arch.faultinfo,
                           sizeof(struct ptrace_faultinfo));
        if(ret)
            break;
        break;
    }

#ifdef PTRACE_LDT
    case PTRACE_LDT: {
        struct ptrace_ldt ldt;

        if(copy_from_user(&ldt, (unsigned long __user *) data,
                          sizeof(ldt))) {
            ret = -EIO;
            break;
        }

        /* This one is confusing, so just punt and return -EIO for
         * now
         */
        ret = -EIO;
        break;
    }
#endif
#ifdef CONFIG_PROC_MM
    case PTRACE_SWITCH_MM: {
        struct mm_struct *old = child->mm;
        struct mm_struct *new = proc_mm_get_mm(data);

        if(IS_ERR(new)) {
            ret = PTR_ERR(new);
            break;
        }

        atomic_inc(&new->mm_users);
        child->mm = new;
        child->active_mm = new;
        mmput(old);
        ret = 0;
        break;
    }
#endif
    default:
        ret = ptrace_request(child, request, addr, data);
        break;
    }
out_tsk:
    put_task_struct(child);
out:
    unlock_kernel();
    return ret;
}
Example No. 4
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret = -EPERM;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp;
		int copied;

		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = (unsigned long) addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = (unsigned long) addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

#ifdef CONFIG_PPC32
		CHECK_FULL_REGS(child->thread.regs);
#endif
		if (index < PT_FPR0) {
			tmp = get_reg(child, (int) index);
		} else {
			flush_fp_to_thread(child);
			tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
		}
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* If I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(data), 1)
				== sizeof(data))
			break;
		ret = -EIO;
		break;

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = (unsigned long) addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = (unsigned long) addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

#ifdef CONFIG_PPC32
		CHECK_FULL_REGS(child->thread.regs);
#endif
		if (index == PT_ORIG_R3)
			break;
		if (index < PT_FPR0) {
			ret = put_reg(child, index, data);
		} else {
			flush_fp_to_thread(child);
			((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
			ret = 0;
		}
		break;
	}

	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
	case PTRACE_CONT: { /* restart after signal. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_single_step(child);
		wake_up_process(child);
		ret = 0;
		break;
	}

/*
 * make the child exit.  Best I can do is send it a sigkill.
 * perhaps it should be put in the status that it wants to
 * exit.
 */
	case PTRACE_KILL: {
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
			break;
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_single_step(child);
		wake_up_process(child);
		break;
	}

	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		set_single_step(child);
		child->exit_code = data;
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;
	}

#ifdef CONFIG_PPC64
	case PTRACE_GET_DEBUGREG: {
		ret = -EINVAL;
		/* We only support one DABR and no IABRS at the moment */
		if (addr > 0)
			break;
		ret = put_user(child->thread.dabr,
			       (unsigned long __user *)data);
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;
#endif

	case PTRACE_DETACH:
		ret = ptrace_detach(child, data);
		break;

	case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
		int i;
		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
		unsigned long __user *tmp = (unsigned long __user *)addr;

		for (i = 0; i < 32; i++) {
			ret = put_user(*reg, tmp);
			if (ret)
				break;
			reg++;
			tmp++;
		}
		break;
	}

	case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
		int i;
		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
		unsigned long __user *tmp = (unsigned long __user *)addr;

		for (i = 0; i < 32; i++) {
			ret = get_user(*reg, tmp);
			if (ret)
				break;
			reg++;
			tmp++;
		}
		break;
	}

	case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
		int i;
		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
		unsigned long __user *tmp = (unsigned long __user *)addr;

		flush_fp_to_thread(child);

		for (i = 0; i < 32; i++) {
			ret = put_user(*reg, tmp);
			if (ret)
				break;
			reg++;
			tmp++;
		}
		break;
	}

	case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
		int i;
		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
		unsigned long __user *tmp = (unsigned long __user *)addr;

		flush_fp_to_thread(child);

		for (i = 0; i < 32; i++) {
			ret = get_user(*reg, tmp);
			if (ret)
				break;
			reg++;
			tmp++;
		}
		break;
	}

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		/* Get the child altivec register state. */
		flush_altivec_to_thread(child);
		ret = get_vrregs((unsigned long __user *)data, child);
		break;

	case PTRACE_SETVRREGS:
		/* Set the child altivec register state. */
		flush_altivec_to_thread(child);
		ret = set_vrregs(child, (unsigned long __user *)data);
		break;
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		if (child->thread.regs->msr & MSR_SPE)
			giveup_spe(child);
		ret = get_evrregs((unsigned long __user *)data, child);
		break;

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		/* this is to clear the MSR_SPE bit to force a reload
		 * of register state from memory */
		if (child->thread.regs->msr & MSR_SPE)
			giveup_spe(child);
		ret = set_evrregs(child, (unsigned long __user *)data);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
Example No. 5
static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev;
	unsigned int wtcon;
	int started = 0;
	int ret;
	int size;

	DBG("%s: probe=%p\n", __func__, pdev);

	dev = &pdev->dev;
	wdt_dev = &pdev->dev;

	/* get the memory region for the watchdog timer */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(dev, "no memory resource specified\n");
		return -ENOENT;
	}

	size = resource_size(res);
	wdt_mem = request_mem_region(res->start, size, pdev->name);
	if (wdt_mem == NULL) {
		dev_err(dev, "failed to get memory region\n");
		return -EBUSY;
	}

	wdt_base = ioremap(res->start, size);
	if (wdt_base == NULL) {
		dev_err(dev, "failed to ioremap() region\n");
		ret = -EINVAL;
		goto err_req;
	}

	DBG("probe: mapped wdt_base=%p\n", wdt_base);

	wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (wdt_irq == NULL) {
		dev_err(dev, "no irq resource specified\n");
		ret = -ENOENT;
		goto err_map;
	}

	ret = request_irq(wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev);
	if (ret != 0) {
		dev_err(dev, "failed to install irq (%d)\n", ret);
		goto err_map;
	}

	wdt_clock = clk_get(&pdev->dev, "watchdog");
	if (IS_ERR(wdt_clock)) {
		dev_err(dev, "failed to find watchdog clock source\n");
		ret = PTR_ERR(wdt_clock);
		goto err_irq;
	}

	clk_enable(wdt_clock);

	if (s3c2410wdt_cpufreq_register() < 0) {
		printk(KERN_ERR PFX "failed to register cpufreq\n");
		goto err_clk;
	}

	/* see if we can actually set the requested timer margin, and if
	 * not, try the default value */

	if (s3c2410wdt_set_heartbeat(tmr_margin)) {
		started = s3c2410wdt_set_heartbeat(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);

		if (started == 0)
			dev_info(dev,
			   "tmr_margin value out of range, default %d used\n",
			       CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
		else
			dev_info(dev, "default timer value is out of range, cannot start\n");
	}

	ret = misc_register(&s3c2410wdt_miscdev);
	if (ret) {
		dev_err(dev, "cannot register miscdev on minor=%d (%d)\n",
			WATCHDOG_MINOR, ret);
		goto err_cpufreq;
	}

	if (tmr_atboot && started == 0) {
		dev_info(dev, "starting watchdog timer\n");
		DEBG("--------------- starting watchdog timer: timeout = %d --------------\n", tmr_margin);
		s3c2410wdt_start();
	#ifdef CONFIG_START_S3C2410_WDT_AT_BOOT
		wdt_ktd = kthread_create(wdt_thread, NULL, "s3c2410_watchdog_thread");
		if (IS_ERR(wdt_ktd)) {
			wdt_ktd = NULL;
			DEBG("--- kthread create, error ---\n");
		} else {
			wake_up_process(wdt_ktd);
		}
	#endif
	} else if (!tmr_atboot) {
		/* if we're not enabling the watchdog, then ensure it is
		 * disabled if it has been left running from the bootloader
		 * or other source */
		DEBG("--- not starting watchdog timer at boot time ---\n");
		s3c2410wdt_stop();
	}

	/* print out a statement of readiness */

	wtcon = readl(wdt_base + S3C2410_WTCON);

	dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
		 (wtcon & S3C2410_WTCON_ENABLE) ?  "" : "in",
		 (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis",
		 (wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis");

	return 0;

 err_cpufreq:
	s3c2410wdt_cpufreq_deregister();

 err_clk:
	clk_disable(wdt_clock);
	clk_put(wdt_clock);

 err_irq:
	free_irq(wdt_irq->start, pdev);

 err_map:
	iounmap(wdt_base);

 err_req:
	release_resource(wdt_mem);
	kfree(wdt_mem);

	return ret;
}
Example No. 6
/*
 * Block layer request function.
 * Wakes up the IO thread.
 */
static void sd_request_func(struct request_queue *q)
{
	struct sd_host *host = q->queuedata;
	wake_up_process(host->io_thread);
}
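
sd_request_func() only wakes host->io_thread; the thread itself must loop, servicing queued requests and sleeping when idle. A minimal sketch of such an IO-thread loop follows, assuming a hypothetical sd_io_thread() and a has_pending_requests() helper, neither of which is in the example's source.

#include <linux/kthread.h>
#include <linux/sched.h>

static int sd_io_thread(void *data)
{
	struct sd_host *host = data;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!has_pending_requests(host)) {	/* hypothetical helper */
			schedule();	/* sleep until sd_request_func() wakes us */
			continue;
		}
		__set_current_state(TASK_RUNNING);
		/* dequeue requests from host->queue and service them here */
	}
	return 0;
}
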
Example No. 7
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
		/* Read word at location address. */
		case PTRACE_PEEKTEXT:
		case PTRACE_PEEKDATA: {
			unsigned long tmp;
			int copied;

			ret = -EIO;

			/* The signal trampoline page is outside the normal user-addressable
			 * space but still accessible. This is hack to make it possible to
			 * access the signal handler code in GDB.
			 */
			if ((addr & PAGE_MASK) == cris_signal_return_page) {
				/* The trampoline page is globally mapped, no page table to traverse.*/
				tmp = *(unsigned long*)addr;
			} else {
				copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);

				if (copied != sizeof(tmp))
					break;
			}

			ret = put_user(tmp,datap);
			break;
		}

		/* Read the word at location address in the USER area. */
		case PTRACE_PEEKUSR: {
			unsigned long tmp;

			ret = -EIO;
			if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
				break;

			tmp = get_reg(child, addr >> 2);
			ret = put_user(tmp, datap);
			break;
		}

		/* Write the word at location address. */
		case PTRACE_POKETEXT:
		case PTRACE_POKEDATA:
			ret = generic_ptrace_pokedata(child, addr, data);
			break;

		/* Write the word at location address in the USER area. */
		case PTRACE_POKEUSR:
			ret = -EIO;
			if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
				break;

			addr >>= 2;

			if (addr == PT_CCS) {
				/* don't allow the tracing process to change stuff like
				 * interrupt enable, kernel/user bit, dma enables etc.
				 */
				data &= CCS_MASK;
				data |= get_reg(child, PT_CCS) & ~CCS_MASK;
			}
			if (put_reg(child, addr, data))
				break;
			ret = 0;
			break;

		case PTRACE_SYSCALL:
		case PTRACE_CONT:
			ret = -EIO;

			if (!valid_signal(data))
				break;

			/* Continue means no single-step. */
			put_reg(child, PT_SPC, 0);

			if (!get_debugreg(child->pid, PT_BP_CTRL)) {
				unsigned long tmp;
				/* If no h/w bp configured, disable S bit. */
				tmp = get_reg(child, PT_CCS) & ~SBIT_USER;
				put_reg(child, PT_CCS, tmp);
			}

			if (request == PTRACE_SYSCALL) {
				set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			}
			else {
				clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			}

			child->exit_code = data;

			/* TODO: make sure any pending breakpoint is killed */
			wake_up_process(child);
			ret = 0;

			break;

		/* Make the child exit by sending it a sigkill. */
		case PTRACE_KILL:
			ret = 0;

			if (child->exit_state == EXIT_ZOMBIE)
				break;

			child->exit_code = SIGKILL;

			/* Deconfigure single-step and h/w bp. */
			ptrace_disable(child);

			/* TODO: make sure any pending breakpoint is killed */
			wake_up_process(child);
			break;

		/* Set the trap flag. */
		case PTRACE_SINGLESTEP:	{
			unsigned long tmp;
			ret = -EIO;

			/* Set up SPC if not set already (in which case we have
			   no other choice but to trust it). */
			if (!get_reg(child, PT_SPC)) {
				/* In case we're stopped in a delay slot. */
				tmp = get_reg(child, PT_ERP) & ~1;
				put_reg(child, PT_SPC, tmp);
			}
			tmp = get_reg(child, PT_CCS) | SBIT_USER;
			put_reg(child, PT_CCS, tmp);

			if (!valid_signal(data))
				break;

			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

			/* TODO: set some clever breakpoint mechanism... */

			child->exit_code = data;
			wake_up_process(child);
			ret = 0;
			break;

		}

		/* Get all GP registers from the child. */
		case PTRACE_GETREGS: {
			int i;
			unsigned long tmp;

			for (i = 0; i <= PT_MAX; i++) {
				tmp = get_reg(child, i);

				if (put_user(tmp, datap)) {
					ret = -EFAULT;
					goto out_tsk;
				}

				datap++;
			}

			ret = 0;
			break;
		}

		/* Set all GP registers in the child. */
		case PTRACE_SETREGS: {
			int i;
			unsigned long tmp;

			for (i = 0; i <= PT_MAX; i++) {
				if (get_user(tmp, datap)) {
					ret = -EFAULT;
					goto out_tsk;
				}

				if (i == PT_CCS) {
					tmp &= CCS_MASK;
					tmp |= get_reg(child, PT_CCS) & ~CCS_MASK;
				}

				put_reg(child, i, tmp);
				datap++;
			}

			ret = 0;
			break;
		}

		default:
			ret = ptrace_request(child, request, addr, data);
			break;
	}

out_tsk:
	return ret;
}
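Example No. 8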
/*
*******************************************************************************
*                     usb_manager_init
*
* Description:
*    void
*
* Parameters:
*    void
*
* Return value:
*    void
*
* note:
*    void
*
*******************************************************************************
*/
static int __init usb_manager_init(void)
{
	__s32 ret = 0;
	bsp_usbc_t usbc;
	u32 i = 0;

#ifdef CONFIG_USB_SW_SUN3I_USB0_OTG
	struct task_struct *th = NULL;
#endif

    DMSG_DBG_MANAGER("[sw usb]: usb_manager_init\n");

#if defined(CONFIG_USB_SW_SUN3I_USB0_DEVICE_ONLY)
	DMSG_INFO("CONFIG_USB_SW_SUN3I_USB0_DEVICE_ONLY\n");
#elif defined(CONFIG_USB_SW_SUN3I_USB0_HOST_ONLY)
	DMSG_INFO("CONFIG_USB_SW_SUN3I_USB0_HOST_ONLY\n");
#elif defined(CONFIG_USB_SW_SUN3I_USB0_OTG)
	DMSG_INFO("CONFIG_USB_SW_SUN3I_USB0_OTG\n");
#else
	DMSG_INFO("CONFIG_USB_SW_SUN3I_USB0_NULL\n");
	return 0;
#endif

	memset(&g_usb_cfg, 0, sizeof(struct usb_cfg));

	ret = get_usb_cfg(&g_usb_cfg);
	if(ret != 0){
		DMSG_PANIC("ERR: get_usb_cfg failed\n");
		return -1;
	}

    memset(&usbc, 0, sizeof(bsp_usbc_t));
   	for(i = 0; i < USBC_MAX_CTL_NUM; i++){
		usbc.usbc_info[i].num = i;

		switch(i){
            case 0:
                usbc.usbc_info[i].base = SW_VA_USB0_IO_BASE;
            break;

            case 1:
                usbc.usbc_info[i].base = SW_VA_USB1_IO_BASE;
            break;

			case 2:
                usbc.usbc_info[i].base = SW_VA_USB2_IO_BASE;
            break;

            default:
                DMSG_PANIC("ERR: unkown cnt(%d)\n", i);
                usbc.usbc_info[i].base = 0;
        }
	}
	usbc.sram_base = SW_VA_SRAM_IO_BASE;
	USBC_init(&usbc);

    usbc0_platform_device_init();
    usbc1_platform_device_init();
    usbc2_platform_device_init();

#ifdef CONFIG_USB_SW_SUN3I_USB0_OTG
	usb_hw_scan_init(&g_usb_cfg);

	thread_run_flag = 1;
	thread_stopped_flag = 0;
	th = kthread_create(usb_hardware_scan_thread, &g_usb_cfg, "usb-hardware-scan");
	if(IS_ERR(th)){
		DMSG_PANIC("ERR: kthread_create failed\n");
		return -1;
	}

	wake_up_process(th);
#endif

    DMSG_DBG_MANAGER("[sw usb]: usb_manager_init end\n");

    return 0;
}
Example No. 9
int __init test_mcspi_init(void)

{
	struct task_struct *p1, *p2;
	int x;

#define MODCTRL		0xd809a028
#define SYSTST		0xd809a024

	int status;
	int count = 10;
	int val;

	/* Required only if kernel does not configure
	 * SPI2 Mux settings
	 */

	if (slave_mode) {

		printk(KERN_INFO "configuring slave mode\n");

//		omap_writew(0x1700, spi2_clk);
//		omap_writew(0x1700, spi2_simo);
//		omap_writew(0x1700, spi2_somi);
//		omap_writew(0x1708, spi2_cs0);

	} else {

		printk(KERN_INFO "configuring master mode \n");

//		omap_writew(0x1700, spi2_clk);
//		omap_writew(0x1700, spi2_simo);
//		omap_writew(0x1700, spi2_somi);
//		omap_writew(0x1708, spi2_cs0);

	}
	create_proc_file_entries();

	if (test_mcspi_smp) {
		spitst_trans(0);
		p1 = kthread_create((void *)(omap2_mcspi_test1), NULL, "mcspitest/0");
		p2 = kthread_create((void *)(omap2_mcspi_test2), NULL, "mcspitest/1");

		kthread_bind(p1, 0);
		kthread_bind(p2, 1);

		x = wake_up_process(p1);
		x = wake_up_process(p2);
	}
       if (systst_mode == 1 && !test_mcspi_smp) {

		/* SPI clocks need to be always enabled for this to work */
		__raw_writel(0x8, MODCTRL);
		printk(KERN_INFO "MODCTRL %x\n", __raw_readl(MODCTRL));

		if (slave_mode == 0) /* Master */
			__raw_writel(0x100, SYSTST);
		else
			__raw_writel(0x600, SYSTST);

		printk(KERN_INFO "SYSTST Mode setting %x\n",
				__raw_readl(SYSTST));

		while (count--) {
			if (slave_mode == 0) {
				val = ((count & 0x1) << 6) | 0x100;
				val = ((count & 0x1) << 5) | val;
				val = ((count & 0x1) << 0) | val;

				__raw_writel(val, SYSTST);
			} else {
				val = ((count & 0x1) << 4) | 0x600;
				__raw_writel(val, SYSTST);
			}
			printk(KERN_INFO "SYSTST %x val %x\n",
					__raw_readl(SYSTST)&0xff1, val);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(100);
		}
	}

	if (systst_mode == 0 && !test_mcspi_smp) {
		status = spi_register_driver(&spitst_spi);
		if (status < 0)
			printk(KERN_ERR "spi_register_driver failed, status %d",
					status);
		else
			printk("spi_register_driver successful \n");
		return status;
	}
	return 0;
}
Example No. 10
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
Example No. 11
/*
 * handle the lock being released whilst there are processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active part' of the count (&0x0000ffff) reached zero but has been re-incremented
 *   - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having flags zeroised
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
   struct rwsem_waiter *waiter;
   struct list_head *next;
   signed long oldcount;
   int woken, loop;

   rwsemtrace(sem,"Entering __rwsem_do_wake");

   if (!wakewrite)
      goto dont_wake_writers;

   /* only wake someone up if we can transition the active part of the count from 0 -> 1 */
 try_again:
   oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
   if (oldcount & RWSEM_ACTIVE_MASK)
      goto undo;

   waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);

   /* try to grant a single write lock if there's a writer at the front of the queue
    * - note we leave the 'active part' of the count incremented by 1 and the waiting part
    *   incremented by 0x00010000
    */
   if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
      goto readers_only;

   list_del(&waiter->list);
   waiter->flags = 0;
   wake_up_process(waiter->task);
   goto out;

   /* don't want to wake any writers */
 dont_wake_writers:
   waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
   if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
      goto out;

   /* grant an infinite number of read locks to the readers at the front of the queue
    * - note we increment the 'active part' of the count by the number of readers (less one
    *   for the activity decrement we've already done) before waking any processes up
    */
 readers_only:
   woken = 0;
   do {
      woken++;

      if (waiter->list.next==&sem->wait_list)
         break;

      waiter = list_entry(waiter->list.next,struct rwsem_waiter,list);

   } while (waiter->flags & RWSEM_WAITING_FOR_READ);

   loop = woken;
   woken *= RWSEM_ACTIVE_BIAS-RWSEM_WAITING_BIAS;
   woken -= RWSEM_ACTIVE_BIAS;
   rwsem_atomic_add(woken,sem);

   next = sem->wait_list.next;
   for (; loop>0; loop--) {
      waiter = list_entry(next,struct rwsem_waiter,list);
      next = waiter->list.next;
      waiter->flags = 0;
      wake_up_process(waiter->task);
   }

   sem->wait_list.next = next;
   next->prev = &sem->wait_list;

 out:
   rwsemtrace(sem,"Leaving __rwsem_do_wake");
   return sem;

   /* undo the change to count, but check for a transition 1->0 */
 undo:
   if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem)!=0)
      goto out;
   goto try_again;
}
Example No. 12
static int __devinit rtsx_probe(struct pci_dev *pci,
				const struct pci_device_id *pci_id)
{
	struct Scsi_Host *host;
	struct rtsx_dev *dev;
	int err = 0;
	struct task_struct *th;

	RTSX_DEBUGP("Realtek PCI-E card reader detected\n");

	err = pci_enable_device(pci);
	if (err < 0) {
		printk(KERN_ERR "PCI enable device failed!\n");
		return err;
	}

	err = pci_request_regions(pci, CR_DRIVER_NAME);
	if (err < 0) {
		printk(KERN_ERR "PCI request regions for %s failed!\n",
		       CR_DRIVER_NAME);
		pci_disable_device(pci);
		return err;
	}

	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private rtsx_dev structure.
	 */
	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		printk(KERN_ERR "Unable to allocate the scsi host\n");
		pci_release_regions(pci);
		pci_disable_device(pci);
		return -ENOMEM;
	}

	dev = host_to_rtsx(host);
	memset(dev, 0, sizeof(struct rtsx_dev));

	dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
	if (dev->chip == NULL)
		goto errout;

	spin_lock_init(&dev->reg_lock);
	mutex_init(&(dev->dev_mutex));
	init_completion(&dev->cmnd_ready);
	init_completion(&dev->control_exit);
	init_completion(&dev->polling_exit);
	init_completion(&(dev->notify));
	init_completion(&dev->scanning_done);
	init_waitqueue_head(&dev->delay_wait);

	dev->pci = pci;
	dev->irq = -1;

	printk(KERN_INFO "Resource length: 0x%x\n",
	       (unsigned int)pci_resource_len(pci, 0));
	dev->addr = pci_resource_start(pci, 0);
	dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
	if (dev->remap_addr == NULL) {
		printk(KERN_ERR "ioremap error\n");
		err = -ENXIO;
		goto errout;
	}

	/*
	 * Using "unsigned long" cast here to eliminate gcc warning in
	 * 64-bit system
	 */
	printk(KERN_INFO "Original address: 0x%lx, remapped address: 0x%lx\n",
	       (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));

	dev->rtsx_resv_buf = dma_alloc_coherent(&(pci->dev), RTSX_RESV_BUF_LEN,
			&(dev->rtsx_resv_buf_addr), GFP_KERNEL);
	if (dev->rtsx_resv_buf == NULL) {
		printk(KERN_ERR "alloc dma buffer fail\n");
		err = -ENXIO;
		goto errout;
	}
	dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
	dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
	dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr +
				      HOST_CMDS_BUF_LEN;

	dev->chip->rtsx = dev;

	rtsx_init_options(dev->chip);

	printk(KERN_INFO "pci->irq = %d\n", pci->irq);

	if (dev->chip->msi_en) {
		if (pci_enable_msi(pci) < 0)
			dev->chip->msi_en = 0;
	}

	if (rtsx_acquire_irq(dev) < 0) {
		err = -EBUSY;
		goto errout;
	}

	pci_set_master(pci);
	synchronize_irq(dev->irq);

	rtsx_init_chip(dev->chip);

	/* set the supported max_lun and max_id for the scsi host
	 * NOTE: the minimal value of max_id is 1 */
	host->max_id = 1;
	host->max_lun = dev->chip->max_lun;

	/* Start up our control thread */
	th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start control thread\n");
		err = PTR_ERR(th);
		goto errout;
	}
	dev->ctl_thread = th;

	err = scsi_add_host(host, &pci->dev);
	if (err) {
		printk(KERN_ERR "Unable to add the scsi host\n");
		goto errout;
	}

	/* Start up the thread for delayed SCSI-device scanning */
	th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start the device-scanning thread\n");
		complete(&dev->scanning_done);
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	wake_up_process(th);

	/* Start up the polling thread */
	th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start the device-polling thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}
	dev->polling_thread = th;

	pci_set_drvdata(pci, dev);

	return 0;

	/* We come here if there are any problems */
errout:
	printk(KERN_ERR "rtsx_probe() failed\n");
	release_everything(dev);

	return err;
}
Example No. 13
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

#ifdef ALRAN_IRQ
	int cpu;
	unsigned long long t;
	cpu = smp_processor_id();
	t = cpu_clock(cpu);
	Airq_stats[Acount].usec = do_div(t, 1000000000) / 1000;
	Airq_stats[Acount].sec = (unsigned long) t;
	Airq_stats[Acount].irq = irq;
	Airq_stats[Acount].cpu = cpu;
	Airq_stats[Acount].handler = (action->handler);
	irq_desc[irq].Alast_usec = Airq_stats[Acount].usec;
	irq_desc[irq].Alast_sec = Airq_stats[Acount].sec;

#if 0
	if (Acount % 20 == 0)
		printk (KERN_ERR "Airq %d [%5lu.%06lu] irq %d, cpu %d, handler %08x\n", Acount,
			Airq_stats[Acount].sec, Airq_stats[Acount].usec, irq, cpu, (unsigned int) Airq_stats[Acount].handler);
#endif

	Acount++;
	if (Acount == ASIZE) Acount = 0;
#endif

	do {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
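Example No. 14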
void hifi_om_init(struct platform_device *dev, unsigned char* hifi_priv_base_virt, unsigned char* hifi_priv_base_phy)
{
	BUG_ON(NULL == dev);

#ifdef PLATFORM_HI3XXX
	BUG_ON(NULL == hifi_priv_base_virt);
	BUG_ON(NULL == hifi_priv_base_phy);
#endif

	memset(&g_om_data, 0, sizeof(struct hifi_om_s));

	g_om_data.debug_level = 2; /*info level*/

#ifdef PLATFORM_HI3XXX
	g_om_data.dsp_time_stamp = (unsigned int*)ioremap_wc(SYS_TIME_STAMP_REG, 0x4);
	if (NULL == g_om_data.dsp_time_stamp) {
		printk("time stamp reg ioremap_wc Error.\n");//can't use logx
		return;
	}
#endif
	g_om_data.dsp_loaded = hifi_check_img_loaded();

	IN_FUNCTION;

#ifdef PLATFORM_HI3XXX
	g_om_data.dsp_debug_level = 2; /*info level*/
	g_om_data.first_dump_log = true;
	g_om_data.is_watchdog_coming = false;


	g_om_data.dsp_panic_mark = (unsigned int*)(hifi_priv_base_virt + (DRV_DSP_PANIC_MARK - HIFI_BASE_ADDR));
	g_om_data.dsp_bin_addr = (char*)(hifi_priv_base_virt + (HIFI_RUN_LOCATION - HIFI_BASE_ADDR));
	g_om_data.dsp_exception_no = (unsigned int*)(hifi_priv_base_virt + (DRV_DSP_EXCEPTION_NO - HIFI_BASE_ADDR));
	g_om_data.dsp_log_cur_addr = (unsigned int*)(hifi_priv_base_virt + (DRV_DSP_UART_TO_MEM_CUR_ADDR - HIFI_BASE_ADDR));
	g_om_data.dsp_log_addr = NULL;

	g_om_data.dsp_debug_level_addr = (unsigned int*)(hifi_priv_base_virt + (DRV_DSP_UART_LOG_LEVEL - HIFI_BASE_ADDR));
	g_om_data.dsp_debug_kill_addr = (unsigned int*)(hifi_priv_base_virt + (DRV_DSP_KILLME_ADDR - HIFI_BASE_ADDR));

	*(g_om_data.dsp_exception_no) = ~0;
	g_om_data.pre_exception_no = ~0;

	s_dsp_dump_info[NORMAL_BIN].data_addr = g_om_data.dsp_bin_addr;
	s_dsp_dump_info[PANIC_BIN].data_addr  = g_om_data.dsp_bin_addr;

	hifi_set_dsp_debug_level(g_om_data.dsp_debug_level);

	sema_init(&g_om_data.dsp_dump_sema, 1);

	g_om_data.kdumpdsp_task = kthread_create(hifi_dump_dsp_thread, 0, "dspdumplog");
	if (IS_ERR(g_om_data.kdumpdsp_task)) {
		loge("creat hifi dump log thread fail.\n");
	} else {
		wake_up_process(g_om_data.kdumpdsp_task);
	}
#endif

	g_om_data.dsp_hifidebug_show_tag = false;

	hifi_create_procfs();

#ifdef PLATFORM_HI6XXX
	g_om_data.hifi_mntn_wq = create_singlethread_workqueue("hifi_misc_mntn_workqueue");
	INIT_WORK(&(g_om_data.hifi_mntn_work.work_stru), hifi_handle_mntn_work);
#endif

	OUT_FUNCTION;
	return;
}
Example No. 15
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
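
As the comment above notes, a thread returned by kthread_create_on_node() (or the kthread_create() wrapper) starts out stopped and only runs once wake_up_process() is called. A minimal usage sketch, with hypothetical my_thread_fn and my_data:

	struct task_struct *t;

	t = kthread_create(my_thread_fn, my_data, "my-worker/%d", 0);
	if (!IS_ERR(t)) {
		kthread_bind(t, 0);	/* optional: pin to CPU 0 before the first wakeup */
		wake_up_process(t);	/* my_thread_fn(my_data) starts executing now */
	}
	/* later, if my_thread_fn() honours kthread_should_stop(): kthread_stop(t); */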

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
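Example No. 16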
static void
wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
Example No. 17
static int
splat_condvar_test1(struct file *file, void *arg)
{
	int i, count = 0, rc = 0;
	condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
	condvar_priv_t cv;

	cv.cv_magic = SPLAT_CONDVAR_TEST_MAGIC;
	cv.cv_file = file;
	mutex_init(&cv.cv_mtx, SPLAT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
	cv_init(&cv.cv_condvar, NULL, CV_DEFAULT, NULL);

	/* Create some threads, the exact number isn't important just as
	 * long as we know how many we managed to create and should expect. */
	for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
		ct[i].ct_cvp = &cv;
		ct[i].ct_name = SPLAT_CONDVAR_TEST1_NAME;
		ct[i].ct_rc = 0;
		ct[i].ct_thread = spl_kthread_create(splat_condvar_test12_thread,
		    &ct[i], "%s/%d", SPLAT_CONDVAR_TEST_NAME, i);

		if (!IS_ERR(ct[i].ct_thread)) {
			wake_up_process(ct[i].ct_thread);
			count++;
		}
	}

	/* Wait until all threads are waiting on the condition variable */
	while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
		schedule();

	/* Wake a single thread at a time, wait until it exits */
	for (i = 1; i <= count; i++) {
		cv_signal(&cv.cv_condvar);

		while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
			schedule();

		/* Correct behavior 1 thread woken */
		if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
			continue;

                splat_vprint(file, SPLAT_CONDVAR_TEST1_NAME, "Attempted to "
			   "wake %d thread but work %d threads woke\n",
			   1, count - atomic_read(&cv.cv_condvar.cv_waiters));
		rc = -EINVAL;
		break;
	}

	if (!rc)
                splat_vprint(file, SPLAT_CONDVAR_TEST1_NAME, "Correctly woke "
			   "%d sleeping threads %d at a time\n", count, 1);

	/* Wait until that last mutex is dropped */
	while (mutex_owner(&cv.cv_mtx))
		schedule();

	/* Wake everything for the failure case */
	cv_broadcast(&cv.cv_condvar);
	cv_destroy(&cv.cv_condvar);

	/* wait for threads to exit */
	for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
		if (!IS_ERR(ct[i].ct_thread))
			kthread_stop(ct[i].ct_thread);
	}
	mutex_destroy(&cv.cv_mtx);

	return rc;
}
Example No. 18
File: handle.c Project: qkdang/m462
static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
	/*
	 * In case the thread crashed and was killed we just pretend that
	 * we handled the interrupt. The hardirq handler has disabled the
	 * device interrupt, so no irq storm is lurking.
	 */
	if (action->thread->flags & PF_EXITING)
		return;

	/*
	 * Wake up the handler thread for this action. If the
	 * RUNTHREAD bit is already set, nothing to do.
	 */
	if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;

	/*
	 * It's safe to OR the mask lockless here. We have only two
	 * places which write to threads_oneshot: This code and the
	 * irq thread.
	 *
	 * This code is the hard irq context and can never run on two
	 * cpus in parallel. If it ever does we have more serious
	 * problems than this bitmask.
	 *
	 * The irq threads of this irq which clear their "running" bit
	 * in threads_oneshot are serialized via desc->lock against
	 * each other and they are serialized against this code by
	 * IRQS_INPROGRESS.
	 *
	 * Hard irq handler:
	 *
	 *	spin_lock(desc->lock);
	 *	desc->state |= IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *	set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
	 *	desc->threads_oneshot |= mask;
	 *	spin_lock(desc->lock);
	 *	desc->state &= ~IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *
	 * irq thread:
	 *
	 * again:
	 *	spin_lock(desc->lock);
	 *	if (desc->state & IRQS_INPROGRESS) {
	 *		spin_unlock(desc->lock);
	 *		while(desc->state & IRQS_INPROGRESS)
	 *			cpu_relax();
	 *		goto again;
	 *	}
	 *	if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
	 *		desc->threads_oneshot &= ~mask;
	 *	spin_unlock(desc->lock);
	 *
	 * So either the thread waits for us to clear IRQS_INPROGRESS
	 * or we are waiting in the flow handler for desc->lock to be
	 * released before we reach this point. The thread also checks
	 * IRQTF_RUNTHREAD under desc->lock. If set it leaves
	 * threads_oneshot untouched and runs the thread another time.
	 */
	desc->threads_oneshot |= action->thread_mask;

	/*
	 * We increment the threads_active counter in case we wake up
	 * the irq thread. The irq thread decrements the counter when
	 * it returns from the handler or in the exit path and wakes
	 * up waiters which are stuck in synchronize_irq() when the
	 * active count becomes zero. synchronize_irq() is serialized
	 * against this code (hard irq handler) via IRQS_INPROGRESS
	 * like the finalize_oneshot() code. See comment above.
	 */
	atomic_inc(&desc->threads_active);

	wake_up_process(action->thread);
}
Example No. 19
int wlan_logging_sock_activate_svc(int log_fe_to_console, int num_buf)
{
	int i = 0;
	unsigned long irq_flag;

	pr_info("%s: Initalizing FEConsoleLog = %d NumBuff = %d\n",
			__func__, log_fe_to_console, num_buf);

	gapp_pid = INVALID_PID;

	gplog_msg = (struct log_msg *) vmalloc(
			num_buf * sizeof(struct log_msg));
	if (!gplog_msg) {
		pr_err("%s: Could not allocate memory\n", __func__);
		return -ENOMEM;
	}

	vos_mem_zero(gplog_msg, (num_buf * sizeof(struct log_msg)));

	gwlan_logging.log_fe_to_console = !!log_fe_to_console;
	gwlan_logging.num_buf = num_buf;

	spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag);
	INIT_LIST_HEAD(&gwlan_logging.free_list);
	INIT_LIST_HEAD(&gwlan_logging.filled_list);

	for (i = 0; i < num_buf; i++) {
		list_add(&gplog_msg[i].node, &gwlan_logging.free_list);
		gplog_msg[i].index = i;
	}
	gwlan_logging.pcur_node = (struct log_msg *)
		(gwlan_logging.free_list.next);
	list_del_init(gwlan_logging.free_list.next);
	spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag);

	init_waitqueue_head(&gwlan_logging.wait_queue);
	gwlan_logging.exit = false;
	clear_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag);
	clear_bit(HOST_LOG_PER_PKT_STATS, &gwlan_logging.eventFlag);
	clear_bit(HOST_LOG_FW_FLUSH_COMPLETE, &gwlan_logging.eventFlag);
	init_completion(&gwlan_logging.shutdown_comp);
	gwlan_logging.thread = kthread_create(wlan_logging_thread, NULL,
					"wlan_logging_thread");
	if (IS_ERR(gwlan_logging.thread)) {
		pr_err("%s: Could not Create LogMsg Thread Controller",
		       __func__);
		spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag);
		gwlan_logging.pcur_node = NULL;
		spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag);
		vfree(gplog_msg);
		gplog_msg = NULL;
		return -ENOMEM;
	}
	wake_up_process(gwlan_logging.thread);
	gwlan_logging.is_active = true;
	gwlan_logging.is_flush_complete = false;

	nl_srv_register(ANI_NL_MSG_LOG, wlan_logging_proc_sock_rx_msg);

	pr_info("%s: Activated wlan_logging svc\n", __func__);
	return 0;
}
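The kthread_create()/wake_up_process() pair used above leaves the new thread stopped until it is explicitly woken; kthread_run() is the usual shorthand that combines both steps. A hedged, self-contained sketch of the same sequence (thread function and name are hypothetical):

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static int my_log_thread(void *data)
{
	/* Sleep until woken; exit when kthread_stop() is called. */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

static struct task_struct *start_log_thread(void)
{
	struct task_struct *t;

	t = kthread_create(my_log_thread, NULL, "my_log_thread");
	if (IS_ERR(t))
		return t;

	wake_up_process(t);	/* same effect as using kthread_run() */
	return t;
}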
Exemplo n.º 20
0
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * The task cannot go away as we did a get_task_struct() before!
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * Drop out when the task has no waiters. Note that
	 * top_waiter can be NULL when we are in deboosting
	 * mode!
	 */
	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off, we check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */

		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		goto out_put_task;
	}
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
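The raw_spin_trylock()/cpu_relax()/retry step above (around waiter->lock) is how the chain walk holds at most two locks per step without violating lock ordering: if the second lock cannot be taken, the first one is dropped and the whole step is retried. A hedged sketch of that pattern with hypothetical structures:

#include <linux/spinlock.h>

struct owner  { raw_spinlock_t pi_lock; };	/* hypothetical */
struct mylock { raw_spinlock_t wait_lock; };	/* hypothetical */

static void adjust_one_step(struct owner *task, struct mylock *lock)
{
	unsigned long flags;

retry:
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	if (!raw_spin_trylock(&lock->wait_lock)) {
		/*
		 * Taking the second lock outright could invert the lock
		 * order: drop the first lock, let the other CPU make
		 * progress, and retry the whole step.
		 */
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* ... work while holding both locks ... */

	raw_spin_unlock(&lock->wait_lock);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}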
Exemplo n.º 21
0
/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
 *
 * This function attaches MTD device @mtd to UBI and assigns @ubi_num number
 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 * which case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI device creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd == ubi->mtd) {
			ubi_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not already emulated on top of a UBI
	 * volume. Generally this recursion works, but it causes problems: for
	 * example, the UBI module takes a reference to itself by attaching
	 * (and thus opening) the emulated MTD device, which makes it
	 * impossible to unload the module. And in general it makes no sense
	 * to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
			mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			ubi_err("only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			ubi_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs, clamped to the
	 * range between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;

	ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
	ubi->fm_disabled = !fm_autoconvert;

	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
	    <= UBI_FM_MAX_START) {
		ubi_err("More than %i PEBs are needed for fastmap, sorry.",
			UBI_FM_MAX_START);
		ubi->fm_disabled = 1;
	}

	ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
#else
	ubi->fm_disabled = 1;
#endif

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);

	err = io_init(ubi, max_beb_per1024);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_buf = kzalloc(ubi->fm_size, GFP_KERNEL);
	if (!ubi->fm_buf)
		goto out_free;
#endif
	err = ubi_attach(ubi, 0);
	if (err) {
		ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
		mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
	ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
	ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
	ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
	ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
	ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
		ubi->vtbl_slots);
	ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
		ubi->image_seq);
	ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);

	ubi_devices[ubi_num] = ubi;

	return ubi_num;

out_detach:
	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	kfree(ubi);
	return err;
}
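The fastmap pool sizing above takes 5% of the device's PEB count and clamps it to the [UBI_FM_MIN_POOL_SIZE, UBI_FM_MAX_POOL_SIZE] range. A small hedged sketch of that clamp, with the limits passed in as parameters since their actual values are not shown here:

/* Hedged sketch of the fm_pool.max_size computation used above. */
static int fm_pool_max_size(int total_pebs, int min_size, int max_size)
{
	int size = (total_pebs / 100) * 5;	/* 5% of all PEBs */

	if (size > max_size)
		size = max_size;
	if (size < min_size)
		size = min_size;
	return size;
}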
Exemplo n.º 22
0
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken processes' waiter blocks are discarded once their task is zeroed
 * - writers are only woken if downgrading is false
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	signed long oldcount, woken, loop;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	if (downgrading)
		goto dont_wake_writers;

	/* if we came through an up_xxxx() call, we only wake someone up
	 * if we can transition the active part of the count from 0 -> 1
	 */
 try_again:
	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
						- RWSEM_ACTIVE_BIAS;
	if (oldcount & RWSEM_ACTIVE_MASK)
		goto undo;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	/* try to grant a single write lock if there's a writer at the front
	 * of the queue - note we leave the 'active part' of the count
	 * incremented by 1 and the waiting part incremented by 0x00010000
	 */
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
	 * It is allocated on the waiter's stack and may become invalid at
	 * any time after that point (due to a wakeup from another source).
	 */
	list_del(&waiter->list);
	tsk = waiter->task;
	mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	goto out;

	/* don't want to wake any writers */
 dont_wake_writers:
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
		goto out;

	/* grant an infinite number of read locks to the readers at the front
	 * of the queue
	 * - note we increment the 'active part' of the count by the number of
	 *   readers before waking any processes up
	 */
 readers_only:
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	loop = woken;
	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
	if (!downgrading)
		/* we'd already done one increment earlier */
		woken -= RWSEM_ACTIVE_BIAS;

	rwsem_atomic_add(woken, sem);

	next = sem->wait_list.next;
	for (; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;

	/* undo the change to count, but check for a transition 1->0 */
 undo:
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
		goto out;
	goto try_again;
}
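The arithmetic in the readers_only path follows directly from the count layout described in the header comment: each woken reader gains one unit in the active part and gives up one unit in the waiting part, and the extra active unit already added at try_again is subtracted again unless we are downgrading. A hedged sketch of that adjustment, assuming the 32-bit bias values implied by that layout:

#define RWSEM_ACTIVE_BIAS	 0x00000001L	/* assumed 32-bit layout */
#define RWSEM_WAITING_BIAS	(-0x00010000L)	/* assumed 32-bit layout */

static long reader_wake_delta(long nr_woken, int downgrading)
{
	/* one active unit gained and one waiting unit released per reader */
	long delta = nr_woken * (RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS);

	if (!downgrading)
		delta -= RWSEM_ACTIVE_BIAS;	/* try_again already added one */
	return delta;
}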
Exemplo n.º 23
0
int wimax_hw_start(struct net_adapter *adapter)
{
	int retry = 0;
	struct wimax732_platform_data *pdata = adapter->pdata;

	pdata->g_cfg->wimax_status = WIMAX_STATE_READY;
	adapter->download_complete = false;

	adapter->rx_task = kthread_create(
					cmc732_receive_thread,	adapter, "%s",
					"cmc732_receive_thread");

	adapter->tx_task = kthread_create(
					cmc732_send_thread,	adapter, "%s",
					"cmc732_send_thread");

	init_waitqueue_head(&adapter->receive_event);
	init_waitqueue_head(&adapter->send_event);

	if (adapter->rx_task && adapter->tx_task) {
		wake_up_process(adapter->rx_task);
		wake_up_process(adapter->tx_task);
	} else {
		pr_debug("Unable to create send-receive threads");
		return STATUS_UNSUCCESSFUL;
	}

	if (load_wimax_image(pdata->g_cfg->wimax_mode, adapter))
		return STATUS_UNSUCCESSFUL;

	if (adapter->downloading) {

		while (!adapter->modem_resp) {
			send_cmd_packet(adapter, MSG_DRIVER_OK_REQ);
			wait_event_interruptible_timeout(
				adapter->modem_resp_event,
				adapter->modem_resp,
				HZ/10);
			if (!adapter->modem_resp)
				pr_err("no modem response");
			if (++retry > MODEM_RESP_RETRY)
				goto download_fail;
		}

		switch (wait_event_interruptible_timeout(
				adapter->download_event,
				(adapter->download_complete),
				HZ*FWDOWNLOAD_TIMEOUT)) {
		case 0:
			/* timeout */
			pr_debug("Error wimax_hw_start : \
					F/W Download timeout failed");
			goto download_fail;
		case -ERESTARTSYS:
			/* Interrupted by signal */
			pr_debug("Error wimax_hw_start :"
					"-ERESTARTSYS retry");
			goto download_fail;
		default:
			/* normal condition check */
			if (adapter->removed || adapter->halted) {
				pr_debug("Error wimax_hw_start :	\
						F/W Download surprise removed");
				goto download_fail;
			}
			pr_debug("wimax_hw_start :  F/W Download Complete");
			unload_wimax_image(adapter);

			if (cmc732_setup_wake_irq(adapter) < 0)
				pr_debug("wimax_hw_start :"
						"Error setting up wimax_int");

			break;
		}
		adapter->downloading = false;
	}
Exemplo n.º 24
0
irqreturn_t interrupt_handler(int irq_no, void *data)
{
        int device_status;
        uint32_t device_port = 0x0;
        /*
         * Handle a hardware interrupt: map the IRQ line to the base port of
         * the device that raised it, then drain/fill the hardware FIFOs.
         */

        irqreturn_t ret = IRQ_HANDLED;
        if(irq_no == COM1_IRQ) {
            device_port = COM1_BASEPORT;
        }
        else if(irq_no == COM2_IRQ) {
            device_port = COM2_BASEPORT;
        }
        if(device_port)
        {

            disable_irq(irq_no);
            device_status = uart16550_hw_get_device_status(device_port);

            struct task_struct **task_user_get_data = ftask_user_get_data(data);
            struct task_struct **task_user_push_data = ftask_user_push_data(data);
            struct kfifo * data_from_user = fdata_from_user (data);
            struct kfifo * data_from_device = fdata_from_device (data);

            while (uart16550_hw_device_can_send(device_status) && !kfifo_is_empty(data_from_user)) {
                    uint8_t byte_value;
                    /*
                     * Take the next byte from the kernel device outgoing
                     * buffer and wake the writer blocked on free space.
                     */
                    
                    kfifo_get(data_from_user,&byte_value);
                    if(*task_user_push_data)
                        wake_up_process(*task_user_push_data);
                    

                    uart16550_hw_write_to_device(device_port, byte_value);
                    device_status = uart16550_hw_get_device_status(device_port);
            }

            while (uart16550_hw_device_has_data(device_status) && !kfifo_is_full(data_from_device)) {
                    uint8_t byte_value;
                    byte_value = uart16550_hw_read_from_device(device_port);
                    /*
                     * Store the byte read from the device in the kernel
                     * incoming buffer and wake the reader waiting for data.
                     */
                     kfifo_put(data_from_device,byte_value);
                     if(*task_user_get_data)
                        wake_up_process(*task_user_get_data);
                     
                    device_status = uart16550_hw_get_device_status(device_port);
            }
            enable_irq(irq_no);
        }
        else
        {
            /* Not one of our devices: report the interrupt as unhandled. */
            ret = IRQ_NONE;
        }
        
        return ret;
}
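The wake_up_process() calls in the handler only make sense together with a process-context reader/writer that publishes its task_struct and sleeps while the fifo state blocks it. A hypothetical sketch of that consumer side (struct my_dev and its fields are invented for illustration and are not part of the driver above):

#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/types.h>

struct my_dev {
	struct task_struct *task_user_get_data;	/* hypothetical */
	struct kfifo data_from_device;		/* hypothetical, kfifo_alloc'd elsewhere */
};

static int my_blocking_read_byte(struct my_dev *dev, uint8_t *byte)
{
	dev->task_user_get_data = current;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kfifo_is_empty(&dev->data_from_device))
			break;
		if (signal_pending(current)) {
			__set_current_state(TASK_RUNNING);
			dev->task_user_get_data = NULL;
			return -ERESTARTSYS;
		}
		schedule();	/* woken by wake_up_process() from the ISR */
	}
	__set_current_state(TASK_RUNNING);
	dev->task_user_get_data = NULL;

	return kfifo_get(&dev->data_from_device, byte) ? 0 : -EAGAIN;
}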
Exemplo n.º 25
0
static int pn544_probe(struct i2c_client *client,
        const struct i2c_device_id *id)
{
    int ret;
    struct pn544_dev *pn544_dev = NULL;
    pn544_client = client;

    dprintk(PN544_DRV_NAME ": pn544_probe() start\n");

    pn544_dev = kzalloc(sizeof(*pn544_dev), GFP_KERNEL);
    if (pn544_dev == NULL) {
        dev_err(&client->dev,
                "failed to allocate memory for module data\n");
        ret = -ENOMEM;
        goto err_exit;
    }

    pn544_parse_dt(&client->dev, pn544_dev);

    pn544_dev->client   = client;
    dprintk(PN544_DRV_NAME ":IRQ : %d\nVEN : %d\nFIRM : %d\n",
            pn544_dev->irq_gpio, pn544_dev->ven_gpio, pn544_dev->firm_gpio);

    ret = gpio_request(pn544_dev->irq_gpio, "nfc_int");
    if (ret) {
        dprintk(PN544_DRV_NAME ":pn544_probe() : nfc_int request failed!\n");
        goto err_int;
    }
    ret = gpio_request(pn544_dev->ven_gpio, "nfc_ven");
    if (ret) {
        dprintk(PN544_DRV_NAME ":pn544_probe() : nfc_ven request failed!\n");
        goto err_ven;
    }
    ret = gpio_request(pn544_dev->firm_gpio, "nfc_firm");
    if (ret) {
        dprintk(PN544_DRV_NAME ":pn544_probe() : nfc_firm request failed!\n");
        goto err_firm;
    }

    pn544_gpio_enable(pn544_dev);

    ret = gpio_direction_output(pn544_dev->ven_gpio,1);
    ret = gpio_direction_output(pn544_dev->firm_gpio,0);
    ret = gpio_direction_input(pn544_dev->irq_gpio);

    /* init mutex and queues */
    init_waitqueue_head(&pn544_dev->read_wq);
    mutex_init(&pn544_dev->read_mutex);
#ifdef CONFIG_LGE_NFC_PRESTANDBY
    mutex_init(&mode_mutex);
#endif
    spin_lock_init(&pn544_dev->irq_enabled_lock);

    pn544_dev->pn544_device.minor = MISC_DYNAMIC_MINOR;
    pn544_dev->pn544_device.name = PN544_DRV_NAME;
    pn544_dev->pn544_device.fops = &pn544_dev_fops;

    ret = misc_register(&pn544_dev->pn544_device);
    if (ret) {
        pr_err("%s : misc_register failed\n", __FILE__);
        goto err_misc_register;
    }

    /* request irq.  the irq is set whenever the chip has data available
     * for reading.  it is cleared when all data has been read.
     */
    pr_info("%s : requesting IRQ %d\n", __func__, client->irq);
    pn544_dev->irq_enabled = true;
    ret = request_irq(pn544_gpio_to_irq(pn544_dev), pn544_dev_irq_handler,
              IRQF_TRIGGER_HIGH, client->name, pn544_dev);
    if (ret) {
        dev_err(&client->dev, "request_irq failed\n");
        goto err_request_irq_failed;
    }
#if !defined(LGE_NFC_HW_QCT_MSM8660)&&!defined(CONFIG_LGE_NFC_HW_QCT_MSM8255)
    enable_irq_wake(pn544_get_irq_pin(pn544_dev));
#endif
    pn544_disable_irq(pn544_dev);
    i2c_set_clientdata(client, pn544_dev);
    dprintk(PN544_DRV_NAME ": pn544_probe() end\n");
#ifdef CONFIG_LGE_NFC_PRESTANDBY
    if (pn544_validate_boot_mode()) {
        dprintk("%s : get in the standbyset\n", __func__);
#ifdef CONFIG_LGE_NFC_MULTICORE_FASTBOOT
        {
            struct task_struct *th;
            th = kthread_create(pn544_factory_standby_set_thread, NULL, "pn544_factory_standby");
            if (IS_ERR(th)) {
                ret = PTR_ERR(th);
                goto err_request_irq_failed;
            }
            wake_up_process(th);
        }
#else
        pn544_factory_standby_set();
#endif
    }
#endif
    return 0;

err_request_irq_failed:
    misc_deregister(&pn544_dev->pn544_device);

err_misc_register:
    mutex_destroy(&pn544_dev->read_mutex);
#ifdef CONFIG_LGE_NFC_PRESTANDBY
    mutex_destroy(&mode_mutex);
#endif
    gpio_free(pn544_dev->firm_gpio);

err_firm:
    gpio_free(pn544_dev->ven_gpio);

err_ven:
    gpio_free(pn544_dev->irq_gpio);

err_int:
    kfree(pn544_dev);

err_exit:
    pr_err(PN544_DRV_NAME ": pn544_dev is null\n");
    pr_err(PN544_DRV_NAME ": pn544_probe() end with error!\n");

    return ret;
}
Exemplo n.º 26
0
static void create_kthread(struct kthread_create_info *create)
{
	int pid;

	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
	} else {
		struct sched_param param = { .sched_priority = 0 };
		wait_for_completion(&create->started);
		read_lock(&tasklist_lock);
		create->result = find_task_by_pid_ns(pid, &init_pid_ns);
		read_unlock(&tasklist_lock);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler(create->result, SCHED_NORMAL, &param);
		set_user_nice(create->result, KTHREAD_NICE_LEVEL);
		set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
	}
	complete(&create->done);
}

/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.started);
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		va_list args;
		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	if (k->state != TASK_UNINTERRUPTIBLE) {
		WARN_ON(1);
		return;
	}
	/* Must have done schedule() in kthread() before we set_task_cpu */
	wait_task_inactive(k, 0);
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
	k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
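As the description notes, kthread_bind() must be applied to a thread that is still stopped, i.e. fresh out of kthread_create() and not yet woken. A hedged sketch of the intended call sequence (the helper name is hypothetical):

#include <linux/kthread.h>
#include <linux/err.h>

static struct task_struct *start_worker_on_cpu(int (*fn)(void *data),
					       void *data, unsigned int cpu)
{
	struct task_struct *t = kthread_create(fn, data, "worker/%u", cpu);

	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);	/* thread is still stopped here */
		wake_up_process(t);	/* now let it run, bound to @cpu */
	}
	return t;
}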

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  Your threadfn() must not call do_exit()
 * itself if you use this function!  This can also be called after
 * kthread_create() instead of calling wake_up_process(): the thread
 * will exit without calling threadfn().
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	int ret;

	mutex_lock(&kthread_stop_lock);

	/* It could exit after stop_info.k set, but before wake_up_process. */
	get_task_struct(k);

	/* Must init completion *before* thread sees kthread_stop_info.k */
	init_completion(&kthread_stop_info.done);
	smp_wmb();

	/* Now set kthread_should_stop() to true, and wake it up. */
	kthread_stop_info.k = k;
	wake_up_process(k);
	put_task_struct(k);

	/* Once it dies, reset stop ptr, gather result and we're done. */
	wait_for_completion(&kthread_stop_info.done);
	kthread_stop_info.k = NULL;
	ret = kthread_stop_info.err;
	mutex_unlock(&kthread_stop_lock);

	return ret;
}