Example #1
0
static int
tfw_bmb_worker(void *data)
{
	int tn = (int)(long)data;
	TfwBmbTask *task = &bmb_task[tn];
	int attempt, send, k, i;
	unsigned long time_max;

	fuzz_init(&task->ctx, true);

	for (k = 0; k < niters; k++) {
		task->conn_attempt = 0;
		atomic_set(&task->conn_compl, 0);
		atomic_set(&task->conn_error, 0);
		atomic_set(&task->conn_rd_tail, 0);
		init_waitqueue_head(&task->conn_wq);

		for (i = 0; i < nconns; i++)
			tfw_bmb_connect(tn, i);

		set_freezable();
		time_max = jiffies + 60 * HZ;
		attempt = task->conn_attempt;
		/* Wait until at least one connection completes, all attempts
		 * fail, or the one-minute deadline expires. */
		do {
#define COND()	(atomic_read(&task->conn_compl) > 0 || \
		 atomic_read(&task->conn_error) == attempt)
			wait_event_freezable_timeout(task->conn_wq, COND(), HZ);
#undef COND
			if (atomic_read(&task->conn_compl) > 0)
				break;
			if (atomic_read(&task->conn_error) == attempt)
				goto release_sockets;
			if (time_after(jiffies, time_max)) {
				TFW_ERR("worker exceeded maximum wait time\n");
				goto release_sockets;
			}
		} while (!kthread_should_stop());

		/* Send messages over the connections completed so far, until
		 * nconns * nmessages messages have gone out. */
		for (send = 0; send < nconns * nmessages; ) {
			int tail = atomic_read(&task->conn_rd_tail);
			for (i = 0; i < tail; i++) {
				tfw_bmb_msg_send(tn, task->conn_rd[i]);
				send++;
			}
		}

release_sockets:
		atomic_add(attempt, &bmb_conn_attempt);
		atomic_add(atomic_read(&task->conn_compl), &bmb_conn_compl);
		atomic_add(atomic_read(&task->conn_error), &bmb_conn_error);

		tfw_bmb_release_sockets(tn);
	}

	task->task_struct = NULL;
	atomic_dec(&bmb_threads);
	wake_up(&bmb_task_wq);

	return 0;
}
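The worker above shows the core wait pattern these examples share: a freezable kernel thread blocks on a wait queue with a short per-iteration timeout plus an overall deadline. Below is a minimal, self-contained sketch of that pattern; it is not part of the benchmark module, and demo_wq, demo_done and demo_thread_fn are invented names.

#include <linux/atomic.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static atomic_t demo_done = ATOMIC_INIT(0);

static int
demo_thread_fn(void *data)
{
	unsigned long deadline = jiffies + 60 * HZ;	/* overall deadline */

	set_freezable();
	do {
		/* Sleep for up to one second; wake early if demo_done is
		 * set or the thread is asked to stop. */
		wait_event_freezable_timeout(demo_wq,
					     atomic_read(&demo_done) ||
					     kthread_should_stop(), HZ);
		if (atomic_read(&demo_done))
			break;
		if (time_after(jiffies, deadline))
			break;
	} while (!kthread_should_stop());

	return 0;
}

The waking side sets the flag and wakes the queue, e.g. atomic_set(&demo_done, 1); wake_up(&demo_wq);.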
Example #2
0
/* afs_osi_TimedSleep
 * 
 * Arguments:
 * event - event to sleep on
 * ams --- max sleep time in milliseconds
 * aintok - 1 if should sleep interruptibly
 *
 * Returns 0 if timeout, EINTR if signalled, and EAGAIN if it might
 * have raced.
 */
int
afs_osi_TimedSleep(void *event, afs_int32 ams, int aintok)
{
    int code = 0;
    long ticks = (ams * HZ / 1000) + 1;
    struct afs_event *evp;
    int seq;

    evp = afs_getevent(event);
    if (!evp) {
	afs_addevent(event);
	evp = afs_getevent(event);
    }

    seq = evp->seq;

    AFS_GUNLOCK();
    code = wait_event_freezable_timeout(evp->cond, evp->seq != seq, ticks);
    AFS_GLOCK();
    if (code == -ERESTARTSYS)
	code = EINTR;
    else
	code = 0;

    relevent(evp);

    return code;
}
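A hypothetical caller of afs_osi_TimedSleep(), sketched only to illustrate the documented return codes; struct example_obj, some_flag and example_wait_for_flag are invented names, not part of OpenAFS. Since the function drops and retakes the global lock internally, the caller is assumed to hold AFS_GLOCK and to re-check its condition after every wakeup.

struct example_obj {
    int some_flag;		/* set by the waking side */
};

static int
example_wait_for_flag(struct example_obj *obj)
{
    int code;

    while (!obj->some_flag) {
	/* Sleep up to 250 ms, interruptibly, on the flag's address. */
	code = afs_osi_TimedSleep(&obj->some_flag, 250, 1);
	if (code == EINTR)
	    return EINTR;	/* a signal arrived */
	/* 0 (timeout) or EAGAIN (raced): loop and re-check the flag. */
    }
    return 0;
}

The waking side would set obj->some_flag and call afs_osi_Wakeup(&obj->some_flag) on the same event address.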
static int rts51x_scan_thread(void *__chip)
{
	struct rts51x_chip *chip = (struct rts51x_chip *)__chip;

	printk(KERN_DEBUG
	       "rts51x: device found at %d\n", chip->usb->pusb_dev->devnum);

	set_freezable();

	if (delay_use > 0) {
		printk(KERN_DEBUG "rts51x: waiting for device "
		       "to settle before scanning\n");
		wait_event_freezable_timeout(chip->usb->delay_wait,
					     test_bit(FLIDX_DONT_SCAN,
						      &chip->usb->dflags),
					     delay_use * HZ);
	}

	if (!test_bit(FLIDX_DONT_SCAN, &chip->usb->dflags)) {
		scsi_scan_host(rts51x_to_host(chip));
		printk(KERN_DEBUG "rts51x: device scan complete\n");
	}

	complete_and_exit(&chip->usb->scanning_done, 0);
}
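The scan thread above uses wait_event_freezable_timeout() as an abortable settle delay rather than a plain sleep: it returns early if FLIDX_DONT_SCAN is raised. A minimal sketch of that idiom, with invented names (demo_wait, demo_flags, DEMO_ABORT, demo_settle_delay), not taken from the driver:

#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static unsigned long demo_flags;
#define DEMO_ABORT	0

/* Sleep for "secs" seconds, but return as soon as someone does
 * set_bit(DEMO_ABORT, &demo_flags); wake_up(&demo_wait); */
static void demo_settle_delay(unsigned int secs)
{
	wait_event_freezable_timeout(demo_wait,
				     test_bit(DEMO_ABORT, &demo_flags),
				     secs * HZ);
}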
Example #4
0
static int
kclient_thread_finish(void *data)
{
	int nattempt = atomic_read(&kclient_connect_nattempt);
	uint64_t time_max = (uint64_t)get_seconds() + KCLIENT_WAIT_MAX;

	set_freezable();
	do {
		long timeout = KCLIENT_WAIT_INTVL;
		int nerror = atomic_read(&kclient_connect_nerror);
		int ncomplete = atomic_read(&kclient_connect_ncomplete);

		if (ncomplete + nerror == nattempt) {
			break;
		}
		wait_event_freezable_timeout(kclient_finish_wq,
					     kthread_should_stop(),
					     timeout);
		if ((uint64_t)get_seconds() > time_max) {
			SS_ERR("%s exceeded maximum wait time of %d seconds\n",
				"kclient_thread_finish", KCLIENT_WAIT_MAX);
			break;
		}
	} while (!kthread_should_stop());

	kclient_release_sockets();
	kclient_finish_task = NULL;
	return 0;
}
static int sleep_main_thread_timeout(struct dwc_otg2 *otg, int msecs)
{
	signed long timeout;
	int rc = msecs;

	if (otg->state == DWC_STATE_EXIT) {
		otg_dbg(otg, "Main thread exiting\n");
		rc = -EINTR;
		goto done;
	}

	if (signal_pending(current)) {
		otg_dbg(otg, "Main thread signal pending\n");
		rc = -EINTR;
		goto done;
	}
	if (otg->main_wakeup_needed) {
		otg_dbg(otg, "Main thread wakeup needed\n");
		rc = msecs;
		goto done;
	}

	timeout = msecs_to_jiffies(msecs);
	rc = wait_event_freezable_timeout(otg->main_wq,
					otg->main_wakeup_needed,
					timeout);

	if (otg->state == DWC_STATE_EXIT) {
		otg_dbg(otg, "Main thread exiting\n");
		rc = -EINTR;
		goto done;
	}

	if (rc > 0)
		rc = jiffies_to_msecs(rc);

done:
	otg->main_wakeup_needed = 0;
	return rc;
}
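sleep_main_thread_timeout() above encodes its outcome in the return value: a positive number is the remaining budget in milliseconds, 0 means the full timeout elapsed, and a negative value (-EINTR, or an error from the wait itself) means the state machine is exiting or a signal arrived. A hypothetical caller, demo_wait_for_wakeup(), might use it as follows; it is not part of the original driver.

static int demo_wait_for_wakeup(struct dwc_otg2 *otg)
{
	int left = sleep_main_thread_timeout(otg, 500);

	if (left < 0)
		return left;		/* exiting or signalled */
	if (left == 0)
		return -ETIMEDOUT;	/* nothing happened within 500 ms */

	/* Woken up with roughly "left" ms of the budget remaining. */
	return 0;
}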
static int ucb1400_ts_thread(void *_ucb)
{
    struct ucb1400_ts *ucb = _ucb;
    struct task_struct *tsk = current;
    int valid = 0;
    struct sched_param param = { .sched_priority = 1 };

    sched_setscheduler(tsk, SCHED_FIFO, &param);

    set_freezable();
    while (!kthread_should_stop()) {
        unsigned int x, y, p;
        long timeout;

        ucb->ts_restart = 0;

        if (ucb->irq_pending) {
            ucb->irq_pending = 0;
            ucb1400_handle_pending_irq(ucb);
        }

        ucb1400_adc_enable(ucb->ac97);
        x = ucb1400_ts_read_xpos(ucb);
        y = ucb1400_ts_read_ypos(ucb);
        p = ucb1400_ts_read_pressure(ucb);
        ucb1400_adc_disable(ucb->ac97);

        /* Switch back to interrupt mode. */
        ucb1400_ts_mode_int(ucb->ac97);

        msleep(10);

        if (ucb1400_ts_pen_down(ucb->ac97)) {
            ucb1400_ts_irq_enable(ucb->ac97);

            /*
             * If we spat out a valid sample set last time,
             * spit out a "pen off" sample here.
             */
            if (valid) {
                ucb1400_ts_event_release(ucb->ts_idev);
                valid = 0;
            }

            timeout = MAX_SCHEDULE_TIMEOUT;
        } else {
            valid = 1;
            ucb1400_ts_evt_add(ucb->ts_idev, p, x, y);
            timeout = msecs_to_jiffies(10);
        }

        wait_event_freezable_timeout(ucb->ts_wait,
                                     ucb->irq_pending || ucb->ts_restart ||
                                     kthread_should_stop(), timeout);
    }

    /* Send the "pen off" if we are stopping with the pen still active */
    if (valid)
        ucb1400_ts_event_release(ucb->ts_idev);

    ucb->ts_task = NULL;
    return 0;
}

/*
 * A restriction with interrupts exists when using the ucb1400, as
 * the codec read/write routines may sleep while waiting for codec
 * access completion and uses semaphores for access control to the
 * AC97 bus.  A complete codec read cycle could take  anywhere from
 * 60 to 100uSec so we *definitely* don't want to spin inside the
 * interrupt handler waiting for codec access.  So, we handle the
 * interrupt by scheduling a RT kernel thread to run in process
 * context instead of interrupt context.
 */
static irqreturn_t ucb1400_hard_irq(int irqnr, void *devid)
{
    struct ucb1400_ts *ucb = devid;

    if (irqnr == ucb->irq) {
        disable_irq(ucb->irq);
        ucb->irq_pending = 1;
        wake_up(&ucb->ts_wait);
        return IRQ_HANDLED;
    }
    return IRQ_NONE;
}

static int ucb1400_ts_open(struct input_dev *idev)
{
    struct ucb1400_ts *ucb = input_get_drvdata(idev);
    int ret = 0;

    BUG_ON(ucb->ts_task);

    ucb->ts_task = kthread_run(ucb1400_ts_thread, ucb, "UCB1400_ts");
    if (IS_ERR(ucb->ts_task)) {
        ret = PTR_ERR(ucb->ts_task);
        ucb->ts_task = NULL;
    }

    return ret;
}

static void ucb1400_ts_close(struct input_dev *idev)
{
    struct ucb1400_ts *ucb = input_get_drvdata(idev);

    if (ucb->ts_task)
        kthread_stop(ucb->ts_task);

    ucb1400_ts_irq_disable(ucb->ac97);
    ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
}

#ifndef NO_IRQ
#define NO_IRQ	0
#endif

/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies.
 */
static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
{
    unsigned long mask, timeout;

    mask = probe_irq_on();

    /* Enable the ADC interrupt. */
    ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, UCB_IE_ADC);
    ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_ADC);
    ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff);
    ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);

    /* Cause an ADC interrupt. */
    ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA);
    ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

    /* Wait for the conversion to complete. */
    timeout = jiffies + HZ/2;
    while (!(ucb1400_reg_read(ucb->ac97, UCB_ADC_DATA) &
             UCB_ADC_DAT_VALID)) {
        cpu_relax();
        if (time_after(jiffies, timeout)) {
            printk(KERN_ERR "ucb1400: timed out in IRQ probe\n");
            probe_irq_off(mask);
            return -ENODEV;
        }
    }
    ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, 0);

    /* Disable and clear interrupt. */
    ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, 0);
    ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
    ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff);
    ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);

    /* Read triggered interrupt. */
    ucb->irq = probe_irq_off(mask);
    if (ucb->irq < 0 || ucb->irq == NO_IRQ)
        return -ENODEV;

    return 0;
}
static int ucb1400_ts_thread(void *_ucb)
{
	struct ucb1400_ts *ucb = _ucb;
	struct task_struct *tsk = current;
	int valid = 0;
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler(tsk, SCHED_FIFO, &param);

	set_freezable();
	while (!kthread_should_stop()) {
		unsigned int x, y, p;
		long timeout;

		ucb->ts_restart = 0;

		if (ucb->irq_pending) {
			ucb->irq_pending = 0;
			ucb1400_handle_pending_irq(ucb);
		}

		ucb1400_adc_enable(ucb->ac97);
		x = ucb1400_ts_read_xpos(ucb);
		y = ucb1400_ts_read_ypos(ucb);
		p = ucb1400_ts_read_pressure(ucb);
		ucb1400_adc_disable(ucb->ac97);

		/* Switch back to interrupt mode. */
		ucb1400_ts_mode_int(ucb->ac97);

		msleep(10);

		if (ucb1400_ts_pen_up(ucb->ac97)) {
			ucb1400_ts_irq_enable(ucb->ac97);

			/*
			 * If we spat out a valid sample set last time,
			 * spit out a "pen off" sample here.
			 */
			if (valid) {
				ucb1400_ts_event_release(ucb->ts_idev);
				valid = 0;
			}

			timeout = MAX_SCHEDULE_TIMEOUT;
		} else {
			valid = 1;
			ucb1400_ts_evt_add(ucb->ts_idev, p, x, y);
			timeout = msecs_to_jiffies(10);
		}

		wait_event_freezable_timeout(ucb->ts_wait,
			ucb->irq_pending || ucb->ts_restart ||
			kthread_should_stop(), timeout);
	}

	/* Send the "pen off" if we are stopping with the pen still active */
	if (valid)
		ucb1400_ts_event_release(ucb->ts_idev);

	ucb->ts_task = NULL;
	return 0;
}

static irqreturn_t ucb1400_hard_irq(int irqnr, void *devid)
{
	struct ucb1400_ts *ucb = devid;

	if (irqnr == ucb->irq) {
		disable_irq_nosync(ucb->irq);
		ucb->irq_pending = 1;
		wake_up(&ucb->ts_wait);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static int ucb1400_ts_open(struct input_dev *idev)
{
	struct ucb1400_ts *ucb = input_get_drvdata(idev);
	int ret = 0;

	BUG_ON(ucb->ts_task);

	ucb->ts_task = kthread_run(ucb1400_ts_thread, ucb, "UCB1400_ts");
	if (IS_ERR(ucb->ts_task)) {
		ret = PTR_ERR(ucb->ts_task);
		ucb->ts_task = NULL;
	}

	return ret;
}

static void ucb1400_ts_close(struct input_dev *idev)
{
	struct ucb1400_ts *ucb = input_get_drvdata(idev);

	if (ucb->ts_task)
		kthread_stop(ucb->ts_task);

	ucb1400_ts_irq_disable(ucb->ac97);
	ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
}

#ifndef NO_IRQ
#define NO_IRQ	0
#endif

static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
{
	unsigned long mask, timeout;

	mask = probe_irq_on();

	/* Enable the ADC interrupt. */
	ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, UCB_IE_ADC);
	ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_ADC);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);

	/* Cause an ADC interrupt. */
	ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA);
	ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

	/* Wait for the conversion to complete. */
	timeout = jiffies + HZ/2;
	while (!(ucb1400_reg_read(ucb->ac97, UCB_ADC_DATA) &
						UCB_ADC_DAT_VALID)) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "ucb1400: timed out in IRQ probe\n");
			probe_irq_off(mask);
			return -ENODEV;
		}
	}
	ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, 0);

	/* Disable and clear interrupt. */
	ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, 0);
	ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff);
	ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);

	/* Read triggered interrupt. */
	ucb->irq = probe_irq_off(mask);
	if (ucb->irq < 0 || ucb->irq == NO_IRQ)
		return -ENODEV;

	return 0;
}