Example #1
0
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
 * 			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	 passed to task_blocks_on_rt_mutex
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter,
		    int detect_deadlock)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter->task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter->task) {
			ret = task_blocks_on_rt_mutex(lock, waiter, current,
						      detect_deadlock);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter->task)) {
				/*
				 * Reset the return value. We might
				 * have returned with -EDEADLK and the
				 * owner released the lock while we
				 * were walking the pi chain.
				 */
				ret = 0;
				continue;
			}
			if (unlikely(ret))
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		if (waiter->task)
			schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}
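The loop above assumes its caller holds lock->wait_lock, has not yet queued the waiter (waiter->task == NULL), and has set the task state. A minimal caller sketch under those assumptions; example_slowlock() is a made-up name, and the real rt_mutex_slowlock() of this era additionally arms the hrtimer and handles timeout cleanup:

static int example_slowlock(struct rt_mutex *lock, int state,
			    struct hrtimer_sleeper *timeout)
{
	struct rt_mutex_waiter waiter;
	int ret;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	raw_spin_lock(&lock->wait_lock);
	set_current_state(state);

	ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, 0);

	set_current_state(TASK_RUNNING);
	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter);	/* gave up while still queued */
	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_free_waiter(&waiter);
	return ret;
}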
Example #2
0
static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	struct sched_param param = { .sched_priority = 1 };
	unsigned long period, idle_period;
	int ret;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/*
	 * We want to allow for SDIO cards to work even on non-SDIO-aware
	 * hosts.  One thing a non-SDIO host cannot do is asynchronous
	 * notification of pending SDIO card interrupts, hence we poll for
	 * them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on the driver's behalf for a couple
		 * of reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
		if (ret)
			break;
		ret = process_sdio_pending_irqs(host->card);
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			if (ret > 0)
				period /= 2;
			else {
				period++;
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ)
			host->ops->enable_sdio_irq(host, 1);
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	if (host->caps & MMC_CAP_SDIO_IRQ)
		host->ops->enable_sdio_irq(host, 0);

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		atomic_set(&host->sdio_irq_thread_abort, 0);
		host->sdio_irq_thread =
			kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
				mmc_hostname(host));
		if (IS_ERR(host->sdio_irq_thread)) {
			int err = PTR_ERR(host->sdio_irq_thread);
			host->sdio_irqs--;
			return err;
		}
	}

	return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);
	BUG_ON(host->sdio_irqs < 1);

	if (!--host->sdio_irqs) {
		atomic_set(&host->sdio_irq_thread_abort, 1);
		kthread_stop(host->sdio_irq_thread);
	}

	return 0;
}

/**
 *	sdio_claim_irq - claim the IRQ for a SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called so the handler must not
 *	call sdio_claim_host() nor sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
	int ret;
	unsigned char reg;

	BUG_ON(!func);
	BUG_ON(!func->card);

	pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
		return -EBUSY;
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg |= 1 << func->num;

	reg |= 1; /* Master interrupt enable */

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	func->irq_handler = handler;
	ret = sdio_card_irq_get(func->card);
	if (ret)
		func->irq_handler = NULL;

	return ret;
}
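For symmetry, a hedged sketch of the release path: clear the handler, drop the thread reference via sdio_card_irq_put(), and clear the function's bit in SDIO_CCCR_IENx; the real sdio_release_irq() has this shape, dropping the master enable bit along with the last function bit:

int example_sdio_release_irq(struct sdio_func *func)
{
	int ret;
	unsigned char reg;

	BUG_ON(!func);
	BUG_ON(!func->card);

	if (func->irq_handler) {
		func->irq_handler = NULL;
		sdio_card_irq_put(func->card);
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg &= ~(1 << func->num);

	/* Disable master interrupt with the last function interrupt */
	if (!(reg & 0xFE))
		reg = 0;

	return mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
}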
Example #3
0
/*
 * Schedule replacement for rtsem_down(). Only called for threads with
 * PF_MUTEX_TESTER set.
 *
 * This allows us to have fine-grained control over the event flow.
 */
void schedule_rt_mutex_test(struct rt_mutex *mutex)
{
	int tid, op, dat;
	struct test_thread_data *td;

	/* We have to lookup the task */
	for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
		if (threads[tid] == current)
			break;
	}

	BUG_ON(tid == MAX_RT_TEST_THREADS);

	td = &thread_data[tid];

	op = td->opcode;
	dat = td->opdata;

	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			break;

		if (td->mutexes[dat] != 1)
			break;

		td->mutexes[dat] = 2;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		break;

	default:
		break;
	}

	schedule();


	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 3;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		break;

	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 1;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		return;

	default:
		return;
	}

	td->opcode = 0;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			int ret;

			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 1);
			set_current_state(TASK_INTERRUPTIBLE);
			if (td->opcode == RTTEST_LOCKCONT)
				break;
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
	}

	/* Restore previous command and data */
	td->opcode = op;
	td->opdata = dat;
}
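How the tester gets control: schedule_rt_mutex(), called from __rt_mutex_slowlock() in Example #1, dispatches on PF_MUTEX_TESTER. Quoted from rtmutex_common.h as remembered, so treat the exact spelling as approximate:

#ifdef CONFIG_RT_MUTEX_TESTER
extern void schedule_rt_mutex_test(struct rt_mutex *lock);

#define schedule_rt_mutex(_lock)				\
  do {								\
	if (!(current->flags & PF_MUTEX_TESTER))		\
		schedule();					\
	else							\
		schedule_rt_mutex_test(_lock);			\
  } while (0)
#else
# define schedule_rt_mutex(_lock)	schedule()
#endif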
Example #4
0
static int
zpios_thread_main(void *data)
{
	thread_data_t *thr = (thread_data_t *)data;
	run_args_t *run_args = thr->run_args;
	zpios_time_t t;
	dmu_obj_t obj;
	__u64 offset;
	__u32 chunk_size;
	zpios_region_t *region;
	char *buf;
	unsigned int random_int;
	int chunk_noise = run_args->chunk_noise;
	int chunk_noise_tmp = 0;
	int thread_delay = run_args->thread_delay;
	int thread_delay_tmp = 0;
	int i, rc = 0;

	if (chunk_noise) {
		get_random_bytes(&random_int, sizeof (unsigned int));
		chunk_noise_tmp = (random_int % (chunk_noise * 2))-chunk_noise;
	}

	/*
	 * It's OK to vmem_alloc() this memory because it will be copied
	 * into the slab, and pointers to the slab copy will be set up in
	 * the bio when the IO is submitted.  This of course is not ideal
	 * since we want a zero-copy IO path if possible.  It would be nice
	 * to have direct access to those slab entries.
	 */
	chunk_size = run_args->chunk_size + chunk_noise_tmp;
	buf = (char *)vmem_alloc(chunk_size, KM_SLEEP);
	ASSERT(buf);

	/* Trivial data verification pattern for now. */
	if (run_args->flags & DMU_VERIFY)
		memset(buf, 'z', chunk_size);

	/* Write phase */
	mutex_enter(&thr->lock);
	thr->stats.wr_time.start = zpios_timespec_now();
	mutex_exit(&thr->lock);

	while (zpios_get_work_item(run_args, &obj, &offset,
	    &chunk_size, &region, DMU_WRITE)) {
		if (thread_delay) {
			get_random_bytes(&random_int, sizeof (unsigned int));
			thread_delay_tmp = random_int % thread_delay;
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(thread_delay_tmp); /* In jiffies */
		}

		t.start = zpios_timespec_now();
		rc = zpios_dmu_write(run_args, obj.os, obj.obj,
		    offset, chunk_size, buf);
		t.stop  = zpios_timespec_now();
		t.delta = zpios_timespec_sub(t.stop, t.start);

		if (rc) {
			zpios_print(run_args->file, "IO error while doing "
			    "dmu_write(): %d\n", rc);
			break;
		}

		mutex_enter(&thr->lock);
		thr->stats.wr_data += chunk_size;
		thr->stats.wr_chunks++;
		thr->stats.wr_time.delta = zpios_timespec_add(
		    thr->stats.wr_time.delta, t.delta);
		mutex_exit(&thr->lock);

		mutex_enter(&region->lock);
		region->stats.wr_data += chunk_size;
		region->stats.wr_chunks++;
		region->stats.wr_time.delta = zpios_timespec_add(
		    region->stats.wr_time.delta, t.delta);

		/* First time region was accessed */
		if (region->init_offset == offset)
			region->stats.wr_time.start = t.start;

		mutex_exit(&region->lock);
	}

	mutex_enter(&run_args->lock_ctl);
	run_args->threads_done++;
	mutex_exit(&run_args->lock_ctl);

	mutex_enter(&thr->lock);
	thr->rc = rc;
	thr->stats.wr_time.stop = zpios_timespec_now();
	mutex_exit(&thr->lock);
	wake_up(&run_args->waitq);

	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();

	/* Check if we should exit */
	mutex_enter(&thr->lock);
	rc = thr->rc;
	mutex_exit(&thr->lock);
	if (rc)
		goto out;

	/* Read phase */
	mutex_enter(&thr->lock);
	thr->stats.rd_time.start = zpios_timespec_now();
	mutex_exit(&thr->lock);

	while (zpios_get_work_item(run_args, &obj, &offset,
	    &chunk_size, &region, DMU_READ)) {
		if (thread_delay) {
			get_random_bytes(&random_int, sizeof (unsigned int));
			thread_delay_tmp = random_int % thread_delay;
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(thread_delay_tmp); /* In jiffies */
		}

		if (run_args->flags & DMU_VERIFY)
			memset(buf, 0, chunk_size);

		t.start = zpios_timespec_now();
		rc = zpios_dmu_read(run_args, obj.os, obj.obj,
		    offset, chunk_size, buf);
		t.stop  = zpios_timespec_now();
		t.delta = zpios_timespec_sub(t.stop, t.start);

		if (rc) {
			zpios_print(run_args->file, "IO error while doing "
			    "dmu_read(): %d\n", rc);
			break;
		}

		/* Trivial data verification, expensive! */
		if (run_args->flags & DMU_VERIFY) {
			for (i = 0; i < chunk_size; i++) {
				if (buf[i] != 'z') {
					zpios_print(run_args->file,
					    "IO verify error: %d/%d/%d\n",
					    (int)obj.obj, (int)offset,
					    (int)chunk_size);
					break;
				}
			}
		}

		mutex_enter(&thr->lock);
		thr->stats.rd_data += chunk_size;
		thr->stats.rd_chunks++;
		thr->stats.rd_time.delta = zpios_timespec_add(
		    thr->stats.rd_time.delta, t.delta);
		mutex_exit(&thr->lock);

		mutex_enter(&region->lock);
		region->stats.rd_data += chunk_size;
		region->stats.rd_chunks++;
		region->stats.rd_time.delta = zpios_timespec_add(
		    region->stats.rd_time.delta, t.delta);

		/* First time region was accessed */
		if (region->init_offset == offset)
			region->stats.rd_time.start = t.start;

		mutex_exit(&region->lock);
	}

	mutex_enter(&run_args->lock_ctl);
	run_args->threads_done++;
	mutex_exit(&run_args->lock_ctl);

	mutex_enter(&thr->lock);
	thr->rc = rc;
	thr->stats.rd_time.stop = zpios_timespec_now();
	mutex_exit(&thr->lock);
	wake_up(&run_args->waitq);

out:
	vmem_free(buf, chunk_size);
	do_exit(0);

	return (rc); /* Unreachable, due to do_exit() */
}
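Each worker parks itself in schedule() between the write and read phases, so a controller must wake it explicitly. A hypothetical controller-side sketch; only waitq, threads_done and lock_ctl appear above, and the task list is passed in here to avoid guessing at struct fields:

static void example_zpios_phase_barrier(run_args_t *run_args,
					struct task_struct **tasks, int count)
{
	int i;

	/* Wait for every worker to bump threads_done (each one wakes waitq) */
	wait_event(run_args->waitq, run_args->threads_done == count);

	mutex_enter(&run_args->lock_ctl);
	run_args->threads_done = 0;		/* rearm for the read phase */
	mutex_exit(&run_args->lock_ctl);

	/* Each worker parked itself with schedule(); kick it onward */
	for (i = 0; i < count; i++)
		wake_up_process(tasks[i]);
}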
Example #5
0
static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_info *mtd = (struct mtd_info *)file->private_data;
	int ret = 0;
	u_long size;
	
	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		ret = verify_area(VERIFY_READ, (char *)arg, size);
		if (ret) return ret;
	}
	if (cmd & IOC_OUT) {
		ret = verify_area(VERIFY_WRITE, (char *)arg, size);
		if (ret) return ret;
	}
	
	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user((int *) arg, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(	&ur, 
					(struct region_info_user *)arg, 
					sizeof(struct region_info_user))) {
			return -EFAULT;
		}

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user((struct mtd_erase_region_info *) arg, 
				&(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		if (copy_to_user((struct mtd_info *)arg, mtd,
				 sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset (erase,0,sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, (u_long *)arg,
					   2 * sizeof(u_long))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtd_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.
			  
			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;
		
		if (copy_from_user(&buf, (struct mtd_oob_buf *)arg, sizeof(struct mtd_oob_buf)))
			return -EFAULT;
		
		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = verify_area(VERIFY_READ, (char *)buf.ptr, buf.length);

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;
		
		if (copy_from_user(databuf, buf.ptr, buf.length)) {
			kfree(databuf);
			return -EFAULT;
		}

		ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (copy_to_user((void *)arg + sizeof(u_int32_t), &retlen, sizeof(u_int32_t)))
			ret = -EFAULT;

		kfree(databuf);
		break;

	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (copy_from_user(&buf, (struct mtd_oob_buf *)arg, sizeof(struct mtd_oob_buf)))
			return -EFAULT;
		
		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = verify_area(VERIFY_WRITE, (char *)buf.ptr, buf.length);

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;
		
		ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (copy_to_user((void *)arg + sizeof(u_int32_t), &retlen, sizeof(u_int32_t)))
			ret = -EFAULT;
		else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
			ret = -EFAULT;
		
		kfree(databuf);
		break;
	}

	case MEMLOCK:
	{
		unsigned long adrs[2];

		if (copy_from_user(adrs ,(void *)arg, 2* sizeof(unsigned long)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, adrs[0], adrs[1]);
		break;
	}

	case MEMUNLOCK:
	{
		unsigned long adrs[2];

		if (copy_from_user(adrs, (void *)arg, 2* sizeof(unsigned long)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, adrs[0], adrs[1]);
		break;
	}

		
	default:
		DEBUG(MTD_DEBUG_LEVEL0, "Invalid ioctl %x (MEMGETINFO = %x)\n", cmd, MEMGETINFO);
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */
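The callback wired up in the MEMERASE case is what pairs with the schedule() above; in mtdchar.c of this vintage it simply wakes the on-stack wait queue stashed in erase->priv:

static void mtd_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}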
Example #6
0
int bt3c_open(bt3c_info_t *info)
{
	const struct firmware *firmware;
	struct hci_dev *hdev;
	int err;

	spin_lock_init(&(info->lock));

	skb_queue_head_init(&(info->txq));

	info->rx_state = RECV_WAIT_PACKET_TYPE;
	info->rx_count = 0;
	info->rx_skb = NULL;

	/* Initialize HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		BT_ERR("Can't allocate HCI device");
		return -ENOMEM;
	}

	info->hdev = hdev;

	hdev->type = HCI_PCCARD;
	hdev->driver_data = info;

	hdev->open     = bt3c_hci_open;
	hdev->close    = bt3c_hci_close;
	hdev->flush    = bt3c_hci_flush;
	hdev->send     = bt3c_hci_send_frame;
	hdev->destruct = bt3c_hci_destruct;
	hdev->ioctl    = bt3c_hci_ioctl;

	hdev->owner = THIS_MODULE;

	/* Load firmware */
	err = request_firmware(&firmware, "BT3CPCC.bin", &bt3c_device);
	if (err < 0) {
		BT_ERR("Firmware request failed");
		goto error;
	}

	err = bt3c_load_firmware(info, firmware->data, firmware->size);

	release_firmware(firmware);

	if (err < 0) {
		BT_ERR("Firmware loading failed");
		goto error;
	}

	/* Timeout before it is safe to send the first HCI packet */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ);

	/* Register HCI device */
	err = hci_register_dev(hdev);
	if (err < 0) {
		BT_ERR("Can't register HCI device");
		goto error;
	}

	return 0;

error:
	info->hdev = NULL;
	hci_free_dev(hdev);
	return err;
}
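Note that nothing checks signal_pending() after the schedule_timeout(HZ) above, so a pending signal shortens the settle delay to zero. A sketch of a signal-proof variant (current kernels would simply call msleep(1000)):

	/* Settle delay that a signal cannot cut short: */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ);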
Example #7
0
inline void cpm_uart_wait_until_send(struct uart_cpm_port *pinfo)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(pinfo->wait_closing);
}
Example #8
0
/*
 * Write endpoint. This routine attempts to break the passed in buffer
 * into usb DATA0/1 packet size chunks and send them to the host.
 * (The lower-level driver tries to do this too, but easier for us
 * to manage things here.)
 *
 * We are at the mercy of the host here, in that it must send an IN
 * token to us to pull this data back, so hopefully some higher level
 * protocol is expecting traffic to flow in that direction so the host
 * is actually polling us. To guard against hangs, a 5 second timeout
 * is used.
 *
 * This routine takes some care to only report bytes sent that have
 * actually made it across the wire. Thus we try to stay in lockstep
 * with the completion routine and only have one packet on the xmit
 * hardware at a time. Multiple simultaneous writers will get
 * "undefined" results.
 *
 */
static ssize_t  usbc_write( struct file *pFile, const char * pUserBuffer,
							 size_t stCount, loff_t *pPos )
{
	 ssize_t retval = 0;
	 ssize_t stSent = 0;

	 DECLARE_WAITQUEUE( wait, current );

	 PRINTK( KERN_DEBUG "%swrite() %d bytes\n", pszMe, stCount );

	 down( &xmit_sem );  // only one thread onto the hardware at a time

	 while( stCount != 0 && retval == 0 ) {
		  int nThisTime  = MIN( TX_PACKET_SIZE, stCount );
		  copy_from_user( tx_buf, pUserBuffer, nThisTime );
		  sending = nThisTime;
 		  retval = pxa_usb_send( tx_buf, nThisTime, tx_done_callback );
		  if ( retval < 0 ) {
			   char * p = what_the_f( retval );
			   printk( "%sCould not queue xmission. rc=%d - %s\n",
					   pszMe, retval, p );
			   sending = 0;
			   break;
		  }
		  /* now have something on the diving board */
		  add_wait_queue( &wq_write, &wait );
		  tx_timer.expires = jiffies + ( HZ * 5 );
		  add_timer( &tx_timer );
		  while( 1 ) {
			   set_current_state( TASK_INTERRUPTIBLE );
			   if ( sending == 0 ) {  /* it jumped into the pool */
					del_timer( &tx_timer );
					retval = last_tx_result;
					if ( retval == 0 ) {
						 stSent		 += last_tx_size;
						 pUserBuffer += last_tx_size;
						 stCount     -= last_tx_size;
					}
					else
						 printk( "%sxmission error rc=%d - %s\n",
								 pszMe, retval, what_the_f(retval) );
					break;
			   }
			   else if ( signal_pending( current ) ) {
					del_timer( &tx_timer );
					printk( "%ssignal\n", pszMe  );
					retval = -ERESTARTSYS;
					break;
			   }
			   schedule();
		  }
		  set_current_state( TASK_RUNNING );
		  remove_wait_queue( &wq_write, &wait );
	 }

	 up( &xmit_sem );

	 if ( 0 == retval )
		  retval = stSent;
	 return retval;
}
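The writer above parks until `sending` drops to zero, which is the completion callback's job. A hypothetical sketch of tx_done_callback() consistent with that loop; the (int flag, int size) signature is an assumption about the era's SA-1100/PXA USB core, and the tx timer's handler would do the same with a timeout result:

static void tx_done_callback(int flag, int size)
{
	last_tx_result = flag;			/* 0 on success */
	last_tx_size   = size;			/* bytes that made it out */
	sending        = 0;			/* "it jumped into the pool" */
	wake_up_interruptible(&wq_write);	/* unpark usbc_write() */
}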
Example #9
0
/*
 * The Device read callback Function
 */
static ssize_t mdc800_device_read (struct file *file, char *buf, size_t len, loff_t *pos)
{
	size_t left=len, sts=len; /* single transfer size */
	char* ptr=buf;
	long timeout;
	DECLARE_WAITQUEUE(wait, current);

	down (&mdc800->io_lock);
	if (mdc800->state == NOT_CONNECTED)
	{
		up (&mdc800->io_lock);
		return -EBUSY;
	}
	if (mdc800->state == WORKING)
	{
		warn ("Illegal State \"working\" reached during read ?!");
		up (&mdc800->io_lock);
		return -EBUSY;
	}
	if (!mdc800->open)
	{
		up (&mdc800->io_lock);
		return -EBUSY;
	}

	while (left)
	{
		if (signal_pending (current)) 
		{
			up (&mdc800->io_lock);
			return -EINTR;
		}

		sts=left > (mdc800->out_count-mdc800->out_ptr)?mdc800->out_count-mdc800->out_ptr:left;

		if (sts <= 0)
		{
			/* Too little data in the buffer */
			if (mdc800->state == DOWNLOAD)
			{
				mdc800->out_count=0;
				mdc800->out_ptr=0;

				/* Download -> Request new bytes */
				mdc800->download_urb->dev = mdc800->dev;
				if (usb_submit_urb (mdc800->download_urb, GFP_KERNEL))
				{
					err ("Can't submit download urb (status=%i)",mdc800->download_urb->status);
					up (&mdc800->io_lock);
					return len-left;
				}
				add_wait_queue(&mdc800->download_wait, &wait);
				timeout = TO_DOWNLOAD_GET_READY*HZ/1000;
				while (!mdc800->downloaded && timeout)
				{
					set_current_state(TASK_UNINTERRUPTIBLE);
					timeout = schedule_timeout (timeout);
				}
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&mdc800->download_wait, &wait);
				mdc800->downloaded = 0;
				if (mdc800->download_urb->status != 0)
				{
					err ("request download-bytes fails (status=%i)",mdc800->download_urb->status);
					up (&mdc800->io_lock);
					return len-left;
				}
			}
			else
			{
				/* No more bytes -> that's an error*/
				up (&mdc800->io_lock);
				return -EIO;
			}
		}
		else
		{
			/* Copy Bytes */
			if (copy_to_user(ptr, &mdc800->out [mdc800->out_ptr],
						sts)) {
				up(&mdc800->io_lock);
				return -EFAULT;
			}
			ptr+=sts;
			left-=sts;
			mdc800->out_ptr+=sts;
		}
	}

	up (&mdc800->io_lock);
	return len-left;
}
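The poll loop above spins on mdc800->downloaded, which the download URB's completion handler must set. A sketch of that handler's shape; the real mdc800_usb_download_notify() differs in detail, and every name here comes from the surrounding code:

static void example_download_notify(struct urb *urb)
{
	if (!urb->status) {
		memcpy(mdc800->out, urb->transfer_buffer,
		       urb->actual_length);
		mdc800->out_count = urb->actual_length;
		mdc800->out_ptr = 0;
	}
	mdc800->downloaded = 1;
	wake_up(&mdc800->download_wait);
}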
Example #10
0
static int sync_unplug_thread(void *data)
{
	struct hotplug_pcp *hp = data;

	wait_for_completion(&hp->unplug_wait);
	preempt_disable();
	hp->unplug = current;
	wait_for_pinned_cpus(hp);

	/*
	 * This thread will synchronize the cpu_down() with threads
	 * that have pinned the CPU. When the pinned CPU count reaches
	 * zero, we inform the cpu_down code to continue to the next step.
	 */
	set_current_state(TASK_UNINTERRUPTIBLE);
	preempt_enable();
	complete(&hp->synced);

	/*
	 * If all succeeds, the next step will need tasks to wait till
	 * the CPU is offline before continuing. To do this, the grab_lock
	 * is set and tasks going into pin_current_cpu() will block on the
	 * mutex. But we still need to wait for those that are already in
	 * pinned CPU sections. If cpu_down() failed, kthread_should_stop()
	 * will kick this thread out.
	 */
	while (!hp->grab_lock && !kthread_should_stop()) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	/* Make sure grab_lock is seen before we see a stale completion */
	smp_mb();

	/*
	 * Now just before cpu_down() enters stop machine, we need to make
	 * sure all tasks that are in pinned CPU sections are out, and new
	 * tasks will now grab the lock, keeping them from entering pinned
	 * CPU sections.
	 */
	if (!kthread_should_stop()) {
		preempt_disable();
		wait_for_pinned_cpus(hp);
		preempt_enable();
		complete(&hp->synced);
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);

	/*
	 * Force this thread off this CPU as it's going down and
	 * we don't want any more work on this CPU.
	 */
	current->flags &= ~PF_THREAD_BOUND;
	do_set_cpus_allowed(current, cpu_present_mask);
	migrate_me();
	return 0;
}
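wait_for_pinned_cpus() is what both handshakes above lean on; a plausible reconstruction from its call sites (entered with preemption disabled, sleeps until the pin refcount drains, woken by the unpin path). The -rt patch's actual implementation may differ:

static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (hp->refcount) {
		/* re-enables preemption across the sleep */
		schedule_preempt_disabled();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}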
Example #11
0
/*
 * Read endpoint. Note that you can issue a read to an
 * unconfigured endpoint. Eventually, the host may come along
 * and configure underneath this module and data will appear.
 */
static ssize_t usbc_read( struct file *pFile, char *pUserBuffer,
                        size_t stCount, loff_t *pPos )
{
	 ssize_t retval;
	 int flags;
	 DECLARE_WAITQUEUE( wait, current );

	 PRINTK( KERN_DEBUG "%sread()\n", pszMe );

	 local_irq_save( flags );
	 if ( last_rx_result == 0 ) {
		  local_irq_restore( flags );
	 } else {  /* an error happened and the receiver is paused */
		  local_irq_restore( flags );
		  last_rx_result = 0;
		  kick_start_rx();
	 }

	 add_wait_queue( &wq_read, &wait );
	 while( 1 ) {
		  ssize_t bytes_avail;
		  ssize_t bytes_to_end;

		  set_current_state( TASK_INTERRUPTIBLE );

		  /* snap ring buf state */
		  local_irq_save( flags );
		  bytes_avail  = CIRC_CNT( rx_ring.in, rx_ring.out, RBUF_SIZE );
		  bytes_to_end = CIRC_CNT_TO_END( rx_ring.in, rx_ring.out, RBUF_SIZE );
		  local_irq_restore( flags );

		  if ( bytes_avail != 0 ) {
			   ssize_t bytes_to_move = MIN( stCount, bytes_avail );
			   retval = 0;		// will be bytes transferred
			   if ( bytes_to_move != 0 ) {
					size_t n = MIN( bytes_to_end, bytes_to_move );
					if ( copy_to_user( pUserBuffer,
									   &rx_ring.buf[ rx_ring.out ],
									   n ) ) {
						 retval = -EFAULT;
						 break;
					}
					bytes_to_move -= n;
 					retval += n;
					// might go 1 char off end, so wrap
					rx_ring.out = ( rx_ring.out + n ) & (RBUF_SIZE-1);
					if ( copy_to_user( pUserBuffer + n,
									   &rx_ring.buf[ rx_ring.out ],
									   bytes_to_move )
						 ) {
						 retval = -EFAULT;
						 break;
					}
					rx_ring.out += bytes_to_move;		// cannot wrap
					retval += bytes_to_move;
					kick_start_rx();
			   }
			   break;
		  }
		  else if ( last_rx_result ) {
			   retval = last_rx_result;
			   break;
		  }
		  else if ( pFile->f_flags & O_NONBLOCK ) {  // no data, can't sleep
			   retval = -EAGAIN;
			   break;
		  }
		  else if ( signal_pending( current ) ) {   // no data, can sleep, but signal
			   retval = -ERESTARTSYS;
			   break;
		  }
		  schedule();					   			// no data, can sleep
	 }
	 set_current_state( TASK_RUNNING );
	 remove_wait_queue( &wq_read, &wait );

	 if ( retval < 0 )
		  printk(KERN_DEBUG  "%s read error %d - %s\n", pszMe, retval,
			 what_the_f( retval ) );
	 return retval;
}
Example #12
0
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->mqrq_prev->req)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
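The flags set above are consumed on the producer side. A sketch of mmc_request_fn() from the same era, simplified (the dying-queue path is omitted): flag a new request for a busy thread, or wake a parked one:

static void example_mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		/* thread is busy with the previous request: flag this one */
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		/* thread parked in schedule() above: kick it */
		wake_up_process(mq->thread);
}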
Example #13
0
int do_select(int n, fd_set_bits *fds, long *timeout)
{
    struct poll_wqueues table;
    poll_table *wait;
    int retval, i;
    long __timeout = *timeout;

    spin_lock(&current->files->file_lock);
    retval = max_select_fd(n, fds);
    spin_unlock(&current->files->file_lock);

    if (retval < 0)
        return retval;
    n = retval;

    poll_initwait(&table);
    wait = &table.pt;
    if (!__timeout)
        wait = NULL;
    retval = 0;
    for (;;) {
        unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

        set_current_state(TASK_INTERRUPTIBLE);

        inp = fds->in;
        outp = fds->out;
        exp = fds->ex;
        rinp = fds->res_in;
        routp = fds->res_out;
        rexp = fds->res_ex;

        for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
            unsigned long in, out, ex, all_bits, bit = 1, mask, j;
            unsigned long res_in = 0, res_out = 0, res_ex = 0;
            struct file_operations *f_op = NULL;
            struct file *file = NULL;

            in = *inp++;
            out = *outp++;
            ex = *exp++;
            all_bits = in | out | ex;
            if (all_bits == 0) {
                i += __NFDBITS;
                continue;
            }

            for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
                if (i >= n)
                    break;
                if (!(bit & all_bits))
                    continue;
                file = fget(i);
                if (file) {
                    f_op = file->f_op;
                    ltt_ev_file_system(LTT_EV_FILE_SYSTEM_SELECT,
                                       i /*  The fd*/,
                                       __timeout,
                                       NULL);
                    mask = DEFAULT_POLLMASK;
                    if (f_op && f_op->poll)
                        mask = (*f_op->poll)(file, retval ? NULL : wait);
                    fput(file);
                    if ((mask & POLLIN_SET) && (in & bit)) {
                        res_in |= bit;
                        retval++;
                    }
                    if ((mask & POLLOUT_SET) && (out & bit)) {
                        res_out |= bit;
                        retval++;
                    }
                    if ((mask & POLLEX_SET) && (ex & bit)) {
                        res_ex |= bit;
                        retval++;
                    }
                }
                cond_resched();
            }
            if (res_in)
                *rinp = res_in;
            if (res_out)
                *routp = res_out;
            if (res_ex)
                *rexp = res_ex;
        }
        wait = NULL;
        if (retval || !__timeout || signal_pending(current))
            break;
        if(table.error) {
            retval = table.error;
            break;
        }
        __timeout = schedule_timeout(__timeout);
    }
    __set_current_state(TASK_RUNNING);

    poll_freewait(&table);

    /*
     * Update the caller's timeout.
     */
    *timeout = __timeout;
    return retval;
}
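For reference, the POLLIN_SET / POLLOUT_SET / POLLEX_SET masks tested above are defined in fs/select.c as follows (quoted from memory, so treat as approximate):

#define POLLIN_SET	(POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET	(POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET	(POLLPRI)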
Example #14
0
int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
{
	struct ivtv *itv = s->itv;
	DECLARE_WAITQUEUE(wait, current);
	int cap_type;
	int stopmode;

	if (s->vdev == NULL)
		return -EINVAL;

	/* This function assumes that you are allowed to stop the capture
	   and that we are actually capturing */

	IVTV_DEBUG_INFO("Stop Capture\n");

	if (s->type == IVTV_DEC_STREAM_TYPE_VOUT)
		return 0;
	if (atomic_read(&itv->capturing) == 0)
		return 0;

	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_YUV:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_PCM:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_VBI:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_MPG:
	default:
		cap_type = 0;
		break;
	}

	/* Stop Capture Mode */
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
		stopmode = 0;
	} else {
		stopmode = 1;
	}

	/* end_capture */
	/* when: 0 =  end of GOP  1 = NOW!, type: 0 = mpeg, subtype: 3 = video+audio */
	ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype);

	if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
			/* only run these if we're shutting down the last cap */
			unsigned long duration;
			unsigned long then = jiffies;

			add_wait_queue(&itv->eos_waitq, &wait);

			set_current_state(TASK_INTERRUPTIBLE);

			/* wait 2s for EOS interrupt */
			while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
				time_before(jiffies,
					    then + msecs_to_jiffies(2000))) {
				schedule_timeout(msecs_to_jiffies(10));
			}

			/* To convert jiffies to ms, we must multiply by 1000
			 * and divide by HZ.  To avoid runtime division, we
			 * convert this to multiplication by 1000/HZ.
			 * Since integer division truncates, we get the best
			 * accuracy if we do a rounding calculation of the constant.
			 * Think of the case where HZ is 1024.
			 */
			duration = ((1000 + HZ / 2) / HZ) * (jiffies - then);

			if (!test_bit(IVTV_F_I_EOS, &itv->i_flags)) {
				IVTV_DEBUG_WARN("%s: EOS interrupt not received! stopping anyway.\n", s->name);
				IVTV_DEBUG_WARN("%s: waited %lu ms.\n", s->name, duration);
			} else {
				IVTV_DEBUG_INFO("%s: EOS took %lu ms to occur.\n", s->name, duration);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&itv->eos_waitq, &wait);
			set_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
		}

		/* Handle any pending interrupts */
		ivtv_msleep_timeout(100, 1);
	}

	atomic_dec(&itv->capturing);

	/* Clear capture and no-read bits */
	clear_bit(IVTV_F_S_STREAMING, &s->s_flags);

	if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
		ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);

	if (atomic_read(&itv->capturing) > 0) {
		return 0;
	}

	/* Set the following Interrupt mask bits for capture */
	ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
	del_timer(&itv->dma_timer);

	/* event notification (off) */
	if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
		/* type: 0 = refresh */
		/* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
		ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
		ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
	}

	/* Raw-passthrough is implied on start. Make sure it's stopped so
	   the encoder will re-initialize when next started */
	ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 7);

	wake_up(&s->waitq);

	return 0;
}
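ivtv_msleep_timeout(), used above to let pending interrupts drain, follows the same set-state/schedule_timeout idiom as the EOS wait. A sketch consistent with the driver; the real helper's return convention may differ slightly:

int example_ivtv_msleep_timeout(unsigned int msecs, int intr)
{
	int timeout = msecs_to_jiffies(msecs);

	do {
		set_current_state(intr ? TASK_INTERRUPTIBLE
				       : TASK_UNINTERRUPTIBLE);
		timeout = schedule_timeout(timeout);
		if (intr && signal_pending(current))
			return -EINTR;
	} while (timeout);

	return 0;
}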
Example #15
0
static void
splat_atomic_work(void *priv)
{
	atomic_priv_t *ap;
	atomic_op_t op;
	int i;

	ap = (atomic_priv_t *)priv;
	ASSERT(ap->ap_magic == SPLAT_ATOMIC_TEST_MAGIC);

	mutex_lock(&ap->ap_lock);
	op = ap->ap_op;
	wake_up(&ap->ap_waitq);
	mutex_unlock(&ap->ap_lock);

        splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
	             "Thread %d successfully started: %lu/%lu\n", op,
		     (long unsigned)ap->ap_atomic,
		     (long unsigned)ap->ap_atomic_exited);

	for (i = 0; i < SPLAT_ATOMIC_INIT_VALUE / 10; i++) {

		/* Periodically sleep to mix up the ordering */
		if ((i % (SPLAT_ATOMIC_INIT_VALUE / 100)) == 0) {
		        splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
			     "Thread %d sleeping: %lu/%lu\n", op,
			     (long unsigned)ap->ap_atomic,
			     (long unsigned)ap->ap_atomic_exited);
		        set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 100);
		}

		switch (op) {
			case SPLAT_ATOMIC_INC_64:
				atomic_inc_64(&ap->ap_atomic);
				break;
			case SPLAT_ATOMIC_DEC_64:
				atomic_dec_64(&ap->ap_atomic);
				break;
			case SPLAT_ATOMIC_ADD_64:
				atomic_add_64(&ap->ap_atomic, 3);
				break;
			case SPLAT_ATOMIC_SUB_64:
				atomic_sub_64(&ap->ap_atomic, 3);
				break;
			case SPLAT_ATOMIC_ADD_64_NV:
				atomic_add_64_nv(&ap->ap_atomic, 5);
				break;
			case SPLAT_ATOMIC_SUB_64_NV:
				atomic_sub_64_nv(&ap->ap_atomic, 5);
				break;
			default:
				PANIC("Undefined op %d\n", op);
		}
	}

	atomic_inc_64(&ap->ap_atomic_exited);

        splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
	             "Thread %d successfully exited: %lu/%lu\n", op,
		     (long unsigned)ap->ap_atomic,
		     (long unsigned)ap->ap_atomic_exited);

	wake_up(&ap->ap_waitq);
	thread_exit();
}
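The waitq is woken twice per worker (once at startup, once at exit), which implies a launcher shaped roughly like this sketch. The SPL thread_create() signature and SPLAT_ATOMIC_COUNT_64 are recalled from the SPL test suite and should be treated as assumptions:

static int example_atomic_launch(atomic_priv_t *ap)
{
	DEFINE_WAIT(wait);
	kthread_t *thr;
	int i;

	for (i = 0; i < SPLAT_ATOMIC_COUNT_64; i++) {
		mutex_lock(&ap->ap_lock);
		ap->ap_op = i;

		thr = (kthread_t *)thread_create(NULL, 0, splat_atomic_work,
						 ap, 0, &p0, TS_RUN,
						 minclsyspri);
		if (thr == NULL) {
			mutex_unlock(&ap->ap_lock);
			return -ESRCH;
		}

		/* Sleep until the worker has latched ap_op under ap_lock */
		prepare_to_wait(&ap->ap_waitq, &wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&ap->ap_lock);
		schedule();
		finish_wait(&ap->ap_waitq, &wait);
	}

	/* Wait for the exit-side wake_up() from every worker */
	wait_event(ap->ap_waitq,
		   ap->ap_atomic_exited == SPLAT_ATOMIC_COUNT_64);

	return 0;
}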
Example #16
0
/*
 * The Device write callback Function
 * If an 8-byte command is received, it will be sent to the camera.
 * After this, the driver initiates the request for the answer or
 * just waits until the camera becomes ready.
 */
static ssize_t mdc800_device_write (struct file *file, const char *buf, size_t len, loff_t *pos)
{
	size_t i=0;
	DECLARE_WAITQUEUE(wait, current);

	down (&mdc800->io_lock);
	if (mdc800->state != READY)
	{
		up (&mdc800->io_lock);
		return -EBUSY;
	}
	if (!mdc800->open )
	{
		up (&mdc800->io_lock);
		return -EBUSY;
	}

	while (i<len)
	{
		unsigned char c;
		if (signal_pending (current)) 
		{
			up (&mdc800->io_lock);
			return -EINTR;
		}
		
		if(get_user(c, buf+i))
		{
			up(&mdc800->io_lock);
			return -EFAULT;
		}

		/* check for command start */
		if (c == 0x55)
		{
			mdc800->in_count=0;
			mdc800->out_count=0;
			mdc800->out_ptr=0;
			mdc800->download_left=0;
		}

		/* save command byte */
		if (mdc800->in_count < 8)
		{
			mdc800->in[mdc800->in_count] = c;
			mdc800->in_count++;
		}
		else
		{
			up (&mdc800->io_lock);
			return -EIO;
		}

		/* Command Buffer full ? -> send it to camera */
		if (mdc800->in_count == 8)
		{
			int answersize;
			long timeout;

			if (mdc800_usb_waitForIRQ (0,TO_GET_READY))
			{
				err ("Camera didn't get ready.\n");
				up (&mdc800->io_lock);
				return -EIO;
			}

			answersize=mdc800_getAnswerSize (mdc800->in[1]);

			mdc800->state=WORKING;
			memcpy (mdc800->write_urb->transfer_buffer, mdc800->in,8);
			mdc800->write_urb->dev = mdc800->dev;
			if (usb_submit_urb (mdc800->write_urb, GFP_KERNEL))
			{
				err ("submitting write urb fails (status=%i)", mdc800->write_urb->status);
				up (&mdc800->io_lock);
				return -EIO;
			}
			add_wait_queue(&mdc800->write_wait, &wait);
			timeout = TO_WRITE_GET_READY*HZ/1000;
			while (!mdc800->written && timeout)
			{
				set_current_state(TASK_UNINTERRUPTIBLE);
				timeout = schedule_timeout (timeout);
			}
                        set_current_state(TASK_RUNNING);
			remove_wait_queue(&mdc800->write_wait, &wait);
			mdc800->written = 0;
			if (mdc800->state == WORKING)
			{
				usb_unlink_urb (mdc800->write_urb);
				up (&mdc800->io_lock);
				return -EIO;
			}

			switch ((unsigned char) mdc800->in[1])
			{
				case 0x05: /* Download Image */
				case 0x3e: /* Take shot in Fine Mode (WCam Mode) */
					if (mdc800->pic_len < 0)
					{
						err ("call 0x07 before 0x05,0x3e");
						mdc800->state=READY;
						up (&mdc800->io_lock);
						return -EIO;
					}
					mdc800->pic_len=-1;
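					/* fall through */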

				case 0x09: /* Download Thumbnail */
					mdc800->download_left=answersize+64;
					mdc800->state=DOWNLOAD;
					mdc800_usb_waitForIRQ (0,TO_DOWNLOAD_GET_BUSY);
					break;


				default:
					if (answersize)
					{

						if (mdc800_usb_waitForIRQ (1,TO_READ_FROM_IRQ))
						{
							err ("requesting answer from irq fails");
							up (&mdc800->io_lock);
							return -EIO;
						}

						/* Write dummy data (this is ugly but part of the USB
						   protocol if you use endpoint 1 as bulk and not as irq) */
						memcpy (mdc800->out, mdc800->camera_response,8);

						/* This is the interpreted answer */
						memcpy (&mdc800->out[8], mdc800->camera_response,8);

						mdc800->out_ptr=0;
						mdc800->out_count=16;

						/* Cache the Imagesize, if command was getImageSize */
						if (mdc800->in [1] == (char) 0x07)
						{
							mdc800->pic_len =
								65536 * (unsigned char) mdc800->camera_response[0] +
								256 * (unsigned char) mdc800->camera_response[1] +
								(unsigned char) mdc800->camera_response[2];

							dbg ("cached imagesize = %i",mdc800->pic_len);
						}

					}
					else
					{
						if (mdc800_usb_waitForIRQ (0,TO_DEFAULT_COMMAND))
						{
							err ("Command Timeout.");
							up (&mdc800->io_lock);
							return -EIO;
						}
					}
					mdc800->state=READY;
					break;
			}
		}
		i++;
	}
	up (&mdc800->io_lock);
	return i;
}
Example #17
0
int m5249audio_write(struct inode *inode, struct file *filp, const char *buf, int count)
{
	unsigned long	*dp, *buflp;
	unsigned short	*bufwp;
	unsigned char	*bufbp;
	unsigned int	slen, bufcnt, i, s, e;

#ifdef DEBUG
	printk("m5249_write(buf=%x,count=%d)\n", (int) buf, count);
#endif

	if (count <= 0)
		return(0);

	buflp = (unsigned long *) buf;
	bufwp = (unsigned short *) buf;
	bufbp = (unsigned char *) buf;

	bufcnt = count & ~0x3;
	if (m5249audio_stereo == 0)
		bufcnt <<= 1;
	if (m5249audio_bits == 8)
		bufcnt <<= 1;

tryagain:
	/*
	 *	Get a snapshot of buffer, so we can figure out how
	 *	much data we can fit in...
	 */
	s = m5249audio_dmastart;
	e = m5249audio_append;
	dp = (unsigned long *) &m5249audio_buf[e];

	slen = ((s > e) ? (s - e) : (BUFSIZE - (e - s))) - 4;
	if (slen > bufcnt)
		slen = bufcnt;
	if ((BUFSIZE - e) < slen)
		slen = BUFSIZE - e;

	if (slen == 0) {
		if (signal_pending(current))
			return(-ERESTARTSYS);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		goto tryagain;
	}

	if (m5249audio_stereo) {
		if (m5249audio_bits == 16) {
			for (i = 0; (i < slen); i += 4)
				*dp++ = *buflp++;
		} else {
			for (i = 0; (i < slen); i += 4) {
				*dp    = (((unsigned long) *bufbp++) << 24);
				*dp++ |= (((unsigned long) *bufbp++) << 8);
			}
		}
	} else {
		if (m5249audio_bits == 16) {
			for (i = 0; (i < slen); i += 4) {
				*dp++ = (((unsigned long)*bufwp)<<16) | *bufwp;
				bufwp++;
			}
		} else {
			for (i = 0; (i < slen); i += 4) {
				*dp++ = (((unsigned long) *bufbp) << 24) | 
					(((unsigned long) *bufbp) << 8);
				bufbp++;
			}
		}
	}

	e += slen;
	if (e >= BUFSIZE)
		e = 0;
	m5249audio_append = e;

#ifdef CONFIG_AUDIOIRQ
	/* If not outputting audio, then start now */
	if (m5249audio_txbusy == 0) {
		m5249audio_txbusy++;
		*reg32p(MCFA_INTENABLE) |= 0x00000008;
		/* Dummy write to start output interrupts */
		*reg32p(MCFA_PDOR3) = 0;
	}
#endif
#ifdef CONFIG_AUDIODMA
	m5249audio_dmabuf();
#endif

	bufcnt -= slen;
	if (bufcnt > 0)
		goto tryagain;

	return(count);
}
Example #18
0
static int
vc_watchdog_thread_func(void *v)
{
	while (1) {
		long rc;
		unsigned long msg = WDOG_PING_MSG;
		VCHIQ_ELEMENT_T elem = {
			.data = (void *)&msg,
			.size = sizeof(msg)
		};
		int time_remaining =
			msecs_to_jiffies(WATCHDOG_PING_RATE_MS);

		LOG_DBG("%s: waiting on disable blocker...", __func__);
		if (wait_for_completion_interruptible(
				&vc_wdog_state->wdog_disable_blocker) != 0) {
			flush_signals(current);
			continue;
		}

		LOG_DBG("%s: Waiting for VC to be awake...", __func__);
		/* Ensure we only ping videocore when it's awake. Call use
		 * service in a mode which will not initiate a wakeup */
		vchiq_use_service_no_resume(vc_wdog_state->service_handle);

		if (!atomic_read(&vc_wdog_state->wdog_enabled)) {
			vchiq_release_service(vc_wdog_state->service_handle);
			LOG_DBG("%s: VC watchdog disabled", __func__);
			continue;
		}

		if (mutex_lock_interruptible(&vc_wdog_state->wdog_ping_mutex)
				!= 0) {
			vchiq_release_service(vc_wdog_state->service_handle);
			LOG_DBG("%s: Interrupted waiting for ping", __func__);
			continue;
		}

		LOG_DBG("%s: Pinging videocore", __func__);
		/* ping vc... */
		vchiq_queue_message(vc_wdog_state->service_handle, &elem, 1);

		LOG_DBG("%s: Waiting for ping response", __func__);
		/* ...and wait for the response with a timeout */
		rc = wait_for_completion_interruptible_timeout(
			&vc_wdog_state->wdog_ping_response,
			msecs_to_jiffies(VC_PING_RESPONSE_TIMEOUT_MS));

		if (rc == 0) {
			/* Timed out... BANG! */
			vc_wdog_state->failed_pings++;
			LOG_ERR("%s VideoCore Watchdog timed out!! (%d)",
				__func__, vc_wdog_state->failed_pings);
			if (vc_wdog_state->failed_pings >=
						WATCHDOG_NO_RESPONSE_COUNT)
				BUG();
		} else if (rc < 0)
			LOG_ERR("%s: Interrupted waiting for ping", __func__);
		else {
			LOG_DBG("%s: Ping response received", __func__);
			vc_wdog_state->failed_pings = 0;
		}

		mutex_unlock(&vc_wdog_state->wdog_ping_mutex);

		vchiq_release_service(vc_wdog_state->service_handle);

		LOG_DBG("%s: waiting before pinging again...", __func__);
		/* delay before running again */
		do {
			set_current_state(TASK_INTERRUPTIBLE);
			time_remaining = schedule_timeout(time_remaining);
			if (time_remaining) {
				LOG_ERR("%s interrupted", __func__);
				flush_signals(current);
			}
		} while (time_remaining > 0);
	}

	return 0;
}

static void vc_watchdog_connected_init(void)
{
	int ret = 0;
	VCHIQ_SERVICE_PARAMS_T vchiq_params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('W', 'D', 'O', 'G'),
		.callback    = vc_watchdog_vchiq_callback,
		.version     = VC_WDOG_VERSION,
		.version_min = VC_WDOG_VERSION_MIN
	};

	LOG_INFO("%s: start", __func__);

	/* Initialize and create a VCHIQ connection */
	ret = vchiq_initialise(&vc_wdog_state->initialise_instance);
	if (ret != 0) {
		LOG_ERR("%s: failed to initialise VCHIQ instance (ret=%d)",
				__func__, ret);
		ret = -EIO;
		goto out;
	}
	ret = vchiq_connect(vc_wdog_state->initialise_instance);
	if (ret != 0) {
		LOG_ERR("%s: failed to connect VCHIQ instance (ret=%d)",
				__func__, ret);
		ret = -EIO;
		goto out;
	}

	ret = vchiq_open_service(vc_wdog_state->initialise_instance,
			&vchiq_params, &vc_wdog_state->service_handle);
	if (ret != 0 || (vc_wdog_state->service_handle == 0)) {
		LOG_ERR("%s: failed to add WDOG service: error %d",
				__func__, ret);
		ret = -EPERM;
		goto out;
	}

	vchiq_release_service(vc_wdog_state->service_handle);

	init_completion(&vc_wdog_state->wdog_ping_response);
	mutex_init(&vc_wdog_state->wdog_ping_mutex);
	init_completion(&vc_wdog_state->wdog_disable_blocker);

#ifdef ENABLE_VC_WATCHDOG
	complete_all(&vc_wdog_state->wdog_disable_blocker);
	atomic_set(&vc_wdog_state->wdog_enabled, 1);
#else
	atomic_set(&vc_wdog_state->wdog_enabled, 0);
#endif

	vc_wdog_state->wdog_thread = kthread_create(
		&vc_watchdog_thread_func, NULL, "vc-watchdog");
	if (vc_wdog_state->wdog_thread == NULL)
		LOG_ERR("FATAL: couldn't create thread vc-watchdog");
	else
		wake_up_process(vc_wdog_state->wdog_thread);

out:
	LOG_INFO("%s: end (ret=%d)", __func__, ret);
}

static int vc_watchdog_probe(struct platform_device *p_dev)
{
	int ret = 0;
	LOG_INFO("%s: start", __func__);
	vc_wdog_state = kzalloc(sizeof(struct vc_watchdog_state), GFP_KERNEL);
	if (!vc_wdog_state) {
		ret = -ENOMEM;
		goto exit;
	}

	vc_wdog_state->p_dev = p_dev;

	vc_wdog_state->proc_entry = create_proc_entry(DRIVER_NAME, 0444, NULL);
	if (vc_wdog_state->proc_entry == NULL) {
		ret = -EFAULT;
		goto out_efault;
	}
	vc_wdog_state->proc_entry->data = (void *)vc_wdog_state;
	vc_wdog_state->proc_entry->write_proc = vc_watchdog_proc_write;

	vchiq_add_connected_callback(vc_watchdog_connected_init);

	goto exit;

out_efault:
	kfree(vc_wdog_state);
	vc_wdog_state = NULL;
exit:
	LOG_INFO("%s: end, ret=%d", __func__, ret);
	return ret;
}
Example #19
0
int __init
setup_netjet_s(struct IsdnCard *card)
{
	int bytecnt;
	struct IsdnCardState *cs = card->cs;
	char tmp[64];
	long flags;

#ifdef __BIG_ENDIAN
#error "not running on big endian machines now"
#endif
	strcpy(tmp, NETjet_S_revision);
	printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_NETJET_S)
		return(0);
	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);

#if CONFIG_PCI

	for ( ;; )
	{
		if (!pci_present()) {
			printk(KERN_ERR "Netjet: no PCI bus present\n");
			return(0);
		}
		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
			if (pci_enable_device(dev_netjet))
				return(0);
			pci_set_master(dev_netjet);
			cs->irq = dev_netjet->irq;
			if (!cs->irq) {
				printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
				return(0);
			}
			cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
			if (!cs->hw.njet.base) {
				printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
				return(0);
			}
			/* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
			if ((dev_netjet->subsystem_vendor == 0x55) &&
				(dev_netjet->subsystem_device == 0x02)) {
				printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
				printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
				return(0);
			}
			/* end new code */
		} else {
			printk(KERN_WARNING "NETjet-S: No PCI card found\n");
			return(0);
		}

		cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
		cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;

		save_flags(flags);
		sti();

		cs->hw.njet.ctrl_reg = 0xff;  /* Reset On */
		byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout((10*HZ)/1000);	/* Timeout 10ms */

		cs->hw.njet.ctrl_reg = 0x00;  /* Reset Off and status read clear */
		byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout((10*HZ)/1000);	/* Timeout 10ms */

		restore_flags(flags);

		cs->hw.njet.auxd = 0xC0;
		cs->hw.njet.dmactrl = 0;

		byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
		byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
		byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);

		switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) )
		{
			case 0 :
				break;

			case 3 :
				printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" );
				continue;

			default :
				printk( KERN_WARNING "NETjet-S: No PCI card found\n" );
				return 0;
		}
		break;
	}
#else

	printk(KERN_WARNING "NETjet-S: NO_PCI_BIOS\n");
	printk(KERN_WARNING "NETjet-S: unable to config NETJET-S PCI\n");
	return (0);

#endif /* CONFIG_PCI */

	bytecnt = 256;

	printk(KERN_INFO
		"NETjet-S: PCI card configured at %#lx IRQ %d\n",
		cs->hw.njet.base, cs->irq);
	if (check_region(cs->hw.njet.base, bytecnt)) {
		printk(KERN_WARNING
		       "HiSax: %s config port %#lx-%#lx already in use\n",
		       CardType[card->typ],
		       cs->hw.njet.base,
		       cs->hw.njet.base + bytecnt);
		return (0);
	} else {
		request_region(cs->hw.njet.base, bytecnt, "netjet-s isdn");
	}
	cs->readisac  = &NETjet_ReadIC;
	cs->writeisac = &NETjet_WriteIC;
	cs->readisacfifo  = &NETjet_ReadICfifo;
	cs->writeisacfifo = &NETjet_WriteICfifo;
	cs->BC_Read_Reg  = &dummyrr;
	cs->BC_Write_Reg = &dummywr;
	cs->BC_Send_Data = &netjet_fill_dma;
	cs->cardmsg = &NETjet_S_card_msg;
	cs->irq_func = &netjet_s_interrupt;
	cs->irq_flags |= SA_SHIRQ;
	ISACVersion(cs, "NETjet-S:");
	return (1);
}
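The check_region()/request_region() pair above is racy: another driver can claim the range between the two calls. Later kernels drop check_region() and test request_region() directly, as sketched here:

	if (!request_region(cs->hw.njet.base, bytecnt, "netjet-s isdn")) {
		printk(KERN_WARNING
		       "HiSax: %s config port %#lx-%#lx already in use\n",
		       CardType[card->typ],
		       cs->hw.njet.base,
		       cs->hw.njet.base + bytecnt);
		return (0);
	}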
Example #20
0
int timod_getmsg(unsigned int fd, char *ctl_buf, int ctl_maxlen, s32 *ctl_len,
			char *data_buf, int data_maxlen, s32 *data_len, int *flags_p)
{
	int error;
	int oldflags;
	struct file *filp;
	struct inode *ino;
	struct sol_socket_struct *sock;
	struct T_unitdata_ind udi;
	mm_segment_t old_fs = get_fs();
	long args[6];
	char *tmpbuf;
	int tmplen;
	int (*sys_socketcall)(int, unsigned long *) =
		(int (*)(int, unsigned long *))SYS(socketcall);
	int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *);
	
	SOLD("entry");
	SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len, data_buf, data_maxlen, data_len, *flags_p));
	filp = current->files->fd[fd];
	ino = filp->f_dentry->d_inode;
	sock = (struct sol_socket_struct *)filp->private_data;
	SOLDD(("%p %p\n", sock->pfirst, sock->pfirst ? sock->pfirst->next : NULL));
	if ( ctl_maxlen > 0 && !sock->pfirst && ino->u.socket_i.type == SOCK_STREAM
		&& sock->state == TS_IDLE) {
		SOLD("calling LISTEN");
		args[0] = fd;
		args[1] = -1;
		set_fs(KERNEL_DS);
		sys_socketcall(SYS_LISTEN, args);
		set_fs(old_fs);
		SOLD("LISTEN done");
	}
	if (!(filp->f_flags & O_NONBLOCK)) {
		poll_table wait_table, *wait;

		poll_initwait(&wait_table);
		wait = &wait_table;
		for(;;) {
			SOLD("loop");
			set_current_state(TASK_INTERRUPTIBLE);
			/* ! ( l<0 || ( l>=0 && ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */ 
			/* ( ! l<0 && ! ( l>=0 && ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */ 
			/* ( l>=0 && ( ! l>=0 || ! ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */ 
			/* ( l>=0 && ( l<0 || ( pfirst && ! (flags == HIPRI && pri != HIPRI) ) ) ) */ 
			/* ( l>=0 && ( l<0 || ( pfirst && (flags != HIPRI || pri == HIPRI) ) ) ) */ 
			/* ( l>=0 && ( pfirst && (flags != HIPRI || pri == HIPRI) ) ) */ 
			if (ctl_maxlen >= 0 && sock->pfirst && (*flags_p != MSG_HIPRI || sock->pfirst->pri == MSG_HIPRI))
				break;
			SOLD("cond 1 passed");
			if (
			#if 1
				*flags_p != MSG_HIPRI &&
			#endif
				((filp->f_op->poll(filp, wait) & POLLIN) ||
				(filp->f_op->poll(filp, NULL) & POLLIN) ||
				signal_pending(current))
			) {
				break;
			}
			if( *flags_p == MSG_HIPRI ) {
				SOLD("avoiding lockup");
				break ;
			}
			if(wait_table.error) {
				SOLD("wait-table error");
				poll_freewait(&wait_table);
				return wait_table.error;
			}
			SOLD("scheduling");
			schedule();
		}
		SOLD("loop done");
		current->state = TASK_RUNNING;
		poll_freewait(&wait_table);
		if (signal_pending(current)) {
			SOLD("signal pending");
			return -EINTR;
		}
	}
	if (ctl_maxlen >= 0 && sock->pfirst) {
		struct T_primsg *it = sock->pfirst;
		int l = min_t(int, ctl_maxlen, it->length);
		SCHECK_MAGIC((char*)((u64)(((char *)&it->type)+sock->offset+it->length+7)&~7),MKCTL_MAGIC);
		SOLD("purting ctl data");
		if(copy_to_user(ctl_buf,
			(char*)&it->type + sock->offset, l))
			return -EFAULT;
		SOLD("pur it");
		if(put_user(l, ctl_len))
			return -EFAULT;
		SOLD("set ctl_len");
		*flags_p = it->pri;
		it->length -= l;
		if (it->length) {
			SOLD("more ctl");
			sock->offset += l;
			return MORECTL;
		} else {
			SOLD("removing message");
			sock->pfirst = it->next;
			if (!sock->pfirst)
				sock->plast = NULL;
			SOLDD(("getmsg kfree %016lx->%016lx\n", it, sock->pfirst));
			mykfree(it);
			sock->offset = 0;
			SOLD("ctl done");
			return 0;
		}
	}
	*flags_p = 0;
	if (ctl_maxlen >= 0) {
		SOLD("ACCEPT perhaps?");
		if (ino->u.socket_i.type == SOCK_STREAM && sock->state == TS_IDLE) {
			struct T_conn_ind ind;
			char *buf = getpage();
			int len = BUF_SIZE;

			SOLD("trying ACCEPT");
			if (put_user(ctl_maxlen - sizeof(ind), ctl_len))
				return -EFAULT;
			args[0] = fd;
			args[1] = (long)buf;
			args[2] = (long)&len;
			oldflags = filp->f_flags;
			filp->f_flags |= O_NONBLOCK;
			SOLD("calling ACCEPT");
			set_fs(KERNEL_DS);
			error = sys_socketcall(SYS_ACCEPT, args);
			set_fs(old_fs);
			filp->f_flags = oldflags;
			if (error < 0) {
				SOLD("some error");
				putpage(buf);
				return error;
			}
			if (error) {
				SOLD("connect");
				putpage(buf);
				if (sizeof(ind) > ctl_maxlen) {
					SOLD("generating CONN_IND");
					ind.PRIM_type = T_CONN_IND;
					ind.SRC_length = len;
					ind.SRC_offset = sizeof(ind);
					ind.OPT_length = ind.OPT_offset = 0;
					ind.SEQ_number = error;
					if(copy_to_user(ctl_buf, &ind, sizeof(ind))||
					   put_user(sizeof(ind)+ind.SRC_length,ctl_len))
						return -EFAULT;
					SOLD("CONN_IND created");
				}
				if (data_maxlen >= 0)
					put_user(0, data_len);
				SOLD("CONN_IND done");
				return 0;
			}
			if (len>ctl_maxlen) {
				SOLD("data don't fit");
				putpage(buf);
				return -EFAULT;		/* XXX - is this ok ? */
			}
			if(copy_to_user(ctl_buf,buf,len) || put_user(len,ctl_len)){
				SOLD("can't copy data");
				putpage(buf);
				return -EFAULT;
			}
			SOLD("ACCEPT done");
			putpage(buf);
		}
	}
	SOLD("checking data req");
	if (data_maxlen <= 0) {
		if (data_maxlen == 0)
			put_user(0, data_len);
		if (ctl_maxlen >= 0)
			put_user(0, ctl_len);
		return -EAGAIN;
	}
	SOLD("wants data");
	if (ctl_maxlen > (int)sizeof(udi) && sock->state == TS_IDLE) {
		SOLD("udi fits");
		tmpbuf = ctl_buf + sizeof(udi);
		tmplen = ctl_maxlen - sizeof(udi);
	} else {
		SOLD("udi does not fit");
		tmpbuf = NULL;
		tmplen = 0;
	}
	if (put_user(tmplen, ctl_len))
		return -EFAULT;
	SOLD("set ctl_len");
	oldflags = filp->f_flags;
	filp->f_flags |= O_NONBLOCK;
	SOLD("calling recvfrom");
	sys_recvfrom = (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
	error = sys_recvfrom(fd, data_buf, data_maxlen, 0, (struct sockaddr*)tmpbuf, ctl_len);
	filp->f_flags = oldflags;
	if (error < 0)
		return error;
	SOLD("error >= 0" ) ;
	if (error && ctl_maxlen > (int)sizeof(udi) && sock->state == TS_IDLE) {
		SOLD("generating udi");
		udi.PRIM_type = T_UNITDATA_IND;
		get_user(udi.SRC_length, ctl_len);
		udi.SRC_offset = sizeof(udi);
		udi.OPT_length = udi.OPT_offset = 0;
		copy_to_user(ctl_buf, &udi, sizeof(udi));
		put_user(sizeof(udi)+udi.SRC_length, ctl_len);
		SOLD("udi done");
	} else
		put_user(0, ctl_len);
	put_user(error, data_len);
	SOLD("done");
	return 0;
}
Example #21
0
int do_select(int n, fd_set_bits *fds, long *timeout)
{
	poll_table table, *wait;
	int retval, i, off;
	long __timeout = *timeout;

	read_lock(&current->files->file_lock);
	retval = max_select_fd(n, fds);
	read_unlock(&current->files->file_lock);

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table;
	if (!__timeout)
		wait = NULL;
	retval = 0;
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		for (i = 0 ; i < n; i++) {
			unsigned long bit = BIT(i);
			unsigned long mask;
			struct file *file;

			off = i / __NFDBITS;
			if (!(bit & BITS(fds, off)))
				continue;
			file = fget(i);
			mask = POLLNVAL;
			if (file) {
				mask = DEFAULT_POLLMASK;
				if (file->f_op && file->f_op->poll)
					mask = file->f_op->poll(file, wait);
				fput(file);
			}
			if ((mask & POLLIN_SET) && ISSET(bit, __IN(fds,off))) {
				SET(bit, __RES_IN(fds,off));
				retval++;
				wait = NULL;
			}
			if ((mask & POLLOUT_SET) && ISSET(bit, __OUT(fds,off))) {
				SET(bit, __RES_OUT(fds,off));
				retval++;
				wait = NULL;
			}
			if ((mask & POLLEX_SET) && ISSET(bit, __EX(fds,off))) {
				SET(bit, __RES_EX(fds,off));
				retval++;
				wait = NULL;
			}
		}
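		/* After the first full pass every fd is registered on
		 * its wait queue (or we already have results), so stop
		 * passing the poll table around. */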
		wait = NULL;
		if (retval || !__timeout || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}
		__timeout = schedule_timeout(__timeout);
	}
	current->state = TASK_RUNNING;

	poll_freewait(&table);

	/*
	 * Update the caller's timeout.
	 */
	*timeout = __timeout;
	return retval;
}
Example #22
0
/* Note:
   for SCO OpenServer 5.0.6 the original nap was fixed so that it
   no longer waits a minimum of 2 ticks (20 ms)
   but may wait less, with a 10 ms granularity */
long
xnx_nap(long period)
{
#ifdef USE_NEW_NAP_CODE
    // HZ is the number of jiffies per second.
    // The code below needs 1 <= HZ <= 1000
    // in order to work correctly in any case.
#if HZ > 1000
#error this code only works with HZ <= 1000
#endif
    struct timeval tv1, tv2;
    struct timezone tz;
    mm_segment_t oldfs;
    long period_s; // seconds part
    long period_ms_hz; // milliseconds part, scaled to a base of HZ
    long period_j; // jiffies

    if (!period)
        return 0; // zero request, zero reply

    oldfs = get_fs();
    set_fs(get_ds());
    SYS(gettimeofday, &tv1, &tz);

    period_s = period / 1000;
    period_ms_hz = (period - period_s * 1000) * HZ;
    period_j = period_s * HZ + period_ms_hz / 1000;
    // take care of rounding errors, round up
    if (period > period_j * (1000 / HZ)) period_j++;

    set_current_state(TASK_INTERRUPTIBLE);
    schedule_timeout(period_j);

    SYS(gettimeofday, &tv2, &tz);
    set_fs(oldfs);

    if (signal_pending(current))
        return -EINTR; // interrupted
    return (tv2.tv_sec - tv1.tv_sec) * 1000
           + (tv2.tv_usec - tv1.tv_usec + 500) / 1000;
#else
    __sighandler_t old_handler;
    struct itimerval it;
    struct timeval tv1, tv2;
    struct timezone tz;
    mm_segment_t oldfs;

    if (!period)
        return 0;

    it.it_interval.tv_sec = 0;
    it.it_interval.tv_usec = 0;
    it.it_value.tv_sec = 0;
    it.it_value.tv_usec = period * 1000;

    oldfs = get_fs();
    set_fs(get_ds());

    SYS(gettimeofday, &tv1, &tz);
    old_handler = sigaction(SIGALRM, SIG_DFL); // SIG_DFL -> terminate
    SYS(setitimer, ITIMER_REAL, &it, NULL);
    SYS(pause);
    sigaction(SIGALRM, old_handler);
    SYS(gettimeofday, &tv2, &tz);
    set_fs(oldfs);

    deactivate_signal(current, SIGALRM);

    if (signal_pending(current))
        return -EINTR;
    return ((tv2.tv_sec - tv1.tv_sec) * 1000000
            + (tv2.tv_usec - tv1.tv_usec)) / 1000;
#endif
}
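
The millisecond-to-jiffies conversion above is the subtle part, so here is
the same round-up rule as a standalone user-space sketch (assuming HZ = 100,
i.e. one jiffy = 10 ms; ms_to_jiffies is a hypothetical helper for
illustration, not the kernel's msecs_to_jiffies):

#include <stdio.h>

#define HZ 100 /* assumed tick rate: one jiffy = 10 ms */

/* Mirrors xnx_nap()'s arithmetic: scale the millisecond remainder by HZ
   before dividing so it is not truncated away, then round up so the sleep
   is never shorter than requested. */
static long ms_to_jiffies(long period)
{
    long period_s = period / 1000;                       /* whole seconds */
    long period_ms_hz = (period - period_s * 1000) * HZ; /* ms remainder, scaled */
    long period_j = period_s * HZ + period_ms_hz / 1000;

    if (period > period_j * (1000 / HZ)) /* some time was lost: round up */
        period_j++;
    return period_j;
}

int main(void)
{
    printf("25 ms -> %ld jiffies\n", ms_to_jiffies(25)); /* 3, never 2 */
    printf("30 ms -> %ld jiffies\n", ms_to_jiffies(30)); /* exactly 3 */
    return 0;
}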
Example #23
0
static int cifs_oplock_thread(void *dummyarg)
{
    struct oplock_q_entry *oplock_item;
    struct cifsTconInfo *pTcon;
    struct inode *inode;
    __u16  netfid;
    int rc, waitrc = 0;

    set_freezable();
    do {
        if (try_to_freeze())
            continue;

        spin_lock(&GlobalMid_Lock);
        if (list_empty(&GlobalOplock_Q)) {
            spin_unlock(&GlobalMid_Lock);
            set_current_state(TASK_INTERRUPTIBLE);
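            /* queue empty: doze (up to the 39 s poll interval) until woken */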
            schedule_timeout(39*HZ);
        } else {
            oplock_item = list_entry(GlobalOplock_Q.next,
                                     struct oplock_q_entry, qhead);
            cFYI(1, ("found oplock item to write out"));
            pTcon = oplock_item->tcon;
            inode = oplock_item->pinode;
            netfid = oplock_item->netfid;
            spin_unlock(&GlobalMid_Lock);
            DeleteOplockQEntry(oplock_item);
            /* can not grab inode sem here since it would
            	deadlock when oplock received on delete
            	since vfs_unlink holds the i_mutex across
            	the call */
            /* mutex_lock(&inode->i_mutex);*/
            if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_CIFS_EXPERIMENTAL
                if (CIFS_I(inode)->clientCanCacheAll == 0)
                    break_lease(inode, FMODE_READ);
                else if (CIFS_I(inode)->clientCanCacheRead == 0)
                    break_lease(inode, FMODE_WRITE);
#endif
                rc = filemap_fdatawrite(inode->i_mapping);
                if (CIFS_I(inode)->clientCanCacheRead == 0) {
                    waitrc = filemap_fdatawait(
                                 inode->i_mapping);
                    invalidate_remote_inode(inode);
                }
                if (rc == 0)
                    rc = waitrc;
            } else
                rc = 0;
            /* mutex_unlock(&inode->i_mutex);*/
            if (rc)
                CIFS_I(inode)->write_behind_rc = rc;
            cFYI(1, ("Oplock flush inode %p rc %d",
                     inode, rc));

            /* Releasing a stale oplock after a recent reconnect of
            the smb session (using a now-incorrect file handle) is
            not a data integrity issue, but do not bother sending an
            oplock release if the session to the server is still
            disconnected, since the server has already released the
            oplock in that case. */
            if (!pTcon->need_reconnect) {
                rc = CIFSSMBLock(0, pTcon, netfid,
                                 0 /* len */ , 0 /* offset */, 0,
                                 0, LOCKING_ANDX_OPLOCK_RELEASE,
                                 false /* wait flag */);
                cFYI(1, ("Oplock release rc = %d", rc));
            }
            set_current_state(TASK_INTERRUPTIBLE);
            schedule_timeout(1);  /* yield in case q were corrupt */
        }
    } while (!kthread_should_stop());

    return 0;
}
Example #24
0
File: lkdtm.c Project: 7799/linux
static void lkdtm_do_action(enum ctype which)
{
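	/* Each case below deliberately triggers the fault it names;
	 * lkdtm is a fault-injection test module, so these "bugs"
	 * are the point, not accidents. */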
	switch (which) {
	case CT_PANIC:
		panic("dumptest");
		break;
	case CT_BUG:
		BUG();
		break;
	case CT_WARNING:
		WARN_ON(1);
		break;
	case CT_EXCEPTION:
		*((int *) 0) = 0;
		break;
	case CT_LOOP:
		for (;;)
			;
		break;
	case CT_OVERFLOW:
		(void) recursive_loop(recur_count);
		break;
	case CT_CORRUPT_STACK:
		corrupt_stack();
		break;
	case CT_UNALIGNED_LOAD_STORE_WRITE: {
		static u8 data[5] __attribute__((aligned(4))) = {1, 2,
				3, 4, 5};
		u32 *p;
		u32 val = 0x12345678;

		p = (u32 *)(data + 1);
		if (*p == 0)
			val = 0x87654321;
		*p = val;
		break;
	}
	case CT_OVERWRITE_ALLOCATION: {
		size_t len = 1020;
		u32 *data = kmalloc(len, GFP_KERNEL);

		data[1024 / sizeof(u32)] = 0x12345678;
		kfree(data);
		break;
	}
	case CT_WRITE_AFTER_FREE: {
		size_t len = 1024;
		u32 *data = kmalloc(len, GFP_KERNEL);

		kfree(data);
		schedule();
		memset(data, 0x78, len);
		break;
	}
	case CT_SOFTLOCKUP:
		preempt_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_HARDLOCKUP:
		local_irq_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_SPINLOCKUP:
		/* Must be called twice to trigger. */
		spin_lock(&lock_me_up);
		/* Let sparse know we intended to exit holding the lock. */
		__release(&lock_me_up);
		break;
	case CT_HUNG_TASK:
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
		break;
	case CT_EXEC_DATA:
		execute_location(data_area);
		break;
	case CT_EXEC_STACK: {
		u8 stack_area[EXEC_SIZE];
		execute_location(stack_area);
		break;
	}
	case CT_EXEC_KMALLOC: {
		u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
		execute_location(kmalloc_area);
		kfree(kmalloc_area);
		break;
	}
	case CT_EXEC_VMALLOC: {
		u32 *vmalloc_area = vmalloc(EXEC_SIZE);
		execute_location(vmalloc_area);
		vfree(vmalloc_area);
		break;
	}
	case CT_EXEC_USERSPACE: {
		unsigned long user_addr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}
		execute_user_location((void *)user_addr);
		vm_munmap(user_addr, PAGE_SIZE);
		break;
	}
	case CT_ACCESS_USERSPACE: {
		unsigned long user_addr, tmp;
		unsigned long *ptr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}

		ptr = (unsigned long *)user_addr;

		pr_info("attempting bad read at %p\n", ptr);
		tmp = *ptr;
		tmp += 0xc0dec0de;

		pr_info("attempting bad write at %p\n", ptr);
		*ptr = tmp;

		vm_munmap(user_addr, PAGE_SIZE);

		break;
	}
	case CT_WRITE_RO: {
		unsigned long *ptr;

		ptr = (unsigned long *)&rodata;

		pr_info("attempting bad write at %p\n", ptr);
		*ptr ^= 0xabcd1234;

		break;
	}
	case CT_WRITE_KERN: {
		size_t size;
		unsigned char *ptr;

		size = (unsigned long)do_overwritten -
		       (unsigned long)do_nothing;
		ptr = (unsigned char *)do_overwritten;

		pr_info("attempting bad %zu byte write at %p\n", size, ptr);
		memcpy(ptr, (unsigned char *)do_nothing, size);
		flush_icache_range((unsigned long)ptr,
				   (unsigned long)(ptr + size));

		do_overwritten();
		break;
	}
	case CT_NONE:
	default:
		break;
	}

}
Example #25
0
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
    struct rt_mutex_waiter waiter;
    int ret = 0;

    debug_rt_mutex_init_waiter(&waiter);
    waiter.task = NULL;

    spin_lock(&lock->wait_lock);

    /* Try to acquire the lock again: */
    if (try_to_take_rt_mutex(lock)) {
        spin_unlock(&lock->wait_lock);
        return 0;
    }

    set_current_state(state);

    /* Setup the timer, when timeout != NULL */
    if (unlikely(timeout))
        hrtimer_start(&timeout->timer, timeout->timer.expires,
                      HRTIMER_ABS);

    for (;;) {
        /* Try to acquire the lock: */
        if (try_to_take_rt_mutex(lock))
            break;

        /*
         * TASK_INTERRUPTIBLE checks for signals and
         * timeout. Ignored otherwise.
         */
        if (unlikely(state == TASK_INTERRUPTIBLE)) {
            /* Signal pending? */
            if (signal_pending(current))
                ret = -EINTR;
            if (timeout && !timeout->task)
                ret = -ETIMEDOUT;
            if (ret)
                break;
        }

        /*
         * waiter.task is NULL the first time we come here and
         * when we have been woken up by the previous owner
         * but the lock got stolen by a higher prio task.
         */
        if (!waiter.task) {
            ret = task_blocks_on_rt_mutex(lock, &waiter,
                                          detect_deadlock);
            /*
             * If we got woken up by the owner then start loop
             * all over without going into schedule to try
             * to get the lock now:
             */
            if (unlikely(!waiter.task))
                continue;

            if (unlikely(ret))
                break;
        }

        spin_unlock(&lock->wait_lock);

        debug_rt_mutex_print_deadlock(&waiter);

        if (waiter.task)
            schedule_rt_mutex(lock);

        spin_lock(&lock->wait_lock);
        set_current_state(state);
    }

    set_current_state(TASK_RUNNING);

    if (unlikely(waiter.task))
        remove_waiter(lock, &waiter);

    /*
     * try_to_take_rt_mutex() sets the waiter bit
     * unconditionally. We might have to fix that up.
     */
    fixup_rt_mutex_waiters(lock);

    spin_unlock(&lock->wait_lock);

    /* Remove pending timer: */
    if (unlikely(timeout))
        hrtimer_cancel(&timeout->timer);

    /*
     * Readjust priority, when we did not get the lock. We might
     * have been the pending owner and boosted. Since we did not
     * take the lock, the PI boost has to go.
     */
    if (unlikely(ret))
        rt_mutex_adjust_prio(current);

    debug_rt_mutex_free_waiter(&waiter);

    return ret;
}
Example #26
0
static ssize_t read_chan(struct tty_struct *tty, struct file *file,
			 unsigned char __user *buf, size_t nr)
{
	unsigned char __user *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	int minimum, time;
	ssize_t retval = 0;
	ssize_t size;
	long timeout;
	unsigned long flags;

do_it_again:

	if (!tty->read_buf) {
		printk("n_tty_read_chan: called with read_buf == NULL?!?\n");
		return -EIO;
	}

	c = job_control(tty, file);
	if (c < 0)
		return c;
	
	minimum = time = 0;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (!tty->icanon) {
		time = (HZ / 10) * TIME_CHAR(tty);
		minimum = MIN_CHAR(tty);
		if (minimum) {
			if (time)
				tty->minimum_to_wake = 1;
			else if (!waitqueue_active(&tty->read_wait) ||
				 (tty->minimum_to_wake > minimum))
				tty->minimum_to_wake = minimum;
		} else {
			timeout = 0;
			if (time) {
				timeout = time;
				time = 0;
			}
			tty->minimum_to_wake = minimum = 1;
		}
	}

	/*
	 *	Internal serialization of reads.
	 */
	if (file->f_flags & O_NONBLOCK) {
		if (down_trylock(&tty->atomic_read))
			return -EAGAIN;
	} else {
		if (down_interruptible(&tty->atomic_read))
			return -ERESTARTSYS;
	}

	add_wait_queue(&tty->read_wait, &wait);
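	/* TTY_DONT_FLIP keeps the driver side from flipping newly
	 * received characters into the line discipline while this
	 * reader is walking the read buffer. */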
	set_bit(TTY_DONT_FLIP, &tty->flags);
	while (nr) {
		/* First test for status change. */
		if (tty->packet && tty->link->ctrl_status) {
			unsigned char cs;
			if (b != buf)
				break;
			cs = tty->link->ctrl_status;
			tty->link->ctrl_status = 0;
			if (put_user(cs, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
			break;
		}
		/* This statement must be first before checking for input
		   so that any interrupt will set the state back to
		   TASK_RUNNING. */
		set_current_state(TASK_INTERRUPTIBLE);
		
		if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
		    ((minimum - (b - buf)) >= 1))
			tty->minimum_to_wake = (minimum - (b - buf));
		
		if (!input_available_p(tty, 0)) {
			if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
				retval = -EIO;
				break;
			}
			if (tty_hung_up_p(file))
				break;
			if (!timeout)
				break;
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}
			clear_bit(TTY_DONT_FLIP, &tty->flags);
			timeout = schedule_timeout(timeout);
			set_bit(TTY_DONT_FLIP, &tty->flags);
			continue;
		}
		__set_current_state(TASK_RUNNING);

		/* Deal with packet mode. */
		if (tty->packet && b == buf) {
			if (put_user(TIOCPKT_DATA, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
		}

		if (tty->icanon) {
			/* N.B. avoid overrun if nr == 0 */
			while (nr && tty->read_cnt) {
				int eol;

				eol = test_and_clear_bit(tty->read_tail,
						tty->read_flags);
				c = tty->read_buf[tty->read_tail];
				spin_lock_irqsave(&tty->read_lock, flags);
				tty->read_tail = ((tty->read_tail+1) &
						  (N_TTY_BUF_SIZE-1));
				tty->read_cnt--;
				if (eol) {
					/* this test should be redundant:
					 * we shouldn't be reading data if
					 * canon_data is 0
					 */
					if (--tty->canon_data < 0)
						tty->canon_data = 0;
				}
				spin_unlock_irqrestore(&tty->read_lock, flags);

				if (!eol || (c != __DISABLED_CHAR)) {
					if (put_user(c, b++)) {
						retval = -EFAULT;
						b--;
						break;
					}
					nr--;
				}
				if (eol)
					break;
			}
			if (retval)
				break;
		} else {
			int uncopied;
			uncopied = copy_from_read_buf(tty, &b, &nr);
			uncopied += copy_from_read_buf(tty, &b, &nr);
			if (uncopied) {
				retval = -EFAULT;
				break;
			}
		}

		/* If there is enough space in the read buffer now, let the
		 * low-level driver know. We use n_tty_chars_in_buffer() to
		 * check the buffer, as it now knows about canonical mode.
		 * Otherwise, if the driver is throttled and the line is
		 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
		 * we won't get any more characters.
		 */
		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
			check_unthrottle(tty);

		if (b - buf >= minimum)
			break;
		if (time)
			timeout = time;
	}
	clear_bit(TTY_DONT_FLIP, &tty->flags);
	up(&tty->atomic_read);
	remove_wait_queue(&tty->read_wait, &wait);

	if (!waitqueue_active(&tty->read_wait))
		tty->minimum_to_wake = minimum;

	__set_current_state(TASK_RUNNING);
	size = b - buf;
	if (size) {
		retval = size;
		if (nr)
			clear_bit(TTY_PUSH, &tty->flags);
	} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
		 goto do_it_again;

	return retval;
}
Example #27
0
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
Example #28
0
static ssize_t write_chan(struct tty_struct * tty, struct file * file,
			  const unsigned char * buf, size_t nr)
{
	const unsigned char *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	ssize_t retval = 0;

	/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
	if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
		retval = tty_check_change(tty);
		if (retval)
			return retval;
	}

	add_wait_queue(&tty->write_wait, &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
			retval = -EIO;
			break;
		}
		if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
			while (nr > 0) {
				ssize_t num = opost_block(tty, b, nr);
				if (num < 0) {
					if (num == -EAGAIN)
						break;
					retval = num;
					goto break_out;
				}
				b += num;
				nr -= num;
				if (nr == 0)
					break;
				c = *b;
				if (opost(c, tty) < 0)
					break;
				b++; nr--;
			}
			if (tty->driver->flush_chars)
				tty->driver->flush_chars(tty);
		} else {
			while (nr > 0) {
				c = tty->driver->write(tty, b, nr);
				if (c < 0) {
					retval = c;
					goto break_out;
				}
				if (!c)
					break;
				b += c;
				nr -= c;
			}
		}
		if (!nr)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}
		schedule();
	}
break_out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);
	return (b - buf) ? b - buf : retval;
}
Example #29
0
/**
 * Initialize EcuM.
 */
void EcuM_Init(void) {
	Std_ReturnType status;
	set_current_state(ECUM_STATE_STARTUP_ONE);

	// Initialize drivers that are needed to determine PostBuild configuration
	EcuM_AL_DriverInitZero();

	// Initialize the OS
	InitOS();

	// Setup interrupts
	Os_IsrInit();

	// Determine PostBuild configuration
	EcuM_World.config = EcuM_DeterminePbConfiguration();

	// TODO: Check consistency of PB configuration

	// Initialize drivers needed before the OS-starts
	EcuM_AL_DriverInitOne(EcuM_World.config);

	// Determine the reset/wakeup reason
	switch (Mcu_GetResetReason()) {
	case MCU_POWER_ON_RESET:
		EcuM_SetWakeupEvent(ECUM_WKSOURCE_POWER);
		break;
	case MCU_SW_RESET:
		EcuM_SetWakeupEvent(ECUM_WKSOURCE_RESET);
		break;
	case MCU_RESET_UNDEFINED:
		break;
	case MCU_WATCHDOG_RESET:
		EcuM_SetWakeupEvent(ECUM_WKSOURCE_INTERNAL_WDG);
		break;
	default:
		assert(0);
		break;
	}

	// Moved this here because EcuM_SelectShutdownTarget needs us to be initialized.
	EcuM_World.initiated = TRUE;

	// Set default shutdown target

	status = EcuM_SelectShutdownTarget(
					EcuM_World.config->EcuMDefaultShutdownTarget,
					EcuM_World.config->EcuMDefaultSleepMode);/** @req EcuM2181 */
	if (status != E_OK) {
		//TODO: Report error.
	}

	// Set default application mode
	status = EcuM_SelectApplicationMode(
					EcuM_World.config->EcuMDefaultAppMode);
	if (status != E_OK) {
		//TODO: Report error.
	}

#if defined(USE_COMM)
	EcuM_World.run_comm_requests = 0;
#endif
	EcuM_World.run_requests = 0;
	EcuM_World.postrun_requests = 0;

	// Start this baby up
	AppModeType appMode;
	status = EcuM_GetApplicationMode(&appMode);
	if (status != E_OK) {
		//TODO: Report error.
	}

	StartOS(appMode); /** @req EcuM2141 */
}
Example #30
0
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
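
The lifecycle the comment above prescribes (created stopped, started with
wake_up_process(), reaped with kthread_stop()) in a minimal caller sketch;
demo_fn and demo_start are illustrative names, not part of this source:

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/delay.h>

/* Illustrative thread body: park in short sleeps until asked to stop. */
static int demo_fn(void *data)
{
	while (!kthread_should_stop())
		msleep(100);
	return 0;	/* handed back to kthread_stop() */
}

static int demo_start(void)
{
	struct task_struct *p;

	/* node == -1: no NUMA preference for the kthread stack. */
	p = kthread_create_on_node(demo_fn, NULL, -1, "demo/%d", 0);
	if (IS_ERR(p))
		return PTR_ERR(p);
	wake_up_process(p);	/* the thread is created stopped */
	/* ... do work ... */
	return kthread_stop(p);	/* wake if needed, wait for exit */
}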

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
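
Per the constraint above, the bind has to happen while the thread is still
stopped, i.e. between creation and the first wake_up_process(); a sketch
reusing the hypothetical demo_fn from the previous example:

#include <linux/kthread.h>
#include <linux/err.h>

/* Illustrative: create a thread and pin it to @cpu before it first runs. */
static struct task_struct *demo_start_on(unsigned int cpu)
{
	struct task_struct *p;

	p = kthread_create_on_node(demo_fn, NULL, -1, "bound/%u", cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);	/* legal only before the first wakeup */
	wake_up_process(p);
	return p;
}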

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}