Esempio n. 1
0
/* Incoming XPMEM command from enclave - enqueue and wake up kthread */
static void
xpmem_ctrl_handler(u8	* data, 
		   u32	  data_len, 
		   void * priv_data)
{
    struct pisces_xpmem_state * xpmem_state = (struct pisces_xpmem_state *)priv_data;
    struct xpmem_cmd_ringbuf  * cmd_buf     = &(xpmem_state->xpmem_buf);

    /* Reject anything that is not exactly one command structure */
    if (data_len != sizeof(struct xpmem_cmd_ex)) {
	printk(KERN_ERR "ERROR: DROPPING XPMEM CMD: MALFORMED CMD\n");
	return;
    }

    /* Copy the command into a free slot of the ring buffer */
    if (push_active_entry(cmd_buf, (struct xpmem_cmd_ex *)data) != 0) {
	printk(KERN_ERR "ERROR: DROPPING XPMEM CMD: NO ENTRIES AVAILABLE\n");
	return;
    }

    /* Publish the new entry count before waking the consumer kthread */
    atomic_inc(&(cmd_buf->active_entries));
    mb();
    waitq_wakeup(&(xpmem_state->waitq));

    /* Xbuf now complete */
    pisces_xbuf_complete(xpmem_state->xbuf_desc, NULL, 0);
}
Esempio n. 2
0
/* Wake up threads blocked on the descriptor's wait queue.
 * The event mask is currently ignored (TODO: propagate it to waiters). */
int idesc_notify(struct idesc *idesc, int mask) {
	(void)mask; /* TODO MASK: not yet honoured */

	waitq_wakeup(&idesc->idesc_waitq, 0);

	return 0;
}
Esempio n. 3
0
/* Deliver a signal to an XPMEM segment: bump its pending-IRQ count and
 * wake any thread sleeping on the segment's signalled wait queue. */
void
xpmem_seg_signal(struct xpmem_segment * seg)
{
    /* Record one more pending signal on this segment */
    atomic_inc(&(seg->irq_count));
    /* Full barrier: make the updated count visible before any waiter runs */
    mb();

    waitq_wakeup(&(seg->signalled_wq));
}
Esempio n. 4
0
/** Semaphore stress test.
 *
 * Runs three rounds with a shifting ratio of consumer to producer
 * threads; each round spins (with 1 s sleeps) until every item has
 * been both produced and consumed before starting the next round.
 *
 * @return NULL on success (test-framework convention).
 */
const char *test_semaphore1(void)
{
	int i, j, k;
	atomic_count_t consumers;
	atomic_count_t producers;
	
	waitq_initialize(&can_start);
	semaphore_initialize(&sem, AT_ONCE);
	
	/* Round i uses i parts consumers to (4 - i) parts producers */
	for (i = 1; i <= 3; i++) {
		thread_t *thrd;
		
		/* Reset per-round progress counters */
		atomic_set(&items_produced, 0);
		atomic_set(&items_consumed, 0);
		
		/* Totals the busy-wait below checks against */
		consumers = i * CONSUMERS;
		producers = (4 - i) * PRODUCERS;
		
		TPRINTF("Creating %" PRIua " consumers and %" PRIua " producers...",
		    consumers, producers);
		
		for (j = 0; j < (CONSUMERS + PRODUCERS) / 2; j++) {
			/* i consumer threads per outer iteration */
			for (k = 0; k < i; k++) {
				thrd = thread_create(consumer, NULL, TASK,
				    THREAD_FLAG_NONE, "consumer");
				if (thrd)
					thread_ready(thrd);
				else
					/* NOTE(review): prints the round index i, not
					 * the thread index k - confirm intended */
					TPRINTF("could not create consumer %d\n", i);
			}
			/* (4 - i) producer threads per outer iteration */
			for (k = 0; k < (4 - i); k++) {
				thrd = thread_create(producer, NULL, TASK,
				    THREAD_FLAG_NONE, "producer");
				if (thrd)
					thread_ready(thrd);
				else
					TPRINTF("could not create producer %d\n", i);
			}
		}
		
		TPRINTF("ok\n");
		
		/* Let all threads reach the start queue, then release them at once */
		thread_sleep(1);
		waitq_wakeup(&can_start, WAKEUP_ALL);
		
		/* Poll until this round's work is fully drained */
		while ((items_consumed.count != consumers) || (items_produced.count != producers)) {
			TPRINTF("%" PRIua " consumers remaining, %" PRIua " producers remaining\n",
			    consumers - items_consumed.count, producers - items_produced.count);
			thread_sleep(1);
		}
	}
	
	return NULL;
}
Esempio n. 5
0
/* Incoming command callback: stash the raw buffer in globals for the
 * user-space reader and wake it up. */
static void 
cmd_handler(u8    * data, 
	    u32     data_len, 
	    void  * priv_data)
{	
	/* Hand the command over to the waiting reader via file-scope globals */
	cmd_data = data;
	cmd_len  = data_len;

	/* Compiler barrier: keep the stores above from being reordered
	 * past the wakeup call */
	__asm__ __volatile__("":::"memory");

	waitq_wakeup(&(user_waitq));

	return;
}
Esempio n. 6
0
/* Shut down the XPMEM channel and tell the service kthread to exit. */
static void
xpmem_kill_fn(void * priv_data)
{
    struct pisces_xpmem_state * state = (struct pisces_xpmem_state *)priv_data;

    /* De-init xbuf server */
    pisces_xbuf_server_deinit(state->xbuf_desc);

    /* Kill kernel thread - it will free state when it exits */
    spin_lock(&exit_lock);
    {
	should_exit = 1;

	/* Make the exit flag visible before the thread is woken */
	mb();
	waitq_wakeup(&(state->waitq));
    }
    spin_unlock(&exit_lock);
}
Esempio n. 7
0
/** Unsafe unchecking version of ipc_call.
 *
 * @param phone Phone structure the call comes from.
 * @param box   Destination answerbox structure.
 * @param call  Call structure with request.
 *
 */
static void _ipc_call(phone_t *phone, answerbox_t *box, call_t *call)
{
	task_t *caller = phone->caller;

	/* Count sent ipc call */
	irq_spinlock_lock(&caller->lock, true);
	caller->ipc_info.call_sent++;
	irq_spinlock_unlock(&caller->lock, true);
	
	/* Forwarded calls have already gone through these first-send actions */
	if (!(call->flags & IPC_CALL_FORWARDED))
		_ipc_call_actions_internal(phone, call);
	
	/* Queue the request on the destination answerbox... */
	irq_spinlock_lock(&box->lock, true);
	list_append(&call->ab_link, &box->calls);
	irq_spinlock_unlock(&box->lock, true);
	
	/* ...and wake the first thread sleeping on it */
	waitq_wakeup(&box->wq, WAKEUP_FIRST);
}
Esempio n. 8
0
/** Answer a message which was not dispatched and is not listed in any queue.
 *
 * @param call       Call structure to be answered.
 * @param selflocked If true, then TASK->answerbox is locked.
 *
 */
void _ipc_answer_free_call(call_t *call, bool selflocked)
{
	/* Count sent answer */
	irq_spinlock_lock(&TASK->lock, true);
	TASK->ipc_info.answer_sent++;
	irq_spinlock_unlock(&TASK->lock, true);

	spinlock_lock(&call->forget_lock);
	if (call->forget) {
		/* This is a forgotten call and call->sender is not valid. */
		spinlock_unlock(&call->forget_lock);
		ipc_call_free(call);
		return;
	} else {
		/*
		 * If the call is still active, i.e. it was answered
		 * in a non-standard way, remove the call from the
		 * sender's active call list.
		 */
		if (call->active) {
			spinlock_lock(&call->sender->active_calls_lock);
			list_remove(&call->ta_link);
			spinlock_unlock(&call->sender->active_calls_lock);
		}
	}
	spinlock_unlock(&call->forget_lock);

	answerbox_t *callerbox = &call->sender->answerbox;
	/* Skip locking when answering into our own, already-locked box */
	bool do_lock = ((!selflocked) || (callerbox != &TASK->answerbox));
	
	call->flags |= IPC_CALL_ANSWERED;
	
	call->data.task_id = TASK->taskid;
	
	if (do_lock)
		irq_spinlock_lock(&callerbox->lock, true);
	
	/* Hand the answer back to the sender's answerbox */
	list_append(&call->ab_link, &callerbox->answers);
	
	if (do_lock)
		irq_spinlock_unlock(&callerbox->lock, true);
	
	/* Wake the first thread sleeping on the caller's answerbox */
	waitq_wakeup(&callerbox->wq, WAKEUP_FIRST);
}
Esempio n. 9
0
/** Wake up every thread blocked on the condition variable.
 *
 * Use when the condition may satisfy more than one waiter at once.
 *
 * @param cv		Condition variable whose waiters are released.
 */
void condvar_broadcast(condvar_t *cv)
{
	waitq_wakeup(&cv->wq, WAKEUP_ALL);
}
Esempio n. 10
0
/** Wake up a single thread blocked on the condition variable.
 *
 * Only the first waiter in the queue is released.
 *
 * @param cv		Condition variable with at most one waiter to release.
 */
void condvar_signal(condvar_t *cv)
{
	waitq_wakeup(&cv->wq, WAKEUP_FIRST);
}
Esempio n. 11
0
/** Read up to @len bytes from file descriptor @fd into user buffer @buf.
 *
 * Pipe-backed descriptors are serviced from the in-kernel pipe buffer,
 * blocking (interruptibly) while the pipe is empty; other files are
 * delegated to their file_operations read hook.
 *
 * @param fd  file descriptor in the current task's file table
 * @param buf destination buffer in user space
 * @param len maximum number of bytes to read
 * @return number of bytes read, or a negative errno value
 *         (-EBADF, -EFAULT, -ERESTARTSYS).
 */
ssize_t
sys_read(int           fd,
	 char __user * buf,
	 size_t        len)
{
	char buffer[PIPE_BUFFER_MAX+1];
	char temp[PIPE_BUFFER_MAX+1];
	unsigned long flags;
	ssize_t ret;
	struct file * file = get_current_file(fd);

	if (!file) {
		ret = -EBADF;
	} else if (file->pipe != NULL) {
		int result = min((size_t)PIPE_BUFFER_MAX, len);
		buffer[0] = 0; // make sure it's null-terminated

		int n = 0;

		spin_lock_irqsave(&file->pipe->buffer_lock, flags);

		while (n < result) {
			// If the buffer is empty, wait until it fills a bit
			while (pipe_buffer_empty(file->pipe)) {
				spin_unlock_irqrestore(&file->pipe->buffer_lock, flags);
				if (wait_event_interruptible(file->pipe->buffer_wait,
				     (!pipe_buffer_empty(file->pipe))))
					return -ERESTARTSYS;
				spin_lock_irqsave(&file->pipe->buffer_lock, flags);
			}

			int num_to_read = min(result-n, file->pipe->amount);
			// Append the available bytes, then shift the pipe
			// buffer left by num_to_read via the temp copy.
			strncat(buffer+n, file->pipe->buffer, num_to_read);
			strncpy(temp, file->pipe->buffer+num_to_read, PIPE_BUFFER_MAX+1-num_to_read); // XXX
			strncpy(file->pipe->buffer, temp, PIPE_BUFFER_MAX+1);
			buffer[num_to_read] = 0; // make sure it's null-terminated
			n += num_to_read;
			// notify everybody about our deletion from the buffer
			file->pipe->amount -= num_to_read;
			waitq_wakeup(&file->pipe->buffer_wait);
			break;
		}
		result = n;

		spin_unlock_irqrestore(&file->pipe->buffer_lock, flags);

		/*
		 * FIX: copy_to_user() may fault and sleep, so it must not run
		 * with buffer_lock held and interrupts disabled.  Both
		 * 'buffer' and 'result' are locals, so copying after the
		 * unlock is safe.
		 */
		int rc = copy_to_user(buf, buffer, result);
		ret = rc ? -EFAULT : result;
	} else if (file->f_op->read) {
		ret = file->f_op->read( file, (char *)buf, len, NULL );
	} else {
		printk( KERN_WARNING "%s: fd %d (%s) has no read operation\n",
			__func__, fd, file->inode->name );
		ret = -EBADF;
	}

	return ret;
}
Esempio n. 12
0
/* Transmit hook for the TUN device: push the frame onto the receive
 * queue and wake a reader blocked on the device wait queue. */
static int tun_xmit(struct net_device *dev, struct sk_buff *skb) {
	struct tun *tun_dev = netdev_priv(dev, struct tun);

	skb_queue_push(&tun_dev->rx_q, skb);
	waitq_wakeup(&tun_dev->wq, 1);

	return 0;
}