示例#1
0
/*
 * Set up the Pisces control plane: the user-space wait queue, the
 * /dev/pisces control device node, and the cmd/ctrl xbuf channel
 * over the boot-time shared buffer.
 *
 * Returns 0 on success, -1 if the xbuf channel cannot be created.
 */
static int 
pisces_ctrl_init(void)
{
	/* Wait queue for user-space readers of the control device. */
	waitq_init(&(user_waitq));

	/* Expose the control interface as a device file. */
	kfs_create("/dev/pisces", NULL, &pisces_ctrl_fops, 0777, NULL, 0);

	/*
	 * Attach the command/control xbuf server to the shared buffer
	 * described by the boot parameters; cmd_handler services the
	 * incoming requests.
	 */
	xbuf_desc =
		pisces_xbuf_server_init((uintptr_t)__va(pisces_boot_params->control_buf_addr),
					pisces_boot_params->control_buf_size,
					cmd_handler, NULL, -1, 0);

	if (!xbuf_desc) {
		printk(KERN_ERR "Could not initialize cmd/ctrl xbuf channel\n");
		return -1;
	}

	return 0;
}
示例#2
0
文件: ioq_linux.c 项目: dlbeer/libdlb
/*
 * Initialize an I/O queue.
 *
 * Sets up, in order: the run queue (with bg_threads background
 * threads), the wait queue, the mod-list lock and list, a self-pipe
 * used to interrupt epoll_wait, and the epoll instance with the
 * pipe's read end registered for EPOLLIN.
 *
 * Returns 0 on success.  On failure, returns -1 with the system
 * error (captured immediately after the failing call, before any
 * cleanup can overwrite it) re-installed via syserr_set(); all
 * resources acquired so far are released in reverse order through
 * the goto-cleanup chain.
 */
int ioq_init(struct ioq *q, unsigned int bg_threads)
{
	struct epoll_event evt;
	syserr_t err;

	if (runq_init(&q->run, bg_threads) < 0) {
		err = syserr_last();
		goto fail_runq;
	}

	/* With no background threads the run queue must be woken from
	 * the dispatch loop, so hook up the runq wakeup callback. */
	if (!bg_threads)
		q->run.wakeup = wakeup_runq;

	waitq_init(&q->wait, &q->run);
	q->wait.wakeup = wakeup_waitq;

	thr_mutex_init(&q->lock);
	slist_init(&q->mod_list);

	/* Self-pipe: writing to intr[1] wakes a thread blocked in
	 * epoll_wait watching intr[0]. */
	if (pipe(q->intr) < 0) {
		err = syserr_last();
		goto fail_pipe;
	}

	/* Non-blocking read end so draining the pipe never stalls. */
	fcntl(q->intr[0], F_SETFL, fcntl(q->intr[0], F_GETFL) | O_NONBLOCK);

	q->intr_state = 0;

	/* The size hint (64) is ignored by modern kernels but must be
	 * greater than zero. */
	q->epoll_fd = epoll_create(64);
	if (q->epoll_fd < 0) {
		err = syserr_last();
		goto fail_epoll;
	}

	memset(&evt, 0, sizeof(evt));
	evt.events = EPOLLIN;
	if (epoll_ctl(q->epoll_fd, EPOLL_CTL_ADD, q->intr[0], &evt) < 0) {
		err = syserr_last();
		goto fail_ctl;
	}

	return 0;

	/* Unwind in strict reverse order of acquisition. */
fail_ctl:
	close(q->epoll_fd);
fail_epoll:
	close(q->intr[0]);
	close(q->intr[1]);
fail_pipe:
	thr_mutex_destroy(&q->lock);
	waitq_destroy(&q->wait);
	runq_destroy(&q->run);
fail_runq:
	syserr_set(err);
	return -1;
}
示例#3
0
/*
 * Allocate a zeroed cpucap structure and initialize its usage lock
 * and wait queue.  KM_SLEEP means this call may block until memory
 * becomes available, so it never returns NULL.
 */
static cpucap_t *
cap_alloc(void)
{
	cpucap_t *new_cap;

	new_cap = kmem_zalloc(sizeof (cpucap_t), KM_SLEEP);

	DISP_LOCK_INIT(&new_cap->cap_usagelock);
	waitq_init(&new_cap->cap_waitq);

	return (new_cap);
}
示例#4
0
文件: mutex.c 项目: abusalimov/embox
/*
 * Initialize a mutex into its unlocked state: empty wait queue,
 * zero lock count, no holder.  Attributes are copied from 'attr'
 * when one is supplied; otherwise the defaults are installed.
 */
void mutex_init_default(struct mutex *m, const struct mutexattr *attr) {
	m->holder = NULL;
	m->lock_count = 0;
	waitq_init(&m->wq);

	if (attr == NULL) {
		mutexattr_init(&m->attr);
	} else {
		mutexattr_copy(attr, &m->attr);
	}
}
示例#5
0
/*
 *	Routine:	semaphore_create
 *
 *	Allocate a new semaphore with the given initial count and
 *	wait policy, and link it onto the owning task's semaphore
 *	list.  The new semaphore is handed back through
 *	*new_semaphore (SEMAPHORE_NULL on any failure).
 */
kern_return_t
semaphore_create(
	task_t			task,
	semaphore_t		*new_semaphore,
	int				policy,
	int				value)
{
	semaphore_t		sem;
	kern_return_t		ret;

	*new_semaphore = SEMAPHORE_NULL;

	/* Reject a null task, a negative count, or an unknown policy. */
	if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	sem = (semaphore_t) zalloc (semaphore_zone);
	if (sem == SEMAPHORE_NULL)
		return KERN_RESOURCE_SHORTAGE;

	/* waitq_init also initializes the embedded lock. */
	ret = waitq_init(&sem->waitq, policy | SYNC_POLICY_DISABLE_IRQ);
	if (ret != KERN_SUCCESS) {
		zfree(semaphore_zone, sem);
		return ret;
	}

	/* Fill in the semaphore state. */
	sem->port      = IP_NULL;
	sem->ref_count = 1;
	sem->count     = value;
	sem->active    = TRUE;
	sem->owner     = task;

	/*
	 * Record ownership: push the new semaphore onto the task's
	 * semaphore list under the task lock.
	 */
	task_lock(task);
	enqueue_head(&task->semaphore_list, (queue_entry_t) sem);
	task->semaphores_owned++;
	task_unlock(task);

	*new_semaphore = sem;

	return KERN_SUCCESS;
}
示例#6
0
/*
 * Open handler for the TUN device node.
 *
 * Looks up the net_device backing 'node' and fails with ENOENT if
 * none exists.  Takes the per-tun user lock (not released here —
 * presumably held until the matching close; confirm against the
 * release handler), resets the wait queue, and reinitializes the
 * receive skb queue under the kernel-side lock.  The net_device
 * pointer is stashed in the file descriptor for later file ops.
 */
static struct idesc *tun_dev_open(struct node *node, struct file_desc *file_desc, int flags) {
	struct net_device *netdev;
	struct tun *tun;

	netdev = tun_netdev_by_node(node);
	if (!netdev) {
		return err_ptr(ENOENT);
	}

	tun = netdev_priv(netdev, struct tun);
	tun_user_lock(tun);

	waitq_init(&tun->wq);

	/* rx_q is shared with the driver's receive path, so reset it
	 * only while holding the kernel-side lock. */
	tun_krnl_lock(tun);
	{
		skb_queue_init(&tun->rx_q);
	}
	tun_krnl_unlock(tun);

	file_desc->file_info = netdev;

	return &file_desc->idesc;
}
示例#7
0
文件: mutex.c 项目: abusalimov/embox
/* Run-time completion of a statically declared mutex: the wait
 * queue is the only field that needs run-time setup. */
static inline void mutex_complete_static_init(struct mutex *m) {
	waitq_init(&m->wq);
}
示例#8
0
/* Prepare a SCSI device for user-level access by resetting its
 * wait queue and its mutex. */
static void scsi_user_enter(struct scsi_dev *dev) {
	waitq_init(&dev->wq);
	mutex_init(&dev->m);
}
示例#9
0
/*
 * Put a read-write lock into its initial state: zero holders, no
 * active readers or writer, empty wait queue.
 */
void rwlock_init(rwlock_t *r) {
	r->count = 0;
	r->status = RWLOCK_STATUS_NONE;
	waitq_init(&r->wq);
}