Example no. 1
static int my_init (void) {
	proc_mydev = proc_mkdir(MYDEV,0);

	proc_hello = create_proc_entry(HELLO,0,proc_mydev);
	proc_hello->read_proc = read_hello;
	proc_hello->write_proc = write_hello;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,29)
	proc_hello->owner = THIS_MODULE;
#endif 

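	// allocate the per-device state used by the handlers registered above
	// (note: this example does not check these allocations for failure)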
	hello_data=(struct proc_hello_data *)
		kmalloc(sizeof(*hello_data),GFP_KERNEL);

	hello_data->proc_hello_value=(char *)
		kmalloc(PROC_HELLO_BUFLEN,GFP_KERNEL);
	
	hello_data->proc_hello_wqh=(wait_queue_head_t *)
		kmalloc(sizeof(wait_queue_head_t),GFP_KERNEL);
	
	hello_data->proc_hello_wq=(wait_queue_t *)
		kmalloc(sizeof(wait_queue_t),GFP_KERNEL);
	
	// see what happens when the following line is commented out
	//
	init_waitqueue_head(hello_data->proc_hello_wqh);
	init_wait(hello_data->proc_hello_wq);

	hello_data->proc_hello_flag=0;

	// module init message
	printk(KERN_ALERT "2470:10.1: main initialized!\n");
	return 0;
}
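The read_hello and write_hello handlers registered above are not part of this listing. A minimal sketch of what they could look like is shown below, assuming the pre-3.10 read_proc/write_proc procfs API implied by create_proc_entry() and reusing the fields allocated in my_init() (proc_hello_flag, proc_hello_value, proc_hello_wqh and the wait entry initialized with init_wait()); it is an illustration, not the original handlers.

/* Hypothetical read handler: sleeps on proc_hello_wqh until write_hello()
 * has stored a string and set proc_hello_flag. Signal handling and spurious
 * wakeups are ignored for brevity. */
static int read_hello(char *page, char **start, off_t off,
		      int count, int *eof, void *data)
{
	prepare_to_wait(hello_data->proc_hello_wqh,
			hello_data->proc_hello_wq, TASK_INTERRUPTIBLE);
	if (!hello_data->proc_hello_flag)
		schedule();
	finish_wait(hello_data->proc_hello_wqh, hello_data->proc_hello_wq);

	hello_data->proc_hello_flag = 0;
	*eof = 1;
	return snprintf(page, count, "%s", hello_data->proc_hello_value);
}

/* Hypothetical write handler: copies the user buffer into the preallocated
 * value buffer, then wakes the reader sleeping above. */
static int write_hello(struct file *file, const char __user *buffer,
		       unsigned long count, void *data)
{
	unsigned long len = count;

	if (len > PROC_HELLO_BUFLEN - 1)
		len = PROC_HELLO_BUFLEN - 1;
	if (copy_from_user(hello_data->proc_hello_value, buffer, len))
		return -EFAULT;
	hello_data->proc_hello_value[len] = '\0';

	hello_data->proc_hello_flag = 1;
	wake_up_interruptible(hello_data->proc_hello_wqh);
	return count;
}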
Example no. 2
struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len)
{
	wait_queue_t wait;
	struct ceph_msg *msg;

	if (front_len && front_len > pool->front_len) {
		pr_err("msgpool_get pool %p need front %d, pool size is %d\n",
		       pool, front_len, pool->front_len);
		WARN_ON(1);

		/* try to alloc a fresh message */
		msg = ceph_msg_new(0, front_len, 0, 0, NULL);
		if (!IS_ERR(msg))
			return msg;
	}

	if (!front_len)
		front_len = pool->front_len;

	if (pool->blocking) {
		/* mempool_t behavior; first try to alloc */
		msg = ceph_msg_new(0, front_len, 0, 0, NULL);
		if (!IS_ERR(msg))
			return msg;
	}

	while (1) {
		spin_lock(&pool->lock);
		if (likely(pool->num)) {
			msg = list_entry(pool->msgs.next, struct ceph_msg,
					 list_head);
			list_del_init(&msg->list_head);
			pool->num--;
			dout("msgpool_get %p got %p, now %d/%d\n", pool, msg,
			     pool->num, pool->min);
			spin_unlock(&pool->lock);
			return msg;
		}
		pr_err("msgpool_get %p now %d/%d, %s\n", pool, pool->num,
		       pool->min, pool->blocking ? "waiting" : "may fail");
		spin_unlock(&pool->lock);

		if (!pool->blocking) {
			WARN_ON(1);

			/* maybe we can allocate it now? */
			msg = ceph_msg_new(0, front_len, 0, 0, NULL);
			if (!IS_ERR(msg))
				return msg;

			pr_err("msgpool_get %p empty + alloc failed\n", pool);
			return ERR_PTR(-ENOMEM);
		}

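		/* pool is empty and blocking: sleep until a message is
		 * returned to the pool and the waiter is woken */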
		init_wait(&wait);
		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule();
		finish_wait(&pool->wait, &wait);
	}
}
Example no. 3
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;
#ifdef CONFIG_KRG_EPM
	struct task_struct *krg_cur;
#endif

	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

#ifdef CONFIG_KRG_EPM
	krg_current_save(krg_cur);
#endif
	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	smp_mb();
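	/* recheck after prepare_to_wait(): either we observe the element that
	 * a concurrent mempool_free() just returned, or that free sees us on
	 * the wait queue and wakes us, so the wakeup cannot be lost */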
	if (!pool->curr_nr) {
		/*
		 * FIXME: this should be io_schedule().  The timeout is there
		 * as a workaround for some DM problems in 2.6.18.
		 */
		io_schedule_timeout(5*HZ);
	}
	finish_wait(&pool->wait, &wait);
#ifdef CONFIG_KRG_EPM
	krg_current_restore(krg_cur);
#endif

	goto repeat_alloc;
}
Example no. 4
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	smp_mb();
	if (!pool->curr_nr)
		io_schedule();
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}
Example no. 5
void pin_kill(struct fs_pin *p)
{
	wait_queue_entry_t wait;

	if (!p) {
		rcu_read_unlock();
		return;
	}
	init_wait(&wait);
	spin_lock_irq(&p->wait.lock);
	if (likely(!p->done)) {
		p->done = -1;
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		p->kill(p);
		return;
	}
	if (p->done > 0) {
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		return;
	}
	__add_wait_queue(&p->wait, &wait);
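	/* init_wait() installed autoremove_wake_function, so a wake_up on
	 * p->wait also removes this entry from the queue; an empty wait.entry
	 * below therefore means we were woken rather than scheduled spuriously */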
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		schedule();
		rcu_read_lock();
		if (likely(list_empty(&wait.entry)))
			break;
		/* OK, we know p couldn't have been freed yet */
		spin_lock_irq(&p->wait.lock);
		if (p->done > 0) {
			spin_unlock_irq(&p->wait.lock);
			break;
		}
	}
	rcu_read_unlock();
}
Example no. 6
NDAS_SAL_API
int
sal_event_wait(sal_event event, sal_tick timeout)
{
    struct _sal_event    *sevent = (struct _sal_event*) event;
    long timeleft = timeout;
    int ret = SAL_SYNC_OK;
    wait_queue_t wait;

#if LINUX_VERSION_25_ABOVE
    if(in_interrupt() || in_atomic() || irqs_disabled()) {
#else
    if(in_interrupt()) {
#endif

#ifdef DEBUG
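        // deliberate NULL write: crash debug builds so the offending call site shows up immediately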
        *(char *)0 = 0;
#endif
        printk(KERN_ERR "Tried wait in interrupt context.\n");
        return SAL_SYNC_ERR;
    }

#if LINUX_VERSION_25_ABOVE
    // Initialize the wait.
    init_wait(&wait);
#else
    // Initialize the wait
    init_waitqueue_entry(&wait, current);
    // Add the wait to the queue
    add_wait_queue(&sevent->queue, &wait);
#endif

    for(;;)
    {
#if LINUX_VERSION_25_ABOVE
        // Add the wait entry to the queue if it is not already queued.
        // Set current process state to TASK_INTERRUPTIBLE
        prepare_to_wait(&sevent->queue, &wait, TASK_INTERRUPTIBLE);
#else
        // Set current process state to TASK_INTERRUPTIBLE
        set_current_state(TASK_INTERRUPTIBLE);
#endif
        // See if the state is set.
        if(atomic_read(&sevent->state))
            break;
        if(!signal_pending(current)) {
            if( timeout == SAL_SYNC_FOREVER) {
                schedule();
                continue;
            }
            else
            {
                timeleft = schedule_timeout(timeleft);
                // Check whether the timeout expired.
                if(!timeleft) {
                    ret = SAL_SYNC_TIMEOUT;
                    break;
                }
                continue;
            }
        }
        ret = SAL_SYNC_INTERRUPTED;
        break;
    }
#if LINUX_VERSION_25_ABOVE
    // Set current process state to TASK_RUNNING
    // Remove the wait from the queue.
    finish_wait(&sevent->queue, &wait);
#else
    // Set current process state to TASK_RUNNING
    current->state = TASK_RUNNING;
    // Remove the wait from the queue.
    remove_wait_queue(&sevent->queue, &wait);
#endif

    return ret;
}

EXPORT_SYMBOL(sal_event_wait);


NDAS_SAL_API
void
sal_event_set(sal_event event)
{
    struct _sal_event    *sevent = (struct _sal_event*) event;
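    // Mark the event signalled before waking any task sleeping in sal_event_wait().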
    atomic_set(&sevent->state, 1);
    wake_up_interruptible(&sevent->queue);
}

EXPORT_SYMBOL(sal_event_set);


NDAS_SAL_API
void
sal_event_reset(sal_event event)
{
    struct _sal_event    *sevent = (struct _sal_event*) event;
    atomic_set(&sevent->state, 0);
}

EXPORT_SYMBOL(sal_event_reset);

NDAS_SAL_API
void
sal_atomic_inc(sal_atomic *v)
{
    /* NDAS SAL uses same struct for atomic structure */
    atomic_inc((atomic_t*)v);
}
Example no. 7
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o __GFP_WAIT or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_WAIT */
	if (!(gfp_mask & __GFP_WAIT)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
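
/*
 * A second build of the same function follows in this example: the vmklinux
 * variant (VMware's Linux-compatibility layer, guarded by __VMKLNX__), which
 * dispatches the pool's alloc callback through VMKAPI_MODULE_CALL and uses
 * schedule_timeout() instead of io_schedule_timeout().
 */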
/* _VMKLNX_CODECHECK_: mempool_alloc */
void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

#if defined(__VMKLNX__) && defined(VMX86_DEBUG)
	if (gfp_mask & __GFP_WAIT) {
		vmk_WorldAssertIsSafeToBlock();
	}
#endif /* defined(__VMKLNX__) */

repeat_alloc:

#if defined(__VMKLNX__)
	VMKAPI_MODULE_CALL(pool->module_id, element, pool->alloc,
			   gfp_temp, pool->pool_data);
#else /* !defined(__VMKLNX__) */
	element = pool->alloc(gfp_temp, pool->pool_data);
#endif /* defined(__VMKLNX__) */
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	smp_mb();
	if (!pool->curr_nr) {
		/*
		 * FIXME: this should be io_schedule().  The timeout is there
		 * as a workaround for some DM problems in 2.6.18.
		 */
#if defined(__VMKLNX__)
		schedule_timeout(5*HZ);
#else /* !defined(__VMKLNX__) */
		io_schedule_timeout(5*HZ);
#endif /* defined(__VMKLNX__) */

	}
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}