static irqreturn_t dryice_norm_irq(int irq, void *dev_id)
{
	struct imxdi_dev *imxdi = dev_id;
	u32 dsr, dier;
	irqreturn_t rc = IRQ_NONE;

	dier = __raw_readl(imxdi->ioaddr + DIER);

	/* handle write complete and write error interrupts */
	if (dier & DIER_WCIE) {
		/* the write wait queue is empty, so no write is pending
		 * and this interrupt is not for us */
		if (list_empty_careful(&imxdi->write_wait.task_list))
			return rc;

		/* DSR_WCF clears itself on a DSR read */
		dsr = __raw_readl(imxdi->ioaddr + DSR);
		if (dsr & (DSR_WCF | DSR_WEF)) {
			/* mask the interrupt */
			di_int_disable(imxdi, DIER_WCIE);

			/* save the status for the waiter */
			imxdi->dsr |= dsr;

			wake_up_interruptible(&imxdi->write_wait);
			rc = IRQ_HANDLED;
		}
	}

	
	/* handle the alarm interrupt */
	if (dier & DIER_CAIE) {
		/* DSR_CAF clears itself on a DSR read */
		dsr = __raw_readl(imxdi->ioaddr + DSR);
		if (dsr & DSR_CAF) {
			/* mask the interrupt */
			di_int_disable(imxdi, DIER_CAIE);

			/* finish the alarm handling in process context */
			schedule_work(&imxdi->work);
			rc = IRQ_HANDLED;
		}
	}
	return rc;
}
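The early bail-out above can skip the waitqueue lock because list_empty_careful() validates both the next and prev pointers, and every other writer to write_wait takes the queue lock. For context, the waiter side pairs with the handler roughly as follows; this is a minimal sketch, and di_wait_write_done plus the 1 ms timeout are assumptions rather than the driver's actual helper.

/* Sketch: block until dryice_norm_irq() above stores DSR_WCF or DSR_WEF
 * in imxdi->dsr and wakes us; helper name and timeout are assumed. */
static int di_wait_write_done(struct imxdi_dev *imxdi)
{
	int ret;

	ret = wait_event_interruptible_timeout(imxdi->write_wait,
			imxdi->dsr & (DSR_WCF | DSR_WEF),
			msecs_to_jiffies(1));
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* the interrupt never fired */

	return (imxdi->dsr & DSR_WEF) ? -EIO : 0;
}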
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	      kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					      link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		kvm_arch_async_page_present(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}
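The done list drained above is filled by KVM's async worker under the same spinlock, which is what makes the lockless list_empty_careful() probes in this file safe. A simplified sketch of that producer side; async_pf_publish_done is a made-up name, and the real worker also wakes the vcpu and manages references.

/* Sketch: publish a completed async page fault for the consumer above.
 * Every writer to the done list holds async_pf.lock. */
static void async_pf_publish_done(struct kvm_vcpu *vcpu,
				  struct kvm_async_pf *work)
{
	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);
}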
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	/* a completed item is already queued, so the guest will be
	 * notified anyway; no explicit wakeup event is needed */
	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued++;
	return 0;
}
Example #4
static int sep_list_from_area(const uint32_t prio, uint32_t order)
{
    struct page *page = NULL;
    free_area_t *queue = NULL;
    struct list_head *head, *item = NULL;

    queue = buddy_list.free_area + order;

    while(queue && order < NR_MEM_LISTS)
    {
        if(queue->nr_free_pages > 0) 
        {
            head = &(queue->lists[prio]);
            if(list_empty_careful(head))
            {
                ++order;
                ++queue;
            }
            else
            {
                /*DD("sep list order = %d. prio = %d", order, prio);*/
                if(free_list_del(head, order, &item) != 0)
                {
                    return -1;
                }

                page = list_entry(item, struct page, list);
                /* split the block: two buddies of half the size */
                --order;
                --(page->order);
                free_list_add(page, prio, order);
                /* the second buddy starts 2^order page frames later */
                page += 1u << order;
                free_list_add(page, prio, order);

                return order;
            }
        }

        ++order;
        ++queue;
    }

    /* fell through every order without finding a free block */
    return -1;
}
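The split arithmetic above is easiest to check with concrete numbers; here is a standalone sketch (plain C, independent of the allocator's types):

/* Sketch: splitting one order-n block yields two order-(n-1) buddies
 * whose first pages lie 2^(n-1) page frames apart. */
#include <stdio.h>

int main(void)
{
    unsigned int order = 3;              /* one block of 2^3 = 8 pages */
    unsigned int first = 0;              /* page index of the block    */
    unsigned int buddy;

    --order;                             /* split into two halves      */
    buddy = first + (1u << order);

    printf("order-%u halves start at pages %u and %u\n",
           order, first, buddy);         /* prints: pages 0 and 4      */
    return 0;
}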
Example #5
int buddy_list_add_line(struct page *page, const int order)
{
    if(page == NULL || order >= NR_MEM_LISTS)
    {
        printk("=");
        return -1;
    }

    struct buddy_list *header = buddy_list + order;
    struct list_head *head, *pos, *n;
    struct page *item_page = NULL;

    head = &(header->list);
    if(list_empty_careful(head))
    {
        list_add(&(page->list), &(header->list));
        ++buddy_list[order].nr_free_pages;
        return 0;
    }
    else
    {
        list_for_each_safe(pos, n, head)
        {
            item_page = list_entry(pos, struct page, list);
            if(page->address < item_page->address)
            {
                __list_add(&(page->list), pos->prev, pos);
                ++buddy_list[order].nr_free_pages;
                return 0;
            }

            if(pos->next == head)
            {
                __list_add(&(page->list), pos, n);
                ++buddy_list[order].nr_free_pages;
                return 0;
            }
        }
    }

    /* not reached: the loop above always inserts the page */
    return 0;
}
Example #6
static struct page * get_page_from_list(const uint32_t prio, const uint32_t order)
{
    free_area_t *queue = NULL;
    struct list_head *head = NULL, *item = NULL;
    struct page *page = NULL;

    queue = buddy_list.free_area + order;
    head = queue->lists + prio;

    if(!list_empty_careful(head) && queue->nr_free_pages > 0)
    {
        if(free_list_del(head, order, &item) != 0)
        {
            DD("free_list_del error.");
            return NULL;
        }
        page = list_entry(item, struct page, list);
        return page;
    }

    return NULL;
}
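A plausible caller ties this helper together with sep_list_from_area() above, splitting larger blocks until the requested order is populated; buddy_alloc below is a sketch under that assumption, not the allocator's real entry point.

/* Sketch: take a block of 2^order pages at priority prio, splitting a
 * higher-order block whenever the exact-order list is empty. */
static struct page *buddy_alloc(const uint32_t prio, const uint32_t order)
{
    struct page *page;

    while ((page = get_page_from_list(prio, order)) == NULL)
    {
        /* nothing at this order: carve halves out of a larger block */
        if (sep_list_from_area(prio, order + 1) < 0)
        {
            return NULL;    /* no larger block available either */
        }
    }

    return page;
}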
Example #7
/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
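finish_wait() is the tail of the classic open-coded wait loop. A minimal sketch of the idiom it cleans up after; the condition callback is a placeholder for the caller's actual predicate.

/* Sketch: the canonical prepare_to_wait()/finish_wait() pattern. */
static void wait_for_condition(wait_queue_head_t *q, bool (*condition)(void))
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(q, &wait, TASK_INTERRUPTIBLE);
		if (condition())
			break;		/* woken with the condition true */
		schedule();
	}
	finish_wait(q, &wait);
}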
Example #8
/**
 * process_cursors - do action on each cursor attached to inode
 * @inode: inode whose cursors are to be processed
 * @act: action to do
 *
 * Finds all cursors of @inode in reiser4's super-block radix tree of
 * cursors and performs the action specified by @act on each of them.
 */
static void process_cursors(struct inode *inode, enum cursor_action act)
{
	oid_t oid;
	dir_cursor *start;
	struct list_head *head;
	reiser4_context *ctx;
	struct d_cursor_info *info;

	/* this can be called by
	 *
	 * kswapd->...->prune_icache->..reiser4_destroy_inode
	 *
	 * without reiser4_context
	 */
	ctx = reiser4_init_context(inode->i_sb);
	if (IS_ERR(ctx)) {
		warning("vs-23", "failed to init context");
		return;
	}

	assert("nikita-3558", inode != NULL);

	info = d_info(inode);
	oid = get_inode_oid(inode);
	spin_lock_inode(inode);
	head = get_readdir_list(inode);
	spin_lock(&d_lock);
	/* find any cursor for this oid; a reference to it hangs off the
	 * radix tree */
	start = lookup(info, (unsigned long)oid);
	if (start != NULL) {
		dir_cursor *scan;
		reiser4_file_fsdata *fsdata;

		/* process circular list of cursors for this oid */
		scan = start;
		do {
			dir_cursor *next;

			next = list_entry(scan->list.next, dir_cursor, list);
			fsdata = scan->fsdata;
			assert("nikita-3557", fsdata != NULL);
			if (scan->key.oid == oid) {
				switch (act) {
				case CURSOR_DISPOSE:
					list_del_init(&fsdata->dir.linkage);
					break;
				case CURSOR_LOAD:
					list_add(&fsdata->dir.linkage, head);
					break;
				case CURSOR_KILL:
					kill_cursor(scan);
					break;
				}
			}
			if (scan == next)
				/* last cursor was just killed */
				break;
			scan = next;
		} while (scan != start);
	}
	spin_unlock(&d_lock);
	/* check that we killed 'em all */
	assert("nikita-3568",
	       ergo(act == CURSOR_KILL,
		    list_empty_careful(get_readdir_list(inode))));
	assert("nikita-3569",
	       ergo(act == CURSOR_KILL, lookup(info, oid) == NULL));
	spin_unlock_inode(inode);
	reiser4_exit_context(ctx);
}