Example #1
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
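Every snippet on this page follows the same basic shape: call radix_tree_preload() while sleeping is still allowed, take the lock that protects the tree, insert, drop the lock, and finish with radix_tree_preload_end() to re-enable preemption. The sketch below distills that pattern; the example_tree, example_lock and example_insert names are hypothetical and do not come from any of the quoted code.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static RADIX_TREE(example_tree, GFP_ATOMIC);	/* hypothetical tree */
static DEFINE_SPINLOCK(example_lock);		/* hypothetical lock protecting example_tree */

static int example_insert(unsigned long index, void *item, gfp_t gfp_mask)
{
	int err;

	/* May sleep if gfp_mask allows it; on success preemption is left disabled. */
	err = radix_tree_preload(gfp_mask);
	if (err)
		return err;

	spin_lock(&example_lock);
	/* After a successful preload this cannot fail with -ENOMEM; -EEXIST is still possible. */
	err = radix_tree_insert(&example_tree, index, item);
	spin_unlock(&example_lock);

	/* Re-enables preemption; must follow every successful preload, even on error. */
	radix_tree_preload_end();
	return err;
}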
Example #2
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_preload(GFP_NOFS);
	if (error != 0)
		goto out;

	/* Lock the request! */
	nfs_lock_request_dontget(req);

	spin_lock(&inode->i_lock);
	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error);
	if (!nfsi->npages) {
		igrab(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
				NFS_PAGE_TAG_LOCKED);
	spin_unlock(&inode->i_lock);
	radix_tree_preload_end();
out:
	return error;
}
Example #3
/**
 * bfq_cic_link - add @cic to @ioc.
 * @bfqd: bfq_data @cic refers to.
 * @ioc: io_context @cic belongs to.
 * @cic: the cic to link.
 * @gfp_mask: the mask to use for radix tree preallocations.
 *
 * Add @cic to @ioc, using @bfqd as the search key.  This enables us to
 * lookup the process specific cfq io context when entered from the block
 * layer.  Also adds @cic to a per-bfqd list, used when this queue is
 * removed.
 */
static int bfq_cic_link(struct bfq_data *bfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (ret == 0) {
		cic->ioc = ioc;

		/* No write-side locking, cic is not published yet. */
		rcu_assign_pointer(cic->key, bfqd);

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->bfq_radix_root,
					bfqd->cic_index, cic);
		if (ret == 0)
			hlist_add_head_rcu(&cic->cic_list, &ioc->bfq_cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (ret == 0) {
			spin_lock_irqsave(bfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &bfqd->cic_list);
			spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
		}
	}

	if (ret != 0)
		printk(KERN_ERR "bfq: cic link failed!\n");

	return ret;
}
Example #4
/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		set_page_no_new_refs(page);
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		end_page_no_new_refs(page);
		radix_tree_preload_end();
	}
	return error;
}
Example #5
/**
 * ima_inode_alloc - allocate an iint associated with an inode
 * @inode: pointer to the inode
 */
int ima_inode_alloc(struct inode *inode)
{
	struct ima_iint_cache *iint = NULL;
	int rc = 0;

	if (!ima_enabled)
		return 0;

	iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
	if (!iint)
		return -ENOMEM;

	rc = radix_tree_preload(GFP_NOFS);
	if (rc < 0)
		goto out;

	spin_lock(&ima_iint_lock);
	rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
	spin_unlock(&ima_iint_lock);
	radix_tree_preload_end();
out:
	if (rc < 0)
		kmem_cache_free(iint_cache, iint);

	return rc;
}
Example #6
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		page_cache_get(page);
		SetPageSwapCache(page);
		set_page_private(page, entry.val);

		spin_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (likely(!error)) {
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		spin_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();

		if (unlikely(error)) {
			set_page_private(page, 0UL);
			ClearPageSwapCache(page);
			page_cache_release(page);
		}
	}
	return error;
}
Example #7
/* Allocate memory for the iint associated with the inode
 * from the iint_cache slab, initialize the iint, and
 * insert it into the radix tree.
 *
 * On success return a pointer to the iint; on failure return NULL.
 */
struct ima_iint_cache *ima_iint_insert(struct inode *inode)
{
    struct ima_iint_cache *iint = NULL;
    int rc = 0;

    if (!ima_initialized)
        return iint;
    iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
    if (!iint)
        return iint;

    rc = radix_tree_preload(GFP_NOFS);
    if (rc < 0)
        goto out;

    spin_lock(&ima_iint_lock);
    rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
    spin_unlock(&ima_iint_lock);
out:
    if (rc < 0) {
        kmem_cache_free(iint_cache, iint);
        if (rc == -EEXIST) {
            spin_lock(&ima_iint_lock);
            iint = radix_tree_lookup(&ima_iint_store,
                                     (unsigned long)inode);
            spin_unlock(&ima_iint_lock);
        } else
            iint = NULL;
    }
    radix_tree_preload_end();
    return iint;
}
Example #8
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
			 *root, u64 blocknr, u64 num_blocks, int pin)
{
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	int ret;
	struct btrfs_extent_item *ei;
	struct btrfs_key ins;
	u32 refs;

	BUG_ON(pin && num_blocks != 1);
	key.objectid = blocknr;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_blocks;

	find_free_extent(trans, root, 0, 0, (u64)-1, &ins);
	btrfs_init_path(&path);
	ret = btrfs_search_slot(trans, extent_root, &key, &path, -1, 1);
	if (ret) {
		btrfs_print_tree(extent_root, extent_root->node);
		printf("failed to find %llu\n",
		       (u64)key.objectid);
		BUG();
	}
	ei = btrfs_item_ptr(&path.nodes[0]->leaf, path.slots[0],
			    struct btrfs_extent_item);
	BUG_ON(ei->refs == 0);
	refs = btrfs_extent_refs(ei) - 1;
	btrfs_set_extent_refs(ei, refs);
	if (refs == 0) {
		u64 super_blocks_used;
		if (pin) {
			int err;
			unsigned long bl = blocknr;
			radix_tree_preload(GFP_KERNEL);
			err = radix_tree_insert(&info->pinned_radix,
						blocknr, (void *)bl);
			BUG_ON(err);
			radix_tree_preload_end();
		}
		super_blocks_used = btrfs_super_blocks_used(info->disk_super);
		btrfs_set_super_blocks_used(info->disk_super,
					    super_blocks_used - num_blocks);
		ret = btrfs_del_item(trans, extent_root, &path);
		if (!pin && extent_root->fs_info->last_insert.objectid >
		    blocknr)
			extent_root->fs_info->last_insert.objectid = blocknr;
		if (ret)
			BUG();
		ret = update_block_group(trans, root, blocknr, num_blocks, 0);
		BUG_ON(ret);
	}
	btrfs_release_path(extent_root, &path);
	finish_current_insert(trans, extent_root);
	return ret;
}
Example #9
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
		lock_page(obh->b_page);
retry:
		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
		if (err)
			goto failed_unlock;
		/* BUG_ON(oldkey != obh->b_page->index); */
		if (unlikely(oldkey != obh->b_page->index))
			NILFS_PAGE_BUG(obh->b_page,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

		spin_lock_irq(&btnc->tree_lock);
		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
		spin_unlock_irq(&btnc->tree_lock);
		radix_tree_preload_end();
		if (!err)
			return 0;
		else if (err != -EEXIST)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		unlock_page(obh->b_page);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (!nbh)
		return -ENOMEM;

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

 failed_unlock:
	unlock_page(obh->b_page);
	return err;
}
Example #10
/**
 * add_to_swap_cache: insert @page into the swap cache
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}
Example #11
/**
 * insert_cursor - allocate file_fsdata, insert cursor to tree and hash table
 * @cursor:
 * @file:
 * @inode:
 *
 * Allocates reiser4_file_fsdata, attaches it to @cursor, inserts cursor to
 * reiser4 super block's hash table and radix tree, and adds detachable
 * readdir state to @file.
 */
static int insert_cursor(dir_cursor *cursor, struct file *file,
			 struct inode *inode)
{
	int result;
	reiser4_file_fsdata *fsdata;

	memset(cursor, 0, sizeof *cursor);

	/* this is either first call to readdir, or rewind. Anyway, create new
	 * cursor. */
	fsdata = create_fsdata(NULL);
	if (fsdata != NULL) {
		result = radix_tree_preload(reiser4_ctx_gfp_mask_get());
		if (result == 0) {
			struct d_cursor_info *info;
			oid_t oid;

			info = d_info(inode);
			oid = get_inode_oid(inode);
			/* cid occupies higher 12 bits of f->f_pos. Don't
			 * allow it to become negative: this confuses
			 * nfsd_readdir() */
			cursor->key.cid = (++cid_counter) & 0x7ff;
			cursor->key.oid = oid;
			cursor->fsdata = fsdata;
			cursor->info = info;
			cursor->ref = 1;

			spin_lock_inode(inode);
			/* install cursor as @f's private_data, discarding old
			 * one if necessary */
#if REISER4_DEBUG
			if (file->private_data)
				warning("", "file has fsdata already");
#endif
			clean_fsdata(file);
			free_file_fsdata_nolock(file);
			file->private_data = fsdata;
			fsdata->cursor = cursor;
			spin_unlock_inode(inode);
			spin_lock(&d_lock);
			/* insert cursor into hash table */
			d_cursor_hash_insert(&info->table, cursor);
			/* and chain it into radix-tree */
			bind_cursor(cursor, (unsigned long)oid);
			spin_unlock(&d_lock);
			radix_tree_preload_end();
			file->f_pos = ((__u64) cursor->key.cid) << CID_SHIFT;
		}
	} else
		result = RETERR(-ENOMEM);
	return result;
}
Example #12
/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking %current->io_context and @q exists.  If either
 * io_context and/or icq don't exist, they will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc;
	struct io_cq *icq;

	/* allocate stuff */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
Example #13
void si_pid_set_slow(struct super_block *sb)
{
	int err;
	struct au_sbinfo *sbinfo;

	AuDebugOn(si_pid_test_slow(sb));

	sbinfo = au_sbi(sb);
	err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
	AuDebugOn(err);
	spin_lock(&sbinfo->au_si_pid.tree_lock);
	err = radix_tree_insert(&sbinfo->au_si_pid.tree, current->pid,
				/*any valid ptr*/sb);
	spin_unlock(&sbinfo->au_si_pid.tree_lock);
	AuDebugOn(err);
	radix_tree_preload_end();
}
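si_pid_set_slow() above preloads with GFP_NOFS | __GFP_NOFAIL, so the preallocation is not expected to fail and both return values are only checked with debug assertions. A minimal sketch of that no-fail pattern, under the same assumption and with hypothetical pid_tree/pid_lock/example_nofail_insert names:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/bug.h>

static RADIX_TREE(pid_tree, GFP_ATOMIC);	/* hypothetical tree */
static DEFINE_SPINLOCK(pid_lock);		/* hypothetical lock */

/* Sketch only: relies on __GFP_NOFAIL keeping radix_tree_preload() from failing. */
static void example_nofail_insert(unsigned long key, void *ptr)
{
	int err;

	err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
	WARN_ON(err);			/* __GFP_NOFAIL: not expected to fail */

	spin_lock(&pid_lock);
	err = radix_tree_insert(&pid_tree, key, ptr);
	spin_unlock(&pid_lock);
	WARN_ON(err);			/* -EEXIST here would indicate a caller bug */

	radix_tree_preload_end();
}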
Example #14
static struct q_irq_data *qpnpint_alloc_irq_data(
					struct q_chip_data *chip_d,
					unsigned long hwirq)
{
	struct q_irq_data *irq_d;
	struct q_perip_data *per_d;
	int rc;

	irq_d = kzalloc(sizeof(struct q_irq_data), GFP_KERNEL);
	if (!irq_d)
		return ERR_PTR(-ENOMEM);

	/**
	 * The Peripheral Tree is keyed from the slave + per_id. We're
	 * ignoring the irq bits here since this peripheral structure
	 * should be common for all irqs on the same peripheral.
	 */
	per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7));
	if (!per_d) {
		per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL);
		if (!per_d) {
			rc = -ENOMEM;
			goto alloc_fail;
		}
		spin_lock_init(&per_d->lock);
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			goto alloc_fail;
		rc = radix_tree_insert(&chip_d->per_tree,
				  (hwirq & ~0x7), per_d);
		if (rc)
			goto alloc_fail;
		radix_tree_preload_end();
	}
	irq_d->per_d = per_d;

	return irq_d;

alloc_fail:
	kfree(per_d);
	kfree(irq_d);
	return ERR_PTR(rc);
}
Example #15
/* 
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
Example #16
File: swap_state.c  Project: oldzhu/linux
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                     struct vm_area_struct *vma, unsigned long addr,
                                     bool *new_page_allocated)
{
    struct page *found_page, *new_page = NULL;
    struct address_space *swapper_space = swap_address_space(entry);
    int err;
    *new_page_allocated = false;

    do {
        /*
         * First check the swap cache.  Since this is normally
         * called after lookup_swap_cache() failed, re-calling
         * that would confuse statistics.
         */
        found_page = find_get_page(swapper_space, entry.val);
        if (found_page)
            break;

        /*
         * Get a new page to read into from swap.
         */
        if (!new_page) {
            new_page = alloc_page_vma(gfp_mask, vma, addr);
            if (!new_page)
                break;		/* Out of memory */
        }

        /*
         * call radix_tree_preload() while we can wait.
         */
        err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
        if (err)
            break;

        /*
         * Swap entry may have been freed since our caller observed it.
         */
        err = swapcache_prepare(entry);
        if (err == -EEXIST) {
            radix_tree_preload_end();
            /*
             * We might race against get_swap_page() and stumble
             * across a SWAP_HAS_CACHE swap_map entry whose page
             * has not been brought into the swapcache yet, while
             * the other end is scheduled away waiting on discard
             * I/O completion at scan_swap_map().
             *
             * In order to avoid turning this transitory state
             * into a permanent loop around this -EEXIST case
             * if !CONFIG_PREEMPT and the I/O completion happens
             * to be waiting on the CPU waitqueue where we are now
             * busy looping, we just conditionally invoke the
             * scheduler here, if there are some more important
             * tasks to run.
             */
            cond_resched();
            continue;
        }
        if (err) {		/* swp entry is obsolete ? */
            radix_tree_preload_end();
            break;
        }

        /* May fail (-ENOMEM) if radix-tree node allocation failed. */
        __SetPageLocked(new_page);
        __SetPageSwapBacked(new_page);
        err = __add_to_swap_cache(new_page, entry);
        if (likely(!err)) {
            radix_tree_preload_end();
            /*
             * Initiate read into locked page and return.
             */
            lru_cache_add_anon(new_page);
            *new_page_allocated = true;
            return new_page;
        }
        radix_tree_preload_end();
        __ClearPageLocked(new_page);
        /*
         * add_to_swap_cache() doesn't return -EEXIST, so we can safely
         * clear SWAP_HAS_CACHE flag.
         */
        swapcache_free(entry);
    } while (err != -ENOMEM);

    if (new_page)
        put_page(new_page);
    return found_page;
}
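This example (and Example #23 below) uses radix_tree_maybe_preload() rather than radix_tree_preload(): the "maybe" variant preallocates nodes only when the gfp mask allows blocking, and otherwise just disables preemption and returns 0, leaving the subsequent insert free to fail with -ENOMEM. A hedged sketch of that variant follows; the helper name and parameters are hypothetical.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

/* Sketch only: @tree, @lock and @item are assumed to be managed by the caller. */
static int example_insert_any_context(struct radix_tree_root *tree,
				      spinlock_t *lock,
				      unsigned long index, void *item,
				      gfp_t gfp_mask)
{
	int err;

	/*
	 * Preallocates nodes only when gfp_mask allows sleeping; with a
	 * non-blocking mask it just disables preemption, so the insert
	 * below may still return -ENOMEM.
	 */
	err = radix_tree_maybe_preload(gfp_mask);
	if (err)
		return err;

	spin_lock(lock);
	err = radix_tree_insert(tree, index, item);
	spin_unlock(lock);

	radix_tree_preload_end();
	return err;
}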
Example #17
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
Example #18
/*
 * zswap_get_swap_cache_page
 *
 * This is an adaption of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * If success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;

	*retpage = NULL;
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page(GFP_KERNEL);
			if (!new_page)
				break; /* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) { /* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) { /* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			lru_cache_add_anon(new_page);
			*retpage = new_page;
			return ZSWAP_SWAPCACHE_NEW;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	if (!found_page)
		return ZSWAP_SWAPCACHE_FAIL;
	*retpage = found_page;
	return ZSWAP_SWAPCACHE_EXIST;
}
Example #19
// TODO: Bug 1995015: use a more efficient data structure for
// physically-contiguous allocations.
NV_STATUS uvm_pmm_sysmem_mappings_add_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings,
                                                  NvU64 dma_addr,
                                                  NvU64 virt_addr,
                                                  NvU64 region_size,
                                                  uvm_va_block_t *va_block,
                                                  uvm_processor_id_t owner)
{
    int ret;
    uvm_reverse_map_t *new_reverse_map;
    NvU64 key;
    const NvU64 base_key = dma_addr / PAGE_SIZE;
    const NvU32 num_pages = region_size / PAGE_SIZE;
    uvm_page_index_t page_index;

    UVM_ASSERT(va_block);
    UVM_ASSERT(va_block->va_range);
    UVM_ASSERT(IS_ALIGNED(dma_addr, region_size));
    UVM_ASSERT(IS_ALIGNED(virt_addr, region_size));
    UVM_ASSERT(region_size <= UVM_VA_BLOCK_SIZE);
    UVM_ASSERT(is_power_of_2(region_size));
    UVM_ASSERT(uvm_va_block_contains_address(va_block, virt_addr));
    UVM_ASSERT(uvm_va_block_contains_address(va_block, virt_addr + region_size - 1));
    uvm_assert_mutex_locked(&va_block->lock);

    if (!sysmem_mappings->gpu->access_counters_supported)
        return NV_OK;

    new_reverse_map = kmem_cache_zalloc(g_reverse_page_map_cache, NV_UVM_GFP_FLAGS);
    if (!new_reverse_map)
        return NV_ERR_NO_MEMORY;

    page_index = uvm_va_block_cpu_page_index(va_block, virt_addr);

    new_reverse_map->va_block = va_block;
    new_reverse_map->region   = uvm_va_block_region(page_index, page_index + num_pages);
    new_reverse_map->owner    = owner;

    for (key = base_key; key < base_key + num_pages; ++key) {
        // Pre-load the tree to allocate memory outside of the table lock. This
        // returns with preemption disabled.
        ret = radix_tree_preload(NV_UVM_GFP_FLAGS);
        if (ret != 0) {
            NvU64 remove_key;

            uvm_spin_lock(&sysmem_mappings->reverse_map_lock);
            for (remove_key = base_key; remove_key < key; ++remove_key)
                (void *)radix_tree_delete(&sysmem_mappings->reverse_map_tree, remove_key);
            uvm_spin_unlock(&sysmem_mappings->reverse_map_lock);

            kmem_cache_free(g_reverse_page_map_cache, new_reverse_map);

            return NV_ERR_NO_MEMORY;
        }

        uvm_spin_lock(&sysmem_mappings->reverse_map_lock);
        ret = radix_tree_insert(&sysmem_mappings->reverse_map_tree, key, new_reverse_map);
        uvm_spin_unlock(&sysmem_mappings->reverse_map_lock);
        UVM_ASSERT(ret == 0);

        // This re-enables preemption
        radix_tree_preload_end();
    }


    return NV_OK;
}
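The error path above rolls back the partially built range with radix_tree_delete() under the same spinlock. A minimal sketch of such a range-removal helper, with hypothetical names; unlike insertion, deletion needs no preload because it only frees nodes:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

/* Sketch only: removes num_keys consecutive entries starting at base_key. */
static void example_remove_range(struct radix_tree_root *tree,
				 spinlock_t *lock,
				 unsigned long base_key,
				 unsigned long num_keys)
{
	unsigned long key;

	spin_lock(lock);
	for (key = base_key; key < base_key + num_keys; ++key)
		radix_tree_delete(tree, key);	/* no preload needed for deletion */
	spin_unlock(lock);
}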
Example #20
/**
 * nilfs_btnode_prepare_change_key
 *  prepare to move contents of the block for old key to one of new key.
 *  the old buffer will not be removed, but might be reused for new buffer.
 *  it might return -ENOMEM because of memory allocation errors,
 *  and might return -EIO because of disk read errors.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
		lock_page(obh->b_page);
		/*
		 * We cannot call radix_tree_preload for the kernels older
		 * than 2.6.23, because it is not exported for modules.
		 */
retry:
		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
		if (err)
			goto failed_unlock;
		/* BUG_ON(oldkey != obh->b_page->index); */
		if (unlikely(oldkey != obh->b_page->index))
			NILFS_PAGE_BUG(obh->b_page,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

		spin_lock_irq(&btnc->tree_lock);
		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
		spin_unlock_irq(&btnc->tree_lock);
		/*
		 * Note: page->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() will be called.
		 * To protect the page in intermediate state, the page lock
		 * is held.
		 */
		radix_tree_preload_end();
		if (!err)
			return 0;
		else if (err != -EEXIST)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		unlock_page(obh->b_page);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (!nbh)
		return -ENOMEM;

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

 failed_unlock:
	unlock_page(obh->b_page);
	return err;
}
Example #21
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, iflags);

	
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
Example #22
File: xfs_iget.c  Project: longqzh/chronnOS
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	spin_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
Example #23
File: swap_state.c  Project: mdamt/linux
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		put_swap_page(new_page, entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}
Example #24
/*
 * zcache_get_swap_cache_page
 *
 * This is an adaption of read_swap_cache_async()
 *
 * If success, page is returned in retpage
 * Returns 0 if page was already in the swap cache, page is not locked
 * Returns 1 if the new page needs to be populated, page is locked
 */
static int zcache_get_swap_cache_page(int type, pgoff_t offset,
				struct page *new_page)
{
	struct page *found_page;
	swp_entry_t entry = swp_entry(type, offset);
	int err;

	BUG_ON(new_page == NULL);
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			return 0;

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) { /* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) { /* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			lru_cache_add_anon(new_page);
			return 1;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		/* FIXME: is it possible to get here without err==-ENOMEM?
		 * If not, we can dispense with the do loop, use goto retry */
	} while (err != -ENOMEM);

	return -ENOMEM;
}