Example no. 1
aku_Status PageHeader::add_entry( const aku_ParamId param
                                , const aku_Timestamp timestamp
                                , const aku_MemRange &range )
{
    if (count != 0) {
        // Require >= timestamp
        if (timestamp < page_index(count - 1)->timestamp) {
            return AKU_EBAD_ARG;
        }
    }

    const auto SPACE_REQUIRED = sizeof(aku_Entry)              // entry header
                              + range.length                   // data size (in bytes)
                              + sizeof(aku_EntryIndexRecord);  // offset inside page_index

    const auto ENTRY_SIZE = sizeof(aku_Entry) + range.length;

    if (!range.length) {
        return AKU_EBAD_DATA;
    }
    if (SPACE_REQUIRED > get_free_space()) {
        return AKU_EOVERFLOW;
    }
    char* free_slot = payload + next_offset;
    aku_Entry* entry = reinterpret_cast<aku_Entry*>(free_slot);
    entry->param_id = param;
    entry->length = range.length;
    memcpy((void*)&entry->value, range.address, range.length);
    page_index(count)->offset = next_offset;
    page_index(count)->timestamp = timestamp;
    next_offset += ENTRY_SIZE;
    count++;
    return AKU_SUCCESS;
}
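A note on the accounting above: SPACE_REQUIRED covers the entry header, the payload bytes, and one index record, while next_offset advances only by ENTRY_SIZE, because the index record is written through page_index(count) rather than into the payload area. A stand-alone sketch of that arithmetic, using illustrative struct sizes (the real aku_Entry and aku_EntryIndexRecord layouts may differ):

#include <stdio.h>
#include <stdint.h>

/* illustrative stand-ins only; not the real Akumuli definitions */
struct entry_hdr { uint32_t param_id; uint32_t length; };     /* 8 bytes          */
struct index_rec { uint32_t offset;   uint64_t timestamp; };  /* 16 bytes, padded */

int main(void) {
    size_t range_length = 8;  /* hypothetical payload size */

    size_t entry_size     = sizeof(struct entry_hdr) + range_length;
    size_t space_required = entry_size + sizeof(struct index_rec);

    /* free space must cover both, but the write cursor moves only by entry_size */
    printf("ENTRY_SIZE=%zu SPACE_REQUIRED=%zu\n", entry_size, space_required);
    return 0;
}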
Example no. 2
static void __tux3_test_set_page_writeback(struct page *page, int old_writeback)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (!old_writeback) {
			/* If PageForked(), don't touch tag */
			if (!PageForked(page))
				radix_tree_tag_set(&mapping->page_tree,
						   page_index(page),
						   PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		/* If PageForked(), don't touch tag */
		if (!PageDirty(page) && !PageForked(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		radix_tree_tag_clear(&mapping->page_tree,
				     page_index(page),
				     PAGECACHE_TAG_TOWRITE);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	if (!old_writeback) {
		account_page_writeback(page);
		tux3_accout_set_writeback(page);
	}
}
Example no. 3
int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		inc_zone_page_state(page, NR_WRITEBACK);
	return ret;
}
Example no. 4
int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret)
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		inc_zone_page_state(page, NR_WRITEBACK);
	return ret;
}
Example no. 5
void
ik_munmap_from_segment(ikptr base, unsigned long int size, ikpcb* pcb){
  assert(base >= pcb->memory_base);
  assert((base+size) <= pcb->memory_end);
  assert(size == align_to_next_page(size));
  unsigned int* p = 
    ((unsigned int*)(long)(pcb->segment_vector)) + page_index(base);
  unsigned int* s = 
    ((unsigned int*)(long)(pcb->dirty_vector)) + page_index(base);
  unsigned int* q = p + page_index(size);
  while(p < q){
    assert(*p != hole_mt);
    *p = hole_mt; /* holes */
    *s = 0;  
    p++; s++;
  }
  ikpage* r = pcb->uncached_pages;
  if (r){
    ikpage* cache = pcb->cached_pages;
    do{
      r->base = base;
      ikpage* next = r->next;
      r->next = cache;
      cache = r;
      r = next;
      base += pagesize;
      size -= pagesize;
    } while(r && size);
    pcb->cached_pages = cache;
    pcb->uncached_pages = r;
  }
  if(size){
    ik_munmap(base, size);
  }
}
Example no. 6
static void
set_segment_type(ikptr base, unsigned long int size, unsigned int type, ikpcb* pcb){
  assert(base >= pcb->memory_base);
  assert((base+size) <= pcb->memory_end);
  assert(size == align_to_next_page(size));
  unsigned int* p = pcb->segment_vector + page_index(base);
  unsigned int* q = p + page_index(size);
  while(p < q){
    *p = type;
    p++;
  }
}
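In the Ikarus/Vicare runtime snippets (examples 5 and 6 here, and 18 and 30 further down), page_index appears to map a byte address or byte size to a page number; example 18 inverts it with i << pageshift. A minimal user-space sketch of that mapping, assuming hypothetical 4 KiB pages:

#include <assert.h>
#include <stdio.h>

#define pageshift 12                     /* hypothetical: 4 KiB pages */
#define pagesize  (1UL << pageshift)

/* assumed behaviour of page_index(): byte address (or size) -> page number */
static unsigned long page_index(unsigned long x) {
    return x >> pageshift;
}

int main(void) {
    unsigned long base = 0x20000;        /* page-aligned segment start */
    unsigned long size = 3 * pagesize;   /* segment spans three pages  */

    /* the loops above walk page_index(size) vector slots starting at page_index(base) */
    unsigned long first  = page_index(base);
    unsigned long npages = page_index(size);
    for (unsigned long i = first; i < first + npages; i++)
        printf("slot %lu covers address 0x%lx\n", i, i << pageshift);

    assert((first << pageshift) == base);  /* inverse used in example 18 */
    return 0;
}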
Example no. 7
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret)
		dec_zone_page_state(page, NR_WRITEBACK);
	return ret;
}
Example no. 8
const aku_Entry *PageHeader::read_entry_at(uint32_t index) const {
    if (index < count) {
        auto offset = page_index(index)->offset;
        return read_entry(offset);
    }
    return 0;
}
Example no. 9
void nilfs_bh_debug(const char *fname, int line, struct buffer_head *bh,
		    const char *m, ...)
{
	struct page *page = bh->b_page;
	int len;
	char b[MSIZ];
	va_list args;

	len = snprintf(b, MSIZ, "BH %p ", bh);
	va_start(args, m);
	len += vsnprintf(b + len, MSIZ - len, m, args);
	va_end(args);

	if (bh == NULL) {
		printk(KERN_DEBUG "%s: bh=NULL %s at %d\n", b, fname, line);
		return;
	}
	len += snprintf(b + len, MSIZ - len,
			": page=%p cnt=%d blk#=%llu lst=%d",
			page, atomic_read(&bh->b_count),
			(unsigned long long)bh->b_blocknr,
			!list_empty(&bh->b_assoc_buffers));
	if (page)
		len += snprintf(b + len, MSIZ - len,
				" pagecnt=%d pageindex=%lu",
				page_count(page), page_index(page));
	len += snprintf(b + len, MSIZ - len, " %s(%d) state=", fname, line);
	len += snprint_bh_state(b + len, MSIZ - len, bh);

	printk(KERN_DEBUG "%s\n", b);
}
Example no. 10
ikptr
ikrt_set_code_reloc_vector(ikptr code, ikptr vec, ikpcb* pcb){
  ref(code, off_code_reloc_vector) = vec;
  ik_relocate_code(code-vector_tag);
  ((unsigned int*)(long)pcb->dirty_vector)[page_index(code)] = -1;
  return void_object;
}
Example no. 11
/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
Example no. 12
void dump_lniobuf(struct niobuf_local *nb)
{
	CDEBUG(D_RPCTRACE,
	       "niobuf_local: file_offset="LPD64", len=%d, page=%p, rc=%d\n",
	       nb->lnb_file_offset, nb->lnb_len, nb->lnb_page, nb->lnb_rc);
	CDEBUG(D_RPCTRACE, "nb->page: index = %ld\n",
	       nb->lnb_page ? page_index(nb->lnb_page) : -1);
}
Example no. 13
/*
 * Find a request
 */
static inline struct nfs_page *
_nfs_find_request(struct inode *inode, struct page *page)
{
	struct list_head	*head, *pos;
	unsigned long pg_idx = page_index(page);

	head = &inode->u.nfs_i.writeback;
	list_for_each_prev(pos, head) {
		struct nfs_page *req = nfs_inode_wb_entry(pos);
		unsigned long found_idx = page_index(req->wb_page);

		if (pg_idx < found_idx)
			continue;
		if (pg_idx != found_idx)
			break;
		req->wb_count++;
		return req;
	}
	return NULL;
}
Example no. 14
/*
 * Copy of __set_page_dirty() without __mark_inode_dirty(). Caller
 * decides whether mark inode dirty or not.
 */
static void __tux3_set_page_dirty(struct page *page,
				  struct address_space *mapping, int warn)
{
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
}
Example no. 15
/*
 * Insert a write request into an inode
 * Note: we sort the list in order to be able to optimize nfs_find_request()
 *	 & co. for the 'write append' case. For 2.5 we may want to consider
 *	 some form of hashing so as to perform well on random writes.
 */
static inline void
nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct list_head *pos, *head;
	unsigned long pg_idx = page_index(req->wb_page);

	if (!list_empty(&req->wb_hash))
		return;
	if (!NFS_WBACK_BUSY(req))
		printk(KERN_ERR "NFS: unlocked request attempted hashed!\n");
	head = &inode->u.nfs_i.writeback;
	if (list_empty(head))
		igrab(inode);
	list_for_each_prev(pos, head) {
		struct nfs_page *entry = nfs_inode_wb_entry(pos);
		if (page_index(entry->wb_page) < pg_idx)
			break;
	}
	inode->u.nfs_i.npages++;
	list_add(&req->wb_hash, pos);
	req->wb_count++;
}
Example no. 16
/*
 * Internal use only
 */
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
			      const struct buffer_head *bh)
{
	struct buffer_head *pbh;
	__u64 key;

	key = page_index(bh->b_page) << (PAGE_SHIFT -
					 bmap->b_inode->i_blkbits);
	for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
		key++;

	return key;
}
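The key combines the page number with the buffer's position inside the page: a page holds 1 << (PAGE_SHIFT - i_blkbits) blocks, so the shift converts the page index to a block index and the loop adds the offset of bh within its page. A stand-alone sketch of the same arithmetic, assuming hypothetical 4 KiB pages and 1 KiB blocks:

#include <stdio.h>

#define PAGE_SHIFT 12   /* hypothetical: 4 KiB pages  */
#define BLKBITS    10   /* hypothetical: 1 KiB blocks */

int main(void) {
    unsigned long      pg_idx      = 3;  /* page_index(bh->b_page)                 */
    unsigned long long buf_in_page = 1;  /* steps taken by the page_buffers() loop */

    /* 1 << (PAGE_SHIFT - BLKBITS) = 4 blocks per page here */
    unsigned long long key =
        ((unsigned long long)pg_idx << (PAGE_SHIFT - BLKBITS)) + buf_in_page;

    printf("key = %llu\n", key);  /* 3 * 4 + 1 = 13 */
    return 0;
}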
Example no. 17
/*
 * Copy of __set_page_dirty() without __mark_inode_dirty(). Caller
 * decides whether mark inode dirty or not.
 */
void __tux3_set_page_dirty_account(struct page *page,
				   struct address_space *mapping, int warn)
{
	unsigned long flags;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
}
Example no. 18
void ik_delete_pcb(ikpcb* pcb){
  ikpage* p = pcb->cached_pages;
  pcb->cached_pages = 0;
  pcb->uncached_pages = 0;
  while(p){
    ik_munmap(p->base, pagesize);
    p = p->next;
  }
  ik_munmap(pcb->cached_pages_base, pcb->cached_pages_size);
  {
    int i;
    for(i=0; i<generation_count; i++){
      ik_ptr_page* p = pcb->protected_list[i];
      while(p){
        ik_ptr_page* next = p->next;
        ik_munmap((ikptr)(long)p, pagesize);
        p = next;
      }
    }
  }
  ikptr base = pcb->memory_base;
  ikptr end = pcb->memory_end;
  unsigned int* segment_vec = pcb->segment_vector;
  long int i = page_index(base);
  long int j = page_index(end);
  while(i < j){
    unsigned int t = segment_vec[i];
    if(t != hole_mt){
      ik_munmap((ikptr)(i<<pageshift), pagesize);
    }
    i++;
  }
  long int vecsize = (segment_index(end) - segment_index(base)) * pagesize;
  ik_munmap((ikptr)(long)pcb->dirty_vector_base, vecsize);
  ik_munmap((ikptr)(long)pcb->segment_vector_base, vecsize);
  ik_free(pcb, sizeof(ikpcb));
}
Example no. 19
/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		spin_lock_irq(&mapping->tree_lock);
		if (test_bit(PG_dirty, &page->flags)) {
			radix_tree_tag_clear(&mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
			spin_unlock_irq(&mapping->tree_lock);
			return clear_page_dirty_for_io(page);
		}
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	return TestClearPageDirty(page);
}
Example no. 20
File: dir.c Project: acton393/linux
static struct dentry *
__dcache_find_get_entry(struct dentry *parent, u64 idx,
			struct ceph_readdir_cache_control *cache_ctl)
{
	struct inode *dir = d_inode(parent);
	struct dentry *dentry;
	unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
	loff_t ptr_pos = idx * sizeof(struct dentry *);
	pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;

	if (ptr_pos >= i_size_read(dir))
		return NULL;

	if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
		ceph_readdir_cache_release(cache_ctl);
		cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
		if (!cache_ctl->page) {
			dout(" page %lu not found\n", ptr_pgoff);
			return ERR_PTR(-EAGAIN);
		}
		/* reading/filling the cache are serialized by
		   i_mutex, no need to use page lock */
		unlock_page(cache_ctl->page);
		cache_ctl->dentries = kmap(cache_ctl->page);
	}

	cache_ctl->index = idx & idx_mask;

	rcu_read_lock();
	spin_lock(&parent->d_lock);
	/* check i_size again here, because empty directory can be
	 * marked as complete while not holding the i_mutex. */
	if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
		dentry = cache_ctl->dentries[cache_ctl->index];
	else
		dentry = NULL;
	spin_unlock(&parent->d_lock);
	if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
		dentry = NULL;
	rcu_read_unlock();
	return dentry ? : ERR_PTR(-EAGAIN);
}
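The readdir cache stores an array of struct dentry pointers per page, so idx is translated into a byte position, a page-cache offset, and a slot within that page. A stand-alone sketch of that index arithmetic, assuming a hypothetical 4 KiB PAGE_SIZE and 8-byte pointers:

#include <stdio.h>

#define PAGE_SIZE  4096UL   /* hypothetical */
#define PAGE_SHIFT 12
#define PTR_SIZE   8UL      /* sizeof(struct dentry *) on a 64-bit build */

int main(void) {
    unsigned long long idx       = 1000;                       /* directory position */
    unsigned long      idx_mask  = (PAGE_SIZE / PTR_SIZE) - 1; /* 511 here           */
    unsigned long long ptr_pos   = idx * PTR_SIZE;              /* byte offset        */
    unsigned long      ptr_pgoff = (unsigned long)(ptr_pos >> PAGE_SHIFT);

    /* idx 1000 -> byte 8000 -> cache page 1, slot 488 of 512 */
    printf("page %lu, slot %llu of %lu\n",
           ptr_pgoff, idx & idx_mask, idx_mask + 1UL);
    return 0;
}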
Example no. 21
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret)
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	return ret;
}
Example no. 22
void nilfs_btnode_delete(struct buffer_head *bh)
{
	struct address_space *mapping;
	struct page *page = bh->b_page;
	pgoff_t index = page_index(page);
	int still_dirty;

	page_cache_get(page);
	lock_page(page);
	wait_on_page_writeback(page);

	nilfs_forget_buffer(bh);
	still_dirty = PageDirty(page);
	mapping = page->mapping;
	unlock_page(page);
	page_cache_release(page);

	if (!still_dirty && mapping)
		invalidate_inode_pages2_range(mapping, index, index);
}
Example no. 23
/*
 * The following is used by wait_on_page(), generic_file_readahead()
 * to initiate the completion of any page readahead operations.
 */
static int nfs_sync_page(struct page *page)
{
	struct address_space *mapping;
	struct inode	*inode;
	unsigned long	index = page_index(page);
	unsigned int	rpages;
	int		result;

	mapping = page->mapping;
	if (!mapping)
		return 0;
	inode = mapping->host;
	if (!inode)
		return 0;

	rpages = NFS_SERVER(inode)->rpages;
	result = nfs_pagein_inode(inode, index, rpages);
	if (result < 0)
		return result;
	return 0;
}
Example no. 24
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int
nfs_wait_on_requests(struct inode *inode, struct file *file, unsigned long idx_start, unsigned int npages)
{
	struct list_head	*p, *head;
	unsigned long		idx_end;
	unsigned int		res = 0;
	int			error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	head = &inode->u.nfs_i.writeback;
 restart:
	spin_lock(&nfs_wreq_lock);
	list_for_each_prev(p, head) {
		unsigned long pg_idx;
		struct nfs_page *req = nfs_inode_wb_entry(p);

		if (file && req->wb_file != file)
			continue;

		pg_idx = page_index(req->wb_page);
		if (pg_idx < idx_start)
			break;
		if (pg_idx > idx_end)
			continue;

		if (!NFS_WBACK_BUSY(req))
			continue;
		req->wb_count++;
		spin_unlock(&nfs_wreq_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error < 0)
			return error;
		res++;
		goto restart;
	}
	spin_unlock(&nfs_wreq_lock);
	return res;
}
Example no. 25
File: elfldr.c Project: tdz/opsys
static int
elf_loader_construct_phdr_load(const struct vmem *as,
                               const Elf32_Phdr * elf_phdr,
                               const unsigned char *img,
                               struct vmem *dst_as)
{
        int err;

        err = vmem_alloc_frames(dst_as,
                                   pageframe_index(elf_phdr->p_offset + img),
                                   page_index((void *)elf_phdr->p_vaddr),
                                   page_count((void *)elf_phdr->p_vaddr,
                                                      elf_phdr->p_filesz),
                                   PTE_FLAG_PRESENT|PTE_FLAG_WRITEABLE|
                                   PTE_FLAG_USERMODE);
        if (err < 0)
        {
                goto err_vmem_alloc_pageframes;
        }

        /*
         * set remaining bytes to zero
         */

        if (elf_phdr->p_filesz < elf_phdr->p_memsz)
        {
                unsigned char *vaddr = (unsigned char *)elf_phdr->p_vaddr;
                memset(vaddr + elf_phdr->p_filesz, 0, elf_phdr->p_memsz -
                       elf_phdr->p_filesz);
        }

        return 0;

err_vmem_alloc_pageframes:
        return err;
}
Example no. 26
bool inventory_column::is_selected_by_category( const inventory_entry &entry ) const
{
    return entry.is_item() && mode == navigation_mode::CATEGORY
                           && entry.get_category_ptr() == get_selected().get_category_ptr()
                           && page_of( entry ) == page_index();
}
Example no. 27
const aku_Timestamp PageHeader::read_timestamp_at(uint32_t index) const {
    return page_index(index)->timestamp;
}
Example no. 28
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

void account_page_dirtied(struct page *page, struct address_space *mapping)
{
	if (mapping_cap_account_dirty(mapping)) {
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
		task_dirty_inc(current);
		task_io_account_write(PAGE_CACHE_SIZE);
	}
}

int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
Example no. 29
/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			if (mapping_cap_account_dirty(mapping)) {
				__inc_zone_page_state(page, NR_FILE_DIRTY);
				__inc_bdi_stat(mapping->backing_dev_info,
						BDI_RECLAIMABLE);
				task_io_account_write(PAGE_CACHE_SIZE);
			}
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
Example no. 30
ikptr
ikrt_set_code_annotation(ikptr code, ikptr annot, ikpcb* pcb){
  ref(code, off_code_annotation) = annot;
  ((unsigned int*)(long)pcb->dirty_vector)[page_index(code)] = -1;
  return void_object;
}