/*
 * A simple wrapper so the base function doesn't need to enforce
 * that all swap pages go through the swap cache! We verify that:
 * - the page is locked
 * - it's marked as being swap-cache
 * - it's associated with the swap inode
 */
void rw_swap_page(int rw, struct page *page)
{
	swp_entry_t entry;

	entry.val = page->index;

	/* A page headed for swap I/O must be locked and must live in
	 * the swap cache; either violation is a caller bug. */
	if (!PageLocked(page) || !PageSwapCache(page))
		PAGE_BUG(page);

	/* If the base routine refused to start the I/O, nobody else
	 * will ever unlock this page - do it ourselves. */
	if (!rw_swap_page_base(rw, entry, page))
		UnlockPage(page);
}
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) { struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); unsigned char *pg_buf; int ret; D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT)); if (!PageLocked(pg)) PAGE_BUG(pg); pg_buf = kmap(pg); /* FIXME: Can kmap fail? */ ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE); if (ret) { ClearPageUptodate(pg); SetPageError(pg); } else { SetPageUptodate(pg); ClearPageError(pg); } flush_dcache_page(pg); kunmap(pg); D2(printk(KERN_DEBUG "readpage finished\n")); return 0; }
/*
 * The swap lock map insists that pages be in the page cache!
 * Therefore we can't use it. Later when we can remove the need for the
 * lock map and we can reduce the number of functions exported.
 */
void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf)
{
	struct page *page = virt_to_page(buf);

	/* The caller must hand us a locked, anonymous page: not in the
	 * swap cache and with no mapping of its own. */
	if (!PageLocked(page) || PageSwapCache(page) || page->mapping)
		PAGE_BUG(page);

	/* needs sync_page to wait I/O completion */
	page->mapping = &swapper_space;

	if (!rw_swap_page_base(rw, entry, page))
		UnlockPage(page);

	/* Synchronous interface: wait for the I/O, then detach the
	 * temporary mapping again before returning. */
	wait_on_page(page);
	page->mapping = NULL;
}
static int yaffs_readpage_nolock(struct file *f, struct page *pg) { yaffs_Object *obj; unsigned char *pg_buf; int ret; yaffs_Device *dev; T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage at %08x, size %08x\n", (unsigned)(pg->index << PAGE_CACHE_SHIFT), (unsigned)PAGE_CACHE_SIZE)); obj = yaffs_DentryToObject(f->f_dentry); dev = obj->myDev; #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) BUG_ON(!PageLocked(pg)); #else if (!PageLocked(pg)) PAGE_BUG(pg); #endif pg_buf = kmap(pg); yaffs_GrossLock(dev); ret = yaffs_ReadDataFromFile(obj, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE); yaffs_GrossUnlock(dev); if (ret >= 0) ret = 0; if (ret) { ClearPageUptodate(pg); SetPageError(pg); } else { SetPageUptodate(pg); ClearPageError(pg); } flush_dcache_page(pg); kunmap(pg); T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage done\n")); return ret; }
/*
 * Detach @page from the swap cache.  The page must be locked, flagged
 * as swap-cache, and currently owned by swapper_space; anything else
 * is a hard bug.
 */
static inline void remove_from_swap_cache(struct page *page)
{
	if (page->mapping != &swapper_space)
		BUG();
	if (!PageSwapCache(page) || !PageLocked(page))
		PAGE_BUG(page);

	PageClearSwapCache(page);
	ClearPageDirty(page);
	__remove_inode_page(page);
}
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) { struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_node_frag *frag = f->fraglist; __u32 offset = pg->index << PAGE_CACHE_SHIFT; __u32 end = offset + PAGE_CACHE_SIZE; unsigned char *pg_buf; int ret; D1(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%x\n", inode->i_ino, offset)); if (!PageLocked(pg)) PAGE_BUG(pg); while(frag && frag->ofs + frag->size <= offset) { // D1(printk(KERN_DEBUG "skipping frag %d-%d; before the region we care about\n", frag->ofs, frag->ofs + frag->size)); frag = frag->next; } pg_buf = kmap(pg); /* XXX FIXME: Where a single physical node actually shows up in two frags, we read it twice. Don't do that. */ /* Now we're pointing at the first frag which overlaps our page */ while(offset < end) { D2(printk(KERN_DEBUG "jffs2_readpage: offset %d, end %d\n", offset, end)); if (!frag || frag->ofs > offset) { __u32 holesize = end - offset; if (frag) { D1(printk(KERN_NOTICE "Eep. Hole in ino %ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", inode->i_ino, frag->ofs, offset)); holesize = min(holesize, frag->ofs - offset); D1(jffs2_print_frag_list(f)); } D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize)); memset(pg_buf, 0, holesize); pg_buf += holesize; offset += holesize; continue; } else if (frag->ofs < offset && (offset & (PAGE_CACHE_SIZE-1)) != 0) { D1(printk(KERN_NOTICE "Eep. Overlap in ino #%ld fraglist. 
frag->ofs = 0x%08x, offset = 0x%08x\n", inode->i_ino, frag->ofs, offset)); D1(jffs2_print_frag_list(f)); memset(pg_buf, 0, end - offset); ClearPageUptodate(pg); SetPageError(pg); kunmap(pg); return -EIO; } else if (!frag->node) { __u32 holeend = min(end, frag->ofs + frag->size); D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size)); memset(pg_buf, 0, holeend - offset); pg_buf += holeend - offset; offset = holeend; frag = frag->next; continue; } else { __u32 readlen; __u32 fragofs; /* offset within the frag to start reading */ fragofs = offset - frag->ofs; readlen = min(frag->size - fragofs, end - offset); D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%x\n", frag->ofs+fragofs, fragofs+frag->ofs+readlen, frag->node->raw->flash_offset & ~3)); ret = jffs2_read_dnode(c, frag->node, pg_buf, fragofs + frag->ofs - frag->node->ofs, readlen); D2(printk(KERN_DEBUG "node read done\n")); if (ret) { D1(printk(KERN_DEBUG"jffs2_readpage error %d\n",ret)); memset(pg_buf, 0, readlen); ClearPageUptodate(pg); SetPageError(pg); kunmap(pg); return ret; } pg_buf += readlen; offset += readlen; frag = frag->next; D2(printk(KERN_DEBUG "node read was OK. Looping\n")); } } D2(printk(KERN_DEBUG "readpage finishing\n")); SetPageUptodate(pg); ClearPageError(pg); flush_dcache_page(pg); kunmap(pg); D1(printk(KERN_DEBUG "readpage finished\n")); return 0; }
/*
 * readpage for ptpfs: satisfy a page-cache read from the PTP data
 * buffer that was attached to the file at open time (filp->private_data),
 * rather than from a backing device.  The buffer is a sequence of
 * variable-sized blocks which this walks linearly.
 * Returns 0 on success, -ESTALE if the requested range falls outside
 * the buffered blocks; always unlocks the page.
 */
static int ptpfs_file_readpage(struct file *filp, struct page *page)
{
	//printk(KERN_INFO "%s\n", __FUNCTION__);
	struct inode *inode;
	int ret;
	struct ptp_data_buffer *data=(struct ptp_data_buffer*)filp->private_data;

	inode = page->mapping->host;

	if (!PageLocked(page))
		PAGE_BUG(page);

	ret = -ESTALE;

	/* work out how much to get and from where */
	/* NOTE(review): 'int offset' truncates page->index << PAGE_CACHE_SHIFT
	 * for files beyond 2GB - presumably fine for PTP-sized objects, but
	 * confirm.  Also assumes offset < i_size; if the page lies past EOF
	 * the (size_t) cast of a negative difference yields a huge value and
	 * min() picks PAGE_SIZE - TODO confirm callers never do that. */
	int offset = page->index << PAGE_CACHE_SHIFT;
	int size = min((size_t)(inode->i_size - offset),(size_t)PAGE_SIZE);

	char *buffer = kmap(page);
	clear_page(buffer);	/* zero-fill so any short copy reads as zeroes */

	/* read the contents of the file from the server into the page */
	/* Walk to the block containing 'offset', reducing offset to an
	 * offset within that block.
	 * NOTE(review): the '>' (not '>=') test can leave offset equal to
	 * block_size; the first memcpy below then copies 0 bytes and the
	 * tail loop picks up from the next block, so the result is still
	 * correct - but boundary-verify if this is ever changed. */
	int block = 0;
	while (block < data->num_blocks && offset > data->blocks[block].block_size) {
		offset -= data->blocks[block].block_size;
		block++;
	}
	if (block == data->num_blocks) {
		/* Requested offset is beyond all buffered data. */
		kunmap(page);
		ret = -ESTALE;
		goto error;
	}

	/* Copy the tail of the first block, then whole blocks until the
	 * page's worth of data ('size') is satisfied. */
	int toCopy = min(size,data->blocks[block].block_size-offset);
	memcpy(buffer,&data->blocks[block].block[offset],toCopy);
	size -= toCopy;
	int pos = toCopy;
	block++;
	while (size && block < data->num_blocks) {
		toCopy = min(size,data->blocks[block].block_size);
		memcpy(&buffer[pos],data->blocks[block].block,toCopy);
		size -= toCopy;
		pos += toCopy;
		block++;
	}
	if (block == data->num_blocks && size > 0) {
		/* Ran out of blocks before filling the requested range. */
		kunmap(page);
		ret = -ESTALE;
		goto error;
	}

	/* NOTE(review): flush_dcache_page() is called after kunmap() here;
	 * other readpage implementations in this file flush before
	 * unmapping - confirm ordering is intentional. */
	kunmap(page);
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

error:
	SetPageError(page);
	unlock_page(page);
	return ret;
}
/*
 * Drain the global kcdfsd request list, servicing each queued page-read
 * request for the cdfs pseudo-files (CDDA audio, raw CDDA, XA, plain
 * data and HFS tracks).  Each request carries the target page and
 * dentry; the page arrives locked and is filled, marked uptodate and
 * unlocked here, and the request structure is freed.
 * NOTE(review): no locking of kcdfsd_req_list is visible in this
 * function - presumably the caller (the kcdfsd thread) serialises
 * access; confirm against the enqueue path.
 */
static void kcdfsd_process_request(void){
	struct list_head * tmp;
	struct kcdfsd_req * req;
	struct page * page;
	struct inode * inode;
	unsigned request;

	while (!list_empty (&kcdfsd_req_list)){
		/* Grab the next entry from the beginning of the list */
		tmp = kcdfsd_req_list.next;
		req = list_entry (tmp, struct kcdfsd_req, req_list);
		list_del (tmp);
		page = req->page;
		inode = req->dentry->d_inode;
		request = req->request_type;

		/* The page must arrive locked.  PAGE_BUG() was removed in
		 * 2.6.12, hence the version split for the failure path. */
		if (!PageLocked(page))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12))
			PAGE_BUG(page);
#else
			BUG();
#endif

		/* Every arm below kmap()s the page; the matching kunmap()
		 * is done once after the switch. */
		switch (request){
		case CDDA_REQUEST:
		case CDDA_RAW_REQUEST:
			{
				cd *this_cd = cdfs_info (inode->i_sb);
				char *p;
				track_info *this_track = &(this_cd->track[inode->i_ino]);

				/* Audio wrapped in an AVI container starts at
				 * avi_offset; a raw request is passed through
				 * via the final flag argument. */
				cdfs_cdda_file_read (inode,
						     p = (char *) kmap (page),
						     1 << PAGE_CACHE_SHIFT,
						     (page->index << PAGE_CACHE_SHIFT) +
						     ((this_track->avi) ? this_track-> avi_offset : 0),
						     (request == CDDA_RAW_REQUEST));

				/* Byte-swap 16-bit samples if the AVI track is
				 * flagged as needing it. */
				if ((this_track->avi) && (this_track->avi_swab)){
					int k;
					for (k=0; k<(1 << PAGE_CACHE_SHIFT); k+=2){
						char c;
						c = p[k];
						p[k] = p[k + 1];
						p[k + 1] = c;
					}
				}
			}
			break;
		case CDXA_REQUEST:
			cdfs_copy_from_cdXA(inode->i_sb, inode->i_ino,
					    page->index << PAGE_CACHE_SHIFT,
					    (page->index + 1) << PAGE_CACHE_SHIFT,
					    (char *)kmap(page));
			break;
		case CDDATA_REQUEST:
			cdfs_copy_from_cddata(inode->i_sb, inode->i_ino,
					      page->index << PAGE_CACHE_SHIFT,
					      (page->index + 1) << PAGE_CACHE_SHIFT,
					      (char *)kmap(page));
			break;
		case CDHFS_REQUEST:
			cdfs_copy_from_cdhfs(inode->i_sb, inode->i_ino,
					     page->index << PAGE_CACHE_SHIFT,
					     (page->index + 1) << PAGE_CACHE_SHIFT,
					     (char *)kmap(page));
			break;
		}
		/* NOTE(review): there is no default case - an unknown
		 * request_type would skip the kmap yet still hit the
		 * kunmap/SetPageUptodate below; presumably request_type is
		 * validated at enqueue time - confirm. */
		SetPageUptodate (page);
		kunmap (page);
		unlock_page (page);
		kfree (req);
	}
}