unsigned char receive(volatile unsigned char *buffer, unsigned char size)
{
	unsigned char i;	/* loop index (declared locally; the one-line original left it undeclared) */

	if (buffer == 0) {
		state = START;
		return 3;
	}

	switch (state) {
	case START:
		if (size == 3) {
			addr = (uint16_t)buffer[1] << 8 | (uint16_t)buffer[2];
			switch (buffer[0]) {
			case 0x00:
				state = READ;
				break;
			case 0x01:
				state = WRITE;
				return SPM_PAGESIZE - page_offset(addr);
			case 0x02:
				state = BOOT;
				break;
			default:
				break;
			}
			return 0;
		}
		break;
	case WRITE:
		if (page_address != page_start(addr)) {
			write_page();
			read_page(page_start(addr));
		}
		for (i = 0; i < size; i++) {
			page_buffer[page_offset(addr)] = buffer[i];
			addr++;
			page_dirty = 1;
		}
		return SPM_PAGESIZE - page_offset(addr);
	default:
		break;
	}

	return 0;
}
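/*
 * A minimal sketch of the page helpers the receive() handler above relies
 * on, assuming SPM_PAGESIZE is a power of two. These are hypothetical
 * definitions for illustration; the real bootloader presumably provides
 * its own:
 */
#define page_start(a)	((uint16_t)((a) & ~(SPM_PAGESIZE - 1)))
#define page_offset(a)	((uint16_t)((a) & (SPM_PAGESIZE - 1)))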
static void mem_delmap(int asid, md_addr_t addr, size_t length)
{
    ZTRACE_PRINT(INVALID_CORE, "mem_delmap: %d, %" PRIxPTR ", length: %zd\n",
                 asid, addr, length);
    assert(asid >= 0 && asid < num_address_spaces);

    /* Check alignment */
    if (page_offset(addr)) {
        fprintf(stderr, "mem_delmap: Address %" PRIxPTR " not aligned\n", addr);
        abort();
    }

    /* Remove every page in the range from page table */
    md_addr_t last_addr = page_round_up(addr + length);
    for (md_addr_t curr_addr = addr; (curr_addr <= last_addr) && curr_addr;
         curr_addr += PAGE_SIZE) {
        if (!mem_is_mapped(asid, curr_addr))
            continue; /* Attempting to remove something missing is ok */

        md_addr_t curr_vpn = curr_addr >> PAGE_SHIFT;
        page_tables[asid].erase(curr_vpn);
        page_count[asid]--;
        phys_page_count--;
    }
}
/*
 * Part of tail2extent: replace tail items with an extent item. The content
 * of the tail items being cut (@count bytes) has already been copied into
 * the pages; the extent_writepage method is called to create extents
 * corresponding to those pages.
 */
static int replace(struct inode *inode, struct page **pages,
		   unsigned nr_pages, int count)
{
	int result;
	unsigned i;

	STORE_COUNTERS;

	if (nr_pages == 0)
		return 0;

	assert("vs-596", pages[0]);

	/* cut copied items */
	result = cut_formatting_items(inode, page_offset(pages[0]), count);
	if (result)
		return result;

	CHECK_COUNTERS;

	/*
	 * put into tree replacement for just removed items: extent item,
	 * namely
	 */
	for (i = 0; i < nr_pages; i++) {
		result = add_to_page_cache_lru(pages[i], inode->i_mapping,
					       pages[i]->index,
					       mapping_gfp_mask(inode->i_mapping));
		if (result)
			break;
		unlock_page(pages[i]);
		result = find_or_create_extent(pages[i]);
		if (result)
			break;
		SetPageUptodate(pages[i]);
	}
	return result;
}
/********************************************************//**
Logs a write of a string to a file page buffered in the buffer pool.
Writes the corresponding log record to the mini-transaction log. */
UNIV_INTERN
void
mlog_log_string(
/*============*/
	byte*	ptr,	/*!< in: pointer written to */
	ulint	len,	/*!< in: string length */
	mtr_t*	mtr)	/*!< in: mini-transaction handle */
{
	byte*	log_ptr;

	ut_ad(ptr && mtr);
	ut_ad(len <= UNIV_PAGE_SIZE);

	log_ptr = mlog_open(mtr, 30);

	/* If no logging is requested, we may return now */
	if (log_ptr == NULL) {

		return;
	}

	log_ptr = mlog_write_initial_log_record_fast(ptr, MLOG_WRITE_STRING,
						     log_ptr, mtr);
	mach_write_to_2(log_ptr, page_offset(ptr));
	log_ptr += 2;

	mach_write_to_2(log_ptr, len);
	log_ptr += 2;

	mlog_close(mtr, log_ptr);

	mlog_catenate_string(mtr, ptr, len);
}
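/*
 * On the recovery side, a parser would read the record back in the same
 * order mlog_log_string() wrote it: a 2-byte offset within the page, a
 * 2-byte length, then the string payload itself. A hedged sketch of that
 * layout (hypothetical helper, not InnoDB's actual parse function):
 */
static const byte*
parse_log_string(const byte* ptr, ulint* offs, ulint* len)
{
	*offs = mach_read_from_2(ptr);
	ptr += 2;
	*len = mach_read_from_2(ptr);
	ptr += 2;

	return(ptr);	/* *len payload bytes follow */
}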
uint8_t cs4031_device::dma_read_byte(offs_t offset)
{
	if (m_dma_channel == -1)
		return 0xff;

	return m_space->read_byte(page_offset() + offset);
}
static int romfs_readpage(struct file *file, struct page * page)
{
	struct inode *inode = page->mapping->host;
	loff_t offset, avail, readlen;
	void *buf;
	int result = -EIO;

	page_cache_get(page);
	lock_kernel();
	buf = kmap(page);
	if (!buf)
		goto err_out;

	/* 32 bit warning -- but not for us :) */
	offset = page_offset(page);
	if (offset < i_size_read(inode)) {
		avail = inode->i_size - offset;
		readlen = min_t(unsigned long, avail, PAGE_SIZE);
		if (romfs_copyfrom(inode, buf,
				   ROMFS_I(inode)->i_dataoffset + offset,
				   readlen) == readlen) {
			if (readlen < PAGE_SIZE)
				memset(buf + readlen, 0, PAGE_SIZE - readlen);
			SetPageUptodate(page);
			result = 0;
		}
	}

	/* Completion path restored from the mainline version of this
	 * function; the snippet was truncated after the copy. */
	if (result) {
		memset(buf, 0, PAGE_SIZE);
		SetPageError(page);
	}
	flush_dcache_page(page);

	unlock_page(page);

	kunmap(page);
err_out:
	page_cache_release(page);
	unlock_kernel();

	return result;
}
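/*
 * For reference, the page_offset() used by the kernel snippets in this
 * collection is defined in include/linux/pagemap.h as the byte offset of
 * the page within its mapping:
 *
 *	static inline loff_t page_offset(struct page *page)
 *	{
 *		return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
 *	}
 */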
/* We allocate physical pages to virtual pages on a
 * first-come-first-serve basis. Seems like linux frowns upon
 * page coloring, so should be reasonably accurate. */
static md_paddr_t next_ppn_to_allocate = 0x00000100; /* arbitrary starting point */

static void mem_newmap(int asid, md_addr_t addr, size_t length)
{
    ZTRACE_PRINT(INVALID_CORE, "mem_newmap: %d, %" PRIxPTR ", length: %zd\n",
                 asid, addr, length);
    assert(asid >= 0 && asid < num_address_spaces);
    // Mapping 0-th page might cause hell to break loose, don't do it.
    assert(addr != 0);

    /* Check alignment */
    if (page_offset(addr)) {
        fprintf(stderr, "mem_newmap: Address %" PRIxPTR " not aligned\n", addr);
        abort();
    }

    /* Add every page in the range to page table */
    md_addr_t last_addr = page_round_up(addr + length);
    for (md_addr_t curr_addr = addr; (curr_addr <= last_addr) && curr_addr;
         curr_addr += PAGE_SIZE) {
        if (mem_is_mapped(asid, curr_addr))
            continue; /* Attempting to double-map is ok */

        md_addr_t curr_vpn = curr_addr >> PAGE_SHIFT;
        page_tables[asid][curr_vpn] = next_ppn_to_allocate;
        next_ppn_to_allocate++;
        page_count[asid]++;
        phys_page_count++;
    }
}
static int v9fs_vfs_readpage(struct file *filp, struct page *page)
{
	int retval;
	loff_t offset;
	char *buffer;
	struct p9_fid *fid;

	P9_DPRINTK(P9_DEBUG_VFS, "\n");
	fid = filp->private_data;
	buffer = kmap(page);
	offset = page_offset(page);

	retval = p9_client_readn(fid, buffer, offset, PAGE_CACHE_SIZE);
	if (retval < 0)
		goto done;

	memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval);
	flush_dcache_page(page);
	SetPageUptodate(page);
	retval = 0;

done:
	kunmap(page);
	unlock_page(page);
	return retval;
}
void cs4031_device::dma_write_byte(offs_t offset, uint8_t data)
{
	if (m_dma_channel == -1)
		return;

	m_space->write_byte(page_offset() + offset, data);
}
static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *filp = vma->vm_file;
	struct dentry *dentry = filp->f_path.dentry;
	unsigned pagelen;
	int ret = -EINVAL;
	struct address_space *mapping;

	dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		filp->f_mapping->host->i_ino,
		(long long)page_offset(page));

	lock_page(page);
	mapping = page->mapping;
	if (mapping != dentry->d_inode->i_mapping)
		goto out_unlock;

	ret = 0;
	pagelen = nfs_page_length(page);
	if (pagelen == 0)
		goto out_unlock;

	ret = nfs_flush_incompatible(filp, page);
	if (ret != 0)
		goto out_unlock;

	ret = nfs_updatepage(filp, page, 0, pagelen);
out_unlock:
	if (!ret)
		return VM_FAULT_LOCKED;
	unlock_page(page);
	return VM_FAULT_SIGBUS;
}
void cs4031_device::dma_write_word(offs_t offset, uint8_t data)
{
	if (m_dma_channel == -1)
		return;

	m_space->write_word((page_offset() & 0xfe0000) | (offset << 1),
			    (m_dma_high_byte << 8) | data);
}
/********************************************************//**
Writes 8 bytes to a file page buffered in the buffer pool.
Writes the corresponding log record to the mini-transaction log. */
UNIV_INTERN
void
mlog_write_dulint(
/*==============*/
	byte*	ptr,	/*!< in: pointer where to write */
	dulint	val,	/*!< in: value to write */
	mtr_t*	mtr)	/*!< in: mini-transaction handle */
{
	byte*	log_ptr;

	ut_ad(ptr && mtr);

	mach_write_to_8(ptr, val);

	log_ptr = mlog_open(mtr, 11 + 2 + 9);

	/* If no logging is requested, we may return now */
	if (log_ptr == NULL) {

		return;
	}

	log_ptr = mlog_write_initial_log_record_fast(ptr, MLOG_8BYTES,
						     log_ptr, mtr);
	mach_write_to_2(log_ptr, page_offset(ptr));
	log_ptr += 2;

	log_ptr += mach_dulint_write_compressed(log_ptr, val);

	mlog_close(mtr, log_ptr);
}
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = -EINTR;
	req = fuse_get_request(fc);
	if (!req)
		goto out;

	req->out.page_zeroing = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		SetPageUptodate(page);
	fuse_invalidate_attr(inode); /* atime changed */
out:
	unlock_page(page);
	return err;
}
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF; cast to loff_t so the
	 * shift cannot overflow on 32-bit */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);
	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	return block_page_mkwrite_return(err);
}
/*
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/

	Function	:me2fsSetLink
	Input		:struct inode *dir
			 < vfs inode >
			 struct ext2_dir_entry *dent
			 < directory entry to set link >
			 struct page *page
			 < directory page cache >
			 struct inode *inode
			 < vfs inode to be linked >
			 int update_times
			 < flag to update times >
	Output		:void
	Return		:void

	Description	:set link
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
*/
void me2fsSetLink( struct inode *dir,
		   struct ext2_dir_entry *dent,
		   struct page *page,
		   struct inode *inode,
		   int update_times )
{
	loff_t		pos;
	unsigned	len;
	int		err;

	pos = page_offset( page )
	      + ( ( char* )dent - ( char* )page_address( page ) );
	len = le16_to_cpu( dent->rec_len );

	lock_page( page );
	err = prepareWriteBlock( page, pos, len );

	dent->inode = cpu_to_le32( inode->i_ino );
	setDirEntryType( dent, inode );

	err = commitBlockWrite( page, pos, len );
	me2fsPutDirPageCache( page );

	if( update_times )
	{
		dir->i_mtime = CURRENT_TIME_SEC;
		dir->i_ctime = dir->i_mtime;
	}

	ME2FS_I( dir )->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty( dir );
}
int ftfs_bstore_scan_one_page(DB *data_db, struct ftfs_meta_key *meta_key,
			      DB_TXN *txn, struct page *page)
{
	int ret;
	uint64_t block_num;
	size_t block_offset;
	loff_t offset;
	size_t len;
	struct ftfs_data_key *data_key;
	void *buf, *value_buf;
	DBT key_dbt, value_dbt;
	DBC *cursor;

	offset = page_offset(page);
	block_num = block_get_num_by_position(offset);

	data_key = kmalloc(DATA_KEY_MAX_LEN, GFP_KERNEL);
	if (!data_key)
		return -ENOMEM;
	value_buf = kmalloc(FTFS_BSTORE_BLOCKSIZE, GFP_KERNEL);
	if (!value_buf) {
		ret = -ENOMEM;
		goto out1;
	}

	copy_data_key_from_meta_key(data_key, meta_key, block_num);
	dbt_init(&key_dbt, data_key, DATA_KEY_MAX_LEN);
	dbt_init(&value_dbt, value_buf, FTFS_BSTORE_BLOCKSIZE);

	len = PAGE_CACHE_SIZE;
	buf = kmap(page);

	ret = data_db->cursor(data_db, txn, &cursor, DB_CURSOR_FLAGS);
	if (ret)
		goto out2;

	ret = cursor->c_get(cursor, &key_dbt, &value_dbt, DB_SET_RANGE);
	/* the flattened original looped on "ret"; looping while the cursor
	 * keeps succeeding (!ret) is what the copy logic below expects */
	while (len > 0 && !ret) {
		ssize_t read_size;

		block_num = block_get_num_by_position(offset);
		block_offset = block_get_offset_by_position(offset);
		read_size = min(len, (FTFS_BSTORE_BLOCKSIZE - block_offset));

		if (data_key->circle_id == meta_key->circle_id &&
		    !strcmp(data_key->path, meta_key->path) &&
		    data_key->block_num == block_num) {
			memcpy(buf, value_buf, read_size);
			ret = cursor->c_get(cursor, &key_dbt, &value_dbt,
					    DB_NEXT);
		} else
			memset(buf, 0, read_size);

		buf += read_size;
		offset += read_size;
		len -= read_size;
	}

	/* running off the end of the store is not an error; zero-fill
	 * whatever the cursor did not cover (assumed semantics) */
	if (ret == DB_NOTFOUND)
		ret = 0;
	if (!ret && len > 0)
		memset(buf, 0, len);

	cursor->c_close(cursor);
out2:
	kunmap(page);
out1:
	kfree(value_buf);
	data_key_free(data_key);
	return ret;
}
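/*
 * The block_get_*_by_position() helpers above split a file position into
 * a block number and an intra-block offset. A plausible sketch, assuming
 * FTFS_BSTORE_BLOCKSIZE divides evenly (hypothetical definitions; the
 * real filesystem's headers are authoritative):
 */
#define block_get_num_by_position(pos)    ((uint64_t)(pos) / FTFS_BSTORE_BLOCKSIZE)
#define block_get_offset_by_position(pos) ((size_t)((pos) % FTFS_BSTORE_BLOCKSIZE))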
static inline u32_t virt_to_phys(u32_t virt_addr, u32_t *pg_dir)
{
	u32_t *pg_table = (u32_t *)pg_dir[pg_dir_index(virt_addr)];
	u32_t page = pg_table[pg_table_index(virt_addr)];

	return (page + page_offset(virt_addr));
}
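/*
 * Note that on real x86 page tables the low 12 bits of directory and
 * table entries hold flag bits (present, writable, ...). If this kernel
 * stored flags there, the walker would need to mask them off before
 * adding the offset; a hedged variant assuming 4 KiB pages (hypothetical
 * name, not part of the original source):
 */
static inline u32_t virt_to_phys_masked(u32_t virt_addr, u32_t *pg_dir)
{
	u32_t *pg_table = (u32_t *)(pg_dir[pg_dir_index(virt_addr)] & ~0xfffu);
	u32_t page = pg_table[pg_table_index(virt_addr)] & ~0xfffu;

	return page + page_offset(virt_addr);
}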
/*
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/

	Function	:me2fsDeleteDirEntry
	Input		:struct ext2_dir_entry *dir
			 < directory entry to delete >
			 struct page *page
			 < page cache the directory belongs to >
	Output		:void
	Return		:int < result of deletion >

	Description	:delete a directory entry
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
*/
int me2fsDeleteDirEntry( struct ext2_dir_entry *dir, struct page *page )
{
	struct inode		*inode;
	char			*start;
	unsigned		from;
	unsigned		to;
	loff_t			pos;
	struct ext2_dir_entry	*pde;
	struct ext2_dir_entry	*dent;
	int			err;

	inode	= page->mapping->host;
	start	= page_address( page );
	from	= ( ( char* )dir - start ) & ~( inode->i_sb->s_blocksize - 1 );
	to	= ( ( char* )dir - start ) + le16_to_cpu( dir->rec_len );
	pde	= NULL;
	dent	= ( struct ext2_dir_entry* )( start + from );

	while( ( char* )dent < ( char* )dir )
	{
		if( dent->rec_len == 0 )
		{
			ME2FS_ERROR( "<ME2FS>%s:zero-length directory entry\n",
				     __func__ );
			err = -EIO;
			goto out;
		}
		pde = dent;
		dent = ( struct ext2_dir_entry* )
		       ( ( char* )dent + le16_to_cpu( dent->rec_len ) );
	}

	if( pde )
	{
		from = ( char* )pde - start;
	}

	pos = page_offset( page ) + from;
	lock_page( page );
	err = prepareWriteBlock( page, pos, to - from );

	if( pde )
	{
		/* rec_len is stored on disk in little-endian form, so the
		 * CPU-order value must be converted with cpu_to_le16 (the
		 * original mistakenly used le16_to_cpu here) */
		pde->rec_len = cpu_to_le16( to - from );
	}
	dir->inode = 0;
	err = commitBlockWrite( page, pos, to - from );

	inode->i_mtime = CURRENT_TIME_SEC;
	inode->i_ctime = inode->i_mtime;
	ME2FS_I( inode )->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty( inode );

out:
	me2fsPutDirPageCache( page );
	return( err );
}
static int nfs_launder_page(struct page *page)
{
	struct inode *inode = page->mapping->host;

	dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
		inode->i_ino, (long long)page_offset(page));

	return nfs_wb_page(inode, page);
}
md_paddr_t v2p_translate(int asid, md_addr_t addr)
{
    std::lock_guard<XIOSIM_LOCK> l(memory_lock);

    /* Some caches call this with an already translated address. Just ignore. */
    if (asid == DO_NOT_TRANSLATE)
        return addr;

    assert(asid >= 0 && asid < num_address_spaces);

    /* Page is mapped, just look it up */
    if (mem_is_mapped(asid, addr)) {
        md_addr_t vpn = addr >> PAGE_SHIFT;
        return (page_tables[asid][vpn] << PAGE_SHIFT) + page_offset(addr);
    }

    /* Else, return zeroth page and someone in higher layers will
     * complain if necessary */
    return 0 + page_offset(addr);
}
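/*
 * A sketch of the paging helpers that mem_newmap(), mem_delmap() and
 * v2p_translate() above rely on, assuming 4 KiB pages. These are assumed
 * definitions for illustration; the simulator's memory header is
 * authoritative:
 */
#define PAGE_SHIFT 12
#define PAGE_SIZE ((md_addr_t)1 << PAGE_SHIFT)
#define page_offset(addr) ((addr) & (PAGE_SIZE - 1))
#define page_round_up(addr) (((addr) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))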
int sysv_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct page *page = NULL;
	struct sysv_dir_entry * de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	loff_t pos;
	int err;

	/* We take care of directory expansion in the same loop */
	for (n = 0; n <= npages; n++) {
		page = dir_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		kaddr = (char*)page_address(page);
		de = (struct sysv_dir_entry *)kaddr;
		kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
		while ((char *)de <= kaddr) {
			if (!de->inode)
				goto got_it;
			err = -EEXIST;
			if (namecompare(namelen, SYSV_NAMELEN, name, de->name))
				goto out_page;
			de++;
		}
		dir_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) +
			(char*)de - (char*)page_address(page);
	lock_page(page);
	err = __sysv_write_begin(NULL, page->mapping, pos, SYSV_DIRSIZE,
				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err)
		goto out_unlock;
	memcpy (de->name, name, namelen);
	memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
out_page:
	dir_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_page;
}
static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct nilfs_transaction_info ti;
	int ret;

	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
		return VM_FAULT_SIGBUS;

	lock_page(page);
	if (page->mapping != inode->i_mapping ||
	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	if (PageMappedToDisk(page))
		goto mapped;

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int fully_mapped = 1;

		bh = head = page_buffers(page);
		do {
			if (!buffer_mapped(bh)) {
				fully_mapped = 0;
				break;
			}
		} while (bh = bh->b_this_page, bh != head);

		if (fully_mapped) {
			SetPageMappedToDisk(page);
			goto mapped;
		}
	}
	unlock_page(page);

	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
	if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
	if (ret != VM_FAULT_LOCKED) {
		nilfs_transaction_abort(inode->i_sb);
		return ret;
	}
	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
	nilfs_transaction_commit(inode->i_sb);

mapped:
	wait_on_page_writeback(page);
	return VM_FAULT_LOCKED;
}
uint16_t cs4031_device::dma_read_word(offs_t offset)
{
	if (m_dma_channel == -1)
		return 0xff;

	/* a full 16-bit word is read and latched, so the return type must be
	 * uint16_t (the one-line original declared uint8_t) */
	uint16_t result = m_space->read_word((page_offset() & 0xfe0000) | (offset << 1));
	m_dma_high_byte = result >> 8;

	return result;
}
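/*
 * The word-wide DMA handlers (dma_write_word() earlier and dma_read_word()
 * above) form the address the same way: the page register supplies the
 * high address bits (the 0xfe0000 mask keeps A23-A17 and drops bit 16) and
 * the controller's 16-bit word offset is shifted left one bit to become a
 * byte address covering A16-A1. A standalone sketch of the address math
 * (hypothetical free function, not part of the MAME device):
 */
static inline uint32_t dma16_address(uint32_t page, uint16_t offset)
{
	return (page & 0xfe0000) | ((uint32_t)offset << 1);
}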
/*
 * Attempt to clear the private state associated with a page when an error
 * occurs that requires the cached contents of an inode to be written back or
 * destroyed
 * - Called if either PG_private or fscache is set on the page
 * - Caller holds page lock
 * - Return 0 if successful, -error otherwise
 */
static int nfs_launder_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);

	dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
		inode->i_ino, (long long)page_offset(page));

	nfs_fscache_wait_on_page_write(nfsi, page);
	return nfs_wb_page(inode, page);
}
/* Releases the page */
void sysv_set_link(struct sysv_dir_entry *de, struct page *page,
	struct inode *inode)
{
	struct inode *dir = page->mapping->host;
	loff_t pos = page_offset(page) +
			(char *)de - (char *)page_address(page);
	int err;

	lock_page(page);
	err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
	BUG_ON(err);
	/* tail restored from the mainline sysv version; the snippet broke
	 * off mid-assignment */
	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}
static void checkmem(void *buf, int nr)
{
	unsigned int start = ~0u, end = 0;
	unsigned char c = nr, *p = buf, differs = 0;
	int i;

	for (i = 0; i < CHUNKSIZE; i++) {
		unsigned char got = *p++;
		if (got != c) {
			if (i < start)
				start = i;
			if (i > end)
				end = i;
			differs = got;
		}
	}
	/* '<=' rather than '<' so a single corrupted byte (start == end)
	 * is still reported; the untouched case (start = ~0u, end = 0)
	 * stays silent either way */
	if (start <= end) {
		printf("Chunk %d corrupted (%u-%u) (%u-%u)\n",
		       nr, start, end,
		       page_offset(buf, start), page_offset(buf, end));
		printf("Expected %u, got %u\n", c, differs);
		printf("Written as (%d)%d(%d)\n",
		       order(nr - 1), order(nr), order(nr + 1));
	}
}
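/*
 * checkmem() above uses a two-argument page_offset(buf, i), unlike the
 * kernel helper of the same name. A plausible definition, assuming 4 KiB
 * pages (hypothetical; the test harness presumably defines its own):
 */
#define page_offset(buf, i) \
	((unsigned int)(((unsigned long)(buf) + (i)) & 4095))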
static void entd_flush(struct super_block *super, struct wbq *rq)
{
	reiser4_context ctx;

	init_stack_context(&ctx, super);
	ctx.entd = 1;
	ctx.gfp_mask = GFP_NOFS;

	rq->wbc->range_start = page_offset(rq->page);
	rq->wbc->range_end = rq->wbc->range_start +
		(ENTD_CAPTURE_APAGE_BURST << PAGE_CACHE_SHIFT);

	rq->mapping->a_ops->writepages(rq->mapping, rq->wbc);

	if (rq->wbc->nr_to_write > 0) {
		long result;
		struct bdi_writeback *wb;
		struct wb_writeback_work work = {
			.sb		= super,
			.sync_mode	= WB_SYNC_NONE,
			.nr_pages	= LONG_MAX,
			.range_cyclic	= 0,
			.reason		= WB_REASON_TRY_TO_FREE_PAGES,
		};
		rq->wbc->sync_mode = work.sync_mode;
		rq->wbc->range_cyclic = work.range_cyclic;
		rq->wbc->range_start = 0;
		rq->wbc->range_end = LLONG_MAX;
		/*
		 * we don't need to pin superblock for writeback:
		 * this is implicitly pinned by write_page_by_ent
		 * (via igrab), so that shutdown_super() will wait
		 * (on reiser4_put_super) for entd completion.
		 */
		wb = &rq->mapping->backing_dev_info->wb;

		spin_lock(&wb->list_lock);
		result = generic_writeback_sb_inodes(super, wb, rq->wbc,
						     &work, true);
		spin_unlock(&wb->list_lock);
	}
	rq->wbc->nr_to_write = ENTD_CAPTURE_APAGE_BURST;

	reiser4_writeout(super, rq->wbc);
	context_set_commit_async(&ctx);
	reiser4_exit_context(&ctx);
}
/*
 * read a page worth of data from the image
 */
static int romfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t offset, size;
	unsigned long fillsize, pos;
	unsigned long i;
	char *c_buf;
	void *buf;
	int ret;

	printk(KERN_INFO "romfs_readpage\n");

	buf = kmap(page);
	if (!buf)
		return -ENOMEM;

	/* 32 bit warning -- but not for us :) */
	offset = page_offset(page);
	size = i_size_read(inode);
	fillsize = 0;
	ret = 0;
	if (offset < size) {
		size -= offset;
		fillsize = size > PAGE_SIZE ? PAGE_SIZE : size;

		pos = ROMFS_I(inode)->i_dataoffset + offset;

		ret = romfs_dev_read(inode->i_sb, pos, buf, fillsize);
		if (ret < 0) {
			SetPageError(page);
			fillsize = 0;
			ret = -EIO;
		}
	}

	if (fillsize < PAGE_SIZE)
		memset(buf + fillsize, 0, PAGE_SIZE - fillsize);
	if (ret == 0)
		SetPageUptodate(page);

	/* zzq's "encryption": replace every 'a' in the page with '*'.
	 * Bounded by fillsize rather than the original strlen() so an
	 * unterminated full page cannot be overrun. */
	c_buf = (char *)buf;
	for (i = 0; i < fillsize; i++)
		if (c_buf[i] == 'a')
			c_buf[i] = '*';

	flush_dcache_page(page);
	kunmap(page);
	unlock_page(page);
	return ret;
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file,
				struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	if (fc->async_read) {
		req->end = fuse_readpages_end;
		request_send_background(fc, req);
	} else {
		request_send(fc, req);
		fuse_readpages_end(fc, req);
	}
}
/*
 * check that a directory page is valid
 */
static inline bool afs_dir_check_page(struct inode *dir, struct page *page)
{
	struct afs_dir_page *dbuf;
	loff_t latter;
	int tmp, qty;

#if 0
	/* check the page count */
	qty = desc.size / sizeof(dbuf->blocks[0]);
	if (qty == 0)
		goto error;

	if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) {
		printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n",
		       __func__, dir->i_ino, qty,
		       ntohs(dbuf->blocks[0].pagehdr.npages));
		goto error;
	}
#endif

	/* determine how many magic numbers there should be in this page */
	latter = dir->i_size - page_offset(page);
	if (latter >= PAGE_SIZE)
		qty = PAGE_SIZE;
	else
		qty = latter;
	qty /= sizeof(union afs_dir_block);

	/* check them */
	dbuf = page_address(page);
	for (tmp = 0; tmp < qty; tmp++) {
		if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) {
			printk("kAFS: %s(%lu): bad magic %d/%d is %04hx\n",
			       __func__, dir->i_ino, tmp, qty,
			       ntohs(dbuf->blocks[tmp].pagehdr.magic));
			goto error;
		}
	}

	SetPageChecked(page);
	return true;

error:
	SetPageError(page);
	return false;
}