// Timestamp-ordering access to this row: reads, pre-writes, and writes are
// admitted, buffered, or aborted based on the transaction's timestamp.
// Under TS_TWR, stale writes are silently dropped (Thomas Write Rule).
RC Row_ts::access(txn_man * txn, TsType type, row_t * row) {
    RC rc;
    ts_t ts = txn->get_ts();
    if (g_central_man)
        glob_manager.lock_row(_row);
    else
        pthread_mutex_lock( latch );
    if (type == R_REQ) {
        if (ts < wts) {
            rc = Abort;
        } else if (ts > min_pts) {
            // insert the req into the read request queue
            buffer_req(R_REQ, txn, NULL);
            txn->ts_ready = false;
            rc = WAIT;
        } else {
            // return the value.
            txn->cur_row->copy(_row);
            if (rts < ts)
                rts = ts;
            rc = RCOK;
        }
    } else if (type == P_REQ) {
        if (ts < rts) {
            rc = Abort;
        } else {
#if TS_TWR
            buffer_req(P_REQ, txn, NULL);
            rc = RCOK;
#else
            if (ts < wts) {
                rc = Abort;
            } else {
                buffer_req(P_REQ, txn, NULL);
                rc = RCOK;
            }
#endif
        }
    } else if (type == W_REQ) {
        // write requests are always accepted.
        rc = RCOK;
#if TS_TWR
        // according to TWR, this write is already stale, ignore.
        if (ts < wts) {
            TsReqEntry * req = debuffer_req(P_REQ, txn);
            assert(req != NULL);
            update_buffer();
            return_req_entry(req);
            row->free_row();
            mem_allocator.free(row, sizeof(row_t));
            goto final;
        }
        // ...
static int sync_block (struct inode * inode, u32 * block, int wait)
{
    struct buffer_head * bh;
    int tmp;

    if (!*block)
        return 0;
    tmp = *block;
    bh = get_hash_table (inode->i_dev, *block, blocksize);
    if (!bh)
        return 0;
    if (*block != tmp) {
        brelse (bh);
        return 1;
    }
    if (wait && buffer_req(bh) && !buffer_uptodate(bh)) {
        brelse (bh);
        return -1;
    }
    if (wait || !buffer_uptodate(bh) || !buffer_dirty(bh)) {
        brelse (bh);
        return 0;
    }
    ll_rw_block (WRITE, 1, &bh);
    bh->b_count--;
    return 0;
}
/*
 * Sync one block.  The block number is
 * from_coh_ulong(*blockp) if convert=1, *blockp if convert=0.
 */
static int sync_block (struct inode * inode, u32 *blockp, int convert, int wait)
{
    struct buffer_head * bh;
    u32 tmp, block;
    struct super_block * sb;

    block = tmp = *blockp;
    if (convert)
        block = from_coh_ulong(block);
    if (!block)
        return 0;
    sb = inode->i_sb;
    bh = sv_get_hash_table(sb, inode->i_dev, block);
    if (!bh)
        return 0;
    if (*blockp != tmp) {
        brelse (bh);
        return 1;
    }
    if (wait && buffer_req(bh) && !buffer_uptodate(bh)) {
        brelse(bh);
        return -1;
    }
    if (wait || !buffer_uptodate(bh) || !buffer_dirty(bh)) {
        brelse(bh);
        return 0;
    }
    ll_rw_block(WRITE, 1, &bh);
    atomic_dec(&bh->b_count);
    return 0;
}
int ext2_fsync(struct file *file, int datasync)
{
    int ret;
    struct inode *inode = file->f_mapping->host;
    ino_t ino = inode->i_ino;
    struct super_block *sb = inode->i_sb;
    struct address_space *sb_mapping = sb->s_bdev->bd_inode->i_mapping;
    struct buffer_head *bh;
    struct ext2_inode *raw_inode;

    ret = generic_file_fsync(file, datasync);
    if (ret == -EIO || test_and_clear_bit(AS_EIO, &sb_mapping->flags)) {
        /* We don't really know where the IO error happened... */
        ext2_error(sb, __func__,
                   "detected IO error when writing metadata buffers");
        return -EIO;
    }

    raw_inode = ext2_get_inode(sb, ino, &bh);
    if (IS_ERR(raw_inode))
        return -EIO;

    sync_dirty_buffer(bh);
    if (buffer_req(bh) && !buffer_uptodate(bh)) {
        printk("IO error syncing ext2 inode [%s:%08lx]\n",
               sb->s_id, (unsigned long) ino);
        ret = -EIO;
    }
    brelse(bh);
    return ret;
}
int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
                                 handle_t *handle, struct inode *inode,
                                 struct buffer_head *bh)
{
    int err = 0;

    if (ext4_handle_valid(handle)) {
        err = jbd2_journal_dirty_metadata(handle, bh);
        if (err) {
            /* Errors can only happen if there is a bug */
            handle->h_err = err;
            __ext4_journal_stop(where, line, handle);
        }
    } else {
        if (inode)
            mark_buffer_dirty_inode(bh, inode);
        else
            mark_buffer_dirty(bh);
        if (inode && inode_needs_sync(inode)) {
            sync_dirty_buffer(bh);
            if (buffer_req(bh) && !buffer_uptodate(bh)) {
                struct ext4_super_block *es;

                es = EXT4_SB(inode->i_sb)->s_es;
                es->s_last_error_block =
                    cpu_to_le64(bh->b_blocknr);
                ext4_error_inode(inode, where, line,
                                 bh->b_blocknr,
                                 "IO error syncing itable block");
                err = -EIO;
            }
        }
    }
    return err;
}
int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
                                 handle_t *handle, struct inode *inode,
                                 struct buffer_head *bh)
{
    int err = 0;

    might_sleep();

    set_buffer_meta(bh);
    set_buffer_prio(bh);
    if (ext4_handle_valid(handle)) {
        err = jbd2_journal_dirty_metadata(handle, bh);
        /* Errors can only happen if there is a bug */
        if (WARN_ON_ONCE(err)) {
            ext4_journal_abort_handle(where, line, __func__, bh,
                                      handle, err);
            if (inode == NULL) {
                pr_err("EXT4: jbd2_journal_dirty_metadata "
                       "failed: handle type %u started at "
                       "line %u, credits %u/%u, errcode %d",
                       handle->h_type,
                       handle->h_line_no,
                       handle->h_requested_credits,
                       handle->h_buffer_credits, err);
                return err;
            }
            ext4_error_inode(inode, where, line,
                             bh->b_blocknr,
                             "journal_dirty_metadata failed: "
                             "handle type %u started at line %u, "
                             "credits %u/%u, errcode %d",
                             handle->h_type,
                             handle->h_line_no,
                             handle->h_requested_credits,
                             handle->h_buffer_credits, err);
        }
    } else {
        if (inode)
            mark_buffer_dirty_inode(bh, inode);
        else
            mark_buffer_dirty(bh);
        if (inode && inode_needs_sync(inode)) {
            sync_dirty_buffer(bh);
            if (buffer_req(bh) && !buffer_uptodate(bh)) {
                struct ext4_super_block *es;

                es = EXT4_SB(inode->i_sb)->s_es;
                es->s_last_error_block =
                    cpu_to_le64(bh->b_blocknr);
                ext4_error_inode(inode, where, line,
                                 bh->b_blocknr,
                                 "IO error syncing itable block");
                err = -EIO;
            }
        }
    }
    return err;
}
static int ufs_fmp_run(struct device *dev, uint32_t mode, uint8_t *data,
                       uint32_t len, uint32_t write)
{
    int ret = 0;
    struct ufs_hba *hba;
    struct ufs_fmp_work *work;
    struct Scsi_Host *host;
    static struct buffer_head *bh;

    work = dev_get_drvdata(dev);
    if (!work) {
        dev_err(dev, "Fail to get work from platform device\n");
        return -ENODEV;
    }
    host = work->host;
    hba = shost_priv(host);
    hba->self_test_mode = mode;

    bh = __getblk(work->bdev, work->sector, FMP_BLK_SIZE);
    if (!bh) {
        dev_err(dev, "Fail to get block from bdev\n");
        return -ENODEV;
    }
    hba->self_test_bh = bh;
    get_bh(bh);

    if (write == WRITE_MODE) {
        /* Write path: copy the test vector into the buffer, force it
         * to disk, and check for a write error. */
        memcpy(bh->b_data, data, len);
        set_buffer_dirty(bh);
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh)) {
            dev_err(dev, "IO error syncing for FMP fips write\n");
            ret = -EIO;
            goto out;
        }
        memset(bh->b_data, 0, FMP_BLK_SIZE);
    } else {
        /* Read path: issue a synchronous read and copy the data back. */
        lock_buffer(bh);
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ_SYNC, bh);
        wait_on_buffer(bh);
        if (unlikely(!buffer_uptodate(bh))) {
            ret = -EIO;
            goto out;
        }
        memcpy(data, bh->b_data, len);
    }
out:
    hba->self_test_mode = 0;
    hba->self_test_bh = NULL;
    put_bh(bh);
    return ret;
}
static int jread(struct buffer_head **bhp, journal_t *journal,
                 unsigned int offset)
{
    int err;
    unsigned long long blocknr;
    struct buffer_head *bh;

    *bhp = NULL;

    if (offset >= journal->j_maxlen) {
        printk(KERN_ERR "JBD: corrupted journal superblock\n");
        return -EIO;
    }

    err = jbd2_journal_bmap(journal, offset, &blocknr);
    if (err) {
        printk(KERN_ERR "JBD: bad block at offset %u\n", offset);
        return err;
    }

    bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
    if (!bh)
        return -ENOMEM;

    if (buffer_uptodate(bh)) {
        /* Debug aid: if the buffer lock is already held, someone else
         * is using this buffer concurrently. */
        if (!trylock_buffer(bh)) {
            lock_buffer(bh);
            printk("debug: concurrent exist\n");
        }
        unlock_buffer(bh);
    }

    if (!buffer_uptodate(bh)) {
        /* If this is a brand new buffer, start readahead.
           Otherwise, we assume we are already reading it. */
        if (!buffer_req(bh))
            do_readahead(journal, offset);
        wait_on_buffer(bh);
    }

    if (!buffer_uptodate(bh)) {
        printk(KERN_ERR "JBD: Failed to read block at offset %u\n",
               offset);
        brelse(bh);
        return -EIO;
    }

    *bhp = bh;
    return 0;
}
int __ext4bf_handle_dirty_metadata(const char *where, unsigned int line,
                                   handle_t *handle, struct inode *inode,
                                   struct buffer_head *bh)
{
    int err = 0;

    if (ext4bf_handle_valid(handle)) {
#ifdef DCHECKSUM
        /* ext4bf: handle cases where it is a data block. */
        if (bh && bh->b_blocktype == B_BLOCKTYPE_DATA) {
#endif
#ifdef PARTJ
            if (!buffer_new(bh))
                err = jbdbf_journal_dirty_metadata(handle, bh);
            else
#endif
#ifdef DCHECKSUM
                jbdbf_journal_dirty_data(handle, bh);
        } else
#endif
            err = jbdbf_journal_dirty_metadata(handle, bh);
        if (err) {
            /* Errors can only happen if there is a bug */
            handle->h_err = err;
            __ext4bf_journal_stop(where, line, handle);
        }
    } else {
        if (inode)
            mark_buffer_dirty_inode(bh, inode);
        else
            mark_buffer_dirty(bh);
        if (inode && inode_needs_sync(inode)) {
            sync_dirty_buffer(bh);
            if (buffer_req(bh) && !buffer_uptodate(bh)) {
                struct ext4bf_super_block *es;

                es = EXT4_SB(inode->i_sb)->s_es;
                es->s_last_error_block =
                    cpu_to_le64(bh->b_blocknr);
                ext4bf_error_inode(inode, where, line,
                                   bh->b_blocknr,
                                   "IO error syncing itable block");
                err = -EIO;
            }
        }
    }
    return err;
}
static int __sysv_write_inode(struct inode *inode, int wait)
{
    struct super_block * sb = inode->i_sb;
    struct sysv_sb_info * sbi = SYSV_SB(sb);
    struct buffer_head * bh;
    struct sysv_inode * raw_inode;
    struct sysv_inode_info * si;
    unsigned int ino, block;
    int err = 0;

    ino = inode->i_ino;
    if (!ino || ino > sbi->s_ninodes) {
        printk("Bad inode number on dev %s: %d is out of range\n",
               inode->i_sb->s_id, ino);
        return -EIO;
    }
    raw_inode = sysv_raw_inode(sb, ino, &bh);
    if (!raw_inode) {
        printk("unable to read i-node block\n");
        return -EIO;
    }

    raw_inode->i_mode = cpu_to_fs16(sbi, inode->i_mode);
    raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(inode->i_uid));
    raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(inode->i_gid));
    raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink);
    raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
    raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
    raw_inode->i_mtime = cpu_to_fs32(sbi, inode->i_mtime.tv_sec);
    raw_inode->i_ctime = cpu_to_fs32(sbi, inode->i_ctime.tv_sec);

    si = SYSV_I(inode);
    if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
        si->i_data[0] = cpu_to_fs32(sbi, old_encode_dev(inode->i_rdev));
    for (block = 0; block < 10+1+1+1; block++)
        write3byte(sbi, (u8 *)&si->i_data[block],
                   &raw_inode->i_data[3*block]);
    mark_buffer_dirty(bh);
    if (wait) {
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh)) {
            printk("IO error syncing sysv inode [%s:%08x]\n",
                   sb->s_id, ino);
            err = -EIO;
        }
    }
    brelse(bh);
    /* Propagate any sync error instead of discarding it. */
    return err;
}
static int adfs_fplus_sync(struct adfs_dir *dir)
{
    int err = 0;
    int i;

    for (i = dir->nr_buffers - 1; i >= 0; i--) {
        struct buffer_head *bh = dir->bh[i];
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh))
            err = -EIO;
    }

    return err;
}
static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
    struct bfs_sb_info *info = BFS_SB(inode->i_sb);
    unsigned int ino = (u16)inode->i_ino;
    unsigned long i_sblock;
    struct bfs_inode *di;
    struct buffer_head *bh;
    int err = 0;

    dprintf("ino=%08x\n", ino);

    di = find_inode(inode->i_sb, ino, &bh);
    if (IS_ERR(di))
        return PTR_ERR(di);

    mutex_lock(&info->bfs_lock);

    if (ino == BFS_ROOT_INO)
        di->i_vtype = cpu_to_le32(BFS_VDIR);
    else
        di->i_vtype = cpu_to_le32(BFS_VREG);

    di->i_ino = cpu_to_le16(ino);
    di->i_mode = cpu_to_le32(inode->i_mode);
    di->i_uid = cpu_to_le32(inode->i_uid);
    di->i_gid = cpu_to_le32(inode->i_gid);
    di->i_nlink = cpu_to_le32(inode->i_nlink);
    di->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
    di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
    di->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
    i_sblock = BFS_I(inode)->i_sblock;
    di->i_sblock = cpu_to_le32(i_sblock);
    di->i_eblock = cpu_to_le32(BFS_I(inode)->i_eblock);
    di->i_eoffset = cpu_to_le32(i_sblock * BFS_BSIZE + inode->i_size - 1);

    mark_buffer_dirty(bh);
    if (wbc->sync_mode == WB_SYNC_ALL) {
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh))
            err = -EIO;
    }
    brelse(bh);
    mutex_unlock(&info->bfs_lock);
    return err;
}
/*
 * write back super block information to disk
 *
 * @vsb: the VFS super block structure
 * @wait: whether to wait for the super block to be synced to disk
 *
 * return: 0 on success, error code otherwise
 */
int wtfs_sync_super(struct super_block * vsb, int wait)
{
    struct wtfs_sb_info * sbi = WTFS_SB_INFO(vsb);
    struct wtfs_super_block * sb = NULL;
    struct buffer_head * bh = NULL;
    int ret = -EIO;

    if ((bh = sb_bread(vsb, WTFS_RB_SUPER)) == NULL) {
        wtfs_error("unable to read the super block\n");
        goto error;
    }

    sb = (struct wtfs_super_block *)bh->b_data;
    sb->version = cpu_to_wtfs64(sbi->version);
    sb->magic = cpu_to_wtfs64(sbi->magic);
    sb->block_size = cpu_to_wtfs64(sbi->block_size);
    sb->block_count = cpu_to_wtfs64(sbi->block_count);
    sb->inode_table_first = cpu_to_wtfs64(sbi->inode_table_first);
    sb->inode_table_count = cpu_to_wtfs64(sbi->inode_table_count);
    sb->block_bitmap_first = cpu_to_wtfs64(sbi->block_bitmap_first);
    sb->block_bitmap_count = cpu_to_wtfs64(sbi->block_bitmap_count);
    sb->inode_bitmap_first = cpu_to_wtfs64(sbi->inode_bitmap_first);
    sb->inode_bitmap_count = cpu_to_wtfs64(sbi->inode_bitmap_count);
    sb->inode_count = cpu_to_wtfs64(sbi->inode_count);
    sb->free_block_count = cpu_to_wtfs64(sbi->free_block_count);

    mark_buffer_dirty(bh);
    if (wait) {
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh)) {
            wtfs_error("super block sync failed\n");
            goto error;
        }
    }
    brelse(bh);

    return 0;

error:
    if (bh != NULL) {
        brelse(bh);
    }
    return ret;
}
int sysv_sync_inode(struct inode * inode)
{
    int err = 0;
    struct buffer_head *bh;

    bh = sysv_update_inode(inode);
    if (bh && buffer_dirty(bh)) {
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh)) {
            printk("IO error syncing sysv inode [%s:%08lx]\n",
                   inode->i_sb->s_id, inode->i_ino);
            err = -1;
        }
    }
    else if (!bh)
        err = -1;
    brelse(bh);
    return err;
}
static int jread(struct buffer_head **bhp, journal_t *journal,
                 unsigned int offset)
{
    unsigned int blocknr;
    struct buffer_head *bh;

    *bhp = NULL;

    J_ASSERT(offset < journal->j_maxlen);

    blocknr = offset;
    if (journal->j_inode)
        blocknr = bmap(journal->j_inode, offset);

    if (!blocknr) {
        printk(KERN_ERR "JFS: bad block at offset %u\n", offset);
        return -EIO;
    }

    bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
    if (!bh)
        return -ENOMEM;

    if (!buffer_uptodate(bh)) {
        /* If this is a brand new buffer, start readahead.
           Otherwise, we assume we are already reading it. */
        if (!buffer_req(bh))
            do_readahead(journal, offset);
        wait_on_buffer(bh);
    }

    if (!buffer_uptodate(bh)) {
        printk(KERN_ERR "JFS: Failed to read block at offset %u\n",
               offset);
        brelse(bh);
        return -EIO;
    }

    *bhp = bh;
    return 0;
}
static int jread(struct buffer_head **bhp, journal_t *journal,
                 unsigned int offset)
{
    int err;
    unsigned long long blocknr;
    struct buffer_head *bh;

    *bhp = NULL;

    if (offset >= journal->j_maxlen) {
        printk(KERN_ERR "JBD2: corrupted journal superblock\n");
        return -EIO;
    }

    err = jbd2_journal_bmap(journal, offset, &blocknr);
    if (err) {
        printk(KERN_ERR "JBD2: bad block at offset %u\n", offset);
        return err;
    }

    bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
    if (!bh)
        return -ENOMEM;

    if (!buffer_uptodate(bh)) {
        if (!buffer_req(bh))
            do_readahead(journal, offset);
        wait_on_buffer(bh);
    }

    if (!buffer_uptodate(bh)) {
        printk(KERN_ERR "JBD2: Failed to read block at offset %u\n",
               offset);
        brelse(bh);
        return -EIO;
    }

    *bhp = bh;
    return 0;
}
int sfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
    int err = 0;
    struct buffer_head *bh;

    pr_debug("Enter: sfs_write_inode (ino = %ld)\n", inode->i_ino);
    bh = sfs_update_inode(inode);
    if (!bh)
        return -EIO;
    if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh)) {
            pr_debug("IO error syncing sfs inode 0x%lx\n",
                     inode->i_ino);
            err = -EIO;
        }
    }
    pr_debug("Leave: sfs_write_inode (ino = %ld)\n", inode->i_ino);
    brelse(bh);
    return err;
}
static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
{
    int err = 0;
    struct buffer_head *bh;

    if (INODE_VERSION(inode) == MINIX_V1)
        bh = V1_minix_update_inode(inode);
    else
        bh = V2_minix_update_inode(inode);
    if (!bh)
        return -EIO;
    if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh)) {
            printk("IO error syncing minix inode [%s:%08lx]\n",
                   inode->i_sb->s_id, inode->i_ino);
            err = -EIO;
        }
    }
    brelse(bh);
    return err;
}
int qnx4_sync_inode(struct inode *inode)
{
    int err = 0;
#if 0
    struct buffer_head *bh;

    bh = qnx4_update_inode(inode);
    if (bh && buffer_dirty(bh)) {
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh)) {
            printk("IO error syncing qnx4 inode [%s:%08lx]\n",
                   inode->i_sb->s_id, inode->i_ino);
            err = -1;
        }
        brelse(bh);
    } else if (!bh) {
        err = -1;
    }
#endif
    return err;
}
// Multi-version access to this row: serve a read, pre-write, write, or
// abort (XP) request under timestamp ordering, keeping a bounded history
// of committed versions.
RC Row_mvcc::access(txn_man * txn, TsType type, row_t * row) {
    RC rc = RCOK;
    ts_t ts = txn->get_ts();

    uint64_t t1 = get_sys_clock();
    if (g_central_man)
        glob_manager->lock_row(_row);
    else
        while (!ATOM_CAS(blatch, false, true))
            PAUSE
        //pthread_mutex_lock( latch );
    uint64_t t2 = get_sys_clock();
    INC_STATS(txn->get_thd_id(), debug4, t2 - t1);

#if DEBUG_CC
    for (uint32_t i = 0; i < _req_len; i++)
        if (_requests[i].valid) {
            assert(_requests[i].ts > _latest_wts);
            if (_exists_prewrite)
                assert(_prewrite_ts < _requests[i].ts);
        }
#endif
    if (type == R_REQ) {
        if (ts < _oldest_wts)
            // the version was already recycled... This should be very rare
            rc = Abort;
        else if (ts > _latest_wts) {
            if (_exists_prewrite && _prewrite_ts < ts) {
                // exists a pending prewrite request before the current read. should wait.
                rc = WAIT;
                buffer_req(R_REQ, txn, false);
                txn->ts_ready = false;
            } else {
                // should just read
                rc = RCOK;
                txn->cur_row = _latest_row;
                if (ts > _max_served_rts)
                    _max_served_rts = ts;
            }
        } else {
            rc = RCOK;
            // ts is between _oldest_wts and _latest_wts, should find the correct version
            uint32_t the_ts = 0;
            uint32_t the_i = _his_len;
            for (uint32_t i = 0; i < _his_len; i++) {
                if (_write_history[i].valid
                        && _write_history[i].ts < ts
                        && _write_history[i].ts > the_ts) {
                    the_ts = _write_history[i].ts;
                    the_i = i;
                }
            }
            if (the_i == _his_len)
                txn->cur_row = _row;
            else
                txn->cur_row = _write_history[the_i].row;
        }
    } else if (type == P_REQ) {
        if (ts < _latest_wts || ts < _max_served_rts
                || (_exists_prewrite && _prewrite_ts > ts))
            rc = Abort;
        else if (_exists_prewrite) { // _prewrite_ts < ts
            rc = WAIT;
            buffer_req(P_REQ, txn, false);
            txn->ts_ready = false;
        } else {
            rc = RCOK;
            row_t * res_row = reserveRow(ts, txn);
            assert(res_row);
            res_row->copy(_latest_row);
            txn->cur_row = res_row;
        }
    } else if (type == W_REQ) {
        rc = RCOK;
        assert(ts > _latest_wts);
        assert(row == _write_history[_prewrite_his_id].row);
        _write_history[_prewrite_his_id].valid = true;
        _write_history[_prewrite_his_id].ts = ts;
        _latest_wts = ts;
        _latest_row = row;
        _exists_prewrite = false;
        _num_versions++;
        update_buffer(txn, W_REQ);
    } else if (type == XP_REQ) {
        assert(row == _write_history[_prewrite_his_id].row);
        _write_history[_prewrite_his_id].valid = false;
        _write_history[_prewrite_his_id].reserved = false;
        _exists_prewrite = false;
        update_buffer(txn, XP_REQ);
    } else
        assert(false);

    INC_STATS(txn->get_thd_id(), debug3, get_sys_clock() - t2);

    if (g_central_man)
        glob_manager->release_row(_row);
    else
        blatch = false;
        //pthread_mutex_unlock( latch );

    return rc;
}
// Multi-version access (list-based variant): reads may wait on pending
// pre-writes, and read/write history lists are recycled once they grow
// past g_his_recycle_len.
RC Row_mvcc::access(TxnManager * txn, TsType type, row_t * row) {
    RC rc = RCOK;
    ts_t ts = txn->get_timestamp();
    uint64_t starttime = get_sys_clock();

    if (g_central_man)
        glob_manager.lock_row(_row);
    else
        pthread_mutex_lock( latch );

    if (type == R_REQ) {
        // figure out if ts is in interval(prewrite(x))
        bool conf = conflict(type, ts);
        if (conf && rreq_len < g_max_read_req) {
            rc = WAIT;
            //txn->wait_starttime = get_sys_clock();
            DEBUG("buf R_REQ %ld %ld\n", txn->get_txn_id(), _row->get_primary_key());
            buffer_req(R_REQ, txn);
            txn->ts_ready = false;
        } else if (conf) {
            rc = Abort;
            printf("\nshould never happen. rreq_len=%ld", rreq_len);
        } else {
            // return results immediately.
            rc = RCOK;
            MVHisEntry * whis = writehis;
            while (whis != NULL && whis->ts > ts)
                whis = whis->next;
            row_t * ret = (whis == NULL) ? _row : whis->row;
            txn->cur_row = ret;
            insert_history(ts, NULL);
            assert(strstr(_row->get_table_name(), ret->get_table_name()));
        }
    } else if (type == P_REQ) {
        if (conflict(type, ts)) {
            rc = Abort;
        } else if (preq_len < g_max_pre_req) {
            DEBUG("buf P_REQ %ld %ld\n", txn->get_txn_id(), _row->get_primary_key());
            buffer_req(P_REQ, txn);
            rc = RCOK;
        } else {
            rc = Abort;
        }
    } else if (type == W_REQ) {
        rc = RCOK;
        // the corresponding prewrite request is debuffered.
        insert_history(ts, row);
        DEBUG("debuf %ld %ld\n", txn->get_txn_id(), _row->get_primary_key());
        MVReqEntry * req = debuffer_req(P_REQ, txn);
        assert(req != NULL);
        return_req_entry(req);
        update_buffer(txn);
    } else if (type == XP_REQ) {
        DEBUG("debuf %ld %ld\n", txn->get_txn_id(), _row->get_primary_key());
        MVReqEntry * req = debuffer_req(P_REQ, txn);
        assert(req != NULL);
        return_req_entry(req);
        update_buffer(txn);
    } else
        assert(false);

    if (rc == RCOK) {
        if (whis_len > g_his_recycle_len || rhis_len > g_his_recycle_len) {
            ts_t t_th = glob_manager.get_min_ts(txn->get_thd_id());
            if (readhistail && readhistail->ts < t_th)
                clear_history(R_REQ, t_th);
            // Here is a tricky bug. The oldest transaction might be
            // reading an even older version whose timestamp < t_th.
            // But we cannot recycle that version because it is still being used.
            // So the HACK here is to make sure that the first version older than
            // t_th not be recycled.
            if (whis_len > 1 && writehistail->prev->ts < t_th) {
                row_t * latest_row = clear_history(W_REQ, t_th);
                if (latest_row != NULL) {
                    assert(_row != latest_row);
                    _row->copy(latest_row);
                }
            }
        }
    }

    uint64_t timespan = get_sys_clock() - starttime;
    txn->txn_stats.cc_time += timespan;
    txn->txn_stats.cc_time_short += timespan;

    if (g_central_man)
        glob_manager.release_row(_row);
    else
        pthread_mutex_unlock( latch );

    return rc;
}
static int __omfs_write_inode(struct inode *inode, int wait)
{
    struct omfs_inode *oi;
    struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
    struct buffer_head *bh, *bh2;
    u64 ctime;
    int i;
    int ret = -EIO;
    int sync_failed = 0;

    /* get current inode since we may have written sibling ptrs etc. */
    bh = omfs_bread(inode->i_sb, inode->i_ino);
    if (!bh)
        goto out;

    oi = (struct omfs_inode *) bh->b_data;
    oi->i_head.h_self = cpu_to_be64(inode->i_ino);
    if (S_ISDIR(inode->i_mode))
        oi->i_type = OMFS_DIR;
    else if (S_ISREG(inode->i_mode))
        oi->i_type = OMFS_FILE;
    else {
        printk(KERN_WARNING "omfs: unknown file type: %d\n",
               inode->i_mode);
        goto out_brelse;
    }

    oi->i_head.h_body_size = cpu_to_be32(sbi->s_sys_blocksize -
                                         sizeof(struct omfs_header));
    oi->i_head.h_version = 1;
    oi->i_head.h_type = OMFS_INODE_NORMAL;
    oi->i_head.h_magic = OMFS_IMAGIC;
    oi->i_size = cpu_to_be64(inode->i_size);

    ctime = inode->i_ctime.tv_sec * 1000LL +
            ((inode->i_ctime.tv_nsec + 999) / 1000);
    oi->i_ctime = cpu_to_be64(ctime);

    omfs_update_checksums(oi);

    mark_buffer_dirty(bh);
    if (wait) {
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh))
            sync_failed = 1;
    }

    /* if mirroring writes, copy to next fsblock */
    for (i = 1; i < sbi->s_mirrors; i++) {
        bh2 = omfs_bread(inode->i_sb, inode->i_ino + i);
        if (!bh2)
            goto out_brelse;

        memcpy(bh2->b_data, bh->b_data, bh->b_size);
        mark_buffer_dirty(bh2);
        if (wait) {
            sync_dirty_buffer(bh2);
            if (buffer_req(bh2) && !buffer_uptodate(bh2))
                sync_failed = 1;
        }
        brelse(bh2);
    }
    ret = (sync_failed) ? -EIO : 0;

out_brelse:
    brelse(bh);
out:
    return ret;
}