/*
 * Prepare log info for replay and pin logblocks.
 *
 * Walks the on-disk log chain backwards, starting from the head block
 * recorded in the superblock, and reads all logcount blocks into the
 * logmap cache, pinning each one at its logical index.  The logical
 * index => on-disk block number mapping is recorded in rp->blocknrs
 * for use during replay.
 *
 * Returns the allocated replay state, or an ERR_PTR() on failure.
 * On error, every log block pinned so far is unpinned and the replay
 * state is freed.
 */
static struct replay *replay_prepare(struct sb *sb)
{
	if(DEBUG_MODE_K==1)
	{
		printk(KERN_INFO"%25s %25s %4d #in\n",__FILE__,__func__,__LINE__);
	}
	/* Head of the log chain and block count, as saved in the super. */
	block_t logchain = be64_to_cpu(sb->super.logchain);
	unsigned i, logcount = be32_to_cpu(sb->super.logcount);
	struct replay *rp;
	struct buffer_head *buffer;
	int err;

	/* FIXME: this address array is quick hack. Rethink about log
	 * block management and log block address. */
	rp = alloc_replay(sb, logcount);
	if (IS_ERR(rp))
		return rp;

	/* FIXME: maybe, we should use bufvec to read log blocks */
	trace("load %u logblocks", logcount);
	/*
	 * The chain links newest -> oldest, so iterate indices from
	 * logcount - 1 down to 0 while following logchain pointers.
	 */
	i = logcount;
	while (i-- > 0) {
		struct logblock *log;

		/* Pin the log block at logical index i in the logmap. */
		buffer = blockget(mapping(sb->logmap), i);
		if (!buffer) {
			/*
			 * Block i was never pinned; bump i so the error
			 * path unpins only [i + 1, logcount).
			 */
			i++;
			err = -ENOMEM;
			goto error;
		}
		assert(bufindex(buffer) == i);

		/* Read the physical block the chain currently points at. */
		err = blockio(READ, sb, buffer, logchain);
		if (err)
			goto error;

		err = replay_check_log(rp, buffer);
		if (err)
			goto error;

		/* Store index => blocknr map */
		rp->blocknrs[bufindex(buffer)] = logchain;

		/* Follow the chain to the previous (older) log block. */
		log = bufdata(buffer);
		logchain = be64_to_cpu(log->logchain);
	}

	return rp;

error:
	free_replay(rp);
	/* Unpin exactly the buffers that were pinned: [i, logcount). */
	replay_unpin_logblocks(sb, i, logcount);
	return ERR_PTR(err);
}
/*
 * Unpin log blocks, and prepare for future logging.
 *
 * Tears down the replay state built by replay_prepare(): drops any
 * orphans still queued on rp->log_orphan_add, frees rp, releases the
 * pinned log blocks, and positions lognext past the replayed log so
 * new logging starts a fresh cycle.
 */
static void replay_done(struct replay *rp)
{
	struct sb *sb = rp->sb;
	unsigned logcount;

	if(DEBUG_MODE_K==1)
	{
		printk(KERN_INFO"%25s %25s %4d #in\n",__FILE__,__func__,__LINE__);
	}

	/* Drop orphans left queued during replay (covers the error path). */
	clean_orphan_list(&rp->log_orphan_add);
	free_replay(rp);

	/* Resume logging right after the blocks that were just replayed. */
	logcount = be32_to_cpu(sb->super.logcount);
	sb->lognext = logcount;
	replay_unpin_logblocks(sb, 0, logcount);
	log_finish_cycle(sb, 0);
}
/*
 * Free every replay record chained in slot sn and clear the slot's
 * registration flag and cursors.  Also clears the shared draw cursor.
 * Does nothing when the slot holds no records.
 */
void reset_replay_slot( replay_slot_t sn )
{
	replay_t *node = g_replay_head[sn];

	/* Empty slot: leave all state untouched, as the original did. */
	if( node == NULL )
		return;

	/* Walk the list, saving each successor before freeing the node. */
	while( node != NULL ){
		replay_t *succ = node->next;
		free_replay( node );
		node = succ;
	}

	g_flg_reg_replay[sn] = FALSE;
	g_replay_head[sn] = NULL;
	g_replay_cur[sn] = NULL;
	g_replay_cur_draw = NULL;
}