/* --------------------------------------------------------------------------------
 * Rename the file "org_fn" to a unique timestamp-derived name, but only if it
 * exists; if the file is absent this is a no-op.
 *
 * org_fn / org_fn_len      : null-terminated original file name and its length
 * rename_fn / rename_fn_len: out parameters; receive the name the file was (or
 *                            would be) renamed to and its length
 * ustatus                  : out parameter; receives the secondary status code
 *                            on failure
 *
 * Returns RENAME_NOT_REQD, RENAME_SUCCESS or RENAME_FAILED.
 * --------------------------------------------------------------------------------
 */
int rename_file_if_exists(char *org_fn, int org_fn_len, char *rename_fn, int *rename_fn_len, uint4 *ustatus)
{
	int		ret;
	jnl_tm_t	stamp;
	mstr		fn_mstr;

	/* Default the output name to the original; include the terminating NULL byte */
	memcpy(rename_fn, org_fn, org_fn_len + 1);
	*rename_fn_len = org_fn_len;
	fn_mstr.addr = org_fn;
	fn_mstr.len = org_fn_len;
	ret = gtm_file_stat(&fn_mstr, NULL, NULL, FALSE, ustatus);
	if (FILE_NOT_FOUND == ret)
		return RENAME_NOT_REQD;	/* file does not exist : nothing to rename */
	if (FILE_STAT_ERROR == ret)
	{
		assert(SS_NORMAL != *ustatus);
		return RENAME_FAILED;
	}
	/* File is present in the system; derive a unique rename target from the current time */
	assert(0 < MAX_FN_LEN - org_fn_len - 1);
	JNL_SHORT_TIME(stamp);
	ret = prepare_unique_name(org_fn, org_fn_len, "", "", rename_fn, rename_fn_len, stamp, ustatus);
	if (SS_NORMAL != ret)
	{	/* "prepare_unique_name" would not have set "ustatus" to the error code. So set it here and return */
		assert(SS_NORMAL == *ustatus);
		*ustatus = ret;
		assert(SS_NORMAL != *ustatus);
		return RENAME_FAILED;
	}
	assert(0 == rename_fn[*rename_fn_len]);
	ret = gtm_rename(org_fn, org_fn_len, rename_fn, *rename_fn_len, ustatus);
	if (SS_NORMAL != ret)
	{	/* rename failed : report through the image-appropriate channel and bail out */
		*ustatus = ret;
		assert(SS_NORMAL != *ustatus);
		if (IS_GTM_IMAGE)
			send_msg_csa(CSA_ARG(NULL) VARLSTCNT(9) ERR_RENAMEFAIL, 4, org_fn_len, org_fn,
					*rename_fn_len, rename_fn, ret, 0, *ustatus);
		else
			gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT1(8) ERR_RENAMEFAIL, 4, org_fn_len, org_fn,
					*rename_fn_len, rename_fn, ret, PUT_SYS_ERRNO(*ustatus));
		return RENAME_FAILED;
	}
	/* Success : log the rename */
	if (IS_GTM_IMAGE)
		send_msg_csa(CSA_ARG(NULL) VARLSTCNT(6) ERR_FILERENAME, 4, org_fn_len, org_fn, *rename_fn_len, rename_fn);
	else
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(6) ERR_FILERENAME, 4, org_fn_len, org_fn, *rename_fn_len, rename_fn);
	return RENAME_SUCCESS;
}
/* Close the region's current journal file (if open), report the incoming error "sts",
 * and attempt to cut over to a freshly created journal file with no back-link to the
 * previous journal chain.
 *
 * reg : region whose journal is being switched
 * sts : error status that triggered the switch (ERR_JNLFILOPN means no file was open)
 *
 * Returns 0 on success, or ERR_JNLINVALID if the new journal file could not be created
 * (in which case jpc->status/status2 carry the creation failure codes).
 */
uint4 jnl_file_open_switch(gd_region *reg, uint4 sts)
{
	sgmnt_addrs		*csa;
	jnl_private_control	*jpc;
	jnl_create_info		cinfo;
	char			prev_jnl_fn[JNL_NAME_SIZE];

	csa = &FILE_INFO(reg)->s_addrs;
	jpc = csa->jnl;
	assert((ERR_JNLFILOPN != sts) && (NOJNL != jpc->channel) || (ERR_JNLFILOPN == sts) && (NOJNL == jpc->channel));
	if ((ERR_JNLFILOPN != sts) && (NOJNL != jpc->channel))
		F_CLOSE(jpc->channel);
	jpc->channel = NOJNL;
	jnl_send_oper(jpc, sts);	/* log the error that forced the switch */
	/* Attempt to create a new journal file */
	memset(&cinfo, 0, sizeof(cinfo));
	cinfo.status = cinfo.status2 = SS_NORMAL;
	cinfo.prev_jnl = &prev_jnl_fn[0];
	set_jnl_info(reg, &cinfo);
	cinfo.no_prev_link = TRUE;	/* new file deliberately does not link back to the old journal chain */
	cinfo.no_rename = FALSE;
	if (!jgbl.forw_phase_recovery)
		JNL_SHORT_TIME(jgbl.gbl_jrec_time);	/* needed for cre_jnl_file() */
	/* else mur_output_record() would have already set jgbl.gbl_jrec_time */
	assert(jgbl.gbl_jrec_time);
	if (EXIT_NRM != cre_jnl_file(&cinfo))
	{	/* creation failed : preserve the failure codes for the caller and report JNLINVALID */
		jpc->status = cinfo.status;
		jpc->status2 = cinfo.status2;
		return ERR_JNLINVALID;
	}
	jpc->status = SS_NORMAL;
	send_msg(VARLSTCNT(6) ERR_PREVJNLLINKCUT, 4, JNL_LEN_STR(csa->hdr), DB_LEN_STR(reg));
	assert(csa->hdr->jnl_file_len == cinfo.jnl_len);
	assert(0 == memcmp(csa->hdr->jnl_file_name, cinfo.jnl, cinfo.jnl_len));
	return 0;
}
/* This function is called primarily to append a new histinfo record to the replication instance file by one of the following
 * 1) MUPIP REPLIC -SOURCE -START -ROOTPRIMARY command (after forking the child source server) if it created the journal pool.
 * 2) MUPIP REPLIC -SOURCE -ACTIVATE -ROOTPRIMARY command if this is a propagating primary to root primary transition.
 * In addition, this function also initializes the "lms_group_info" field in the instance file (from the "inst_info" field)
 * if the current value is NULL.
 *
 * start_seqno : the journal sequence number at which this instance becomes root primary; recorded both in the
 *               instance file header and in the new history record.
 *
 * Side effects: mutates the instance file header and journal pool control fields (under the journal pool lock),
 * appends a history record via repl_inst_histinfo_add(), and may switch journal files in every journaled region.
 */
void gtmsource_rootprimary_init(seq_num start_seqno)
{
	unix_db_info		*udi;
	repl_histinfo		histinfo;
	boolean_t		was_crit, switch_jnl;
	gd_region		*reg, *region_top;
	jnl_private_control	*jpc;
	jnl_buffer_ptr_t	jbp;
	uint4			jnl_status;

	udi = FILE_INFO(jnlpool.jnlpool_dummy_reg);
	assert(NULL != jnlpool.repl_inst_filehdr);
	/* Update journal pool fields to reflect this is a root primary startup and updates are enabled */
	assert(!udi->s_addrs.hold_onto_crit || jgbl.onlnrlbk);
	was_crit = udi->s_addrs.now_crit;	/* remember whether we already held the lock so we only release what we grabbed */
	if (!was_crit)
		grab_lock(jnlpool.jnlpool_dummy_reg, TRUE, ASSERT_NO_ONLINE_ROLLBACK);
	jnlpool.repl_inst_filehdr->root_primary_cycle++;
	/* If this instance is transitioning from a non-rootprimary to rootprimary, switch journal files.
	 * This helps with maintaining accurate value of csd->zqgblmod_tn when the former primary connects
	 * to the current primary through a fetchresync-rollback or receiver-server-autorollback.
	 */
	switch_jnl = (!jnlpool.repl_inst_filehdr->was_rootprimary && (0 < jnlpool.repl_inst_filehdr->num_histinfo));
	jnlpool.repl_inst_filehdr->was_rootprimary = TRUE;
	assert(start_seqno >= jnlpool.jnlpool_ctl->start_jnl_seqno);
	assert(start_seqno == jnlpool.jnlpool_ctl->jnl_seqno);
	jnlpool.repl_inst_filehdr->jnl_seqno = start_seqno;
	assert(jgbl.onlnrlbk || jnlpool.jnlpool_ctl->upd_disabled);
	if (!jgbl.onlnrlbk)
		jnlpool.jnlpool_ctl->upd_disabled = FALSE;	/* root primary accepts updates */
	if (IS_REPL_INST_UUID_NULL(jnlpool.repl_inst_filehdr->lms_group_info))
	{	/* This is the first time this instance is being brought up either as a root primary or as a propagating
		 * primary. Initialize the "lms_group_info" fields in the instance file header in journal pool shared memory.
		 * They will be flushed to the instance file as part of the "repl_inst_histinfo_add -> repl_inst_flush_filehdr"
		 * function invocation below.
		 */
		assert('\0' == jnlpool.repl_inst_filehdr->lms_group_info.created_nodename[0]);
		assert('\0' == jnlpool.repl_inst_filehdr->lms_group_info.this_instname[0]);
		assert(!jnlpool.repl_inst_filehdr->lms_group_info.creator_pid);
		jnlpool.repl_inst_filehdr->lms_group_info = jnlpool.repl_inst_filehdr->inst_info;
		assert('\0' != jnlpool.repl_inst_filehdr->lms_group_info.created_nodename[0]);
		DBG_CHECK_CREATED_NODENAME(jnlpool.repl_inst_filehdr->lms_group_info.created_nodename);
		assert('\0' != jnlpool.repl_inst_filehdr->lms_group_info.this_instname[0]);
		assert(jnlpool.repl_inst_filehdr->lms_group_info.created_time);
		assert(jnlpool.repl_inst_filehdr->lms_group_info.creator_pid);
	}
	/* Initialize histinfo fields */
	memcpy(histinfo.root_primary_instname, jnlpool.repl_inst_filehdr->inst_info.this_instname, MAX_INSTNAME_LEN - 1);
	histinfo.root_primary_instname[MAX_INSTNAME_LEN - 1] = '\0';
	assert('\0' != histinfo.root_primary_instname[0]);
	histinfo.start_seqno = start_seqno;
	assert(jnlpool.jnlpool_ctl->strm_seqno[0] == jnlpool.repl_inst_filehdr->strm_seqno[0]);
	assert(jnlpool.repl_inst_filehdr->is_supplementary || (0 == jnlpool.jnlpool_ctl->strm_seqno[0]));
	/* strm_seqno is only meaningful on a supplementary instance */
	histinfo.strm_seqno = (!jnlpool.repl_inst_filehdr->is_supplementary) ? 0 : jnlpool.jnlpool_ctl->strm_seqno[0];
	histinfo.root_primary_cycle = jnlpool.repl_inst_filehdr->root_primary_cycle;
	assert(process_id == getpid());
	histinfo.creator_pid = process_id;
	JNL_SHORT_TIME(histinfo.created_time);
	histinfo.strm_index = 0;
	histinfo.history_type = HISTINFO_TYPE_NORMAL;
	NULL_INITIALIZE_REPL_INST_UUID(histinfo.lms_group);
	/* The following fields will be initialized in the "repl_inst_histinfo_add" function call below.
	 *	histinfo.histinfo_num
	 *	histinfo.prev_histinfo_num
	 *	histinfo.last_histinfo_num[]
	 */
	/* Add the histinfo record to the instance file and flush the changes in the journal pool to the file header */
	repl_inst_histinfo_add(&histinfo);
	if (!was_crit)
		rel_lock(jnlpool.jnlpool_dummy_reg);
	if (switch_jnl)
	{
		SET_GBL_JREC_TIME;	/* jnl_ensure_open/jnl_file_extend and its callees assume jgbl.gbl_jrec_time is set */
		for (reg = gd_header->regions, region_top = gd_header->regions + gd_header->n_regions; reg < region_top; reg++)
		{
			gv_cur_region = reg;
			change_reg();	/* sets cs_addrs/cs_data (needed by jnl_ensure_open) */
			if (!JNL_ENABLED(cs_addrs))
				continue;
			grab_crit(gv_cur_region);
			jpc = cs_addrs->jnl;
			/* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order of jnl
			 * records. This needs to be done BEFORE the jnl_ensure_open as that could write journal records
			 * (if it decides to switch to a new journal file)
			 */
			jbp = jpc->jnl_buff;
			ADJUST_GBL_JREC_TIME(jgbl, jbp);
			jnl_status = jnl_ensure_open();
			if (0 == jnl_status)
			{
				if (EXIT_ERR == SWITCH_JNL_FILE(jpc))
					rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(4) ERR_JNLEXTEND, 2, JNL_LEN_STR(cs_data));
			} else
			{	/* open failed : raise the error, with the secondary status if one is available */
				if (SS_NORMAL != jpc->status)
					rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(7) jnl_status, 4, JNL_LEN_STR(cs_data),
						DB_LEN_STR(gv_cur_region), jpc->status);
				else
					rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(cs_data),
						DB_LEN_STR(gv_cur_region));
			}
			rel_crit(gv_cur_region);
		}
	}
}
void dse_chng_bhead(void) { block_id blk; block_id *blkid_ptr; sgm_info *dummysi = NULL; int4 x; cache_rec_ptr_t cr; uchar_ptr_t bp; sm_uc_ptr_t blkBase; blk_hdr new_hdr; blk_segment *bs1, *bs_ptr; cw_set_element *cse; int4 blk_seg_cnt, blk_size; /* needed for BLK_INIT,BLK_SEG and BLK_FINI macros */ bool ismap; bool chng_blk; uint4 mapsize; uint4 jnl_status; error_def(ERR_DSEBLKRDFAIL); error_def(ERR_DSEFAIL); error_def(ERR_DBRDONLY); if (gv_cur_region->read_only) rts_error(VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region)); assert(update_array); /* reset new block mechanism */ update_array_ptr = update_array; chng_blk = FALSE; if (cli_present("BLOCK") == CLI_PRESENT) { if (!cli_get_hex("BLOCK",&blk)) return; if (blk < 0 || blk > cs_addrs->ti->total_blks) { util_out_print("Error: invalid block number.",TRUE); return; } patch_curr_blk = blk; } blk_size = cs_addrs->hdr->blk_size; ismap = (patch_curr_blk / cs_addrs->hdr->bplmap * cs_addrs->hdr->bplmap == patch_curr_blk); mapsize = BM_SIZE(cs_addrs->hdr->bplmap); t_begin_crit (ERR_DSEFAIL); if (!(bp = t_qread (patch_curr_blk,&dummy_hist.h[0].cycle,&dummy_hist.h[0].cr))) rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL); new_hdr = *(blk_hdr_ptr_t)bp; if (cli_present("LEVEL") == CLI_PRESENT) { if (!cli_get_num("LEVEL",&x)) { t_abort(gv_cur_region, cs_addrs); return; } if (ismap && (unsigned char)x != LCL_MAP_LEVL) { util_out_print("Error: invalid level for a bit map block.",TRUE); t_abort(gv_cur_region, cs_addrs); return; } if (!ismap && (x < 0 || x > MAX_BT_DEPTH + 1)) { util_out_print("Error: invalid level.",TRUE); t_abort(gv_cur_region, cs_addrs); return; } new_hdr.levl = (unsigned char)x; chng_blk = TRUE; if (new_hdr.bsiz < sizeof(blk_hdr)) new_hdr.bsiz = sizeof(blk_hdr); if (new_hdr.bsiz > blk_size) new_hdr.bsiz = blk_size; } if (cli_present("BSIZ") == CLI_PRESENT) { if (!cli_get_hex("BSIZ",&x)) { t_abort(gv_cur_region, cs_addrs); return; } if (ismap && x != mapsize) { util_out_print("Error: invalid bsiz.",TRUE); 
t_abort(gv_cur_region, cs_addrs); return; } else if (x < sizeof(blk_hdr) || x > blk_size) { util_out_print("Error: invalid bsiz.",TRUE); t_abort(gv_cur_region, cs_addrs); return; } chng_blk = TRUE; new_hdr.bsiz = x; } if (!chng_blk) t_abort(gv_cur_region, cs_addrs); else { BLK_INIT(bs_ptr, bs1); BLK_SEG(bs_ptr, bp + sizeof(new_hdr), new_hdr.bsiz - sizeof(new_hdr)); if (!BLK_FINI(bs_ptr, bs1)) { util_out_print("Error: bad block build.",TRUE); t_abort(gv_cur_region, cs_addrs); return; } t_write (patch_curr_blk, (unsigned char *)bs1, 0, 0, bp, new_hdr.levl, TRUE, FALSE); BUILD_AIMG_IF_JNL_ENABLED(cs_addrs, cs_data, non_tp_jfb_buff_ptr, cse); t_end(&dummy_hist, 0); } if (cli_present("TN") == CLI_PRESENT) { if (!cli_get_hex("TN",&x)) return; t_begin_crit(ERR_DSEFAIL); assert(cs_addrs->ti->early_tn == cs_addrs->ti->curr_tn); cs_addrs->ti->early_tn++; blkBase = t_qread(patch_curr_blk, &dummy_hist.h[0].cycle, &dummy_hist.h[0].cr); if (NULL == blkBase) { rel_crit(gv_cur_region); util_out_print("Error: Unable to read buffer.", TRUE); t_abort(gv_cur_region, cs_addrs); return; } /* Create a null update array for a block */ if (ismap) { BLK_ADDR(blkid_ptr, sizeof(block_id), block_id); *blkid_ptr = 0; t_write_map(patch_curr_blk, blkBase, (unsigned char *)blkid_ptr, cs_addrs->ti->curr_tn); cr_array_index = 0; block_saved = FALSE; } else { BLK_INIT(bs_ptr, bs1); BLK_SEG(bs_ptr, bp + sizeof(new_hdr), new_hdr.bsiz - sizeof(new_hdr)); BLK_FINI(bs_ptr, bs1); t_write(patch_curr_blk, (unsigned char *)bs1, 0, 0, blkBase, ((blk_hdr_ptr_t)blkBase)->levl, TRUE, FALSE); cr_array_index = 0; block_saved = FALSE; if (JNL_ENABLED(cs_data)) { JNL_SHORT_TIME(jgbl.gbl_jrec_time); /* needed for jnl_put_jrt_pini() and jnl_write_aimg_rec() */ jnl_status = jnl_ensure_open(); if (0 == jnl_status) { cse = (cw_set_element *)(&cw_set[0]); cse->new_buff = non_tp_jfb_buff_ptr; gvcst_blk_build(cse, (uchar_ptr_t)cse->new_buff, x); cse->done = TRUE; if (0 == cs_addrs->jnl->pini_addr) jnl_put_jrt_pini(cs_addrs); 
jnl_write_aimg_rec(cs_addrs, cse->blk, (blk_hdr_ptr_t)cse->new_buff); } else rts_error(VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(cs_data), DB_LEN_STR(gv_cur_region)); } } /* Pass the desired tn "x" as argument to bg_update or mm_update */ if (dba_bg == cs_addrs->hdr->acc_meth) bg_update(cw_set, cw_set + cw_set_depth, cs_addrs->ti->curr_tn, x, dummysi); else mm_update(cw_set, cw_set + cw_set_depth, cs_addrs->ti->curr_tn, x, dummysi); cs_addrs->ti->curr_tn++; assert(cs_addrs->ti->early_tn == cs_addrs->ti->curr_tn); /* the following code is analogous to that in t_end and should be maintained in a similar fashion */ while (cr_array_index) cr_array[--cr_array_index]->in_cw_set = FALSE; rel_crit(gv_cur_region); if (block_saved) backup_buffer_flush(gv_cur_region); UNIX_ONLY( if (unhandled_stale_timer_pop) process_deferred_stale(); ) wcs_timer_start(gv_cur_region, TRUE); }
/* Cluster Control Program (VMS) : final close of a clustered database region.
 * Cancels the region's timers, closes the journal file (writing an EPOCH record if
 * this node holds exclusive access and before-image journaling is active), releases
 * all VMS locks, unmaps the database section, coalesces the region's memory-list
 * entry with free neighbors, deletes the global section, and finally unlinks and
 * frees the db header.
 *
 * Fix: "name_dsc.dsc$a_pointer = §ion_name[1];" was a mis-encoding of
 * "&section_name[1]" (the "&sect" prefix had been converted to the section-sign
 * character); restored the correct address-of expression.
 */
void ccp_close1(ccp_db_header *db)
{
	ccp_db_header	*db0, *db1;
	ccp_que_entry	*que_ent;
	ccp_relque	*que_hd;
	mem_list	*ml_ptr, *ml_ptr_hold;
	sgmnt_addrs	*csa;
	vms_gds_info	*gds_info;
	unsigned char	section_name[GLO_NAME_MAXLEN];
	uint4		retadr[2], status, outaddrs[2];
	struct dsc$descriptor_s name_dsc;

	if (ccp_stop)
		ccp_stop_ctr--;
	if (db->stale_in_progress)
		sys$cantim(&db->stale_timer_id, PSL$C_USER);
	ccp_quemin_adjust(CCP_CLOSE_REGION);
	sys$cantim(&db->tick_timer_id, PSL$C_USER);
	sys$cantim(&db->quantum_timer_id, PSL$C_USER);
	db->segment->nl->ccp_state = CCST_CLOSED;
	db->wmexit_requested = TRUE;	/* ignore any blocking ASTs - already releasing */
	gds_info = FILE_INFO(db->greg);
	if (JNL_ENABLED(db->glob_sec))
	{
		if (db->segment->jnl != NULL && db->segment->jnl->channel != 0)
		{	/* Guard the journal close with a timeout timer in case the lock conversion hangs */
			status = sys$setimr(0, delta_1_sec, ccp_close_timeout, &db->close_timer_id, 0);
			if (status != SS$_NORMAL)
				ccp_signal_cont(status);	/***** Is this reasonable? *****/
			status = ccp_enqw(EFN$C_ENF, LCK$K_EXMODE, &db->wm_iosb, LCK$M_CONVERT | LCK$M_NOQUEUE,
					NULL, 0, NULL, 0, NULL, PSL$C_USER, 0);
			if (status == SS$_NOTQUEUED)
				/* We're not the only node accessing the journal file */
				jnl_file_close(db->greg, FALSE, FALSE);
			else
			{	/***** Check error status here? *****/
				/* Exclusive access : write a final EPOCH record if before-image journaling
				 * is active and there were transactions since the last epoch.
				 */
				if (db->segment->jnl->jnl_buff->before_images
					&& db->segment->ti->curr_tn > db->segment->jnl->jnl_buff->epoch_tn)
				{
					csa = db->segment;
					JNL_SHORT_TIME(jgbl.gbl_jrec_time);	/* needed for jnl_put_jrt_pini() and jnl_write_epoch_rec() */
					if (0 == csa->jnl->pini_addr)
						jnl_put_jrt_pini(csa);
					db->segment->jnl->jnl_buff->epoch_tn = db->segment->ti->curr_tn;
					jnl_write_epoch_rec(db->segment);
				}
				jnl_file_close(db->greg, TRUE, FALSE);
			}
			sys$cantim(&db->close_timer_id, PSL$C_USER);
			status = gtm_deq(gds_info->s_addrs.jnl->jnllsb->lockid, NULL, PSL$C_USER, 0);
			if (status != SS$_NORMAL)
				ccp_signal_cont(status);	/***** Is this reasonable? *****/
		}
	}
	db->segment = NULL;	/* Warn AST's that the segment has been deleted */
	/* Cancel any pending lock requests before the final dequeues below */
	status = sys$deq(db->lock_iosb.lockid, NULL, PSL$C_USER, LCK$M_CANCEL);
	if (status != SS$_NORMAL && status != SS$_CANCELGRANT)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	status = sys$deq(db->refcnt_iosb.lockid, NULL, PSL$C_USER, LCK$M_CANCEL);
	if (status != SS$_NORMAL && status != SS$_CANCELGRANT)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	status = sys$deq(db->wm_iosb.lockid, NULL, PSL$C_USER, LCK$M_CANCEL);
	if (status != SS$_NORMAL && status != SS$_CANCELGRANT)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	status = sys$deq(db->flush_iosb.lockid, NULL, PSL$C_USER, LCK$M_CANCEL);
	if (status != SS$_NORMAL && status != SS$_CANCELGRANT)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	status = sys$cancel(gds_info->fab->fab$l_stv);
	if (status != SS$_NORMAL)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	status = sys$dassgn(gds_info->fab->fab$l_stv);
	if (status != SS$_NORMAL)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	csa = &gds_info->s_addrs;
	outaddrs[0] = csa->db_addrs[0] - OS_PAGE_SIZE;	/* header no access page */
	outaddrs[1] = csa->db_addrs[1] + OS_PAGE_SIZE;	/* trailer no access page */
	if (FALSE == is_va_free(outaddrs[0]))
		gtm_deltva(outaddrs, NULL, PSL$C_USER);
	/* NOTE(review): the check below re-tests the "status" left over from sys$dassgn; the
	 * gtm_deltva() return value is not captured — presumably it should be. Confirm intent.
	 */
	if (status != SS$_NORMAL)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	status = sys$cretva(csa->db_addrs, retadr, PSL$C_USER);
	if (status != SS$_NORMAL)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	assert(retadr[0] == csa->db_addrs[0] && retadr[1] == csa->db_addrs[1]);
	ml_ptr_hold=db->mem_ptr;
	if (ml_ptr_hold->prev != NULL)
	{	/* if prior segment is adjacent and free, coalesce the segments */
		if (ml_ptr_hold->prev->free
			&& ml_ptr_hold->addr == ml_ptr_hold->prev->addr + OS_PAGELET_SIZE * ml_ptr_hold->prev->pages)
		{
			ml_ptr = ml_ptr_hold->prev;
			ml_ptr->next = ml_ptr_hold->next;
			if (ml_ptr->next != NULL)
				ml_ptr->next->prev = ml_ptr;
			ml_ptr->pages += ml_ptr_hold->pages;
			free(ml_ptr_hold);
			ml_ptr_hold = ml_ptr;
		}
	}
	if (ml_ptr_hold->next != NULL)
	{	/* if next segment is adjacent and free, coalesce the segments */
		if (ml_ptr_hold->next->free
			&& ml_ptr_hold->next->addr == ml_ptr_hold->addr + OS_PAGELET_SIZE * ml_ptr_hold->pages)
		{
			ml_ptr = ml_ptr_hold->next;
			ml_ptr_hold->next = ml_ptr->next;
			if (ml_ptr_hold->next != NULL)
				ml_ptr_hold->next->prev = ml_ptr_hold;
			ml_ptr_hold->pages += ml_ptr->pages;
			free(ml_ptr);
		}
	}
	ml_ptr_hold->free = TRUE;
	/* Build a counted-string descriptor for the global section name and delete the section.
	 * global_name() fills section_name as a counted string: length byte followed by text.
	 */
	global_name("GT$S", &gds_info->file_id, section_name);
	name_dsc.dsc$a_pointer = &section_name[1];
	name_dsc.dsc$w_length = section_name[0];
	name_dsc.dsc$b_dtype = DSC$K_DTYPE_T;
	name_dsc.dsc$b_class = DSC$K_CLASS_S;
	status = del_sec(SEC$M_SYSGBL, &name_dsc, NULL);
	if (status != SS$_NORMAL)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	/* Dequeue locks after delete section in ccp_close, acquire lock before create section in gvcst_init,
	   release lock after delete section in gds_rundown */
	status = gtm_deq(db->lock_iosb.lockid, NULL, PSL$C_USER, 0);
	if (status != SS$_NORMAL)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	status = gtm_deq(db->refcnt_iosb.lockid, NULL, PSL$C_USER, 0);
	if (status != SS$_NORMAL)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	status = gtm_deq(db->wm_iosb.lockid, NULL, PSL$C_USER, 0);
	if (status != SS$_NORMAL)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	status = gtm_deq(db->flush_iosb.lockid, NULL, PSL$C_USER, 0);
	if (status != SS$_NORMAL)
		ccp_signal_cont(status);	/***** Is this reasonable? *****/
	/* Scrub any queued actions that still reference this db header */
	que_hd = &ccp_action_que[PRIORITY];
	for (que_ent = (char *)que_hd + que_hd->bl; que_ent != que_hd; que_ent = (char *)que_ent + que_ent->q.bl)
		if (que_ent->value.v.h == db)
			que_ent->value.v.h = 0;
	free(gds_info->fab->fab$l_nam);
	free(gds_info->fab);
	free(db->greg->dyn.addr);
	free(db->greg);
	/* Remove db from list, this list should never be changed in an AST */
	for (db0 = ccp_reg_root, db1 = NULL; db0 != db; db1 = db0, db0 = db0->next)
		;
	if (db1 == NULL)
		ccp_reg_root = db0->next;
	else
		db1->next = db0->next;
	free(db);
	return;
}
uint4 mur_process_intrpt_recov() { jnl_ctl_list *jctl, *last_jctl; reg_ctl_list *rctl, *rctl_top; int rename_fn_len, save_name_len, idx; char prev_jnl_fn[MAX_FN_LEN + 1], rename_fn[MAX_FN_LEN + 1], save_name[MAX_FN_LEN + 1]; jnl_create_info jnl_info; uint4 status, status2; uint4 max_autoswitchlimit, max_jnl_alq, max_jnl_deq, freeblks; sgmnt_data_ptr_t csd; jnl_private_control *jpc; jnl_buffer_ptr_t jbp; boolean_t jfh_changed; jnl_record *jnlrec; jnl_file_header *jfh; jnl_tm_t now; for (rctl = mur_ctl, rctl_top = mur_ctl + murgbl.reg_total; rctl < rctl_top; rctl++) { TP_CHANGE_REG(rctl->gd); csd = cs_data; /* MM logic after wcs_flu call requires this to be set */ assert(csd == rctl->csa->hdr); jctl = rctl->jctl_turn_around; max_jnl_alq = max_jnl_deq = max_autoswitchlimit = 0; for (last_jctl = NULL ; (NULL != jctl); last_jctl = jctl, jctl = jctl->next_gen) { jfh = jctl->jfh; if (max_autoswitchlimit < jfh->autoswitchlimit) { /* Note that max_jnl_alq, max_jnl_deq are not the maximum journal allocation/extensions across * generations, but rather the allocation/extension corresponding to the maximum autoswitchlimit. */ max_autoswitchlimit = jfh->autoswitchlimit; max_jnl_alq = jfh->jnl_alq; max_jnl_deq = jfh->jnl_deq; } /* Until now, "rctl->blks_to_upgrd_adjust" holds the number of V4 format newly created bitmap blocks * seen in INCTN records in backward processing. It is possible that backward processing might have * missed out on seeing those INCTN records which are part of virtually-truncated or completely-rolled-bak * journal files. The journal file-header has a separate field "prev_recov_blks_to_upgrd_adjust" which * maintains exactly this count. Therefore adjust the rctl counter accordingly. 
*/ assert(!jfh->prev_recov_blks_to_upgrd_adjust || !jfh->recover_interrupted); assert(!jfh->prev_recov_blks_to_upgrd_adjust || jfh->prev_recov_end_of_data); rctl->blks_to_upgrd_adjust += jfh->prev_recov_blks_to_upgrd_adjust; } if (max_autoswitchlimit > last_jctl->jfh->autoswitchlimit) { csd->jnl_alq = max_jnl_alq; csd->jnl_deq = max_jnl_deq; csd->autoswitchlimit = max_autoswitchlimit; } else { assert(csd->jnl_alq == last_jctl->jfh->jnl_alq); assert(csd->jnl_deq == last_jctl->jfh->jnl_deq); assert(csd->autoswitchlimit == last_jctl->jfh->autoswitchlimit); } jctl = rctl->jctl_turn_around; /* Get a pointer to the turn around point EPOCH record */ jnlrec = rctl->mur_desc->jnlrec; assert(JRT_EPOCH == jnlrec->prefix.jrec_type); assert(jctl->turn_around_time == jnlrec->prefix.time); assert(jctl->turn_around_seqno == jnlrec->jrec_epoch.jnl_seqno); assert(jctl->turn_around_tn == jnlrec->prefix.tn); assert(jctl->rec_offset == jctl->turn_around_offset); /* Reset file-header "blks_to_upgrd" counter to the turn around point epoch value. Adjust this to include * the number of new V4 format bitmaps created by post-turnaround-point db file extensions. * The adjustment value is maintained in rctl->blks_to_upgrd_adjust. */ csd->blks_to_upgrd = jnlrec->jrec_epoch.blks_to_upgrd; csd->blks_to_upgrd += rctl->blks_to_upgrd_adjust; # ifdef GTM_TRIGGER /* online rollback can potentially take the database to a point in the past where the triggers that were * previously installed are no longer a part of the current database state and so any process that restarts * AFTER online rollback completes SHOULD reload triggers and the only way to do that is by incrementing the * db_trigger_cycle in the file header. 
*/ if (jgbl.onlnrlbk && (0 < csd->db_trigger_cycle)) { /* check for non-zero db_trigger_cycle is to prevent other processes (continuing after online rollback) * to establish implicit TP (on seeing the trigger cycle mismatch) when there are actually no triggers * installed in the database (because there were none at the start of online rollback). */ csd->db_trigger_cycle++; if (0 == csd->db_trigger_cycle) csd->db_trigger_cycle = 1; /* Don't allow cycle set to 0 which means uninitialized */ } # endif assert((WBTEST_ALLOW_ARBITRARY_FULLY_UPGRADED == gtm_white_box_test_case_number) || (FALSE == jctl->turn_around_fullyupgraded) || (TRUE == jctl->turn_around_fullyupgraded)); /* Set csd->fully_upgraded to FALSE if: * a) The turn around EPOCH had the fully_upgraded field set to FALSE * OR * b) If csd->blks_to_upgrd counter is non-zero. This field can be non-zero even if the turnaround EPOCH's * fully_upgraded field is TRUE. This is possible if the database was downgraded to V4 (post turnaround EPOCH) * format and database extensions happened causing new V4 format bitmap blocks to be written. The count of V4 * format bitmap blocks is maintained ONLY as part of INCTN records (with INCTN opcode SET_JNL_FILE_CLOSE_EXTEND) * noted down in rctl->blks_to_upgrd_adjust counter as part of BACKWARD processing which are finally added to * csd->blks_to_upgrd. */ if (!jctl->turn_around_fullyupgraded || csd->blks_to_upgrd) csd->fully_upgraded = FALSE; csd->trans_hist.early_tn = jctl->turn_around_tn; csd->trans_hist.curr_tn = csd->trans_hist.early_tn; /* INCREMENT_CURR_TN macro not used but noted in comment * to identify all places that set curr_tn */ csd->jnl_eovtn = csd->trans_hist.curr_tn; csd->turn_around_point = TRUE; /* MUPIP REORG UPGRADE/DOWNGRADE stores its partially processed state in the database file header. * It is difficult for recovery to restore those fields to a correct partial value. 
* Hence reset the related fields as if the desired_db_format got set just ONE tn BEFORE the EPOCH record * and that there was no more processing that happened. * This might potentially mean some duplicate processing for MUPIP REORG UPGRADE/DOWNGRADE after the recovery. * But that will only be the case as long as the database is in compatibility (mixed) mode (hopefully not long). */ if (csd->desired_db_format_tn >= jctl->turn_around_tn) csd->desired_db_format_tn = jctl->turn_around_tn - 1; if (csd->reorg_db_fmt_start_tn >= jctl->turn_around_tn) csd->reorg_db_fmt_start_tn = jctl->turn_around_tn - 1; if (csd->tn_upgrd_blks_0 > jctl->turn_around_tn) csd->tn_upgrd_blks_0 = (trans_num)-1; csd->reorg_upgrd_dwngrd_restart_block = 0; /* Compute current value of "free_blocks" based on the value of "free_blocks" at the turnaround point epoch * record and the change in "total_blks" since that epoch to the present form of the database. Any difference * in "total_blks" implies database file extensions happened since the turnaround point. A backward rollback * undoes everything (including all updates) except file extensions (it does not truncate the file size). * Therefore every block that was newly allocated as part of those file extensions should be considered FREE * for the current calculations except for the local bitmap blocks which are BUSY the moment they are created. */ assert(jnlrec->jrec_epoch.total_blks <= csd->trans_hist.total_blks); csd->trans_hist.free_blocks = jnlrec->jrec_epoch.free_blocks + (csd->trans_hist.total_blks - jnlrec->jrec_epoch.total_blks) - DIVIDE_ROUND_UP(csd->trans_hist.total_blks, BLKS_PER_LMAP) + DIVIDE_ROUND_UP(jnlrec->jrec_epoch.total_blks, BLKS_PER_LMAP); assert(!csd->blks_to_upgrd || !csd->fully_upgraded); assert((freeblks = mur_blocks_free(rctl)) == csd->trans_hist.free_blocks); /* Update strm_reg_seqno[] in db file header to reflect the turn around point. 
* Before updating "strm_reg_seqno", make sure value is saved into "save_strm_reg_seqno". * This is relied upon by the function "mur_get_max_strm_reg_seqno" in case of interrupted rollback. */ for (idx = 0; idx < MAX_SUPPL_STRMS; idx++) { if (!csd->save_strm_reg_seqno[idx]) csd->save_strm_reg_seqno[idx] = csd->strm_reg_seqno[idx]; csd->strm_reg_seqno[idx] = jnlrec->jrec_epoch.strm_seqno[idx]; } wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_FSYNC_DB); assert(cs_addrs->ti->curr_tn == jctl->turn_around_tn); # ifdef UNIX if (jgbl.onlnrlbk) { if (dba_bg == cs_addrs->hdr->acc_meth) { /* dryclean the cache (basically reset the cycle fields in all teh cache records) so as to make * GT.M processes that only does 'reads' to require crit and hence realize that online rollback * is in progress */ bt_refresh(cs_addrs, FALSE); /* sets earliest bt TN to be the turn around TN */ } db_csh_ref(cs_addrs, FALSE); assert(NULL != cs_addrs->jnl); jpc = cs_addrs->jnl; assert(NULL != jpc->jnl_buff); jbp = jpc->jnl_buff; /* Since Rollback simulates the journal record along with the timestamp at which the update was made, it * sets jgbl.dont_reset_gbl_jrec_time to TRUE so that during forward processing t_end or tp_tend does not * reset the gbl_jrec_time to reflect the current time. But, with Online Rollback, one can have the shared * memory up and running and hence can have jbp->prev_jrec_time to be the time of the most recent journal * update made. Later in t_end/tp_tend, ADJUST_GBL_JREC_TIME is invoked which ensures that if ever * gbl_jrec_time (the time of the current update) is less than jbp->prev_jrec_time (time of the latest * journal update), dont_reset_gbl_jrec_time better be FALSE. But, this assert will trip since Rollback * sets the latter to TRUE. To fix this, set jbp->prev_jrec_time to the turn around time stamp. 
This way * we are guaranteed that all the updates done in the forward processing will have a timestamp that is * greater than the turn around timestamp */ SET_JNLBUFF_PREV_JREC_TIME(jbp, jctl->turn_around_time, DO_GBL_JREC_TIME_CHECK_FALSE); } else if (dba_bg == csd->acc_meth) { /* set earliest bt TN to be the turn-around TN (taken from bt_refresh()) */ SET_OLDEST_HIST_TN(cs_addrs, cs_addrs->ti->curr_tn - 1); } # else if (dba_bg == csd->acc_meth) { /* set earliest bt TN to be the turn-around TN (taken from bt_refresh()) */ SET_OLDEST_HIST_TN(cs_addrs, cs_addrs->ti->curr_tn - 1); } # endif csd->turn_around_point = FALSE; assert(OLDEST_HIST_TN(cs_addrs) == (cs_addrs->ti->curr_tn - 1)); /* In case this is MM and wcs_flu() remapped an extended database, reset rctl->csd */ assert((dba_mm == cs_data->acc_meth) || (rctl->csd == cs_data)); rctl->csd = cs_data; } JNL_SHORT_TIME(now); for (rctl = mur_ctl, rctl_top = mur_ctl + murgbl.reg_total; rctl < rctl_top; rctl++) { TP_CHANGE_REG_IF_NEEDED(rctl->gd); if (!rctl->jfh_recov_interrupted) jctl = rctl->jctl_turn_around; else { DEBUG_ONLY( for (jctl = rctl->jctl_turn_around; NULL != jctl->next_gen; jctl = jctl->next_gen) ; /* check that latest gener file name does not match db header */ assert((rctl->csd->jnl_file_len != jctl->jnl_fn_len) || (0 != memcmp(rctl->csd->jnl_file_name, jctl->jnl_fn, jctl->jnl_fn_len))); ) jctl = rctl->jctl_alt_head; } assert(NULL != jctl); for ( ; NULL != jctl->next_gen; jctl = jctl->next_gen) ; assert(rctl->csd->jnl_file_len == jctl->jnl_fn_len); /* latest gener file name */ assert(0 == memcmp(rctl->csd->jnl_file_name, jctl->jnl_fn, jctl->jnl_fn_len)); /* should match db header */ if (SS_NORMAL != (status = prepare_unique_name((char *)jctl->jnl_fn, jctl->jnl_fn_len, "", "", rename_fn, &rename_fn_len, now, &status2))) return status; jctl->jnl_fn_len = rename_fn_len; /* change the name in memory to the proposed name */ memcpy(jctl->jnl_fn, rename_fn, rename_fn_len + 1); /* Rename hasn't happened yet 
at the filesystem level. In case current recover command is interrupted, * we need to update jfh->next_jnl_file_name before mur_forward(). Update jfh->next_jnl_file_name for * all journal files from which PBLK records were applied. Create new journal files for forward play. */ assert(NULL != rctl->jctl_turn_around); jctl = rctl->jctl_turn_around; /* points to journal file which has current recover's turn around point */ assert(0 != jctl->turn_around_offset); jfh = jctl->jfh; jfh->turn_around_offset = jctl->turn_around_offset; /* save progress in file header for */ jfh->turn_around_time = jctl->turn_around_time; /* possible re-issue of recover */ for (idx = 0; idx < MAX_SUPPL_STRMS; idx++) jfh->strm_end_seqno[idx] = csd->strm_reg_seqno[idx]; jfh_changed = TRUE; /* We are about to update the journal file header of the turnaround-point journal file to store the * non-zero jfh->turn_around_offset. Ensure corresponding database is considered updated. * This is needed in case journal recovery/rollback terminates abnormally and we go to mur_close_files. * We need to ensure csd->recov_interrupted does not get reset to FALSE even if this region did not have * have any updates to the corresponding database file otherwise. (GTM-8394) */ rctl->db_updated = TRUE; for ( ; NULL != jctl; jctl = jctl->next_gen) { /* setup the next_jnl links. note that in the case of interrupted recovery, next_jnl links * would have been already set starting from the turn-around point journal file of the * interrupted recovery but the new recovery MIGHT have taken us to a still previous * generation journal file that needs its next_jnl link set. this is why we do the next_jnl * link setup even in the case of interrupted recovery although in most cases it is unnecessary. 
*/ jfh = jctl->jfh; if (NULL != jctl->next_gen) { jfh->next_jnl_file_name_length = jctl->next_gen->jnl_fn_len; memcpy(jfh->next_jnl_file_name, jctl->next_gen->jnl_fn, jctl->next_gen->jnl_fn_len); jfh_changed = TRUE; } else assert(0 == jfh->next_jnl_file_name_length); /* null link from latest generation */ if (jfh->turn_around_offset && (jctl != rctl->jctl_turn_around)) { /* It is possible that the current recovery has a turn-around-point much before the * previously interrupted recovery. If it happens to be a previous generation journal * file then we have to reset the original turn-around-point to be zero in the journal * file header in order to ensure if this recovery gets interrupted we do interrupted * recovery processing until the new turn-around-point instead of stopping incorrectly * at the original turn-around-point itself. Note that there could be more than one * journal file with a non-zero turn_around_offset (depending on how many previous * recoveries got interrupted in this loop) that need to be reset. */ assert(!jctl->turn_around_offset); assert(rctl->recov_interrupted || rctl->jctl_apply_pblk); /* rctl->jfh_recov_interrupted can fail */ jfh->turn_around_offset = 0; jfh->turn_around_time = 0; jfh_changed = TRUE; } if (jfh_changed) { /* Since overwriting the journal file header (an already allocated block * in the file) should not cause ENOSPC, we dont take the trouble of * passing csa or jnl_fn (first two parameters). Instead we pass NULL. 
*/ JNL_DO_FILE_WRITE(NULL, NULL, jctl->channel, 0, jfh, REAL_JNL_HDR_LEN, jctl->status, jctl->status2); if (SS_NORMAL != jctl->status) { assert(FALSE); if (SS_NORMAL == jctl->status2) gtm_putmsg_csa(CSA_ARG(rctl->csa) VARLSTCNT(5) ERR_JNLWRERR, 2, jctl->jnl_fn_len, jctl->jnl_fn, jctl->status); else gtm_putmsg_csa(CSA_ARG(rctl->csa) VARLSTCNT1(6) ERR_JNLWRERR, 2, jctl->jnl_fn_len, jctl->jnl_fn, jctl->status, PUT_SYS_ERRNO(jctl->status2)); return jctl->status; } GTM_JNL_FSYNC(rctl->csa, jctl->channel, jctl->status); if (-1 == jctl->status) { jctl->status2 = errno; assert(FALSE); gtm_putmsg_csa(CSA_ARG(rctl->csa) VARLSTCNT(9) ERR_JNLFSYNCERR, 2, jctl->jnl_fn_len, jctl->jnl_fn, ERR_TEXT, 2, RTS_ERROR_TEXT("Error with fsync"), jctl->status2); return ERR_JNLFSYNCERR; } } jfh_changed = FALSE; } memset(&jnl_info, 0, SIZEOF(jnl_info)); jnl_info.status = jnl_info.status2 = SS_NORMAL; jnl_info.prev_jnl = &prev_jnl_fn[0]; set_jnl_info(rctl->gd, &jnl_info); jnl_info.prev_jnl_len = rctl->jctl_turn_around->jnl_fn_len; memcpy(jnl_info.prev_jnl, rctl->jctl_turn_around->jnl_fn, rctl->jctl_turn_around->jnl_fn_len); jnl_info.prev_jnl[jnl_info.prev_jnl_len] = 0; jnl_info.jnl_len = rctl->csd->jnl_file_len; memcpy(jnl_info.jnl, rctl->csd->jnl_file_name, jnl_info.jnl_len); jnl_info.jnl[jnl_info.jnl_len] = 0; assert(!mur_options.rollback || jgbl.mur_rollback); jnl_info.reg_seqno = rctl->jctl_turn_around->turn_around_seqno; jgbl.gbl_jrec_time = rctl->jctl_turn_around->turn_around_time; /* time needed for cre_jnl_file_common() */ if (EXIT_NRM != cre_jnl_file_common(&jnl_info, rename_fn, rename_fn_len)) { gtm_putmsg_csa(CSA_ARG(rctl->csa) VARLSTCNT(4) ERR_JNLNOCREATE, 2, jnl_info.jnl_len, jnl_info.jnl); return jnl_info.status; } # ifdef UNIX if (jgbl.onlnrlbk) { cs_addrs = rctl->csa; /* Mimic what jnl_file_close in case of cleanly a closed journal file */ jpc = cs_addrs->jnl; /* the previous loop makes sure cs_addrs->jnl->jnl_buff is valid*/ NULLIFY_JNL_FILE_ID(cs_addrs); 
jpc->jnl_buff->cycle++; /* so that all other processes know to switch to the newer journal file */ jpc->cycle--; /* decrement cycle so jnl_ensure_open() knows to reopen the journal */ } # endif if (NULL != rctl->jctl_alt_head) /* remove the journal files created by last interrupted recover process */ { mur_rem_jctls(rctl); rctl->jctl_alt_head = NULL; } /* From this point on, journal records are written into the newly created journal file. However, we still read * from old journal files. */ }