int mu_extr_getblk(unsigned char *ptr) { error_def(ERR_GVGETFAIL); enum cdb_sc status; rec_hdr_ptr_t rp; bool two_histories, end_of_tree; blk_hdr_ptr_t bp; srch_blk_status *bh; srch_hist *rt_history; t_begin(ERR_GVGETFAIL, FALSE); for (;;) { if (cdb_sc_normal != (status = gvcst_search(gv_currkey, NULL))) { t_retry(status); continue; } end_of_tree = two_histories = FALSE; bh = gv_target->hist.h; rp = (rec_hdr_ptr_t)(bh->buffaddr + bh->curr_rec.offset); bp = (blk_hdr_ptr_t)bh->buffaddr; if (rp >= (rec_hdr_ptr_t)CST_TOB(bp)) { rt_history = gv_target->alt_hist; if (cdb_sc_normal == (status = gvcst_rtsib(rt_history, 0))) { two_histories = TRUE; if (cdb_sc_normal != (status = gvcst_search_blk(gv_currkey, rt_history->h))) { t_retry(status); continue; } bp = (blk_hdr_ptr_t)rt_history->h[0].buffaddr; } else if (cdb_sc_endtree == status) end_of_tree = TRUE; else { t_retry(status); continue; } } memcpy(ptr, bp, bp->bsiz); if (t_end(&gv_target->hist, two_histories ? rt_history : NULL) != 0) { if (two_histories) memcpy(gv_target->hist.h, rt_history->h, sizeof(srch_blk_status) * (rt_history->depth + 1)); return !end_of_tree; } } }
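/* [Editor's illustrative sketch, not GT.M code] mu_extr_getblk above copies a block while holding
 * no locks and lets t_end's validation decide whether a concurrent writer intervened, retrying the
 * whole read on failure. The standalone seqlock-style reader below shows the same optimistic
 * read/validate/retry shape in miniature; shared_blk_t, stable_copy and the version field are
 * hypothetical stand-ins, assuming a writer that makes the version odd while updating. */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

typedef struct { _Atomic unsigned version; unsigned char data[16]; } shared_blk_t;

static shared_blk_t blk;    /* in the real code the block lives in shared memory */

static void stable_copy(shared_blk_t *b, unsigned char *out, size_t len)
{
    unsigned v1, v2;
    do
    {
        v1 = atomic_load(&b->version);      /* snapshot version before the copy */
        memcpy(out, b->data, len);          /* optimistic, lock-free read */
        v2 = atomic_load(&b->version);      /* re-check after the copy */
    } while ((v1 & 1) || (v1 != v2));       /* writer active or intervened: retry (cf. t_retry) */
}

int main(void)
{
    unsigned char copy[16];
    memcpy(blk.data, "hello", 6);
    stable_copy(&blk, copy, sizeof(copy));
    printf("%s\n", copy);
    return 0;
}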
/* Importance Sampling */ int4 mu_size_impsample(glist *gl_ptr, int4 M, int4 seed) { boolean_t tn_aborted; double a[MAX_BT_DEPTH + 1]; /* a[j] is # of adjacent block pointers in level j block of cur traversal */ double r[MAX_BT_DEPTH + 1]; /* r[j] is #records in level j block of current traversal */ enum cdb_sc status; int k, h; stat_t rstat; trans_num ret_tn; unsigned int lcl_t_tries; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; inctn_opcode = inctn_invalid_op; /* set gv_target/gv_currkey/gv_cur_region/cs_addrs/cs_data to correspond to <globalname,reg> in gl_ptr */ DO_OP_GVNAME(gl_ptr); if (0 == gv_target->root) { /* Global does not exist (online rollback). Not an error. */ gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr); return EXIT_NRM; } if (!seed) seed = (int4)(time(0) * process_id); srand48(seed); /* do M random traversals */ INIT_STATS(rstat); for (k = 1; k <= M; k++) { if (mu_ctrlc_occurred || mu_ctrly_occurred) return EXIT_ERR; t_begin(ERR_MUSIZEFAIL, 0); for (;;) { CLEAR_VECTOR(r); CLEAR_VECTOR(a); if (cdb_sc_normal != (status = mu_size_rand_traverse(r, a))) /* WARNING: assignment */ { assert((CDB_STAGNATE > t_tries) || IS_FINAL_RETRY_CODE(status)); t_retry(status); continue; } gv_target->clue.end = 0; gv_target->hist.h[0] = gv_target->hist.h[1]; /* No level 0 block to validate */ DEBUG_ONLY(lcl_t_tries = t_tries); if ((trans_num)0 == (ret_tn = t_end(&gv_target->hist, NULL, TN_NOT_SPECIFIED))) /* WARNING: assignment */ { ABORT_TRANS_IF_GBL_EXIST_NOMORE(lcl_t_tries, tn_aborted); if (tn_aborted) { /* Global does not exist (online rollback). Not an error. */ gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr); return EXIT_NRM; } continue; } accum_stats_impsmpl(&rstat, r, a); break; } } finalize_stats_impsmpl(&rstat); /* display rstat data * Showing the error as 2 standard deviations which is a 95% confidence interval for the * mean number of blocks at each level */ util_out_print("Number of generated samples = !UL", FLUSH, rstat.n); util_out_print("Level Blocks Adjacent 2 sigma(+/-)", FLUSH); for (h = MAX_BT_DEPTH; (0 <= h) && (rstat.blktot[h] < EPS); h--); for ( ; h > 0; h--) util_out_print("!5UL !15UL !15UL !15UL ~ !3UL%", FLUSH, h, (int)ROUND(rstat.blktot[h]), (int)ROUND(mu_int_adj[h]), (int)ROUND(sqrt(rstat.blkerr[h]) * 2), (int)ROUND(sqrt(rstat.blkerr[h]) * 2 / rstat.blktot[h] * 100.0) ); util_out_print("!5UL !15UL !15UL !15UL ~ !3UL%", FLUSH, h, (int)ROUND(rstat.blktot[h]), (int)ROUND(mu_int_adj[h]), (int)ROUND(sqrt(rstat.blkerr[h]) * 2), (int)ROUND(sqrt(rstat.blkerr[h]) * 2 / rstat.blktot[h] * 100.0) ); util_out_print("Total !15UL !15UL !15UL ~ !3UL%", FLUSH, (int)ROUND(rstat.B), (int)ROUND(rstat.AT), (int)ROUND(sqrt(rstat.error) * 2), (int)ROUND(sqrt(rstat.error) * 2 / rstat.B * 100.0) ); return EXIT_NRM; }
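/* [Editor's sketch] The report above quotes a 2 sigma half-width, i.e. an approximate 95% confidence
 * interval for the mean block count per level. A minimal, self-contained version of that arithmetic,
 * assuming per-traversal estimates x[k] (names illustrative, not the accum_stats_impsmpl internals): */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double x[] = {100.0, 92.0, 110.0, 97.0, 101.0};     /* per-traversal estimates of a level's blocks */
    int n = sizeof(x) / sizeof(x[0]);
    double sum = 0.0, sumsq = 0.0;
    for (int k = 0; k < n; k++)
    {
        sum += x[k];
        sumsq += x[k] * x[k];
    }
    double mean = sum / n;
    double var_of_mean = (sumsq / n - mean * mean) / n; /* Var(mean) = Var(x)/n */
    printf("blocks ~ %.1f +/- %.1f (2 sigma)\n", mean, 2.0 * sqrt(var_of_mean));
    return 0;
}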
trans_num gvcst_bmp_mark_free(kill_set *ks) { block_id bit_map, next_bm, *updptr; blk_ident *blk, *blk_top, *nextblk; trans_num ctn, start_db_fmt_tn; unsigned int len; # if defined(UNIX) && defined(DEBUG) unsigned int lcl_t_tries; # endif int4 blk_prev_version; srch_hist alt_hist; trans_num ret_tn = 0; boolean_t visit_blks; srch_blk_status bmphist; cache_rec_ptr_t cr; enum db_ver ondsk_blkver; enum cdb_sc status; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; TREF(in_gvcst_bmp_mark_free) = TRUE; assert(inctn_bmp_mark_free_gtm == inctn_opcode || inctn_bmp_mark_free_mu_reorg == inctn_opcode); /* Note down the desired_db_format_tn before you start relying on cs_data->fully_upgraded. * If the db is fully_upgraded, take the optimal path that does not need to read each block being freed. * But in order to detect concurrent desired_db_format changes, note down the tn (when the last format change occurred) * before the fully_upgraded check and after having noted down the database current_tn. * If they are the same, then we are guaranteed no concurrent desired_db_format change occurred. * If they are not, then fall through to the non-optimal path where each to-be-killed block has to be visited. * The reason we need to visit every block in case desired_db_format changes is to take care of the case where * MUPIP REORG DOWNGRADE concurrently changes a block that we are about to free. */ start_db_fmt_tn = cs_data->desired_db_format_tn; visit_blks = (!cs_data->fully_upgraded); /* Local evaluation */ assert(!visit_blks || (visit_blks && dba_bg == cs_addrs->hdr->acc_meth)); /* must have blks_to_upgrd == 0 for non-BG */ assert(!dollar_tlevel); /* Should NOT be in TP now */ blk = &ks->blk[0]; blk_top = &ks->blk[ks->used]; if (!visit_blks) { /* Database has been completely upgraded. Free all blocks in one bitmap as part of one transaction. */ assert(cs_data->db_got_to_v5_once); /* assert all V4 fmt blocks (including RECYCLED) have space for V5 upgrade */ inctn_detail.blknum_struct.blknum = 0; /* to indicate no adjustment to "blks_to_upgrd" necessary */ /* If any of the mini transactions below restarts because of an online rollback, we don't want the application * refresh to happen (like $ZONLNRLBK++ or rts_error(DBROLLEDBACK)). This is because, although we are currently in * non-tp (dollar_tlevel = 0), we could actually be in a TP transaction and have actually faked dollar_tlevel. In * such a case, we should NOT be issuing a DBROLLEDBACK error as TP transactions are supposed to just restart in * case of an online rollback. So, set the global variable that gtm_onln_rlbk_clnup can check and skip doing the * application refresh, but will reset the clues. The next update will see the cycle mismatch and will accordingly * take the right action. 
*/ for ( ; blk < blk_top; blk = nextblk) { if (0 != blk->flag) { nextblk = blk + 1; continue; } assert(0 < blk->block); assert((int4)blk->block < cs_addrs->ti->total_blks); bit_map = ROUND_DOWN2((int)blk->block, BLKS_PER_LMAP); next_bm = bit_map + BLKS_PER_LMAP; CHECK_AND_RESET_UPDATE_ARRAY; /* reset update_array_ptr to update_array */ /* Scan for the next local bitmap */ updptr = (block_id *)update_array_ptr; for (nextblk = blk; (0 == nextblk->flag) && (nextblk < blk_top) && ((block_id)nextblk->block < next_bm); ++nextblk) { assert((block_id)nextblk->block - bit_map); *updptr++ = (block_id)nextblk->block - bit_map; } len = (unsigned int)((char *)nextblk - (char *)blk); update_array_ptr = (char *)updptr; alt_hist.h[0].blk_num = 0; /* need for calls to T_END for bitmaps */ alt_hist.h[0].blk_target = NULL; /* need to initialize for calls to T_END */ /* the following assumes SIZEOF(blk_ident) == SIZEOF(int) */ assert(SIZEOF(blk_ident) == SIZEOF(int)); *(int *)update_array_ptr = 0; t_begin(ERR_GVKILLFAIL, UPDTRNS_DB_UPDATED_MASK); for (;;) { ctn = cs_addrs->ti->curr_tn; /* Need a read fence before reading fields from cs_data as we are reading outside * of crit and relying on this value to detect desired db format state change. */ SHM_READ_MEMORY_BARRIER; if (start_db_fmt_tn != cs_data->desired_db_format_tn) { /* Concurrent db format change has occurred. Need to visit every block to be killed * to determine its block format. Fall through to the non-optimal path below */ ret_tn = 0; break; } bmphist.blk_num = bit_map; if (NULL == (bmphist.buffaddr = t_qread(bmphist.blk_num, (sm_int_ptr_t)&bmphist.cycle, &bmphist.cr))) { t_retry((enum cdb_sc)rdfail_detail); continue; } t_write_map(&bmphist, (uchar_ptr_t)update_array, ctn, -(int4)(nextblk - blk)); UNIX_ONLY(DEBUG_ONLY(lcl_t_tries = t_tries)); if ((trans_num)0 == (ret_tn = t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))) { # ifdef UNIX assert((CDB_STAGNATE == t_tries) || (lcl_t_tries == t_tries - 1)); status = LAST_RESTART_CODE; if ((cdb_sc_onln_rlbk1 == status) || (cdb_sc_onln_rlbk2 == status) || TREF(rlbk_during_redo_root)) { /* t_end restarted due to online rollback. Discard bitmap free-up and return control * to the application. But, before that reset only_reset_clues_if_onln_rlbk to FALSE */ TREF(in_gvcst_bmp_mark_free) = FALSE; send_msg(VARLSTCNT(6) ERR_IGNBMPMRKFREE, 4, REG_LEN_STR(gv_cur_region), DB_LEN_STR(gv_cur_region)); t_abort(gv_cur_region, cs_addrs); return ret_tn; /* actually 0 */ } # endif continue; } break; } if (0 == ret_tn) /* db format change occurred. Fall through to below for loop to visit each block */ { /* Abort any active transaction to get rid of lingering Non-TP artifacts */ t_abort(gv_cur_region, cs_addrs); break; } } } /* for all blocks in the kill_set */
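/* [Editor's sketch] The optimal path above frees every kill-set block that falls in one local bitmap
 * with a single t_write_map, storing each block's offset relative to that bitmap in the update array.
 * A standalone illustration of the grouping over a sorted block list; the helper name is invented and
 * BLKS_PER_LMAP is reproduced here only for the demo: */
#include <stdio.h>

#define BLKS_PER_LMAP 512

static void group_by_bitmap(const int *blks, int used)     /* blks must be sorted ascending */
{
    int i = 0;
    while (i < used)
    {
        int bit_map = (blks[i] / BLKS_PER_LMAP) * BLKS_PER_LMAP;    /* cf. ROUND_DOWN2 */
        int next_bm = bit_map + BLKS_PER_LMAP;
        printf("bitmap %d: offsets", bit_map);
        for ( ; i < used && blks[i] < next_bm; i++)
            printf(" %d", blks[i] - bit_map);               /* offset stored in the update array */
        printf("\n");
    }
}

int main(void)
{
    int blks[] = {3, 7, 510, 513, 700, 1025};
    group_by_bitmap(blks, 6);       /* emits one group (one mini-transaction) per bitmap */
    return 0;
}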
void mu_swap_root(glist *gl_ptr, int *root_swap_statistic_ptr) { sgmnt_data_ptr_t csd; sgmnt_addrs *csa; node_local_ptr_t cnl; srch_hist *dir_hist_ptr, *gvt_hist_ptr; gv_namehead *save_targ; block_id root_blk_id, child_blk_id, free_blk_id; sm_uc_ptr_t root_blk_ptr, child_blk_ptr; kill_set kill_set_list; trans_num curr_tn, ret_tn; int level, root_blk_lvl; block_id save_root; boolean_t tn_aborted; unsigned int lcl_t_tries; enum cdb_sc status; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; assert(mu_reorg_process); gv_target = gl_ptr->gvt; gv_target->root = 0; /* reset root so we recompute it in DO_OP_GVNAME below */ gv_target->clue.end = 0; /* reset clue since reorg action on later globals might have invalidated it */ reorg_gv_target->gvname.var_name = gv_target->gvname.var_name; /* needed by SAVE_ROOTSRCH_ENTRY_STATE */ dir_hist_ptr = gv_target->alt_hist; gvt_hist_ptr = &(gv_target->hist); inctn_opcode = inctn_invalid_op; DO_OP_GVNAME(gl_ptr); /* sets gv_target/gv_currkey/gv_cur_region/cs_addrs/cs_data to correspond to <globalname,reg> in gl_ptr */ csa = cs_addrs; cnl = csa->nl; csd = cs_data; /* Be careful to keep csd up to date. With MM, cs_data can change, and * dereferencing an older copy can result in a SIG-11. */ if (gv_cur_region->read_only) return; /* Cannot proceed for read-only data files */ if (0 == gv_target->root) { /* Global does not exist (online rollback). No problem. */ gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr); return; } if (dba_mm == csd->acc_meth) /* return for now without doing any swapping operation because later mu_truncate * is going to issue the MUTRUNCNOTBG message. */ return; SET_GV_ALTKEY_TO_GBLNAME_FROM_GV_CURRKEY; /* set up gv_altkey to be just the gblname */ /* ------------ Swap root block of global variable tree --------- */ t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK); for (;;) { curr_tn = csa->ti->curr_tn; kill_set_list.used = 0; save_root = gv_target->root; gv_target->root = csa->dir_tree->root; gv_target->clue.end = 0; if (cdb_sc_normal != (status = gvcst_search(gv_altkey, dir_hist_ptr))) { /* Assign directory tree path to dir_hist_ptr */ assert(t_tries < CDB_STAGNATE); gv_target->root = save_root; t_retry(status); continue; } gv_target->root = save_root; gv_target->clue.end = 0; if (cdb_sc_normal != (status = gvcst_search(gv_currkey, NULL))) { /* Assign global variable tree path to gvt_hist_ptr */ assert(t_tries < CDB_STAGNATE); t_retry(status); continue; } /* We've already searched the directory tree in op_gvname/t_retry and obtained gv_target->root. * Should restart with gvtrootmod2 if they don't agree. gvcst_root_search is the final arbiter. * Really need that for debug info and also should assert(gv_currkey is global name). 
*/ root_blk_lvl = gvt_hist_ptr->depth; assert(root_blk_lvl > 0); root_blk_ptr = gvt_hist_ptr->h[root_blk_lvl].buffaddr; root_blk_id = gvt_hist_ptr->h[root_blk_lvl].blk_num; assert((CDB_STAGNATE > t_tries) || (gv_target->root == gvt_hist_ptr->h[root_blk_lvl].blk_num)); free_blk_id = swap_root_or_directory_block(0, root_blk_lvl, dir_hist_ptr, root_blk_id, root_blk_ptr, &kill_set_list, curr_tn); if (RETRY_SWAP == free_blk_id) continue; else if (ABORT_SWAP == free_blk_id) break; update_trans = UPDTRNS_DB_UPDATED_MASK; inctn_opcode = inctn_mu_reorg; assert(1 == kill_set_list.used); need_kip_incr = TRUE; if (!csa->now_crit) WAIT_ON_INHIBIT_KILLS(cnl, MAXWAIT2KILL); DEBUG_ONLY(lcl_t_tries = t_tries); TREF(in_mu_swap_root_state) = MUSWP_INCR_ROOT_CYCLE; assert(!TREF(in_gvcst_redo_root_search)); if ((trans_num)0 == (ret_tn = t_end(gvt_hist_ptr, dir_hist_ptr, TN_NOT_SPECIFIED))) { TREF(in_mu_swap_root_state) = MUSWP_NONE; need_kip_incr = FALSE; assert(NULL == kip_csa); ABORT_TRANS_IF_GBL_EXIST_NOMORE(lcl_t_tries, tn_aborted); if (tn_aborted) { /* It is not an error if the global (that once existed) doesn't exist anymore (due to ROLLBACK) */ gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr); return; } continue; } TREF(in_mu_swap_root_state) = MUSWP_NONE; /* Note that this particular process's csa->root_search_cycle is now behind cnl->root_search_cycle. * This forces a cdb_sc_gvtrootmod2 restart in gvcst_bmp_mark_free below. */ assert(cnl->root_search_cycle > csa->root_search_cycle); gvcst_kill_sort(&kill_set_list); GVCST_BMP_MARK_FREE(&kill_set_list, ret_tn, inctn_mu_reorg, inctn_bmp_mark_free_mu_reorg, inctn_opcode, csa); DECR_KIP(csd, csa, kip_csa); *root_swap_statistic_ptr += 1; break; } /* ------------ Swap blocks in branch of directory tree --------- */ for (level = 0; level <= MAX_BT_DEPTH; level++) { t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK); for (;;) { curr_tn = csa->ti->curr_tn; kill_set_list.used = 0; save_root = gv_target->root; gv_target->root = csa->dir_tree->root; gv_target->clue.end = 0; if (cdb_sc_normal != (status = gvcst_search(gv_altkey, dir_hist_ptr))) { /* assign branch path of directory tree into dir_hist_ptr */ assert(t_tries < CDB_STAGNATE); gv_target->root = save_root; t_retry(status); continue; } gv_target->root = save_root; gv_target->clue.end = 0; if (level >= dir_hist_ptr->depth) { /* done */ t_abort(gv_cur_region, csa); return; } child_blk_ptr = dir_hist_ptr->h[level].buffaddr; child_blk_id = dir_hist_ptr->h[level].blk_num; assert(csa->dir_tree->root != child_blk_id); free_blk_id = swap_root_or_directory_block(level + 1, level, dir_hist_ptr, child_blk_id, child_blk_ptr, &kill_set_list, curr_tn); if (level == 0) /* set level as 1 to mark this kill set is for level-0 block in directory tree. 
* The kill-set level later will be used in gvcst_bmp_mark_free to assign a special value to * cw_set_element, which will eventually be used by t_end to write the block to snapshot */ kill_set_list.blk[kill_set_list.used - 1].level = 1; if (RETRY_SWAP == free_blk_id) continue; else if (ABORT_SWAP == free_blk_id) break; update_trans = UPDTRNS_DB_UPDATED_MASK; inctn_opcode = inctn_mu_reorg; assert(1 == kill_set_list.used); need_kip_incr = TRUE; if (!csa->now_crit) WAIT_ON_INHIBIT_KILLS(cnl, MAXWAIT2KILL); DEBUG_ONLY(lcl_t_tries = t_tries); TREF(in_mu_swap_root_state) = MUSWP_DIRECTORY_SWAP; if ((trans_num)0 == (ret_tn = t_end(dir_hist_ptr, NULL, TN_NOT_SPECIFIED))) { TREF(in_mu_swap_root_state) = MUSWP_NONE; need_kip_incr = FALSE; assert(NULL == kip_csa); continue; } TREF(in_mu_swap_root_state) = MUSWP_NONE; gvcst_kill_sort(&kill_set_list); TREF(in_mu_swap_root_state) = MUSWP_FREE_BLK; GVCST_BMP_MARK_FREE(&kill_set_list, ret_tn, inctn_mu_reorg, inctn_bmp_mark_free_mu_reorg, inctn_opcode, csa); TREF(in_mu_swap_root_state) = MUSWP_NONE; DECR_KIP(csd, csa, kip_csa); break; } } return; }
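/* [Editor's sketch] mu_swap_root and the other routines in this section all follow the same non-TP
 * restart discipline: t_begin once, then an infinite loop that rebuilds every piece of state
 * (histories, clues, update arrays) from scratch on each pass, calling t_retry when construction
 * fails and looping again when t_end returns 0. A toy, self-contained skeleton of that control flow;
 * every function below is a hypothetical stand-in, not the real t_begin/t_end/t_retry: */
#include <stdbool.h>
#include <stdio.h>

typedef enum { SC_NORMAL, SC_RESTART } sc_t;

static int attempt;

static void txn_begin(void)  { attempt = 0; }
static sc_t txn_build(void)  { return (++attempt < 2) ? SC_RESTART : SC_NORMAL; } /* fail once */
static bool txn_commit(void) { return true; }
static void txn_retry(void)  { printf("restart after attempt %d\n", attempt); }

int main(void)
{
    txn_begin();
    for (;;)
    {
        if (SC_NORMAL != txn_build())   /* tree search / history construction failed */
        {
            txn_retry();
            continue;                   /* re-derive ALL state inside the loop */
        }
        if (!txn_commit())              /* t_end returning 0 means validation failed */
            continue;
        break;                          /* committed */
    }
    printf("done after %d attempt(s)\n", attempt);
    return 0;
}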
boolean_t mu_truncate(int4 truncate_percent) { sgmnt_addrs *csa; sgmnt_data_ptr_t csd; int num_local_maps; int lmap_num, lmap_blk_num; int bml_status, sigkill; int save_errno; int ftrunc_status; uint4 jnl_status; uint4 old_total, new_total; uint4 old_free, new_free; uint4 end_blocks; int4 blks_in_lmap, blk; gtm_uint64_t before_trunc_file_size; off_t trunc_file_size; off_t padding; uchar_ptr_t lmap_addr; boolean_t was_crit; uint4 found_busy_blk; srch_blk_status bmphist; srch_blk_status *blkhist; srch_hist alt_hist; trans_num curr_tn; blk_hdr_ptr_t lmap_blk_hdr; block_id *blkid_ptr; unix_db_info *udi; jnl_private_control *jpc; jnl_buffer_ptr_t jbp; char *err_msg; intrpt_state_t prev_intrpt_state; off_t offset; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; csa = cs_addrs; csd = cs_data; if (dba_mm == csd->acc_meth) { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCNOTBG, 2, REG_LEN_STR(gv_cur_region)); return TRUE; } if ((GDSVCURR != csd->desired_db_format) || (csd->blks_to_upgrd != 0)) { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCNOV4, 2, REG_LEN_STR(gv_cur_region)); return TRUE; } if (csa->ti->free_blocks < (truncate_percent * csa->ti->total_blks / 100)) { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_MUTRUNCNOSPACE, 3, REG_LEN_STR(gv_cur_region), truncate_percent); return TRUE; } /* already checked for parallel truncates on this region --- see mupip_reorg.c */ gv_target = NULL; assert(csa->nl->trunc_pid == process_id); assert(dba_mm != csd->acc_meth); old_total = csa->ti->total_blks; old_free = csa->ti->free_blocks; sigkill = 0; found_busy_blk = 0; memset(&alt_hist, 0, SIZEOF(alt_hist)); /* null-initialize history */ assert(csd->bplmap == BLKS_PER_LMAP); end_blocks = old_total % BLKS_PER_LMAP; /* blocks in the last lmap (first one we start scanning) */ if (0 == end_blocks) end_blocks = BLKS_PER_LMAP; num_local_maps = DIVIDE_ROUND_UP(old_total, BLKS_PER_LMAP); /* ======================================== PHASE 1 ======================================== */ for (lmap_num = num_local_maps - 1; (lmap_num > 0 && !found_busy_blk); lmap_num--) { if (mu_ctrly_occurred || mu_ctrlc_occurred) return TRUE; assert(csa->ti->total_blks >= old_total); /* otherwise, a concurrent truncate happened... */ if (csa->ti->total_blks != old_total) /* Extend (likely called by mupip extend) -- don't truncate */ { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_MUTRUNCNOSPACE, 3, REG_LEN_STR(gv_cur_region), truncate_percent); return TRUE; } lmap_blk_num = lmap_num * BLKS_PER_LMAP; if (csa->nl->highest_lbm_with_busy_blk >= lmap_blk_num) { found_busy_blk = lmap_blk_num; break; } blks_in_lmap = (lmap_num == num_local_maps - 1) ? end_blocks : BLKS_PER_LMAP; /* Loop through non-bitmap blocks of this lmap, do recycled2free */ DBGEHND((stdout, "DBG:: lmap_num = [%lu], lmap_blk_num = [%lu], blks_in_lmap = [%lu]\n", lmap_num, lmap_blk_num, blks_in_lmap)); for (blk = 1; blk < blks_in_lmap && blk != -1 && !found_busy_blk;) { t_begin(ERR_MUTRUNCFAIL, UPDTRNS_DB_UPDATED_MASK); for (;;) /* retry loop for recycled to free transactions */ { curr_tn = csd->trans_hist.curr_tn; /* Read the nth local bitmap into memory */ bmphist.blk_num = lmap_blk_num; bmphist.buffaddr = t_qread(bmphist.blk_num, &bmphist.cycle, &bmphist.cr); lmap_blk_hdr = (blk_hdr_ptr_t)bmphist.buffaddr; if (!(bmphist.buffaddr) || (BM_SIZE(BLKS_PER_LMAP) != lmap_blk_hdr->bsiz)) { /* Could not read the block successfully. Retry. 
*/ t_retry((enum cdb_sc)rdfail_detail); continue; } lmap_addr = bmphist.buffaddr + SIZEOF(blk_hdr); /* starting from the hint (blk itself), find the first busy or recycled block */ blk = bml_find_busy_recycled(blk, lmap_addr, blks_in_lmap, &bml_status); assert(blk < BLKS_PER_LMAP); if (blk == -1 || blk >= blks_in_lmap) { /* done with this lmap, continue to next */ t_abort(gv_cur_region, csa); break; } else if (BLK_BUSY == bml_status || csa->nl->highest_lbm_with_busy_blk >= lmap_blk_num) { /* stop processing blocks... skip ahead to phase 2 */ found_busy_blk = lmap_blk_num; t_abort(gv_cur_region, csa); break; } else if (BLK_RECYCLED == bml_status) { /* Write PBLK records for recycled blocks only if before_image journaling is * enabled. t_end() takes care of checking if journaling is enabled and * writing PBLK record. We have to at least mark the recycled block as free. */ RESET_UPDATE_ARRAY; update_trans = UPDTRNS_DB_UPDATED_MASK; *((block_id *)update_array_ptr) = blk; update_array_ptr += SIZEOF(block_id); *(int *)update_array_ptr = 0; alt_hist.h[1].blk_num = 0; alt_hist.h[0].level = 0; alt_hist.h[0].cse = NULL; alt_hist.h[0].tn = curr_tn; alt_hist.h[0].blk_num = lmap_blk_num + blk; alt_hist.h[0].buffaddr = t_qread(alt_hist.h[0].blk_num, &alt_hist.h[0].cycle, &alt_hist.h[0].cr); if (!alt_hist.h[0].buffaddr) { t_retry((enum cdb_sc)rdfail_detail); continue; } if (!t_recycled2free(&alt_hist.h[0])) { t_retry(cdb_sc_lostbmlcr); continue; } t_write_map(&bmphist, (unsigned char *)update_array, curr_tn, 0); /* Set the opcode for INCTN record written by t_end() */ inctn_opcode = inctn_blkmarkfree; if ((trans_num)0 == t_end(&alt_hist, NULL, TN_NOT_SPECIFIED)) continue; /* block processed, scan from the next one */ blk++; break; } else { assert(t_tries < CDB_STAGNATE); t_retry(cdb_sc_badbitmap); continue; } } /* END recycled2free retry loop */ } /* END scanning blocks of this particular lmap */ /* Write PBLK for the bitmap block, in case it hasn't been written i.e. t_end() was never called above */ /* Do a transaction that just increments the bitmap block's tn so that t_end() can do its thing */ DBGEHND((stdout, "DBG:: bitmap block inctn -- lmap_blk_num = [%lu]\n", lmap_blk_num)); t_begin(ERR_MUTRUNCFAIL, UPDTRNS_DB_UPDATED_MASK); for (;;) { RESET_UPDATE_ARRAY; BLK_ADDR(blkid_ptr, SIZEOF(block_id), block_id); *blkid_ptr = 0; update_trans = UPDTRNS_DB_UPDATED_MASK; inctn_opcode = inctn_mu_reorg; /* inctn_mu_truncate */ curr_tn = csd->trans_hist.curr_tn; blkhist = &alt_hist.h[0]; blkhist->blk_num = lmap_blk_num; blkhist->tn = curr_tn; blkhist->cse = NULL; /* start afresh (do not use value from previous retry) */ /* Read the nth local bitmap into memory */ blkhist->buffaddr = t_qread(lmap_blk_num, (sm_int_ptr_t)&blkhist->cycle, &blkhist->cr); lmap_blk_hdr = (blk_hdr_ptr_t)blkhist->buffaddr; if (!(blkhist->buffaddr) || (BM_SIZE(BLKS_PER_LMAP) != lmap_blk_hdr->bsiz)) { /* Could not read the block successfully. Retry. 
*/ t_retry((enum cdb_sc)rdfail_detail); continue; } t_write_map(blkhist, (unsigned char *)blkid_ptr, curr_tn, 0); blkhist->blk_num = 0; /* create empty history for bitmap block */ if ((trans_num)0 == t_end(&alt_hist, NULL, TN_NOT_SPECIFIED)) continue; break; } } /* END scanning lmaps */ /* ======================================== PHASE 2 ======================================== */ assert(!csa->now_crit); for (;;) { /* wait for FREEZE, we don't want to truncate a frozen database */ grab_crit(gv_cur_region); if (FROZEN_CHILLED(cs_data)) DO_CHILLED_AUTORELEASE(csa, cs_data); if (!FROZEN(cs_data) && !IS_REPL_INST_FROZEN) break; rel_crit(gv_cur_region); while (FROZEN(cs_data) || IS_REPL_INST_FROZEN) { hiber_start(1000); if (FROZEN_CHILLED(cs_data) && CHILLED_AUTORELEASE(cs_data)) break; } } assert(csa->nl->trunc_pid == process_id); /* Flush pending updates to disk. If this is not done, old updates can be flushed AFTER ftruncate, extending the file. */ if (!wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_MSYNC_DB)) { assert(FALSE); gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT("MUPIP REORG TRUNCATE"), DB_LEN_STR(gv_cur_region)); rel_crit(gv_cur_region); return FALSE; } csa->nl->highest_lbm_with_busy_blk = MAX(found_busy_blk, csa->nl->highest_lbm_with_busy_blk); assert(IS_BITMAP_BLK(csa->nl->highest_lbm_with_busy_blk)); new_total = MIN(old_total, csa->nl->highest_lbm_with_busy_blk + BLKS_PER_LMAP); if (mu_ctrly_occurred || mu_ctrlc_occurred) { rel_crit(gv_cur_region); return TRUE; } else if (csa->ti->total_blks != old_total || new_total == old_total) { assert(csa->ti->total_blks >= old_total); /* Better have been an extend, not a truncate... */ gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_MUTRUNCNOSPACE, 3, REG_LEN_STR(gv_cur_region), truncate_percent); rel_crit(gv_cur_region); return TRUE; } else if (GDSVCURR != csd->desired_db_format || csd->blks_to_upgrd != 0 || !csd->fully_upgraded) { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCNOV4, 2, REG_LEN_STR(gv_cur_region)); rel_crit(gv_cur_region); return TRUE; } else if (SNAPSHOTS_IN_PROG(csa->nl)) { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCSSINPROG, 2, REG_LEN_STR(gv_cur_region)); rel_crit(gv_cur_region); return TRUE; } else if (BACKUP_NOT_IN_PROGRESS != cs_addrs->nl->nbb) { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCBACKINPROG, 2, REG_LEN_STR(gv_cur_region)); rel_crit(gv_cur_region); return TRUE; } DEFER_INTERRUPTS(INTRPT_IN_TRUNC, prev_intrpt_state); if (JNL_ENABLED(csa)) { /* Write JRT_TRUNC and INCTN records */ if (!jgbl.dont_reset_gbl_jrec_time) SET_GBL_JREC_TIME; /* needed before jnl_ensure_open as that can write jnl records */ jpc = csa->jnl; jbp = jpc->jnl_buff; /* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order * of jnl records. This needs to be done BEFORE the jnl_ensure_open as that could write * journal records (if it decides to switch to a new journal file). 
*/ ADJUST_GBL_JREC_TIME(jgbl, jbp); jnl_status = jnl_ensure_open(gv_cur_region, csa); if (SS_NORMAL != jnl_status) send_msg_csa(CSA_ARG(csa) VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(csd), DB_LEN_STR(gv_cur_region)); else { if (0 == jpc->pini_addr) jnl_put_jrt_pini(csa); jnl_write_trunc_rec(csa, old_total, csa->ti->free_blocks, new_total); inctn_opcode = inctn_mu_reorg; jnl_write_inctn_rec(csa); jnl_status = jnl_flush(gv_cur_region); if (SS_NORMAL != jnl_status) { send_msg_csa(CSA_ARG(csa) VARLSTCNT(9) ERR_JNLFLUSH, 2, JNL_LEN_STR(csd), ERR_TEXT, 2, RTS_ERROR_TEXT("Error with journal flush during mu_truncate"), jnl_status); assert(NOJNL == jpc->channel); /* jnl file lost has been triggered */ } } } /* Good to go ahead and REALLY truncate (reduce total_blks, clear cache_array, FTRUNCATE) */ curr_tn = csa->ti->curr_tn; CHECK_TN(csa, csd, curr_tn); udi = FILE_INFO(gv_cur_region); /* Information used by recover_truncate to check if the file size and csa->ti->total_blks are INCONSISTENT */ trunc_file_size = BLK_ZERO_OFF(csd->start_vbn) + ((off_t)csd->blk_size * (new_total + 1)); csd->after_trunc_total_blks = new_total; csd->before_trunc_free_blocks = csa->ti->free_blocks; csd->before_trunc_total_blks = old_total; /* Flags interrupted truncate for recover_truncate */ /* file size and total blocks: INCONSISTENT */ csa->ti->total_blks = new_total; /* past the point of no return -- shared memory intact */ assert(csa->ti->free_blocks >= DELTA_FREE_BLOCKS(old_total, new_total)); csa->ti->free_blocks -= DELTA_FREE_BLOCKS(old_total, new_total); new_free = csa->ti->free_blocks; KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_1); /* 55 : Issue a kill -9 before 1st fsync */ fileheader_sync(gv_cur_region); DB_FSYNC(gv_cur_region, udi, csa, db_fsync_in_prog, save_errno); CHECK_DBSYNC(gv_cur_region, save_errno); /* past the point of no return -- shared memory deleted */ KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_2); /* 56 : Issue a kill -9 after 1st fsync */ clear_cache_array(csa, csd, gv_cur_region, new_total, old_total); offset = (off_t)BLK_ZERO_OFF(csd->start_vbn) + (off_t)new_total * csd->blk_size; save_errno = db_write_eof_block(udi, udi->fd, csd->blk_size, offset, &(TREF(dio_buff))); if (0 != save_errno) { err_msg = (char *)STRERROR(errno); rts_error_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_MUTRUNCERROR, 4, REG_LEN_STR(gv_cur_region), LEN_AND_STR(err_msg)); return FALSE; } KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_3); /* 57 : Issue a kill -9 after reducing csa->ti->total_blks, before FTRUNCATE */ /* Execute an ftruncate() and truncate the DB file * ftruncate() is a SYSTEM CALL on almost all platforms (except SunOS) * It ignores kill -9 signal till its operation is completed. * So we can safely assume that the result of ftruncate() will be complete. */ FTRUNCATE(FILE_INFO(gv_cur_region)->fd, trunc_file_size, ftrunc_status); if (0 != ftrunc_status) { err_msg = (char *)STRERROR(errno); rts_error_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_MUTRUNCERROR, 4, REG_LEN_STR(gv_cur_region), LEN_AND_STR(err_msg)); /* should go through recover_truncate now, which will again try to FTRUNCATE */ return FALSE; } /* file size and total blocks: CONSISTENT (shrunk) */ KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_4); /* 58 : Issue a kill -9 after FTRUNCATE, before 2nd fsync */ csa->nl->root_search_cycle++; /* Force concurrent processes to restart in t_end/tp_tend to make sure no one * tries to commit updates past the end of the file. Bitmap validations together * with highest_lbm_with_busy_blk should actually be sufficient, so this is * just to be safe. 
csd->before_trunc_total_blks = 0; /* indicate CONSISTENT */ /* Increment TN */ assert(csa->ti->early_tn == csa->ti->curr_tn); csd->trans_hist.early_tn = csd->trans_hist.curr_tn + 1; INCREMENT_CURR_TN(csd); fileheader_sync(gv_cur_region); DB_FSYNC(gv_cur_region, udi, csa, db_fsync_in_prog, save_errno); KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_5); /* 59 : Issue a kill -9 after 2nd fsync */ CHECK_DBSYNC(gv_cur_region, save_errno); ENABLE_INTERRUPTS(INTRPT_IN_TRUNC, prev_intrpt_state); curr_tn = csa->ti->curr_tn; rel_crit(gv_cur_region); send_msg_csa(CSA_ARG(csa) VARLSTCNT(7) ERR_MUTRUNCSUCCESS, 5, DB_LEN_STR(gv_cur_region), old_total, new_total, &curr_tn); util_out_print("Truncated region: !AD. Reduced total blocks from [!UL] to [!UL]. Reduced free blocks from [!UL] to [!UL].", FLUSH, REG_LEN_STR(gv_cur_region), old_total, new_total, old_free, new_free); return TRUE; } /* END of mu_truncate() */
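/* [Editor's sketch] mu_truncate's crash safety comes from ordering: persist the "interrupted
 * truncate" markers (before_trunc_total_blks etc.) with an fsync BEFORE the ftruncate, then clear
 * them and fsync again afterwards, so recover_truncate can always tell on which side of the
 * ftruncate a kill -9 landed. A minimal POSIX illustration of that write/fsync/truncate/fsync
 * ordering; the file name and sizes are invented for the demo: */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("demo.dat", O_CREAT | O_RDWR, 0644);
    if (fd < 0) { perror("open"); return 1; }
    if (ftruncate(fd, 1 << 20)) { perror("grow"); return 1; }   /* start at 1 MiB */
    /* Phase A: record the intent markers in the header, then flush them to disk */
    if (fsync(fd)) { perror("fsync1"); return 1; }
    /* Phase B: the actual shrink; ftruncate completes even across kill -9 */
    if (ftruncate(fd, 1 << 19)) { perror("shrink"); return 1; }
    /* Phase C: clear the intent markers and flush again -> state is CONSISTENT */
    if (fsync(fd)) { perror("fsync2"); return 1; }
    close(fd);
    return 0;
}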
int4 mu_size_arsample(mval *gn, uint4 M, boolean_t ar, int seed) { enum cdb_sc status; trans_num ret_tn; int k, h; boolean_t verify_reads; boolean_t tn_aborted; unsigned int lcl_t_tries; double r[MAX_BT_DEPTH + 1]; /* r[j] is #records in level j block of current traversal */ stat_t rstat, ustat; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; inctn_opcode = inctn_invalid_op; op_gvname(VARLSTCNT(1) gn); if (0 == gv_target->root) { /* Global does not exist (online rollback). Not an error. */ gtm_putmsg(VARLSTCNT(4) ERR_GBLNOEXIST, 2, gn->str.len, gn->str.addr); return EXIT_NRM; } if (!seed) seed = (int4)(time(0) * process_id); srand48(seed); /* do random traversals until M of them are accepted at level 1 */ INIT_STATS(rstat); for (k = 1; rstat.N[1] < M; k++) { if (mu_ctrlc_occurred || mu_ctrly_occurred) return EXIT_ERR; t_begin(ERR_MUSIZEFAIL, 0); for (;;) { CLEAR_VECTOR(r); if (cdb_sc_normal != (status = rand_traverse(r))) { assert(CDB_STAGNATE > t_tries); t_retry(status); continue; } gv_target->clue.end = 0; gv_target->hist.h[0] = gv_target->hist.h[1]; /* No level 0 block to validate */ DEBUG_ONLY(lcl_t_tries = t_tries); if ((trans_num)0 == (ret_tn = t_end(&gv_target->hist, NULL, TN_NOT_SPECIFIED))) { ABORT_TRANS_IF_GBL_EXIST_NOMORE(lcl_t_tries, tn_aborted); if (tn_aborted) { /* Global does not exist (online rollback). Not an error. */ gtm_putmsg(VARLSTCNT(4) ERR_GBLNOEXIST, 2, gn->str.len, gn->str.addr); return EXIT_NRM; } continue; } accum_stats_ar(&rstat, r, ar); break; } } finalize_stats_ar(&rstat, ar); /* display rstat data */ /* Showing the error as 2 standard deviations which is a 95% confidence interval for the mean number of blocks at * each level*/ util_out_print("!/Number of generated samples = !UL", FLUSH, rstat.n); util_out_print("Number of accepted samples = !UL", FLUSH, rstat.N[1]); util_out_print("Level Blocks 2 sigma(+/-) % Accepted", FLUSH); for (h = MAX_BT_DEPTH; (h >= 0) && (rstat.blktot[h] < EPS); h--); for ( ; h > 0; h--) util_out_print("!5UL !15UL !15UL ~ !3UL% !15UL", FLUSH, h, (int)ROUND(rstat.blktot[h]), (int)ROUND(sqrt(rstat.blkerr[h])*2), (int)ROUND(sqrt(rstat.blkerr[h])*2/rstat.blktot[h]*100), (int)ROUND(100.0*rstat.N[h]/rstat.n) ); util_out_print("!5UL !15UL !15UL ~ !3UL% N/A", FLUSH, h, (int)ROUND(rstat.blktot[h]), (int)ROUND(sqrt(rstat.blkerr[h])*2), (int)ROUND(sqrt(rstat.blkerr[h])*2/rstat.blktot[h]*100.0) ); util_out_print("Total !15UL !15UL ~ !3UL% N/A", FLUSH, (int)ROUND(rstat.B), (int)ROUND(sqrt(rstat.error)*2), (int)ROUND(sqrt(rstat.error)*2/rstat.B*100.0) ); return EXIT_NRM; }
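/* [Editor's sketch] Unlike the importance-sampling variant, mu_size_arsample keeps generating random
 * traversals until M of them are accepted at level 1. The loop below reproduces only that
 * accept/reject shape; the uniform-against-a-constant test is a placeholder, not the real acceptance
 * criterion: */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const int M = 100;              /* samples to accept */
    int accepted = 0, generated = 0;
    srand48(42);                    /* same seeding call family as the real code */
    while (accepted < M)
    {
        generated++;
        if (drand48() < 0.35)       /* placeholder acceptance probability */
            accepted++;
    }
    printf("generated=%d accepted=%d (%.0f%%)\n", generated, accepted,
           100.0 * accepted / generated);
    return 0;
}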
trans_num gvcst_bmp_mark_free(kill_set *ks) { block_id bit_map, next_bm, *updptr; blk_ident *blk, *blk_top, *nextblk; trans_num ctn, start_db_fmt_tn; unsigned int len; int4 blk_prev_version; srch_hist alt_hist; trans_num ret_tn = 0; boolean_t visit_blks; srch_blk_status bmphist; cache_rec_ptr_t cr; enum db_ver ondsk_blkver; error_def(ERR_GVKILLFAIL); assert(inctn_bmp_mark_free_gtm == inctn_opcode || inctn_bmp_mark_free_mu_reorg == inctn_opcode); /* Note down the desired_db_format_tn before you start relying on cs_data->fully_upgraded. * If the db is fully_upgraded, take the optimal path that does not need to read each block being freed. * But in order to detect concurrent desired_db_format changes, note down the tn (when the last format change occurred) * before the fully_upgraded check and after having noted down the database current_tn. * If they are the same, then we are guaranteed no concurrent desired_db_format change occurred. * If they are not, then fall through to the non-optimal path where each to-be-killed block has to be visited. * The reason we need to visit every block in case desired_db_format changes is to take care of the case where * MUPIP REORG DOWNGRADE concurrently changes a block that we are about to free. */ start_db_fmt_tn = cs_data->desired_db_format_tn; visit_blks = (!cs_data->fully_upgraded); /* Local evaluation */ assert(!visit_blks || (visit_blks && dba_bg == cs_addrs->hdr->acc_meth)); /* must have blks_to_upgrd == 0 for non-BG */ assert(!dollar_tlevel); /* Should NOT be in TP now */ blk = &ks->blk[0]; blk_top = &ks->blk[ks->used]; if (!visit_blks) { /* Database has been completely upgraded. Free all blocks in one bitmap as part of one transaction. */ assert(cs_data->db_got_to_v5_once); /* assert all V4 fmt blocks (including RECYCLED) have space for V5 upgrade */ inctn_detail.blknum_struct.blknum = 0; /* to indicate no adjustment to "blks_to_upgrd" necessary */ for ( ; blk < blk_top; blk = nextblk) { if (0 != blk->flag) { nextblk = blk + 1; continue; } assert(0 < blk->block); assert((int4)blk->block < cs_addrs->ti->total_blks); bit_map = ROUND_DOWN2((int)blk->block, BLKS_PER_LMAP); next_bm = bit_map + BLKS_PER_LMAP; CHECK_AND_RESET_UPDATE_ARRAY; /* reset update_array_ptr to update_array */ /* Scan for the next local bitmap */ updptr = (block_id *)update_array_ptr; for (nextblk = blk; (0 == nextblk->flag) && (nextblk < blk_top) && ((block_id)nextblk->block < next_bm); ++nextblk) { assert((block_id)nextblk->block - bit_map); *updptr++ = (block_id)nextblk->block - bit_map; } len = (unsigned int)((char *)nextblk - (char *)blk); update_array_ptr = (char *)updptr; alt_hist.h[0].blk_num = 0; /* need for calls to T_END for bitmaps */ /* the following assumes SIZEOF(blk_ident) == SIZEOF(int) */ assert(SIZEOF(blk_ident) == SIZEOF(int)); *(int *)update_array_ptr = 0; t_begin(ERR_GVKILLFAIL, UPDTRNS_DB_UPDATED_MASK); for (;;) { ctn = cs_addrs->ti->curr_tn; /* Need a read fence before reading fields from cs_data as we are reading outside * of crit and relying on this value to detect desired db format state change. */ SHM_READ_MEMORY_BARRIER; if (start_db_fmt_tn != cs_data->desired_db_format_tn) { /* Concurrent db format change has occurred. Need to visit every block to be killed * to determine its block format. 
Fall through to the non-optimal path below */ ret_tn = 0; break; } bmphist.blk_num = bit_map; if (NULL == (bmphist.buffaddr = t_qread(bmphist.blk_num, (sm_int_ptr_t)&bmphist.cycle, &bmphist.cr))) { t_retry((enum cdb_sc)rdfail_detail); continue; } t_write_map(&bmphist, (uchar_ptr_t)update_array, ctn, -(int4)(nextblk - blk)); if ((trans_num)0 == (ret_tn = t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))) continue; break; } if (0 == ret_tn) /* db format change occurred. Fall through to below for loop to visit each block */ break; } } /* for all blocks in the kill_set */
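/* [Editor's sketch] The fast path above hinges on re-reading desired_db_format_tn inside the
 * transaction, behind SHM_READ_MEMORY_BARRIER, and comparing it with the value sampled before the
 * fully_upgraded check. A stand-alone rendering of that generation-counter check using C11 atomics;
 * all names are hypothetical stand-ins: */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long format_tn;     /* stand-in for csd->desired_db_format_tn */

static bool fast_path_still_valid(unsigned long start_tn)
{
    atomic_thread_fence(memory_order_acquire);      /* cf. SHM_READ_MEMORY_BARRIER */
    return start_tn == atomic_load_explicit(&format_tn, memory_order_relaxed);
}

int main(void)
{
    unsigned long start_tn = atomic_load(&format_tn);   /* sampled before choosing the fast path */
    atomic_fetch_add(&format_tn, 1);                    /* simulate a concurrent REORG DOWNGRADE */
    printf(fast_path_still_valid(start_tn) ? "fast path\n" : "fall back: visit every block\n");
    return 0;
}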
void mu_reorg_upgrd_dwngrd(void) { blk_hdr new_hdr; blk_segment *bs1, *bs_ptr; block_id *blkid_ptr, curblk, curbmp, start_blk, stop_blk, start_bmp, last_bmp; block_id startblk_input, stopblk_input; boolean_t upgrade, downgrade, safejnl, nosafejnl, region, first_reorg_in_this_db_fmt, reorg_entiredb; boolean_t startblk_specified, stopblk_specified, set_fully_upgraded, db_got_to_v5_once, mark_blk_free; cache_rec_ptr_t cr; char *bml_lcl_buff = NULL, *command, *reorg_command; sm_uc_ptr_t bptr = NULL; cw_set_element *cse; enum cdb_sc cdb_status; enum db_ver new_db_format, ondsk_blkver; gd_region *reg; int cycle; int4 blk_seg_cnt, blk_size; /* needed for BLK_INIT,BLK_SEG and BLK_FINI macros */ int4 blocks_left, expected_blks2upgrd, actual_blks2upgrd, total_blks, free_blks; int4 status, status1, mapsize, lcnt, bml_status; reorg_stats_t reorg_stats; sgmnt_addrs *csa; sgmnt_data_ptr_t csd; sm_uc_ptr_t blkBase, bml_sm_buff; /* shared memory pointer to the bitmap global buffer */ srch_hist alt_hist; srch_blk_status *blkhist, bmlhist; tp_region *rptr; trans_num curr_tn; unsigned char save_cw_set_depth; uint4 lcl_update_trans; region = (CLI_PRESENT == cli_present("REGION")); upgrade = (CLI_PRESENT == cli_present("UPGRADE")); downgrade = (CLI_PRESENT == cli_present("DOWNGRADE")); assert(upgrade && !downgrade || !upgrade && downgrade); command = upgrade ? "UPGRADE" : "DOWNGRADE"; reorg_command = upgrade ? "MUPIP REORG UPGRADE" : "MUPIP REORG DOWNGRADE"; reorg_entiredb = TRUE; /* unless STARTBLK or STOPBLK is specified we are going to {up,down}grade the entire database */ startblk_specified = FALSE; assert(SIZEOF(block_id) == SIZEOF(uint4)); if ((CLI_PRESENT == cli_present("STARTBLK")) && (cli_get_hex("STARTBLK", (uint4 *)&startblk_input))) { reorg_entiredb = FALSE; startblk_specified = TRUE; } stopblk_specified = FALSE; assert(SIZEOF(block_id) == SIZEOF(uint4)); if ((CLI_PRESENT == cli_present("STOPBLK")) && (cli_get_hex("STOPBLK", (uint4 *)&stopblk_input))) { reorg_entiredb = FALSE; stopblk_specified = TRUE; } mu_reorg_upgrd_dwngrd_in_prog = TRUE; mu_reorg_nosafejnl = (CLI_NEGATED == cli_present("SAFEJNL")) ? TRUE : FALSE; assert(region); status = SS_NORMAL; error_mupip = FALSE; gvinit(); /* initialize gd_header (needed by the later call to mu_getlst) */ mu_getlst("REG_NAME", SIZEOF(tp_region)); /* get the parameter corresponding to REGION qualifier */ if (error_mupip) { util_out_print("!/MUPIP REORG !AD cannot proceed with above errors!/", TRUE, LEN_AND_STR(command)); mupip_exit(ERR_MUNOACTION); } assert(DBKEYSIZE(MAX_KEY_SZ) == gv_keysize); /* no need to invoke GVKEYSIZE_INIT_IF_NEEDED macro */ gv_target = targ_alloc(gv_keysize, NULL, NULL); /* t_begin needs this initialized */ gv_target_list = NULL; memset(&alt_hist, 0, SIZEOF(alt_hist)); /* null-initialize history */ blkhist = &alt_hist.h[0]; for (rptr = grlist; NULL != rptr; rptr = rptr->fPtr) { if (mu_ctrly_occurred || mu_ctrlc_occurred) break; reg = rptr->reg; util_out_print("!/Region !AD : MUPIP REORG !AD started", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command)); if (reg_cmcheck(reg)) { util_out_print("Region !AD : MUPIP REORG !AD cannot run across network", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command)); status = ERR_MUNOFINISH; continue; } mu_reorg_process = TRUE; /* gvcst_init will use this value to use gtm_poollimit settings. */ gvcst_init(reg); mu_reorg_process = FALSE; assert(update_array != NULL); /* access method stored in global directory and database file header might be different in which case * the database setting prevails. 
therefore, the access method check can be done only after opening * the database (i.e. after the gvcst_init) */ if (dba_bg != REG_ACC_METH(reg)) { util_out_print("Region !AD : MUPIP REORG !AD cannot continue as access method is not BG", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command)); status = ERR_MUNOFINISH; continue; } /* The mu_getlst call above uses insert_region to create the grlist, which ensures that duplicate regions mapping to * the same db file correspond to only one grlist entry. */ assert(FALSE == reg->was_open); TP_CHANGE_REG(reg); /* sets gv_cur_region, cs_addrs, cs_data */ csa = cs_addrs; csd = cs_data; blk_size = csd->blk_size; /* "blk_size" is used by the BLK_FINI macro */ if (reg->read_only) { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(reg)); status = ERR_MUNOFINISH; continue; } assert(GDSVCURR == GDSV6); /* so we trip this assert in case GDSVCURR changes without a change to this module */ new_db_format = (upgrade ? GDSV6 : GDSV4); grab_crit(reg); curr_tn = csd->trans_hist.curr_tn; /* set the desired db format in the file header to the appropriate version, increment transaction number */ status1 = desired_db_format_set(reg, new_db_format, reorg_command); assert(csa->now_crit); /* desired_db_format_set() should not have released crit */ first_reorg_in_this_db_fmt = TRUE; /* with the current desired_db_format, this is the first reorg */ if (SS_NORMAL != status1) { /* "desired_db_format_set" would have printed appropriate error messages */ if (ERR_MUNOACTION != status1) { /* real error occurred while setting the db format. skip to next region */ status = ERR_MUNOFINISH; rel_crit(reg); continue; } util_out_print("Region !AD : Desired DB Format remains at !AD after !AD", TRUE, REG_LEN_STR(reg), LEN_AND_STR(gtm_dbversion_table[new_db_format]), LEN_AND_STR(reorg_command)); if (csd->reorg_db_fmt_start_tn == csd->desired_db_format_tn) first_reorg_in_this_db_fmt = FALSE; } else util_out_print("Region !AD : Desired DB Format set to !AD by !AD", TRUE, REG_LEN_STR(reg), LEN_AND_STR(gtm_dbversion_table[new_db_format]), LEN_AND_STR(reorg_command)); assert(dba_bg == csd->acc_meth); /* Check blks_to_upgrd counter to see if upgrade/downgrade is complete */ total_blks = csd->trans_hist.total_blks; free_blks = csd->trans_hist.free_blocks; actual_blks2upgrd = csd->blks_to_upgrd; /* If MUPIP REORG UPGRADE and there is no block to upgrade in the database as indicated by BOTH * "csd->blks_to_upgrd" and "csd->fully_upgraded", then we can skip processing. * If MUPIP REORG UPGRADE and all non-free blocks need to be upgraded then again we can skip processing. 
*/ if ((upgrade && (0 == actual_blks2upgrd) && csd->fully_upgraded) || (!upgrade && ((total_blks - free_blks) == actual_blks2upgrd))) { util_out_print("Region !AD : Blocks to Upgrade counter indicates no action needed for MUPIP REORG !AD", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command)); util_out_print("Region !AD : Total Blocks = [0x!XL] : Free Blocks = [0x!XL] : " "Blocks to upgrade = [0x!XL]", TRUE, REG_LEN_STR(reg), total_blks, free_blks, actual_blks2upgrd); util_out_print("Region !AD : MUPIP REORG !AD finished!/", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command)); rel_crit(reg); continue; } stop_blk = total_blks; if (stopblk_specified && stopblk_input <= stop_blk) stop_blk = stopblk_input; if (first_reorg_in_this_db_fmt) { /* Note down reorg start tn (in case we are interrupted, future reorg will know to resume) */ csd->reorg_db_fmt_start_tn = csd->desired_db_format_tn; csd->reorg_upgrd_dwngrd_restart_block = 0; start_blk = (startblk_specified ? startblk_input : 0); } else { /* Either a concurrent MUPIP REORG of the same type ({up,down}grade) is currently running * or a previously running REORG of the same type was interrupted (Ctrl-Ced). * In either case resume processing from whatever restart block number is stored in the fileheader. * The only exception is if "STARTBLK" was specified in the input, in which case use that unconditionally. */ start_blk = (startblk_specified ? startblk_input : csd->reorg_upgrd_dwngrd_restart_block); } if (start_blk > stop_blk) start_blk = stop_blk; mu_reorg_upgrd_dwngrd_start_tn = csd->reorg_db_fmt_start_tn; /* Before releasing crit, flush the file-header and dirty buffers in cache to disk. This is because we are now * going to read each GDS block directly from disk to determine if it needs to be upgraded/downgraded or not. */ if (!wcs_flu(WCSFLU_FLUSH_HDR)) /* wcs_flu assumes gv_cur_region is set (which it is in this routine) */ { rel_crit(reg); gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT("MUPIP REORG UPGRADE/DOWNGRADE"), DB_LEN_STR(reg)); status = ERR_MUNOFINISH; continue; } rel_crit(reg); /* Loop through entire database one GDS block at a time and upgrade/downgrade each of them */ status1 = SS_NORMAL; start_bmp = ROUND_DOWN2(start_blk, BLKS_PER_LMAP); last_bmp = ROUND_DOWN2(stop_blk - 1, BLKS_PER_LMAP); curblk = start_blk; /* curblk is the block to be upgraded/downgraded */ util_out_print("Region !AD : Started processing from block number [0x!XL]", TRUE, REG_LEN_STR(reg), curblk); if (NULL != bptr) { /* malloc/free "bptr" for each region as GDS block-size can be different */ free(bptr); bptr = NULL; } memset(&reorg_stats, 0, SIZEOF(reorg_stats)); /* initialize statistics for this region */ for (curbmp = start_bmp; curbmp <= last_bmp; curbmp += BLKS_PER_LMAP) { if (mu_ctrly_occurred || mu_ctrlc_occurred) { status1 = ERR_MUNOFINISH; break; } /* -------------------------------------------------------------- * Read in current bitmap block * -------------------------------------------------------------- */ assert(!csa->now_crit); bml_sm_buff = t_qread(curbmp, (sm_int_ptr_t)&cycle, &cr); /* bring block into the cache outside of crit */ reorg_stats.blks_read_from_disk_bmp++; grab_crit_encr_cycle_sync(reg); /* needed so t_qread does not return NULL below */ if (mu_reorg_upgrd_dwngrd_start_tn != csd->desired_db_format_tn) { /* csd->desired_db_format changed since reorg started. 
discontinue the reorg */ /* see later comment on "csd->reorg_upgrd_dwngrd_restart_block" for why the assignment * of this field should be done only if a db format change did not occur. */ rel_crit(reg); status1 = ERR_MUNOFINISH; /* This "start_tn" check is redone after the for-loop and an error message is printed there */ break; } else if (reorg_entiredb) { /* Change "csd->reorg_upgrd_dwngrd_restart_block" only if STARTBLK or STOPBLK was NOT specified */ assert(csd->reorg_upgrd_dwngrd_restart_block <= MAX(start_blk, curbmp)); csd->reorg_upgrd_dwngrd_restart_block = curbmp; /* previous blocks have been upgraded/downgraded */ } /* Check blks_to_upgrd counter to see if upgrade/downgrade is complete. * Repeat check done a few steps earlier outside of this for loop. */ total_blks = csd->trans_hist.total_blks; free_blks = csd->trans_hist.free_blocks; actual_blks2upgrd = csd->blks_to_upgrd; if ((upgrade && (0 == actual_blks2upgrd) && csd->fully_upgraded) || (!upgrade && ((total_blks - free_blks) == actual_blks2upgrd))) { rel_crit(reg); break; } bml_sm_buff = t_qread(curbmp, (sm_int_ptr_t)&cycle, &cr); /* now that in crit, note down stable buffer */ if (NULL == bml_sm_buff) rts_error_csa(CSA_ARG(csa) VARLSTCNT(1) ERR_DSEBLKRDFAIL); ondsk_blkver = cr->ondsk_blkver; /* note down db fmt on disk for bitmap block */ /* Take a copy of the shared memory bitmap buffer into process-private memory before releasing crit. * We are interested in those blocks that are currently marked as USED in the bitmap. * It is possible that once we release crit, concurrent updates change the bitmap state of those blocks. * In that case, those updates will take care of doing the upgrade/downgrade of those blocks in the * format currently set in csd->desired_db_format i.e. accomplishing MUPIP REORG UPGRADE/DOWNGRADE's job. * If the desired_db_format changes concurrently, we will stop doing REORG UPGRADE/DOWNGRADE processing. */ if (NULL == bml_lcl_buff) bml_lcl_buff = malloc(BM_SIZE(BLKS_PER_LMAP)); memcpy(bml_lcl_buff, (blk_hdr_ptr_t)bml_sm_buff, BM_SIZE(BLKS_PER_LMAP)); if (FALSE == cert_blk(reg, curbmp, (blk_hdr_ptr_t)bml_lcl_buff, 0, FALSE)) { /* certify the block while holding crit as cert_blk uses fields from file-header (shared memory) */ assert(FALSE); /* in pro, skip upgrading/downgrading all blks in this unreliable local bitmap */ rel_crit(reg); util_out_print("Region !AD : Bitmap Block [0x!XL] has integrity errors. Skipping this bitmap.", TRUE, REG_LEN_STR(reg), curbmp); status1 = ERR_MUNOFINISH; continue; } rel_crit(reg); /* ------------------------------------------------------------------------ * Upgrade/Downgrade all BUSY blocks in the current bitmap * ------------------------------------------------------------------------ */ curblk = (curbmp == start_bmp) ? start_blk : curbmp; mapsize = (curbmp == last_bmp) ? 
(stop_blk - curbmp) : BLKS_PER_LMAP; assert(0 != mapsize); assert(mapsize <= BLKS_PER_LMAP); db_got_to_v5_once = csd->db_got_to_v5_once; for (lcnt = curblk - curbmp; lcnt < mapsize; lcnt++, curblk++) { if (mu_ctrly_occurred || mu_ctrlc_occurred) { status1 = ERR_MUNOFINISH; goto stop_reorg_on_this_reg; /* goto needed because of nested FOR Loop */ } GET_BM_STATUS(bml_lcl_buff, lcnt, bml_status); assert(BLK_MAPINVALID != bml_status); /* cert_blk ran clean so we dont expect invalid entries */ if (BLK_FREE == bml_status) { reorg_stats.blks_skipped_free++; continue; } /* MUPIP REORG UPGRADE/DOWNGRADE will convert USED & RECYCLED blocks */ if (db_got_to_v5_once || (BLK_RECYCLED != bml_status)) { /* Do NOT read recycled V4 block from disk unless it is guaranteed NOT to be too full */ if (lcnt) { /* non-bitmap block */ /* read in block from disk into private buffer. dont pollute the cache yet */ if (NULL == bptr) bptr = (sm_uc_ptr_t)malloc(blk_size); status1 = dsk_read(curblk, bptr, &ondsk_blkver, FALSE); /* dsk_read on curblk could return an error (DYNUPGRDFAIL) if curblk needs to be * upgraded and if its block size was too big to allow the extra block-header space * requirements for a dynamic upgrade. a MUPIP REORG DOWNGRADE should not error out * in that case as the block is already in the downgraded format. */ if (SS_NORMAL != status1) { if (!upgrade && (ERR_DYNUPGRDFAIL == status1)) { assert(GDSV4 == new_db_format); ondsk_blkver = new_db_format; } else { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(reg), status1); util_out_print("Region !AD : Error occurred while reading block " "[0x!XL]", TRUE, REG_LEN_STR(reg), curblk); status1 = ERR_MUNOFINISH; goto stop_reorg_on_this_reg;/* goto needed due to nested FOR Loop */ } } reorg_stats.blks_read_from_disk_nonbmp++; } /* else bitmap block has been read in crit earlier and ondsk_blkver appropriately set */ if (new_db_format == ondsk_blkver) { assert((SS_NORMAL == status1) || (!upgrade && (ERR_DYNUPGRDFAIL == status1))); status1 = SS_NORMAL; /* treat DYNUPGRDFAIL as no error in case of downgrade */ reorg_stats.blks_skipped_newfmtindisk++; continue; /* current disk version is identical to what is desired */ } assert(SS_NORMAL == status1); } /* Begin non-TP transaction to upgrade/downgrade the block. * The way we do that is by updating the block using a null update array. * Any update to a block will trigger an automatic upgrade/downgrade of the block based on * the current fileheader desired_db_format setting and we use that here. */ t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK); for (; ;) { CHECK_AND_RESET_UPDATE_ARRAY; /* reset update_array_ptr to update_array */ curr_tn = csd->trans_hist.curr_tn; db_got_to_v5_once = csd->db_got_to_v5_once; if (db_got_to_v5_once || (BLK_RECYCLED != bml_status)) { blkhist->cse = NULL; /* start afresh (do not use value from previous retry) */ blkBase = t_qread(curblk, (sm_int_ptr_t)&blkhist->cycle, &blkhist->cr); if (NULL == blkBase) { t_retry((enum cdb_sc)rdfail_detail); continue; } blkhist->blk_num = curblk; blkhist->buffaddr = blkBase; ondsk_blkver = blkhist->cr->ondsk_blkver; new_hdr = *(blk_hdr_ptr_t)blkBase; mu_reorg_upgrd_dwngrd_blktn = new_hdr.tn; mark_blk_free = FALSE; inctn_opcode = upgrade ? inctn_blkupgrd : inctn_blkdwngrd; } else { mark_blk_free = TRUE; inctn_opcode = inctn_blkmarkfree; } inctn_detail.blknum_struct.blknum = curblk; /* t_end assumes that the history it is passed does not contain a bitmap block. 
* for bitmap block, the history validation information is passed through cse instead. * therefore we need to handle bitmap and non-bitmap cases separately. */ if (!lcnt) { /* Means a bitmap block. * At this point we can do a "new_db_format != ondsk_blkver" check to determine * if the block got converted since we did the dsk_read (see the non-bitmap case * for a similar check done there), but in that case we will have a transaction * which has read 1 bitmap block and is updating no block. "t_end" currently cannot * handle this case as it expects any bitmap block that needs validation to also * have a corresponding cse which will hold its history. Hence we avoid doing the * new_db_format check. The only disadvantage of this is that we will end up * modifying the bitmap block as part of this transaction (in an attempt to convert * its ondsk_blkver) even though it is already in the right format. Since this * overhead is going to be one per bitmap block and since the block is in the cache * at this point, we should not lose much. */ assert(!mark_blk_free); BLK_ADDR(blkid_ptr, SIZEOF(block_id), block_id); *blkid_ptr = 0; t_write_map(blkhist, (unsigned char *)blkid_ptr, curr_tn, 0); assert(&alt_hist.h[0] == blkhist); alt_hist.h[0].blk_num = 0; /* create empty history for bitmap block */ assert(update_trans); } else { /* non-bitmap block. fill in history for validation in t_end */ assert(curblk); /* we should never come here for block 0 (bitmap) */ if (!mark_blk_free) { assert(blkhist->blk_num == curblk); assert(blkhist->buffaddr == blkBase); blkhist->tn = curr_tn; alt_hist.h[1].blk_num = 0; } /* Also need to pass the bitmap as history to detect if any concurrent M-kill * is freeing up the same USED block that we are trying to convert OR if any * concurrent M-set is reusing the same RECYCLED block that we are trying to * convert. Because of t_end currently not being able to validate a bitmap * without that simultaneously having a cse, we need to create a cse for the * bitmap that is used only for bitmap history validation, but should not be * used to update the contents of the bitmap block in bg_update. */ bmlhist.buffaddr = t_qread(curbmp, (sm_int_ptr_t)&bmlhist.cycle, &bmlhist.cr); if (NULL == bmlhist.buffaddr) { t_retry((enum cdb_sc)rdfail_detail); continue; } bmlhist.blk_num = curbmp; bmlhist.tn = curr_tn; GET_BM_STATUS(bmlhist.buffaddr, lcnt, bml_status); if (BLK_MAPINVALID == bml_status) { t_retry(cdb_sc_lostbmlcr); continue; } if (!mark_blk_free) { if ((new_db_format != ondsk_blkver) && (BLK_FREE != bml_status)) { /* block still needs to be converted. create cse */ BLK_INIT(bs_ptr, bs1); BLK_SEG(bs_ptr, blkBase + SIZEOF(new_hdr), new_hdr.bsiz - SIZEOF(new_hdr)); BLK_FINI(bs_ptr, bs1); t_write(blkhist, (unsigned char *)bs1, 0, 0, ((blk_hdr_ptr_t)blkBase)->levl, FALSE, FALSE, GDS_WRITE_PLAIN); /* The directory tree status for now is only used to determine * whether writing the block to snapshot file (see t_end_sysops.c). * For reorg upgrade/downgrade process, the block is updated in a * sequential way without changing the gv_target. In this case, we * assume the block is in directory tree so as to have it written to * the snapshot file. 
BIT_SET_DIR_TREE(cw_set[cw_set_depth-1].blk_prior_state); /* reset update_trans in case previous retry had set it to 0 */ update_trans = UPDTRNS_DB_UPDATED_MASK; if (BLK_RECYCLED == bml_status) { /* If the block that we are upgrading is RECYCLED, indicate to * bg_update that blks_to_upgrd counter should NOT be * touched in this case by setting "mode" to a special value */ assert(cw_set[cw_set_depth-1].mode == gds_t_write); cw_set[cw_set_depth-1].mode = gds_t_write_recycled; /* we SET block as NOT RECYCLED, otherwise, the mm_update() * or bg_update_phase2 may skip writing it to snapshot file * when its level is 0 */ BIT_CLEAR_RECYCLED(cw_set[cw_set_depth-1].blk_prior_state); } } else { /* Block got converted by another process since we did the dsk_read, * or this block became marked free in the bitmap. * No need to update this block. Just call t_end for validation of * both the non-bitmap block as well as the bitmap block. * Note down that this transaction is no longer updating any blocks. */ update_trans = 0; } /* Need to put bit maps on the end of the cw set for concurrency checking. * We want to simulate t_write_map, except we want to update "cw_map_depth" * instead of "cw_set_depth". Hence the save and restore logic below. * This part of the code is similar to the one in mu_swap_blk.c */ save_cw_set_depth = cw_set_depth; assert(!cw_map_depth); t_write_map(&bmlhist, NULL, curr_tn, 0); /* will increment cw_set_depth */ cw_map_depth = cw_set_depth; /* set cw_map_depth to latest cw_set_depth */ cw_set_depth = save_cw_set_depth;/* restore cw_set_depth */ /* t_write_map simulation end */ } else { if (BLK_RECYCLED != bml_status) { /* Block was RECYCLED at beginning but no longer so. Retry */ t_retry(cdb_sc_bmlmod); continue; } /* Mark recycled block as FREE in bitmap */ assert(lcnt == (curblk - curbmp)); assert(update_array_ptr == update_array); *((block_id *)update_array_ptr) = lcnt; update_array_ptr += SIZEOF(block_id); /* the following assumes SIZEOF(block_id) == SIZEOF(int) */ assert(SIZEOF(block_id) == SIZEOF(int)); *(int *)update_array_ptr = 0; t_write_map(&bmlhist, (unsigned char *)update_array, curr_tn, 0); update_trans = UPDTRNS_DB_UPDATED_MASK; } } assert(SIZEOF(lcl_update_trans) == SIZEOF(update_trans)); lcl_update_trans = update_trans; /* take a copy before t_end modifies it */ if ((trans_num)0 != t_end(&alt_hist, NULL, TN_NOT_SPECIFIED)) { /* In case this is MM and t_end() remapped an extended database, reset csd */ assert(csd == cs_data); if (!lcl_update_trans) { assert(lcnt); assert(!mark_blk_free); assert((new_db_format == ondsk_blkver) || (BLK_BUSY != bml_status)); if (BLK_BUSY != bml_status) reorg_stats.blks_skipped_free++; else reorg_stats.blks_skipped_newfmtincache++; } else if (!lcnt) reorg_stats.blks_converted_bmp++; else reorg_stats.blks_converted_nonbmp++; break; } assert(csd == cs_data); } } } stop_reorg_on_this_reg: /* even though ctrl-c occurred, update file-header fields to store reorg's progress before exiting */ grab_crit(reg); blocks_left = 0; assert(csd->trans_hist.total_blks >= csd->blks_to_upgrd); actual_blks2upgrd = csd->blks_to_upgrd; total_blks = csd->trans_hist.total_blks; free_blks = csd->trans_hist.free_blocks; /* Care should be taken not to set "csd->reorg_upgrd_dwngrd_restart_block" in case of a concurrent db fmt * change. This is because let us say we are doing REORG UPGRADE. 
A concurrent REORG DOWNGRADE would * have reset "csd->reorg_upgrd_dwngrd_restart_block" field to 0 and if that reorg is interrupted by a * Ctrl-C (before this reorg came here) it would have updated "csd->reorg_upgrd_dwngrd_restart_block" to * a non-zero value indicating how many blocks from 0 have been downgraded. We should not reset this * field to "curblk" as it will be mis-interpreted as the number of blocks that have been DOWNgraded. */ set_fully_upgraded = FALSE; if (mu_reorg_upgrd_dwngrd_start_tn != csd->desired_db_format_tn) { /* csd->desired_db_format changed since reorg started. discontinue the reorg */ util_out_print("Region !AD : Desired DB Format changed during REORG. Stopping REORG.", TRUE, REG_LEN_STR(reg)); status1 = ERR_MUNOFINISH; } else if (reorg_entiredb) { /* Change "csd->reorg_upgrd_dwngrd_restart_block" only if STARTBLK or STOPBLK was NOT specified */ assert(csd->reorg_upgrd_dwngrd_restart_block <= curblk); csd->reorg_upgrd_dwngrd_restart_block = curblk; /* blocks lesser than this have been upgraded/downgraded */ expected_blks2upgrd = upgrade ? 0 : (total_blks - free_blks); blocks_left = upgrade ? actual_blks2upgrd : (expected_blks2upgrd - actual_blks2upgrd); /* If this reorg command went through all blocks in the database, then it should have * correctly concluded at this point whether the reorg is complete or not. * If this reorg command started from where a previous incomplete reorg left * (i.e. first_reorg_in_this_db_fmt is FALSE), it cannot determine if the initial * GDS blocks that it skipped are completely {up,down}graded or not. */ assert((0 == blocks_left) || (SS_NORMAL != status1) || !first_reorg_in_this_db_fmt); /* If this is a MUPIP REORG UPGRADE that did go through every block in the database (indicated by * "reorg_entiredb" && "first_reorg_in_this_db_fmt") and the current count of "blks_to_upgrd" is * 0 in the file-header and the desired_db_format did not change since the start of the REORG, * we can be sure that the entire database has been upgraded. Set "csd->fully_upgraded" to TRUE. 
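            /* Illustrative sketch (not compiled): the "blocks_left" bookkeeping above, with made-up
             * sample numbers. "blks_to_upgrd" counts blocks still in the old format, so for an UPGRADE
             * the counter itself is the remaining work, while for a DOWNGRADE the remaining work is
             * every busy block NOT yet counted there.
             */
#if 0
            {
                int4 s_total_blks = 1000, s_free_blks = 200, s_blks_to_upgrd = 50; /* sample values only */
                int4 s_expected, s_left;

                /* UPGRADE: the 50 old-format blocks still counted are exactly what remains */
                s_left = s_blks_to_upgrd;
                /* DOWNGRADE: a fully downgraded database would count all 800 busy blocks,
                 * so 800 - 50 = 750 blocks remain to be downgraded
                 */
                s_expected = s_total_blks - s_free_blks;
                s_left = s_expected - s_blks_to_upgrd;
            }
#endif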
            /* If this is a MUPIP REORG UPGRADE that did go through every block in the database (indicated
             * by "reorg_entiredb" && "first_reorg_in_this_db_fmt") and the current count of "blks_to_upgrd"
             * is 0 in the file-header and the desired_db_format did not change since the start of the
             * REORG, we can be sure that the entire database has been upgraded. Set "csd->fully_upgraded"
             * to TRUE.
             */
            if ((SS_NORMAL == status1) && first_reorg_in_this_db_fmt && upgrade && (0 == actual_blks2upgrd))
            {
                csd->fully_upgraded = TRUE;
                csd->db_got_to_v5_once = TRUE;
                set_fully_upgraded = TRUE;
            }
            /* flush all changes noted down in the file-header */
            if (!wcs_flu(WCSFLU_FLUSH_HDR)) /* wcs_flu assumes gv_cur_region is set (which it is in this routine) */
            {
                gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4,
                    LEN_AND_LIT("MUPIP REORG UPGRADE/DOWNGRADE"), DB_LEN_STR(reg));
                status = ERR_MUNOFINISH;
                rel_crit(reg);
                continue;
            }
        }
        curr_tn = csd->trans_hist.curr_tn;
        rel_crit(reg);
        util_out_print("Region !AD : Stopped processing at block number [0x!XL]", TRUE, REG_LEN_STR(reg), curblk);
        /* Print statistics */
        util_out_print("Region !AD : Statistics : Blocks Read From Disk (Bitmap) : 0x!XL",
            TRUE, REG_LEN_STR(reg), reorg_stats.blks_read_from_disk_bmp);
        util_out_print("Region !AD : Statistics : Blocks Skipped (Free) : 0x!XL",
            TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_free);
        util_out_print("Region !AD : Statistics : Blocks Read From Disk (Non-Bitmap) : 0x!XL",
            TRUE, REG_LEN_STR(reg), reorg_stats.blks_read_from_disk_nonbmp);
        util_out_print("Region !AD : Statistics : Blocks Skipped (new fmt in disk) : 0x!XL",
            TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_newfmtindisk);
        util_out_print("Region !AD : Statistics : Blocks Skipped (new fmt in cache) : 0x!XL",
            TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_newfmtincache);
        util_out_print("Region !AD : Statistics : Blocks Converted (Bitmap) : 0x!XL",
            TRUE, REG_LEN_STR(reg), reorg_stats.blks_converted_bmp);
        util_out_print("Region !AD : Statistics : Blocks Converted (Non-Bitmap) : 0x!XL",
            TRUE, REG_LEN_STR(reg), reorg_stats.blks_converted_nonbmp);
        if (reorg_entiredb && (SS_NORMAL == status1) && (0 != blocks_left))
        {   /* file-header counter does not match what reorg on the entire database expected to see */
            gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBBTUWRNG, 2, expected_blks2upgrd, actual_blks2upgrd);
            util_out_print("Region !AD : Run MUPIP INTEG (without FAST qualifier) to fix the counter",
                TRUE, REG_LEN_STR(reg));
            status1 = ERR_MUNOFINISH;
        } else
            util_out_print("Region !AD : Total Blocks = [0x!XL] : Free Blocks = [0x!XL] : "
                "Blocks to upgrade = [0x!XL]", TRUE, REG_LEN_STR(reg), total_blks, free_blks, actual_blks2upgrd);
        /* Issue success or failure message for this region */
        if (SS_NORMAL == status1)
        {   /* issue success only if REORG did not encounter any error in its processing */
            if (set_fully_upgraded)
                util_out_print("Region !AD : Database is now FULLY UPGRADED", TRUE, REG_LEN_STR(reg));
            util_out_print("Region !AD : MUPIP REORG !AD finished!/", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
            send_msg_csa(CSA_ARG(csa) VARLSTCNT(7) ERR_MUREUPDWNGRDEND, 5, REG_LEN_STR(reg),
                process_id, process_id, &curr_tn);
        } else
        {
            assert(ERR_MUNOFINISH == status1);
            assert((SS_NORMAL == status) || (ERR_MUNOFINISH == status));
            util_out_print("Region !AD : MUPIP REORG !AD incomplete. See above messages.!/",
                TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
            status = status1;
        }
    }
    if (NULL != bptr)
        free(bptr);
    if (NULL != bml_lcl_buff)
        free(bml_lcl_buff);
    if (mu_ctrly_occurred || mu_ctrlc_occurred)
    {
        gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_REORGCTRLY);
        status = ERR_MUNOFINISH;
    }
    mupip_exit(status);
}
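/* Illustrative sketch (not compiled): the "t_write_map simulation" idiom used above. As the comments
 * there note, t_write_map appends a cse for the bitmap and increments cw_set_depth; to file that cse
 * under cw_map_depth instead (validation-only, so bg_update never treats it as a content update), the
 * caller brackets the call with a save/restore of cw_set_depth. The function wrapper and the local's
 * type below are hypothetical simplifications, not the real declarations.
 */
#if 0
static void file_bitmap_cse_for_validation_only(srch_blk_status *bmlhist, trans_num curr_tn)
{
    unsigned int save_cw_set_depth; /* hypothetical stand-in; mirrors the local used above */

    save_cw_set_depth = cw_set_depth;       /* remember where the real update cses end */
    assert(!cw_map_depth);
    t_write_map(bmlhist, NULL, curr_tn, 0); /* appends the bitmap cse, increments cw_set_depth */
    cw_map_depth = cw_set_depth;            /* bitmap cse is now reachable only through cw_map_depth */
    cw_set_depth = save_cw_set_depth;       /* restore, so update processing stops before the bitmap cse */
}
#endif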
/****************************************************************
    Input Parameters:
        gn = global name
        exclude_glist_ptr = list of globals specified with the EXCLUDE option
        index_fill_factor = index blocks' fill factor
        data_fill_factor = data blocks' fill factor
    Input/Output Parameters:
        resume = resume flag
        reorg_op = which operations to do (coalesce, swap, or split)
            [default is all] [only for debugging]
 ****************************************************************/
boolean_t mu_reorg(mval *gn, glist *exclude_glist_ptr, boolean_t *resume, int index_fill_factor,
    int data_fill_factor, int reorg_op)
{
    boolean_t end_of_tree = FALSE, complete_merge, detailed_log;
    int rec_size;
    /* "level" is the level of the working block.
     * "pre_order_successor_level" is the level of the pre-order successor, except during a left-most
     * descent of the tree, in which case it is the maximum height of that subtree until we reach the
     * leaf level block. In other words, the pre_order_successor_level and level variables control the
     * iterative pre-order traversal. We reorg from level (root_level - 1) down to 0, that is,
     * level = pre_order_successor_level:-1:0.
     */
    int pre_order_successor_level, level;
    static block_id dest_blk_id = 0;
    int tkeysize;
    int blks_killed, blks_processed, blks_reused, blks_coalesced, blks_split, blks_swapped,
        count, file_extended, lvls_reduced;
    int d_max_fill, i_max_fill, blk_size, cur_blk_size, max_fill, toler, d_toler, i_toler;
    int cnt1, cnt2;
    kill_set kill_set_list;
    sm_uc_ptr_t rPtr1;
    enum cdb_sc status;
    srch_hist *rtsib_hist;
    jnl_buffer_ptr_t jbp;
    trans_num ret_tn;
    error_def(ERR_MUREORGFAIL);
    error_def(ERR_DBRDONLY);
    error_def(ERR_GBLNOEXIST);
    error_def(ERR_MAXBTLEVEL);

    t_err = ERR_MUREORGFAIL;
    kill_set_tail = &kill_set_list;
    /* Initialization for the current global */
    op_gvname(VARLSTCNT(1) gn);
    /* Cannot proceed for read-only data files */
    if (gv_cur_region->read_only)
    {
        gtm_putmsg(VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region));
        return FALSE;
    }
    dest_blk_id = cs_addrs->reorg_last_dest;
    inctn_opcode = inctn_mu_reorg;
    /* If the resume option is present, reorg_restart_key should not be null.
     * Skip all globals until we are in the region for that global.
     * Get the reorg_restart_key and reorg_restart_block from the database header and restart from there.
     */
    if (*resume && 0 != cs_data->reorg_restart_key[0])
    {   /* resume from the last key reorged in the GVT */
        GET_KEY_LEN(tkeysize, &cs_data->reorg_restart_key[0]);
        memcpy(gv_currkey->base, cs_data->reorg_restart_key, tkeysize);
        gv_currkey->end = tkeysize - 1;
        dest_blk_id = cs_data->reorg_restart_block;
        if (0 == memcmp(cs_data->reorg_restart_key, gn->str.addr, gn->str.len))
            /* we are resuming from the current global, so clear the resume flag */
            *resume = FALSE;
    } else
    {   /* start from the leftmost leaf */
        memcpy(&gv_currkey->base[0], gn->str.addr, gn->str.len);
        gv_currkey->base[gn->str.len] = gv_currkey->base[gn->str.len + 1] = 0;
        gv_currkey->end = gn->str.len + 1;
    }
    if (*resume)
    {
        util_out_print("REORG cannot be resumed from this point; skipping this global...", FLUSH);
        memcpy(&gv_currkey->base[0], gn->str.addr, gn->str.len);
        gv_currkey->base[gn->str.len] = gv_currkey->base[gn->str.len + 1] = 0;
        gv_currkey->end = gn->str.len + 1;
        return TRUE;
    }
    memcpy(&gv_currkey_next_reorg->base[0], &gv_currkey->base[0], gv_currkey->end + 1);
    gv_currkey_next_reorg->end = gv_currkey->end;
    if (2 > dest_blk_id)
        dest_blk_id = 2; /* the first block is a bitmap and the next one is the directory tree root */
    file_extended = cs_data->trans_hist.total_blks;
    blk_size = cs_data->blk_size;
    d_max_fill = (double)data_fill_factor * blk_size / 100.0 - cs_data->reserved_bytes;
    i_max_fill = (double)index_fill_factor * blk_size / 100.0 - cs_data->reserved_bytes;
    d_toler = (double)DATA_FILL_TOLERANCE * blk_size / 100.0;
    i_toler = (double)INDEX_FILL_TOLERANCE * blk_size / 100.0;
    blks_killed = blks_processed = blks_reused = lvls_reduced = blks_coalesced = blks_split = blks_swapped = 0;
    pre_order_successor_level = level = MAX_BT_DEPTH + 1; /* just some high value to initialize */
    /* --- more detailed debugging information --- */
    if (detailed_log = reorg_op & DETAIL) /* WARNING: assignment */
        util_out_print("STARTING to work on global ^!AD from region !AD", TRUE,
            gn->str.len, gn->str.addr, REG_LEN_STR(gv_cur_region));
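    /* Illustrative worked example of the fill-factor arithmetic above, with made-up sample numbers
     * (the values here are assumptions for illustration, not defaults). With blk_size = 4096,
     * data_fill_factor = 80 and reserved_bytes = 0, d_max_fill = 80 * 4096 / 100 - 0 = 3276. If
     * DATA_FILL_TOLERANCE were 10 (sample value only), d_toler = 409, so a level-0 block is split
     * when its bsiz exceeds 3276 + 409 = 3685 and is considered for coalescing when its bsiz is
     * below 3276 - 409 = 2867. Blocks whose size stays inside the tolerance band are left alone,
     * which keeps REORG from oscillating between splitting and coalescing the same block.
     */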
    /* In each iteration of the MAIN loop, a working block is processed for a GVT */
    for (; ;) /* ================ START MAIN LOOP ================ */
    {   /* If the right sibling is completely merged with the working block, do not swap the working
         * block with its final destination block; continue trying the next right sibling. Swap only
         * at the end.
         */
        complete_merge = TRUE;
        while (complete_merge) /* === START WHILE COMPLETE_MERGE === */
        {
            if (mu_ctrlc_occurred || mu_ctrly_occurred)
            {
                cs_data->reorg_restart_block = dest_blk_id;
                memcpy(&cs_data->reorg_restart_key[0], &gv_currkey->base[0], gv_currkey->end + 1);
                return FALSE;
            }
            complete_merge = FALSE;
            blks_processed++;
            t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
            /* Following for loop is to handle concurrency retries for split/coalesce */
            for (; ;) /* === SPLIT-COALESCE LOOP STARTS === */
            {
                gv_target->clue.end = 0;
                /* search gv_currkey and get the result in gv_target */
                if ((status = gvcst_search(gv_currkey, NULL)) != cdb_sc_normal) /* WARNING: assignment */
                {
                    assert(CDB_STAGNATE > t_tries);
                    t_retry(status);
                    continue;
                } else if (gv_currkey->end + 1 != gv_target->hist.h[0].curr_rec.match)
                {
                    if (SIZEOF(blk_hdr) == ((blk_hdr_ptr_t)gv_target->hist.h[0].buffaddr)->bsiz
                        && 1 == gv_target->hist.depth)
                    {
                        if (cs_addrs->now_crit)
                        {
                            t_abort(gv_cur_region, cs_addrs); /* do crit and other cleanup */
                            gtm_putmsg(VARLSTCNT(4) ERR_GBLNOEXIST, 2, gn->str.len, gn->str.addr);
                            reorg_finish(dest_blk_id, blks_processed, blks_killed, blks_reused,
                                file_extended, lvls_reduced, blks_coalesced, blks_split, blks_swapped);
                            return TRUE; /* it is not an error that the global was killed */
                        } else
                        {
                            assert(CDB_STAGNATE > t_tries);
                            t_retry(status);
                            continue;
                        }
                    }
                }
                if (gv_target->hist.depth <= level)
                {   /* We come here either on
                     * 1) the first iteration of the for loop (since level == MAX_BT_DEPTH + 1), or
                     * 2) a decrease in tree depth due to mu_reduce_level or an M-kill.
                     */
                    pre_order_successor_level = gv_target->hist.depth - 1;
                    if (MAX_BT_DEPTH + 1 != level)
                    {   /* break the loop when the tree depth decreased (case 2) */
                        level = pre_order_successor_level;
                        break;
                    }
                    level = pre_order_successor_level;
                }
                max_fill = (0 == level) ? d_max_fill : i_max_fill;
                toler = (0 == level) ? d_toler : i_toler;
                cur_blk_size = ((blk_hdr_ptr_t)(gv_target->hist.h[level].buffaddr))->bsiz;
                if (cur_blk_size > max_fill + toler && 0 == (reorg_op & NOSPLIT)) /* SPLIT BLOCK */
                {
                    cnt1 = cnt2 = 0;
                    /* history of the current working block is in gv_target */
                    status = mu_split(level, i_max_fill, d_max_fill, &cnt1, &cnt2);
                    if (cdb_sc_maxlvl == status)
                    {
                        gtm_putmsg(VARLSTCNT(4) ERR_MAXBTLEVEL, 2, gn->str.len, gn->str.addr);
                        reorg_finish(dest_blk_id, blks_processed, blks_killed, blks_reused,
                            file_extended, lvls_reduced, blks_coalesced, blks_split, blks_swapped);
                        return FALSE;
                    } else if (cdb_sc_normal == status)
                    {
                        if ((trans_num)0 == (ret_tn = t_end(&(gv_target->hist), NULL, TN_NOT_SPECIFIED)))
                        {
                            need_kip_incr = FALSE;
                            continue;
                        }
                        if (detailed_log)
                            log_detailed_log("SPL", &(gv_target->hist), NULL, level, NULL, ret_tn);
                        blks_reused += cnt1;
                        lvls_reduced -= cnt2;
                        blks_split++;
                        break;
                    } else if (cdb_sc_oprnotneeded == status)
                    {   /* undo any update_array/cw_set changes and DROP THRU to mu_clsce */
                        cw_set_depth = 0;
                        CHECK_AND_RESET_UPDATE_ARRAY; /* reset update_array_ptr to update_array */
                        assert(0 == cw_map_depth); /* mu_swap_blk (which changes cw_map_depth) comes later */
                    } else
                    {
                        assert(CDB_STAGNATE > t_tries);
                        t_retry(status);
                        continue;
                    }
                } /* end if SPLIT BLOCK */
                /* We are here because mu_split() was not called, or the split was not done or not required */
                rtsib_hist = gv_target->alt_hist;
                status = gvcst_rtsib(rtsib_hist, level);
                if (cdb_sc_normal != status && cdb_sc_endtree != status)
                {
                    assert(CDB_STAGNATE > t_tries);
                    t_retry(status);
                    continue;
                }
                if (cdb_sc_endtree == status)
                {
                    if (0 == level)
                        end_of_tree = TRUE;
                    break;
                } else if (0 == level)
                    pre_order_successor_level = rtsib_hist->depth - 1;
                /* COALESCE WITH RTSIB */
                kill_set_list.used = 0;
                if (cur_blk_size < max_fill - toler && 0 == (reorg_op & NOCOALESCE))
                {   /* histories are sent in &gv_target->hist and gv_target->alt_hist */
                    status = mu_clsce(level, i_max_fill, d_max_fill, &kill_set_list, &complete_merge);
                    if (cdb_sc_normal == status)
                    {
                        if (level) /* delete the lower elements of the array; they might confuse t_end */
                        {
                            memmove(&rtsib_hist->h[0], &rtsib_hist->h[level],
                                SIZEOF(srch_blk_status) * (rtsib_hist->depth - level + 2));
                            rtsib_hist->depth = rtsib_hist->depth - level;
                        }
                        if (0 < kill_set_list.used) /* increase kill_in_prog */
                        {
                            need_kip_incr = TRUE;
                            if (!cs_addrs->now_crit) /* do not sleep while holding crit */
                                WAIT_ON_INHIBIT_KILLS(cs_addrs->nl, MAXWAIT2KILL);
                        }
                        if ((trans_num)0 == (ret_tn = t_end(&(gv_target->hist), rtsib_hist, TN_NOT_SPECIFIED)))
                        {
                            need_kip_incr = FALSE;
                            assert(NULL == kip_csa);
                            if (level)
                            {   /* reinitialize the level member in rtsib_hist's srch_blk_status entries */
                                for (count = 0; count < MAX_BT_DEPTH; count++)
                                    rtsib_hist->h[count].level = count;
                            }
                            continue;
                        }
                        if (level)
                        {   /* reinitialize the level member in rtsib_hist's srch_blk_status entries */
                            for (count = 0; count < MAX_BT_DEPTH; count++)
                                rtsib_hist->h[count].level = count;
                        }
                        if (detailed_log)
                            log_detailed_log("CLS", &(gv_target->hist), rtsib_hist, level, NULL, ret_tn);
                        assert(0 < kill_set_list.used || (NULL == kip_csa));
                        if (0 < kill_set_list.used) /* decrease kill_in_prog */
                        {
                            gvcst_kill_sort(&kill_set_list);
                            GVCST_BMP_MARK_FREE(&kill_set_list, ret_tn, inctn_mu_reorg,
                                inctn_bmp_mark_free_mu_reorg, inctn_opcode, cs_addrs)
                            DECR_KIP(cs_data, cs_addrs, kip_csa);
                            if (detailed_log)
                                log_detailed_log("KIL", &(gv_target->hist), NULL, level,
                                    &kill_set_list, ret_tn);
                            blks_killed += kill_set_list.used;
                        }
                        blks_coalesced++;
                        break;
                    } else if (cdb_sc_oprnotneeded == status)
                    {   /* undo any update_array/cw_set changes and DROP THRU to t_end */
                        cw_set_depth = 0;
                        CHECK_AND_RESET_UPDATE_ARRAY; /* reset update_array_ptr to update_array */
                        assert(0 == cw_map_depth); /* mu_swap_blk (which changes cw_map_depth) comes later */
                    } else
                    {
                        assert(CDB_STAGNATE > t_tries);
                        t_retry(status);
                        continue;
                    }
                } /* end if try coalesce */
                if (0 == level)
                {   /* Note, at the data block level:
                     * - if the split is successful, or the coalesce is successful without a complete
                     *   merge of rtsib, then gv_currkey_next_reorg was already set by the called function;
                     * - if the split or coalesce does a retry, or the coalesce is successful with a
                     *   complete merge, then gv_currkey will not be changed;
                     * - if the split or coalesce is not successful or not needed, then
                     *   gv_currkey_next_reorg is set here from the right sibling.
                     */
                    cw_set_depth = cw_map_depth = 0;
                    GET_KEY_LEN(tkeysize, rtsib_hist->h[0].buffaddr + SIZEOF(blk_hdr) + SIZEOF(rec_hdr));
                    if (2 < tkeysize && MAX_KEY_SZ >= tkeysize)
                    {
                        memcpy(&(gv_currkey_next_reorg->base[0]),
                            rtsib_hist->h[0].buffaddr + SIZEOF(blk_hdr) + SIZEOF(rec_hdr), tkeysize);
                        gv_currkey_next_reorg->end = tkeysize - 1;
                        inctn_opcode = inctn_invalid_op; /* temporary reset; satisfy an assert in t_end() */
                        assert(UPDTRNS_DB_UPDATED_MASK == update_trans);
                        update_trans = 0; /* tell t_end this is no longer an update transaction */
                        if ((trans_num)0 == (ret_tn = t_end(rtsib_hist, NULL, TN_NOT_SPECIFIED)))
                        {
                            need_kip_incr = FALSE;
                            inctn_opcode = inctn_mu_reorg; /* reset inctn_opcode to its default */
                            update_trans = UPDTRNS_DB_UPDATED_MASK; /* reset update_trans to its old value */
                            assert(NULL == kip_csa);
                            continue;
                        }
                        /* There is no need to reset update_trans in case of a successful "t_end" call,
                         * because before the next call to "t_end" we should have a call to "t_begin"
                         * which will reset update_trans anyway.
                         */
                        inctn_opcode = inctn_mu_reorg; /* reset inctn_opcode to its default */
                        if (detailed_log)
                            log_detailed_log("NOU", rtsib_hist, NULL, level, NULL, ret_tn);
                    } else
                    {
                        assert(CDB_STAGNATE > t_tries);
                        t_retry(status);
                        continue;
                    }
                } /* end if (0 == level) */
                break;
            } /* === SPLIT-COALESCE LOOP END === */
            t_abort(gv_cur_region, cs_addrs); /* do crit and other cleanup */
        } /* === END WHILE COMPLETE_MERGE === */
        if (mu_ctrlc_occurred || mu_ctrly_occurred)
        {
            cs_data->reorg_restart_block = dest_blk_id;
            memcpy(&cs_data->reorg_restart_key[0], &gv_currkey->base[0], gv_currkey->end + 1);
            return FALSE;
        }
        /* Now swap the working block */
        if (0 == (reorg_op & NOSWAP))
        {
            t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
            /* Following loop is to handle concurrency retries for the swap */
            for (; ;) /* === START OF SWAP LOOP === */
            {
                kill_set_list.used = 0;
                gv_target->clue.end = 0;
                /* search gv_currkey and get the result in gv_target */
                if ((status = gvcst_search(gv_currkey, NULL)) != cdb_sc_normal) /* WARNING: assignment */
                {
                    assert(CDB_STAGNATE > t_tries);
                    t_retry(status);
                    continue;
                } else if (gv_currkey->end + 1 != gv_target->hist.h[0].curr_rec.match)
                {
                    if (SIZEOF(blk_hdr) == ((blk_hdr_ptr_t)gv_target->hist.h[0].buffaddr)->bsiz
                        && 1 == gv_target->hist.depth)
                    {
                        if (cs_addrs->now_crit)
                        {
                            t_abort(gv_cur_region, cs_addrs); /* do crit and other cleanup */
                            gtm_putmsg(VARLSTCNT(4) ERR_GBLNOEXIST, 2, gn->str.len, gn->str.addr);
                            reorg_finish(dest_blk_id, blks_processed, blks_killed, blks_reused,
                                file_extended, lvls_reduced, blks_coalesced, blks_split, blks_swapped);
                            return TRUE; /* it is not an error that the global was killed */
                        } else
                        {
                            assert(CDB_STAGNATE > t_tries);
                            t_retry(status);
                            continue;
                        }
                    }
                }
                if (gv_target->hist.depth <= level)
                    break;
                /* Swap the working block with the appropriate dest_blk_id block.
                 * Histories are sent as gv_target->hist and reorg_gv_target->hist.
                 */
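                /* Illustrative sketch (not compiled) of the defragmentation effect of the swap call
                 * below: blocks are visited in pre-order and each working block is exchanged toward
                 * the next destination slot, so the tree migrates to a contiguous prefix of the file.
                 * The array model and all values here are a hypothetical simplification; the real
                 * mu_swap_blk also honors EXCLUDE lists and the free/busy state from the bitmaps.
                 */
#if 0
                {
                    int file_model[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; /* content tag of each block */
                    int visit[5] = {7, 9, 3, 8, 6}; /* sample pre-order visit order (by content tag) */
                    int dest = 2; /* block 0 is a bitmap, block 1 the directory tree root */
                    int i, j, tmp;

                    for (i = 0; i < 5; i++)
                    {
                        for (j = 0; file_model[j] != visit[i]; j++)
                            ; /* find where the visited block currently lives */
                        tmp = file_model[dest]; /* swap it into the next destination slot */
                        file_model[dest] = file_model[j];
                        file_model[j] = tmp;
                        dest++;
                    }
                    /* file_model is now {0, 1, 7, 9, 3, 8, 6, ...}: the visited blocks occupy a
                     * contiguous prefix in pre-order
                     */
                }
#endif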
                mu_reorg_in_swap_blk = TRUE;
                status = mu_swap_blk(level, &dest_blk_id, &kill_set_list, exclude_glist_ptr);
                mu_reorg_in_swap_blk = FALSE;
                if (cdb_sc_oprnotneeded == status)
                {
                    if (cs_data->trans_hist.total_blks <= dest_blk_id)
                    {
                        util_out_print("REORG may be incomplete for this global.", TRUE);
                        reorg_finish(dest_blk_id, blks_processed, blks_killed, blks_reused,
                            file_extended, lvls_reduced, blks_coalesced, blks_split, blks_swapped);
                        return TRUE;
                    }
                } else if (cdb_sc_normal == status)
                {
                    if (0 < kill_set_list.used)
                    {
                        need_kip_incr = TRUE;
                        if (!cs_addrs->now_crit) /* do not sleep while holding crit */
                            WAIT_ON_INHIBIT_KILLS(cs_addrs->nl, MAXWAIT2KILL);
                        /* a second history is not needed because we are reusing a free block,
                         * which has no history
                         */
                        if ((trans_num)0 == (ret_tn = t_end(&(gv_target->hist), NULL, TN_NOT_SPECIFIED)))
                        {
                            need_kip_incr = FALSE;
                            assert(NULL == kip_csa);
                            DECR_BLK_NUM(dest_blk_id);
                            continue;
                        }
                        if (detailed_log)
                            log_detailed_log("SWA", &(gv_target->hist), NULL, level, NULL, ret_tn);
                        gvcst_kill_sort(&kill_set_list);
                        GVCST_BMP_MARK_FREE(&kill_set_list, ret_tn, inctn_mu_reorg,
                            inctn_bmp_mark_free_mu_reorg, inctn_opcode, cs_addrs)
                        DECR_KIP(cs_data, cs_addrs, kip_csa);
                        if (detailed_log)
                            log_detailed_log("KIL", &(gv_target->hist), NULL, level, &kill_set_list, ret_tn);
                        blks_reused += kill_set_list.used;
                        blks_killed += kill_set_list.used;
                    }
                    /* gv_target->hist is the working block's history and reorg_gv_target->hist is the
                     * destination block's history. Note: gv_target and reorg_gv_target can belong to
                     * different GVTs.
                     */
                    else if ((trans_num)0 == (ret_tn = t_end(&(gv_target->hist), &(reorg_gv_target->hist),
                        TN_NOT_SPECIFIED)))
                    {
                        need_kip_incr = FALSE;
                        assert(NULL == kip_csa);
                        DECR_BLK_NUM(dest_blk_id);
                        continue;
                    }
                    if ((0 >= kill_set_list.used) && detailed_log)
                        log_detailed_log("SWA", &(gv_target->hist), &(reorg_gv_target->hist),
                            level, NULL, ret_tn);
                    blks_swapped++;
                    if (reorg_op & SWAPHIST)
                        util_out_print("Dest !SL From !SL", TRUE, dest_blk_id,
                            gv_target->hist.h[level].blk_num);
                } else
                {
                    assert(CDB_STAGNATE > t_tries);
                    t_retry(status);
                    continue;
                }
                break;
            } /* === END OF SWAP LOOP === */
            t_abort(gv_cur_region, cs_addrs); /* do crit and other cleanup */
        }
        if (mu_ctrlc_occurred || mu_ctrly_occurred)
        {
            cs_data->reorg_restart_block = dest_blk_id;
            memcpy(&cs_data->reorg_restart_key[0], &gv_currkey->base[0], gv_currkey->end + 1);
            return FALSE;
        }
        if (end_of_tree)
            break;
        if (0 < level)
            level--; /* order of reorg is root towards leaf */
        else
        {
            level = pre_order_successor_level;
            memcpy(&gv_currkey->base[0], &gv_currkey_next_reorg->base[0], gv_currkey_next_reorg->end + 1);
            gv_currkey->end = gv_currkey_next_reorg->end;
            cs_data->reorg_restart_block = dest_blk_id;
            memcpy(&cs_data->reorg_restart_key[0], &gv_currkey->base[0], gv_currkey->end + 1);
        }
    } /* ================ END MAIN LOOP ================ */
    /* =========== START REDUCE LEVEL =========== */
    memcpy(&gv_currkey->base[0], gn->str.addr, gn->str.len);
    gv_currkey->base[gn->str.len] = gv_currkey->base[gn->str.len + 1] = 0;
    gv_currkey->end = gn->str.len + 1;
    for (;;) /* reduce level continues until it fails to reduce */
    {
        t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
        cnt1 = 0;
        for (; ;) /* main reduce level loop starts */
        {
            kill_set_list.used = 0;
            gv_target->clue.end = 0;
            /* search gv_currkey and get the result in gv_target */
            if ((status = gvcst_search(gv_currkey, NULL)) != cdb_sc_normal) /* WARNING: assignment */
            {
                assert(CDB_STAGNATE > t_tries);
                t_retry(status);
                continue;
            } else if (gv_currkey->end + 1 != gv_target->hist.h[0].curr_rec.match)
            {
                if (SIZEOF(blk_hdr) == ((blk_hdr_ptr_t)gv_target->hist.h[0].buffaddr)->bsiz
                    && 1 == gv_target->hist.depth)
                {
                    if (cs_addrs->now_crit)
                    {
                        t_abort(gv_cur_region, cs_addrs); /* do crit and other cleanup */
                        gtm_putmsg(VARLSTCNT(4) ERR_GBLNOEXIST, 2, gn->str.len, gn->str.addr);
                        reorg_finish(dest_blk_id, blks_processed, blks_killed, blks_reused,
                            file_extended, lvls_reduced, blks_coalesced, blks_split, blks_swapped);
                        return TRUE; /* it is not an error that the global was killed */
                    } else
                    {
                        assert(CDB_STAGNATE > t_tries);
                        t_retry(status);
                        continue;
                    }
                }
            }
            if (gv_target->hist.depth <= level)
                break;
            /* History is passed in gv_target->hist */
            status = mu_reduce_level(&kill_set_list);
            if (cdb_sc_oprnotneeded != status && cdb_sc_normal != status)
            {
                assert(CDB_STAGNATE > t_tries);
                t_retry(status);
                continue;
            } else if (cdb_sc_normal == status)
            {
                assert(0 < kill_set_list.used);
                need_kip_incr = TRUE;
                if (!cs_addrs->now_crit) /* do not sleep while holding crit */
                    WAIT_ON_INHIBIT_KILLS(cs_addrs->nl, MAXWAIT2KILL);
                if ((trans_num)0 == (ret_tn = t_end(&(gv_target->hist), NULL, TN_NOT_SPECIFIED)))
                {
                    need_kip_incr = FALSE;
                    assert(NULL == kip_csa);
                    continue;
                }
                if (detailed_log)
                    log_detailed_log("RDL", &(gv_target->hist), NULL, level, NULL, ret_tn);
                gvcst_kill_sort(&kill_set_list);
                GVCST_BMP_MARK_FREE(&kill_set_list, ret_tn, inctn_mu_reorg,
                    inctn_bmp_mark_free_mu_reorg, inctn_opcode, cs_addrs)
                DECR_KIP(cs_data, cs_addrs, kip_csa);
                if (detailed_log)
                    log_detailed_log("KIL", &(gv_target->hist), NULL, level, &kill_set_list, ret_tn);
                blks_reused += kill_set_list.used;
                blks_killed += kill_set_list.used;
                cnt1 = 1;
                lvls_reduced++;
            }
            break;
        } /* main reduce level loop ends */
        t_abort(gv_cur_region, cs_addrs); /* do crit and other cleanup */
        if (0 == cnt1)
            break;
    }
    /* =========== END REDUCE LEVEL =========== */
    reorg_finish(dest_blk_id, blks_processed, blks_killed, blks_reused, file_extended,
        lvls_reduced, blks_coalesced, blks_split, blks_swapped);
    return TRUE;
} /* end mu_reorg() */
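/* Illustrative sketch (not compiled as part of this module): a toy replay of the iterative pre-order
 * traversal that the MAIN loop of mu_reorg() drives with only "level" and "pre_order_successor_level".
 * The tree, the names, and the "diverge" array below are all hypothetical; in the real code the
 * jump-back level comes from rtsib_hist->depth when the right sibling of a leaf is located.
 */
#if 0
#include <stdio.h>

int main(void)
{
    /* Toy GVT: root at level 2 (never a working block), index blocks P and Q at level 1,
     * leaves a,b under P and c,d under Q. For each leaf-to-leaf transition, diverge[i] is the
     * level at which the right-sibling path branches off the current path.
     */
    const char *leaf_name[4] = {"a", "b", "c", "d"};
    const char *index_name[4] = {"P", "P", "Q", "Q"}; /* level-1 parent of each leaf */
    int diverge[3] = {1, 2, 1}; /* a->b same parent; b->c branches at the root; c->d same parent */
    int leaf = 0, level, pre_order_successor_level, root_level = 2;

    pre_order_successor_level = level = root_level - 1; /* reorg starts just below the root */
    for (;;)
    {
        if (0 == level)
        {
            printf("work on leaf %s (level 0)\n", leaf_name[leaf]);
            if (3 == leaf)
                break; /* gvcst_rtsib returns cdb_sc_endtree: end_of_tree in the real code */
            pre_order_successor_level = diverge[leaf] - 1;
            leaf++;
            level = pre_order_successor_level; /* jump back up for the next descent */
        } else
        {
            printf("work on index block %s (level %d)\n", index_name[leaf], level);
            level--; /* order of reorg is root towards leaf */
        }
    }
    return 0; /* prints P a b Q c d: every block exactly once, in pre-order */
}
#endif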