int UNIX_ONLY(main)VMS_ONLY(dbcertify)(int argc, char **argv) { DCL_THREADGBL_ACCESS; /* Initialization of scaffolding we run on */ GTM_THREADGBL_INIT; gtm_imagetype_init(DBCERTIFY_IMAGE); gtm_env_init(); gtm_utf8_mode = FALSE; /* Only ever runs in V4 database so NO utf8 mode -- ever */ psa_gbl = malloc(SIZEOF(*psa_gbl)); memset(psa_gbl, 0, SIZEOF(*psa_gbl)); UNIX_ONLY(err_init(dbcertify_base_ch)); UNIX_ONLY(sig_init(dbcertify_signal_handler, dbcertify_signal_handler, NULL)); VMS_ONLY(util_out_open(0)); VMS_ONLY(SET_EXIT_HANDLER(exi_blk, dbcertify_exit_handler, exi_condition)); /* Establish exit handler */ VMS_ONLY(ESTABLISH(dbcertify_base_ch)); process_id = getpid(); /* Structure checks .. */ assert((24 * 1024) == SIZEOF(v15_sgmnt_data)); /* Verify V4 file header hasn't suddenly increased for some odd reason */ /* Platform dependent method to get the option scan going and invoke necessary driver routine */ dbcertify_parse_and_dispatch(argc, argv); return SS_NORMAL; }
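/* The UNIX_ONLY()/VMS_ONLY() pattern above (even selecting the entry-point name itself) relies on
 * conditional-compilation macros that expand their argument on the matching platform and to nothing
 * elsewhere. A minimal stand-alone sketch of that idiom; the definitions below are illustrative
 * assumptions, not the actual GT.M headers. */
#include <stdio.h>

#ifdef VMS
#  define UNIX_ONLY(X)
#  define VMS_ONLY(X)	X
#else
#  define UNIX_ONLY(X)	X
#  define VMS_ONLY(X)
#endif

int main(void)
{
	UNIX_ONLY(printf("built for UNIX\n");)	/* expands to the printf on UNIX, to nothing on VMS */
	VMS_ONLY(printf("built for VMS\n");)
	return 0;
}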
void jnl_send_oper(jnl_private_control *jpc, uint4 status) { sgmnt_addrs *csa; sgmnt_data_ptr_t csd; jnl_buffer_ptr_t jb; uint4 now_writer, fsync_pid; int4 io_in_prog, fsync_in_prog; boolean_t ok_to_log; /* TRUE except when we avoid flooding operator log due to ENOSPC error */ error_def(ERR_CALLERID); error_def(ERR_JNLBUFINFO); error_def(ERR_JNLPVTINFO); error_def(ERR_JNLSENDOPER); switch(jpc->region->dyn.addr->acc_meth) { case dba_mm: case dba_bg: csa = &FILE_INFO(jpc->region)->s_addrs; break; default: GTMASSERT; } csd = csa->hdr; jb = jpc->jnl_buff; UNIX_ONLY(assert((ENOSPC != jpc->status) || jb->enospc_errcnt || WBTEST_ENABLED(WBTEST_RECOVER_ENOSPC))); UNIX_ONLY(assert((SS_NORMAL == jpc->status) || (ENOSPC == jpc->status) || !jb->enospc_errcnt)); VMS_ONLY(assert(!jb->enospc_errcnt)); /* currently not updated in VMS, so should be 0 */ ok_to_log = (jb->enospc_errcnt ? (1 == (jb->enospc_errcnt % ENOSPC_LOGGING_PERIOD)) : TRUE); caller_id_flag = FALSE; if (ok_to_log) { SEND_CALLERID("jnl_send_oper()"); if (0 != status) { if (SS_NORMAL != jpc->status) { if (SS_NORMAL != jpc->status2) { send_msg(VARLSTCNT(14) ERR_JNLSENDOPER, 5, process_id, status, jpc->status, jpc->status2, jb->iosb.cond, status, 2, JNL_LEN_STR(csd), jpc->status, 0, jpc->status2); } else send_msg(VARLSTCNT(12) ERR_JNLSENDOPER, 5, process_id, status, jpc->status, jpc->status2, jb->iosb.cond, status, 2, JNL_LEN_STR(csd), jpc->status); } else send_msg(VARLSTCNT(11) ERR_JNLSENDOPER, 5, process_id, status, jpc->status, jpc->status2, jb->iosb.cond, status, 2, JNL_LEN_STR(csd)); } } jpc->status = SS_NORMAL; jpc->status2 = SS_NORMAL; UNIX_ONLY( io_in_prog = (jb->io_in_prog_latch.u.parts.latch_pid ? TRUE : FALSE); now_writer = jb->io_in_prog_latch.u.parts.latch_pid; )
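/* The ok_to_log computation above throttles operator-log traffic during a disk-full condition:
 * only the 1st, (PERIOD+1)th, (2*PERIOD+1)th, ... consecutive ENOSPC errors get logged. A
 * self-contained sketch of that modulo test; the period value 100 is an assumption for
 * illustration, as ENOSPC_LOGGING_PERIOD's real value lives in the journal headers. */
#include <stdio.h>

#define ENOSPC_LOGGING_PERIOD	100

static int ok_to_log(unsigned int enospc_errcnt)
{
	/* no ENOSPC history: always log; otherwise log the 1st error and every PERIOD-th after it */
	return enospc_errcnt ? (1 == (enospc_errcnt % ENOSPC_LOGGING_PERIOD)) : 1;
}

int main(void)
{
	unsigned int n;

	for (n = 0; n <= 202; n++)
		if (ok_to_log(n))
			printf("would log at error count %u\n", n);	/* prints 0, 1, 101, 201 */
	return 0;
}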
void mupip_freeze(void) { int4 status; bool record; tp_region *rptr, *rptr1; boolean_t freeze, override; uint4 online; freeze_status freeze_ret; int dummy_errno; const char *msg1[] = { "unfreeze", "freeze" } ; const char *msg2[] = { "UNFROZEN", "FROZEN" } ; const char *msg3[] = { "unfrozen", "frozen" } ; status = SS_NORMAL; in_mupip_freeze = TRUE; UNIX_ONLY(jnlpool_init_needed = TRUE); mu_outofband_setup(); gvinit(); freeze = (CLI_PRESENT == cli_present("ON")); online = (CLI_PRESENT == cli_present("ONLINE")); if (online) online |= ((!cli_negated("AUTORELEASE")) ? CHILLED_AUTORELEASE_MASK : 0); if (CLI_PRESENT == cli_present("OFF")) { if (TRUE == freeze) { util_out_print("The /ON qualifier is invalid with the /OFF qualifier", TRUE); mupip_exit(ERR_MUPCLIERR); } } if (CLI_PRESENT == cli_present("RECORD")) { record = TRUE; if (FALSE == freeze) { util_out_print("The /RECORD qualifier is invalid with the /OFF qualifier", TRUE); mupip_exit(ERR_MUPCLIERR); } } else record = FALSE; if (CLI_PRESENT == cli_present("OVERRIDE")) { override = TRUE; if (freeze) { util_out_print("The /OVERRIDE qualifier is invalid with the /ON qualifier", TRUE); mupip_exit(ERR_MUPCLIERR); } } else
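/* mupip_freeze() rejects contradictory qualifier combinations (/ON with /OFF, /RECORD with /OFF,
 * /OVERRIDE with /ON) before touching any region. A minimal sketch of the same validate-then-act
 * pattern; has_qual() is a stand-in for the real cli_present() interface, and the qualifier
 * spellings here are illustrative. */
#include <stdio.h>
#include <string.h>

static int has_qual(int argc, char **argv, const char *qual)
{
	int i;

	for (i = 1; i < argc; i++)
		if (0 == strcmp(argv[i], qual))
			return 1;
	return 0;
}

int main(int argc, char **argv)
{
	int on = has_qual(argc, argv, "-on"), off = has_qual(argc, argv, "-off");

	if (on && off)
	{	/* mutually exclusive qualifiers: report and bail out before doing any work */
		fprintf(stderr, "The -on qualifier is invalid with the -off qualifier\n");
		return 1;
	}
	if (has_qual(argc, argv, "-record") && !on)
	{
		fprintf(stderr, "The -record qualifier is invalid without the -on qualifier\n");
		return 1;
	}
	printf("%s requested\n", on ? "freeze" : "unfreeze");
	return 0;
}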
void gtm_threadgbl_init(void) { void *lcl_gtm_threadgbl; if (SIZEOF(gtm_threadgbl_true_t) != size_gtm_threadgbl_struct) { /* Size mismatch with gtm_threadgbl_deftypes.h - no error handling yet available so do * the best we can. */ FPRINTF(stderr, "GTM-F-GTMASSERT gtm_threadgbl_true_t and gtm_threadgbl_t are different sizes\n"); EXIT(ERR_GTMASSERT); } if (NULL != gtm_threadgbl) { /* has already been initialized - don't re-init */ FPRINTF(stderr, "GTM-F-GTMASSERT gtm_threadgbl is already initialized\n"); EXIT(ERR_GTMASSERT); } gtm_threadgbl = lcl_gtm_threadgbl = malloc(size_gtm_threadgbl_struct); if (NULL == gtm_threadgbl) { /* Storage was not allocated for some reason - no error handling yet still */ perror("GTM-F-MEMORY Unable to allocate startup thread structure"); EXIT(UNIX_ONLY(ERR_MEMORY) VMS_ONLY(ERR_VMSMEMORY)); } memset(gtm_threadgbl, 0, size_gtm_threadgbl_struct); gtm_threadgbl_true = (gtm_threadgbl_true_t *)gtm_threadgbl; /* Add specific initializations if other than 0s here using the TREF() family of macros: */ (TREF(director_ident)).addr = TADR(director_string); TREF(for_stack_ptr) = TADR(for_stack); (TREF(gtmprompt)).addr = TADR(prombuf); (TREF(gtmprompt)).len = SIZEOF(DEFAULT_PROMPT) - 1; TREF(lv_null_subs) = LVNULLSUBS_OK; /* UNIX: set in gtm_env_init_sp(), VMS: set in gtm$startup() - init'd here * in case alternative invocation methods bypass gtm_startup() */ MEMCPY_LIT(TADR(prombuf), DEFAULT_PROMPT); (TREF(replgbl)).jnl_release_timeout = DEFAULT_JNL_RELEASE_TIMEOUT; (TREF(window_ident)).addr = TADR(window_string); ASSERT_SAFE_TO_UPDATE_THREAD_GBLS; TREF(util_outbuff_ptr) = TADR(util_outbuff); /* Point util_outbuff_ptr to the beginning of util_outbuff at first. */ TREF(util_outptr) = TREF(util_outbuff_ptr); (TREF(source_buffer)).addr = (char *)&aligned_source_buffer; (TREF(source_buffer)).len = MAX_SRCLINE; }
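/* gtm_threadgbl_init() allocates one zero-filled block and then patches the few fields that need
 * non-zero defaults through the TREF()/TADR() macros, which resolve a field of the structure the
 * global gtm_threadgbl pointer addresses. A simplified sketch of that access pattern, using a
 * plain struct in place of the generated gtm_threadgbl_true_t; all names below are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
	int	lv_null_subs;
	char	prombuf[32];
} threadgbl_t;

static void *gtm_threadgbl;	/* deliberately opaque, as in the real code */

#define TREF(F)	(((threadgbl_t *)gtm_threadgbl)->F)
#define TADR(F)	(&((threadgbl_t *)gtm_threadgbl)->F[0])

int main(void)
{
	gtm_threadgbl = malloc(sizeof(threadgbl_t));
	if (NULL == gtm_threadgbl)
	{	/* no error handling machinery is up yet, so report and exit directly */
		perror("unable to allocate startup thread structure");
		exit(EXIT_FAILURE);
	}
	memset(gtm_threadgbl, 0, sizeof(threadgbl_t));	/* everything defaults to zero... */
	TREF(lv_null_subs) = 1;				/* ...then override the non-zero fields */
	strcpy(TADR(prombuf), "GTM>");
	printf("prompt=%s null_subs=%d\n", TADR(prombuf), TREF(lv_null_subs));
	free(gtm_threadgbl);
	return 0;
}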
trans_num gvcst_bmp_mark_free(kill_set *ks) { block_id bit_map, next_bm, *updptr; blk_ident *blk, *blk_top, *nextblk; trans_num ctn, start_db_fmt_tn; unsigned int len; # if defined(UNIX) && defined(DEBUG) unsigned int lcl_t_tries; # endif int4 blk_prev_version; srch_hist alt_hist; trans_num ret_tn = 0; boolean_t visit_blks; srch_blk_status bmphist; cache_rec_ptr_t cr; enum db_ver ondsk_blkver; enum cdb_sc status; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; TREF(in_gvcst_bmp_mark_free) = TRUE; assert(inctn_bmp_mark_free_gtm == inctn_opcode || inctn_bmp_mark_free_mu_reorg == inctn_opcode); /* Note down the desired_db_format_tn before you start relying on cs_data->fully_upgraded. * If the db is fully_upgraded, take the optimal path that does not need to read each block being freed. * But in order to detect concurrent desired_db_format changes, note down the tn (when the last format change occurred) * before the fully_upgraded check and after having noted down the database current_tn. * If they are the same, then we are guaranteed no concurrent desired_db_format change occurred. * If they are not, then fall through to the non-optimal path where each to-be-killed block has to be visited. * The reason we need to visit every block in case desired_db_format changes is to take care of the case where * MUPIP REORG DOWNGRADE concurrently changes a block that we are about to free. */ start_db_fmt_tn = cs_data->desired_db_format_tn; visit_blks = (!cs_data->fully_upgraded); /* Local evaluation */ assert(!visit_blks || (visit_blks && dba_bg == cs_addrs->hdr->acc_meth)); /* must have blks_to_upgrd == 0 for non-BG */ assert(!dollar_tlevel); /* Should NOT be in TP now */ blk = &ks->blk[0]; blk_top = &ks->blk[ks->used]; if (!visit_blks) { /* Database has been completely upgraded. Free all blocks in one bitmap as part of one transaction. */ assert(cs_data->db_got_to_v5_once); /* assert all V4 fmt blocks (including RECYCLED) have space for V5 upgrade */ inctn_detail.blknum_struct.blknum = 0; /* to indicate no adjustment to "blks_to_upgrd" necessary */ /* If any of the mini transaction below restarts because of an online rollback, we don't want the application * refresh to happen (like $ZONLNRLBK++ or rts_error(DBROLLEDBACK)). This is because, although we are currently in * non-tp (dollar_tlevel = 0), we could actually be in a TP transaction and have actually faked dollar_tlevel. In * such a case, we should NOT be issuing a DBROLLEDBACK error as TP transactions are supposed to just restart in * case of an online rollback. So, set the global variable that gtm_onln_rlbk_clnup can check and skip doing the * application refresh, but will reset the clues. The next update will see the cycle mismatch and will accordingly * take the right action.
*/ for ( ; blk < blk_top; blk = nextblk) { if (0 != blk->flag) { nextblk = blk + 1; continue; } assert(0 < blk->block); assert((int4)blk->block < cs_addrs->ti->total_blks); bit_map = ROUND_DOWN2((int)blk->block, BLKS_PER_LMAP); next_bm = bit_map + BLKS_PER_LMAP; CHECK_AND_RESET_UPDATE_ARRAY; /* reset update_array_ptr to update_array */ /* Scan for the next local bitmap */ updptr = (block_id *)update_array_ptr; for (nextblk = blk; (0 == nextblk->flag) && (nextblk < blk_top) && ((block_id)nextblk->block < next_bm); ++nextblk) { assert((block_id)nextblk->block - bit_map); *updptr++ = (block_id)nextblk->block - bit_map; } len = (unsigned int)((char *)nextblk - (char *)blk); update_array_ptr = (char *)updptr; alt_hist.h[0].blk_num = 0; /* need for calls to T_END for bitmaps */ alt_hist.h[0].blk_target = NULL; /* need to initialize for calls to T_END */ /* the following assumes SIZEOF(blk_ident) == SIZEOF(int) */ assert(SIZEOF(blk_ident) == SIZEOF(int)); *(int *)update_array_ptr = 0; t_begin(ERR_GVKILLFAIL, UPDTRNS_DB_UPDATED_MASK); for (;;) { ctn = cs_addrs->ti->curr_tn; /* Need a read fence before reading fields from cs_data as we are reading outside * of crit and relying on this value to detect desired db format state change. */ SHM_READ_MEMORY_BARRIER; if (start_db_fmt_tn != cs_data->desired_db_format_tn) { /* Concurrent db format change has occurred. Need to visit every block to be killed * to determine its block format. Fall through to the non-optimal path below */ ret_tn = 0; break; } bmphist.blk_num = bit_map; if (NULL == (bmphist.buffaddr = t_qread(bmphist.blk_num, (sm_int_ptr_t)&bmphist.cycle, &bmphist.cr))) { t_retry((enum cdb_sc)rdfail_detail); continue; } t_write_map(&bmphist, (uchar_ptr_t)update_array, ctn, -(int4)(nextblk - blk)); UNIX_ONLY(DEBUG_ONLY(lcl_t_tries = t_tries)); if ((trans_num)0 == (ret_tn = t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))) { # ifdef UNIX assert((CDB_STAGNATE == t_tries) || (lcl_t_tries == t_tries - 1)); status = LAST_RESTART_CODE; if ((cdb_sc_onln_rlbk1 == status) || (cdb_sc_onln_rlbk2 == status) || TREF(rlbk_during_redo_root)) { /* t_end restarted due to online rollback. Discard bitmap free-up and return control * to the application. But, before that reset only_reset_clues_if_onln_rlbk to FALSE */ TREF(in_gvcst_bmp_mark_free) = FALSE; send_msg(VARLSTCNT(6) ERR_IGNBMPMRKFREE, 4, REG_LEN_STR(gv_cur_region), DB_LEN_STR(gv_cur_region)); t_abort(gv_cur_region, cs_addrs); return ret_tn; /* actually 0 */ } # endif continue; } break; } if (0 == ret_tn) /* db format change occurred. Fall through to below for loop to visit each block */ { /* Abort any active transaction to get rid of lingering Non-TP artifacts */ t_abort(gv_cur_region, cs_addrs); break; } } } /* for all blocks in the kill_set */
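/* The loop above frees kill-set blocks one local bitmap at a time: ROUND_DOWN2 truncates a block
 * number to its bitmap boundary, and the inner scan collects the consecutive entries governed by
 * that same bitmap so they commit together in one mini-transaction. A self-contained sketch of the
 * grouping arithmetic, assuming 512 blocks per local map; BLKS_PER_LMAP's real value comes from
 * the GDS headers. */
#include <stdio.h>

#define BLKS_PER_LMAP		512
#define ROUND_DOWN2(V, M)	((V) & ~((M) - 1))	/* M must be a power of two */

int main(void)
{
	int blocks[] = {7, 130, 511, 512, 600, 1025};	/* sorted block numbers, as in a kill_set */
	int n = sizeof(blocks) / sizeof(blocks[0]), i = 0, j;

	while (i < n)
	{
		int bit_map = ROUND_DOWN2(blocks[i], BLKS_PER_LMAP);
		int next_bm = bit_map + BLKS_PER_LMAP;

		printf("bitmap %4d frees offsets:", bit_map);
		/* gather every block under this local map, as the inner nextblk loop above does */
		for (j = i; j < n && blocks[j] < next_bm; j++)
			printf(" %d", blocks[j] - bit_map);	/* bitmap-relative offsets */
		printf("\n");
		i = j;	/* resume at the first block of the next bitmap */
	}
	return 0;
}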
uint4 jnl_file_lost(jnl_private_control *jpc, uint4 jnl_stat) { /* Notify operator and terminate journaling */ unsigned int status; sgmnt_addrs *csa; seq_num reg_seqno, jnlseqno; boolean_t was_lockid = FALSE, instfreeze_environ; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; switch(jpc->region->dyn.addr->acc_meth) { case dba_mm: case dba_bg: csa = &FILE_INFO(jpc->region)->s_addrs; break; default: assertpro(FALSE && jpc->region->dyn.addr->acc_meth); } # ifdef VMS /* The following assert has been removed as it could be FALSE if the caller is "jnl_file_extend" * assert(0 != memcmp(csa->nl->jnl_file.jnl_file_id.fid, zero_fid, SIZEOF(zero_fid))); */ # endif assert(csa->now_crit); /* We issue an rts_error (instead of shutting off journaling) in the following cases : {BYPASSOK} * 1) $gtm_error_on_jnl_file_lost is set to issue runtime error (if not already issued) in case of journaling issues. * 2) The process has the given message set in $gtm_custom_errors (indicative of instance freeze on error setup) * in which case the goal is to never shut-off journaling */ UNIX_ONLY(assert(jnlpool.jnlpool_ctl == jnlpool_ctl)); UNIX_ONLY(instfreeze_environ = INST_FREEZE_ON_MSG_ENABLED(csa, jnl_stat)); VMS_ONLY(instfreeze_environ = FALSE); if ((JNL_FILE_LOST_ERRORS == TREF(error_on_jnl_file_lost)) || instfreeze_environ) { VMS_ONLY(assert(FALSE)); /* Not fully implemented / supported on VMS. */ if (!process_exiting || instfreeze_environ || !csa->jnl->error_reported) { csa->jnl->error_reported = TRUE; in_wcs_recover = FALSE; /* in case we're called in wcs_recover() */ if (SS_NORMAL != jpc->status) rts_error_csa(CSA_ARG(csa) VARLSTCNT(7) jnl_stat, 4, JNL_LEN_STR(csa->hdr), DB_LEN_STR(gv_cur_region), jpc->status); else rts_error_csa(CSA_ARG(csa) VARLSTCNT(6) jnl_stat, 4, JNL_LEN_STR(csa->hdr), DB_LEN_STR(gv_cur_region)); } return jnl_stat; } if (0 != jnl_stat) jnl_send_oper(jpc, jnl_stat); csa->hdr->jnl_state = jnl_closed; jpc->jnl_buff->cycle++; /* increment shared cycle so all future callers of jnl_ensure_open recognize journal switch */ assert(jpc->cycle < jpc->jnl_buff->cycle); if (REPL_ENABLED(csa->hdr)) { csa->hdr->repl_state = repl_was_open; reg_seqno = csa->hdr->reg_seqno; jnlseqno = (NULL != jnlpool.jnlpool_ctl) ? jnlpool.jnlpool_ctl->jnl_seqno : MAX_SEQNO; send_msg_csa(CSA_ARG(csa) VARLSTCNT(8) ERR_REPLJNLCLOSED, 6, DB_LEN_STR(jpc->region), &reg_seqno, &reg_seqno, &jnlseqno, &jnlseqno); } else send_msg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_JNLCLOSED, 3, DB_LEN_STR(jpc->region), &csa->ti->curr_tn); #ifdef VMS /* We can get a jnl_file_lost before the file is even created, so locking is done only if the lock exists */ if (0 != csa->jnl->jnllsb->lockid) { was_lockid = TRUE; status = gtm_enqw(EFN$C_ENF, LCK$K_EXMODE, csa->jnl->jnllsb, LCK$M_CONVERT | LCK$M_NODLCKBLK, NULL, 0, NULL, 0, NULL, PSL$C_USER, 0); if (SS$_NORMAL == status) status = csa->jnl->jnllsb->cond; } jnl_file_close(jpc->region, FALSE, FALSE); if (was_lockid) { if (SS$_NORMAL == status) status = gtm_deq(csa->jnl->jnllsb->lockid, NULL, PSL$C_USER, 0); assertpro(SS$_NORMAL == status); } # else jnl_file_close(jpc->region, FALSE, FALSE); #endif return EXIT_NRM; }
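/* When journaling is shut off above, the shared jnl_buff cycle is incremented so every process's
 * private jpc->cycle goes stale and its next jnl_ensure_open notices the switch. A minimal sketch
 * of that generation-counter handshake; the struct fields below are stand-ins for the real shared
 * and private journal control blocks. */
#include <stdio.h>

typedef struct { unsigned int cycle; } shared_jnl_t;	/* lives in shared memory */
typedef struct { unsigned int cycle; } private_jnl_t;	/* one per attached process */

static int jnl_reopen_needed(private_jnl_t *jpc, shared_jnl_t *jb)
{
	if (jpc->cycle != jb->cycle)
	{	/* stale: some process closed or switched the journal; resync and report it */
		jpc->cycle = jb->cycle;
		return 1;
	}
	return 0;	/* still current */
}

int main(void)
{
	shared_jnl_t	jb = { 1 };
	private_jnl_t	jpc = { 1 };

	jb.cycle++;	/* as in jnl_file_lost: announce the switch to all future callers */
	printf("reopen needed: %d\n", jnl_reopen_needed(&jpc, &jb));	/* 1 */
	printf("reopen needed: %d\n", jnl_reopen_needed(&jpc, &jb));	/* 0 */
	return 0;
}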
void gv_rundown(void) { gd_region *r_top, *r_save, *r_local; gd_addr *addr_ptr; sgm_info *si; #ifdef VMS vms_gds_info *gds_info; #endif error_def(ERR_TEXT); r_save = gv_cur_region; /* Save for possible core dump */ gvcmy_rundown(); ENABLE_AST if (pool_init) rel_lock(jnlpool.jnlpool_dummy_reg); for (addr_ptr = get_next_gdr(NULL); addr_ptr; addr_ptr = get_next_gdr(addr_ptr)) { for (r_local = addr_ptr->regions, r_top = r_local + addr_ptr->n_regions; r_local < r_top; r_local++) { if (r_local->open && !r_local->was_open && dba_cm != r_local->dyn.addr->acc_meth) { /* Rundown has already occurred for GT.CM client regions through gvcmy_rundown() above. * Hence the (dba_cm != ...) check in the if above. Note that for GT.CM client regions, * region->open is TRUE although cs_addrs is NULL. */ gv_cur_region = r_local; tp_change_reg(); gds_rundown(); /* Now that gds_rundown is done, free up the memory associated with the region. * Ideally the following memory freeing code should go to gds_rundown, but * GT.CM calls gds_rundown() and we want to reuse memory for GT.CM. */ if (NULL != cs_addrs) { if (NULL != cs_addrs->dir_tree) FREE_CSA_DIR_TREE(cs_addrs); if (cs_addrs->sgm_info_ptr) { si = cs_addrs->sgm_info_ptr; /* It is possible we got interrupted before initializing all fields of "si" * completely so account for NULL values while freeing/releasing those fields. */ assert((si->tp_csa == cs_addrs) || (NULL == si->tp_csa)); if (si->jnl_tail) { CAREFUL_FREEUP_BUDDY_LIST(si->format_buff_list); CAREFUL_FREEUP_BUDDY_LIST(si->jnl_list); } CAREFUL_FREEUP_BUDDY_LIST(si->recompute_list); CAREFUL_FREEUP_BUDDY_LIST(si->new_buff_list); CAREFUL_FREEUP_BUDDY_LIST(si->tlvl_info_list); CAREFUL_FREEUP_BUDDY_LIST(si->tlvl_cw_set_list); CAREFUL_FREEUP_BUDDY_LIST(si->cw_set_list); if (NULL != si->blks_in_use) { free_hashtab_int4(si->blks_in_use); free(si->blks_in_use); si->blks_in_use = NULL; } if (si->cr_array_size) { assert(NULL != si->cr_array); if (NULL != si->cr_array) free(si->cr_array); } if (NULL != si->first_tp_hist) free(si->first_tp_hist); free(si); } if (cs_addrs->jnl) { assert(&FILE_INFO(cs_addrs->jnl->region)->s_addrs == cs_addrs); if (cs_addrs->jnl->jnllsb) { UNIX_ONLY(assert(FALSE)); free(cs_addrs->jnl->jnllsb); } free(cs_addrs->jnl); } GTMCRYPT_ONLY( if (cs_addrs->encrypted_blk_contents) free(cs_addrs->encrypted_blk_contents); ) } assert(gv_cur_region->dyn.addr->file_cntl->file_info); VMS_ONLY( gds_info = (vms_gds_info *)gv_cur_region->dyn.addr->file_cntl->file_info; if (gds_info->xabpro) free(gds_info->xabpro); if (gds_info->xabfhc) free(gds_info->xabfhc); if (gds_info->nam) { free(gds_info->nam->nam$l_esa); free(gds_info->nam); } if (gds_info->fab) free(gds_info->fab); ) free(gv_cur_region->dyn.addr->file_cntl->file_info); free(gv_cur_region->dyn.addr->file_cntl); } r_local->open = r_local->was_open = FALSE; }
void dse_crit(void) { int util_len, dse_crit_count; char util_buff[MAX_UTIL_LEN]; boolean_t crash = FALSE, cycle = FALSE, owner = FALSE; gd_region *save_region, *r_local, *r_top; crash = ((cli_present("CRASH") == CLI_PRESENT) || (cli_present("RESET") == CLI_PRESENT)); cycle = (CLI_PRESENT == cli_present("CYCLE")); if (cli_present("SEIZE") == CLI_PRESENT || cycle) { if (gv_cur_region->read_only && !cycle) rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region)); if (cs_addrs->now_crit) { util_out_print("!/Write critical section already seized.!/", TRUE); return; } crash_count = cs_addrs->critical->crashcnt; grab_crit_encr_cycle_sync(gv_cur_region); cs_addrs->hold_onto_crit = TRUE; /* need to do this AFTER grab_crit */ cs_addrs->dse_crit_seize_done = TRUE; util_out_print("!/Seized write critical section.!/", TRUE); if (!cycle) return; } if (cli_present("RELEASE") == CLI_PRESENT || cycle) { if (gv_cur_region->read_only && !cycle) rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region)); if (!cs_addrs->now_crit) { util_out_print("!/Critical section already released.!/", TRUE); return; } crash_count = cs_addrs->critical->crashcnt; if (cs_addrs->now_crit) { /* user wants crit to be released unconditionally so "was_crit" not checked like everywhere else */ assert(cs_addrs->hold_onto_crit && cs_addrs->dse_crit_seize_done); cs_addrs->dse_crit_seize_done = FALSE; cs_addrs->hold_onto_crit = FALSE; /* need to do this before the rel_crit */ rel_crit(gv_cur_region); util_out_print("!/Released write critical section.!/", TRUE); } # ifdef DEBUG else assert(!cs_addrs->hold_onto_crit && !cs_addrs->dse_crit_seize_done); # endif return; } if (cli_present("INIT") == CLI_PRESENT) { if (gv_cur_region->read_only) rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region)); cs_addrs->hdr->image_count = 0; UNIX_ONLY(gtm_mutex_init(gv_cur_region, NUM_CRIT_ENTRY(cs_addrs->hdr), crash)); VMS_ONLY(mutex_init(cs_addrs->critical, NUM_CRIT_ENTRY(cs_addrs->hdr), crash)); cs_addrs->nl->in_crit = 0; cs_addrs->now_crit = FALSE; util_out_print("!/Reinitialized critical section.!/", TRUE); return; } if (cli_present("REMOVE") == CLI_PRESENT) { if (gv_cur_region->read_only) rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region)); if (cs_addrs->nl->in_crit == 0) { util_out_print("!/The write critical section is unowned!/", TRUE); return; } UNIX_ONLY(assert(LOCK_AVAILABLE != cs_addrs->critical->semaphore.u.parts.latch_pid);) VMS_ONLY(assert(cs_addrs->critical->semaphore >= 0);)
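/* dse_crit() is careful about flag ordering: hold_onto_crit is set only AFTER grab_crit()
 * succeeds and cleared BEFORE rel_crit() runs, so the flag never advertises a lock the process
 * does not actually hold. A toy sketch of that discipline; grab_crit/rel_crit here are trivial
 * placeholders, not the real latch code. */
#include <assert.h>
#include <stdio.h>

static int now_crit, hold_onto_crit;

static void grab_crit(void)	{ now_crit = 1; }
static void rel_crit(void)	{ assert(!hold_onto_crit); now_crit = 0; }

static void seize(void)
{
	grab_crit();
	hold_onto_crit = 1;	/* AFTER grab_crit: only advertise what we already hold */
}

static void release(void)
{
	hold_onto_crit = 0;	/* BEFORE rel_crit: stop advertising before letting go */
	rel_crit();
}

int main(void)
{
	seize();
	printf("crit held: %d\n", now_crit);	/* 1 */
	release();
	printf("crit held: %d\n", now_crit);	/* 0 */
	return 0;
}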
int m_set(void) { /* Some comment on "parse_warn". It is set to TRUE whenever the parse encounters an invalid setleft target. * Note that even if "parse_warn" is TRUE, we should not return FALSE right away but need to continue the parse * until the end of the current SET command. This way any remaining commands in the current parse line will be * parsed and triples generated for them. This is necessary just in case the currently parsed invalid SET command * does not get executed at runtime (due to postconditionals etc.) * * Some comment on the need for "first_setleft_invalid". This variable is needed only in the * case we encounter an invalid-SVN/invalid-FCN/unsettable-SVN as a target of the SET. We need to evaluate the * right-hand-side of the SET command only if at least one valid setleft target is parsed before an invalid setleft * target is encountered. This is because we still need to execute the valid setlefts at runtime before triggering * a runtime error for the invalid setleft. If the first setleft target is an invalid one, then there is no need * to evaluate the right-hand-side. In fact, in this case, adding triples (corresponding to the right hand side) * to the execution chain could cause problems with emit_code later in the compilation as the destination * for the right hand side triples could now be undefined (for example a valid SVN on the left side of the * SET would have generated an OC_SVPUT triple with one of its operands holding the result of the right * hand side evaluation, but an invalid SVN on the left side which would have instead caused an OC_RTERROR triple * to have been generated leaving no triple to receive the result of the right hand side evaluation thus causing * emit_code to be confused and GTMASSERT). Therefore discard all triples generated by the right hand side in this case. * By the same reasoning, discard all triples generated by setleft targets AFTER this invalid one as well. * "first_setleft_invalid" is set to TRUE if the first setleft target is invalid and set to FALSE if the first setleft * target is valid. It is initialized to -1 before the start of the parse. */ int index, setop, delimlen; int first_val_lit, last_val_lit, nakedzalias; boolean_t first_is_lit, last_is_lit, got_lparen, delim1char, is_extract, valid_char; boolean_t alias_processing, have_lh_alias; opctype put_oc; oprtype v, delimval, firstval, lastval, *result, resptr; triple *curtargchain, *delimiter, discardcurtchain, *first, *get, *jmptrp1, *jmptrp2, *last, *obp, *put; triple *s, *s0, *s1, save_targchain, *save_curtchain, *save_curtchain1, *sub, targchain, *tmp; mint delimlit; mval *delim_mval; mvar *mvarptr; boolean_t parse_warn; /* set to TRUE in case of an invalid SVN etc. */ boolean_t curtchain_switched; /* set to TRUE if a setcurtchain was done */ int first_setleft_invalid; /* set to TRUE if the first setleft target is invalid */ boolean_t temp_subs_was_FALSE; union { uint4 unichar_val; unsigned char unibytes_val[4]; } unichar; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; TREF(temp_subs) = FALSE; dqinit(&targchain, exorder); result = (oprtype *)mcalloc(SIZEOF(oprtype)); resptr = put_indr(result); delimiter = sub = last = NULL; /* A SET clause must be entirely alias related or a normal set. Parenthesized multiple sets of aliases are not allowed * and will trigger an error. This is because the source and targets of aliases require different values and references * than normal sets do and thus cannot be mixed.
*/ if (alias_processing = (TK_ASTERISK == window_token)) advancewindow(); if (got_lparen = (TK_LPAREN == window_token)) { if (alias_processing) stx_error(ERR_NOALIASLIST); advancewindow(); TREF(temp_subs) = TRUE; } /* Some explanation: The triples from the left hand side of the SET expression that are * expressly associated with fetching (in case of set $piece/$extract) and/or storing of * the target value are removed from curtchain and placed on the targchain. Later, these * triples will be added to the end of curtchain to do the finishing store of the target * after the righthand side has been evaluated. This is per the M standard. * * Note that SET $PIECE/$EXTRACT have special conditions in which the first argument is not referenced at all. * (e.g. set $piece(^a," ",3,2) in this case 3 > 2 so this should not evaluate ^a and therefore should not * modify the naked indicator). That is, the triples that do these conditional checks need to be inserted * ahead of the OC_GVNAME of ^a, all of which need to be inserted on the targchain. But the conditionalization * can be done only after parsing the first argument of the SET $PIECE and examining the remaining arguments. * Therefore we maintain the "curtargchain" variable which stores the value of the "targchain" at the beginning * of the iteration (at the start of the $PIECE parsing) and all the conditionalization will be inserted right * here which is guaranteed to be ahead of where the OC_GVNAME gets inserted. * * For example, SET $PIECE(^A(x,y),delim,first,last)=RHS will generate a final triple chain as follows * * A - Triples to evaluate subscripts (x,y) of the global ^A * A - Triples to evaluate delim * A - Triples to evaluate first * A - Triples to evaluate last * B - Triples to evaluate RHS * C - Triples to do conditional check (e.g. first > last etc.) * C - Triples to branch around if the checks indicate this is a null operation SET $PIECE * D - Triple that does OC_GVNAME of ^A * D - Triple that does OC_SETPIECE to determine the new value * D - Triple that does OC_GVPUT of the new value into ^A(x,y) * This is the point where the conditional check triples will branch around to if they chose to. * * A - triples that evaluates the arguments/subscripts in the left-hand-side of the SET command * These triples are built in "curtchain" * B - triples that evaluates the arguments/subscripts in the right-hand-side of the SET command * These triples are built in "curtchain" * C - triples that do conditional check for any $PIECE/$EXTRACT in the left side of the SET command. * These triples are built in "curtargchain" * D - triples that generate the reference to the target of the SET and the store into the target. * These triples are built in "targchain" * * Note alias processing does not support the SET *(...)=.. type syntax because the type of argument * created for RHS processing is dependent on the LHS receiver type and we do not support more than one * type of source argument in a single SET. */ first_setleft_invalid = FIRST_SETLEFT_NOTSEEN; curtchain_switched = FALSE; nakedzalias = have_lh_alias = FALSE; save_curtchain = NULL; assert(FIRST_SETLEFT_NOTSEEN != TRUE); assert(FIRST_SETLEFT_NOTSEEN != FALSE); for (parse_warn = FALSE; ; parse_warn = FALSE) { curtargchain = targchain.exorder.bl; jmptrp1 = jmptrp2 = NULL; delim1char = is_extract = FALSE; allow_dzwrtac_as_mident(); /* Allows $ZWRTACxxx as target to be treated as an mident */ switch (window_token) { case TK_IDENT: /* A slight diversion first. 
If this is a $ZWRTAC set (indication of $ in first char * is currently enough to signify that), then we need to check a few conditions first. * If this is a "naked $ZWRTAC", meaning no numeric suffix, then this is a flag that * all the $ZWRTAC vars in the local variable tree need to be kill *'d which will not * be generating a SET instruction. First we need to verify that fact and make sure * we are not in PARENs and not doing alias processing. Note *any* value can be * specified as the source but while it will be evaluated, it is NOT stored anywhere. */ if ('$' == *window_ident.addr) { /* We have a $ZWRTAC<xx> target */ if (got_lparen) /* We don't allow $ZWRTACxxx to be specified in a parenthesized list. * Verify that first */ SYNTAX_ERROR(ERR_DZWRNOPAREN); if (STR_LIT_LEN(DOLLAR_ZWRTAC) == window_ident.len) { /* Ok, this is a naked $ZWRTAC targeted set */ if (alias_processing) SYNTAX_ERROR(ERR_DZWRNOALIAS); nakedzalias = TRUE; /* This opcode doesn't really need args but it is easier to fit in with the rest * of m_set processing to pass it the result arg, which there may actually be * a use for someday.. */ put = maketriple(OC_CLRALSVARS); put->operand[0] = resptr; dqins(targchain.exorder.bl, exorder, put); advancewindow(); break; } } /* If we are doing alias processing, there are two possibilities: * 1) LHS is unsubscripted - it is an alias variable being created or replaced. Need to parse * the varname as if this were a regular set. * 2) LHS is subscripted - it is an alias container variable being created or replaced. The * processing here is to pass the base variable index to the store routine so bypass the * lvn() call. */ if (!alias_processing || TK_LPAREN == director_token) { /* Normal variable processing or we have a lh alias container */ if (!lvn(&v, OC_PUTINDX, 0)) SYNTAX_ERROR_NOREPORT_HERE; if (OC_PUTINDX == v.oprval.tref->opcode) { dqdel(v.oprval.tref, exorder); dqins(targchain.exorder.bl, exorder, v.oprval.tref); sub = v.oprval.tref; put_oc = OC_PUTINDX; if (TREF(temp_subs)) m_set_create_temporaries(sub, put_oc); } } else { /* Have alias variable. Argument is index into var table rather than pointer to var */ have_lh_alias = TRUE; /* We only want the variable index in this case. Since the entire hash structure to which * this variable is going to be pointing to is changing, doing anything that calls fetch() * is somewhat pointless so we avoid it by just accessing the variable information * directly. */ mvarptr = get_mvaddr(&window_ident); v = put_ilit(mvarptr->mvidx); advancewindow(); } /* Determine correct storing triple */ put = maketriple((!alias_processing ? OC_STO : (have_lh_alias ? 
OC_SETALS2ALS : OC_SETALSIN2ALSCT))); put->operand[0] = v; put->operand[1] = resptr; dqins(targchain.exorder.bl, exorder, put); break; case TK_CIRCUMFLEX: if (alias_processing) SYNTAX_ERROR(ERR_ALIASEXPECTED); s1 = curtchain->exorder.bl; if (!gvn()) SYNTAX_ERROR_NOREPORT_HERE; for (sub = curtchain->exorder.bl; sub != s1; sub = sub->exorder.bl) { put_oc = sub->opcode; if (OC_GVNAME == put_oc || OC_GVNAKED == put_oc || OC_GVEXTNAM == put_oc) break; } assert(OC_GVNAME == put_oc || OC_GVNAKED == put_oc || OC_GVEXTNAM == put_oc); dqdel(sub, exorder); dqins(targchain.exorder.bl, exorder, sub); if (TREF(temp_subs)) m_set_create_temporaries(sub, put_oc); put = maketriple(OC_GVPUT); put->operand[0] = resptr; dqins(targchain.exorder.bl, exorder, put); break; case TK_ATSIGN: if (alias_processing) SYNTAX_ERROR(ERR_ALIASEXPECTED); if (!indirection(&v)) SYNTAX_ERROR_NOREPORT_HERE; if (!got_lparen && TK_EQUAL != window_token) { assert(!curtchain_switched); put = newtriple(OC_COMMARG); put->operand[0] = v; put->operand[1] = put_ilit(indir_set); return TRUE; } put = maketriple(OC_INDSET); put->operand[0] = v; put->operand[1] = resptr; dqins(targchain.exorder.bl, exorder, put); break; case TK_DOLLAR: if (alias_processing) SYNTAX_ERROR(ERR_ALIASEXPECTED); advancewindow(); if (TK_IDENT != window_token) SYNTAX_ERROR(ERR_VAREXPECTED); if (TK_LPAREN != director_token) { /* Look for intrinsic special variables */ s1 = curtchain->exorder.bl; if (0 > (index = namelook(svn_index, svn_names, window_ident.addr, window_ident.len))) { STX_ERROR_WARN(ERR_INVSVN); /* sets "parse_warn" to TRUE */ } else if (!svn_data[index].can_set) { STX_ERROR_WARN(ERR_SVNOSET); /* sets "parse_warn" to TRUE */ } advancewindow(); if (!parse_warn) { if (SV_ETRAP != svn_data[index].opcode && SV_ZTRAP != svn_data[index].opcode) { /* Setting of $ZTRAP or $ETRAP must go through opp_svput because they * may affect the stack pointer. All others directly to op_svput(). */ put = maketriple(OC_SVPUT); } else put = maketriple(OC_PSVPUT); put->operand[0] = put_ilit(svn_data[index].opcode); put->operand[1] = resptr; dqins(targchain.exorder.bl, exorder, put); } else { /* OC_RTERROR triple would have been inserted in curtchain by ins_errtriple * (invoked by stx_error). To maintain consistency with the "if" portion of * this code, we need to move this triple to the "targchain". */ tmp = curtchain->exorder.bl; /* corresponds to put_ilit(FALSE) in ins_errtriple */ tmp = tmp->exorder.bl; /* corresponds to put_ilit(in_error) in ins_errtriple */ tmp = tmp->exorder.bl; /* corresponds to newtriple(OC_RTERROR) in ins_errtriple */ assert(OC_RTERROR == tmp->opcode); dqdel(tmp, exorder); dqins(targchain.exorder.bl, exorder, tmp); CHKTCHAIN(&targchain); } break; } /* Only 4 function names allowed on left side: $[Z]Piece and $[Z]Extract */ index = namelook(fun_index, fun_names, window_ident.addr, window_ident.len); if (0 > index) { STX_ERROR_WARN(ERR_INVFCN); /* sets "parse_warn" to TRUE */ /* OC_RTERROR triple would have been inserted in "curtchain" by ins_errtriple * (invoked by stx_error). We need to switch it to "targchain" to be consistent * with every other codepath in this module. 
*/ tmp = curtchain->exorder.bl; /* corresponds to put_ilit(FALSE) in ins_errtriple */ tmp = tmp->exorder.bl; /* corresponds to put_ilit(in_error) in ins_errtriple */ tmp = tmp->exorder.bl; /* corresponds to newtriple(OC_RTERROR) in ins_errtriple */ assert(OC_RTERROR == tmp->opcode); dqdel(tmp, exorder); dqins(targchain.exorder.bl, exorder, tmp); CHKTCHAIN(&targchain); advancewindow(); /* skip past the function name */ advancewindow(); /* skip past the left paren */ /* Parse the remaining arguments until corresponding RIGHT-PAREN/SPACE/EOL is reached */ if (!parse_until_rparen_or_space()) SYNTAX_ERROR_NOREPORT_HERE; } else { switch(fun_data[index].opcode) { case OC_FNPIECE: setop = OC_SETPIECE; break; case OC_FNEXTRACT: is_extract = TRUE; setop = OC_SETEXTRACT; break; case OC_FNZPIECE: setop = OC_SETZPIECE; break; case OC_FNZEXTRACT: is_extract = TRUE; setop = OC_SETZEXTRACT; break; default: SYNTAX_ERROR(ERR_VAREXPECTED); } advancewindow(); advancewindow(); /* Although we see the get (target) variable first, we need to save its processing * on another chain -- the targchain -- because the retrieval of the target is bypassed * and the naked indicator is not reset if the first/last parameters are not set in a * logical manner (must be > 0 and first <= last). So the evaluation order is * delimiter (if $piece), first, last, RHS of the set and then the target if applicable. * Set up primary action triple now since it is ref'd by the put triples generated below. */ s = maketriple(setop); /* Even for SET[Z]PIECE and SET[Z]EXTRACT, the SETxxxxx opcodes * do not do the final store, they only create the final value TO be * stored so generate the triples that will actually do the store now. * Note we are still building triples on the original curtchain. */ switch (window_token) { case TK_IDENT: if (!lvn(&v, OC_PUTINDX, 0)) SYNTAX_ERROR(ERR_VAREXPECTED); if (OC_PUTINDX == v.oprval.tref->opcode) { dqdel(v.oprval.tref, exorder); dqins(targchain.exorder.bl, exorder, v.oprval.tref); sub = v.oprval.tref; put_oc = OC_PUTINDX; if (TREF(temp_subs)) m_set_create_temporaries(sub, put_oc); } get = maketriple(OC_FNGET); get->operand[0] = v; put = maketriple(OC_STO); put->operand[0] = v; put->operand[1] = put_tref(s); break; case TK_ATSIGN: if (!indirection(&v)) SYNTAX_ERROR(ERR_VAREXPECTED); get = maketriple(OC_INDGET); get->operand[0] = v; get->operand[1] = put_str(0, 0); put = maketriple(OC_INDSET); put->operand[0] = v; put->operand[1] = put_tref(s); break; case TK_CIRCUMFLEX: s1 = curtchain->exorder.bl; if (!gvn()) SYNTAX_ERROR_NOREPORT_HERE; for (sub = curtchain->exorder.bl; sub != s1 ; sub = sub->exorder.bl) { put_oc = sub->opcode; if ((OC_GVNAME == put_oc) || (OC_GVNAKED == put_oc) || (OC_GVEXTNAM == put_oc)) break; } assert((OC_GVNAME == put_oc) || (OC_GVNAKED == put_oc) || (OC_GVEXTNAM == put_oc)); dqdel(sub, exorder); dqins(targchain.exorder.bl, exorder, sub); if (TREF(temp_subs)) m_set_create_temporaries(sub, put_oc); get = maketriple(OC_FNGVGET); get->operand[0] = put_str(0, 0); put = maketriple(OC_GVPUT); put->operand[0] = put_tref(s); break; default: SYNTAX_ERROR(ERR_VAREXPECTED); } s->operand[0] = put_tref(get); /* Code to fetch args for target triple is on targchain. Put get there now too.
*/ dqins(targchain.exorder.bl, exorder, get); CHKTCHAIN(&targchain); if (!is_extract) { /* Set $[z]piece */ delimiter = newtriple(OC_PARAMETER); s->operand[1] = put_tref(delimiter); first = newtriple(OC_PARAMETER); delimiter->operand[1] = put_tref(first); /* Process delimiter string ($[z]piece only) */ if (TK_COMMA != window_token) SYNTAX_ERROR(ERR_COMMA); advancewindow(); if (!strexpr(&delimval)) SYNTAX_ERROR_NOREPORT_HERE; assert(TRIP_REF == delimval.oprclass); } else { /* Set $[Z]Extract */ first = newtriple(OC_PARAMETER); s->operand[1] = put_tref(first); } /* Process first integer value */ if (window_token != TK_COMMA) firstval = put_ilit(1); else { advancewindow(); if (!intexpr(&firstval)) SYNTAX_ERROR(ERR_COMMA); assert(firstval.oprclass == TRIP_REF); } first->operand[0] = firstval; if (first_is_lit = (OC_ILIT == firstval.oprval.tref->opcode)) { assert(ILIT_REF == firstval.oprval.tref->operand[0].oprclass); first_val_lit = firstval.oprval.tref->operand[0].oprval.ilit; } if (TK_COMMA != window_token) { /* There is no "last" value. Only if 1 char literal delimiter and * no "last" value can we generate shortcut code to op_set[z]p1 entry * instead of op_set[z]piece. Note if UTF8 mode is in effect, then this * optimization applies if the literal is one unicode char which may in * fact be up to 4 bytes but will still be passed as a single unsigned * integer. */ if (!is_extract) { delim_mval = &delimval.oprval.tref->operand[0].oprval.mlit->v; valid_char = TRUE; /* Basic assumption unless proven otherwise */ if (delimval.oprval.tref->opcode == OC_LIT && (1 == (gtm_utf8_mode ? MV_FORCE_LEN(delim_mval) : delim_mval->str.len))) { /* Single char delimiter for set $piece */ UNICODE_ONLY( if (gtm_utf8_mode) { /* We have a supposed single char delimiter but it * must be a valid utf8 char to be used by * op_setp1() and MV_FORCE_LEN won't tell us that. */ valid_char = UTF8_VALID(delim_mval->str.addr, (delim_mval->str.addr + delim_mval->str.len), delimlen); if (!valid_char && !badchar_inhibit) UTF8_BADCHAR(0, delim_mval->str.addr, (delim_mval->str.addr + delim_mval->str.len), 0, NULL); } ); if (valid_char || 1 == delim_mval->str.len) { /* This reference to a one character literal or a single * byte invalid utf8 character that needs to be turned into * an explicitly formatted integer literal instead */ unichar.unichar_val = 0; if (!gtm_utf8_mode) { /* Single byte delimiter */ assert(1 == delim_mval->str.len); UNIX_ONLY(s->opcode = OC_SETZP1); VMS_ONLY(s->opcode = OC_SETP1); unichar.unibytes_val[0] = *delim_mval->str.addr; } UNICODE_ONLY( else { /* Potentially multiple bytes in one int */ assert(SIZEOF(int) >= delim_mval->str.len); memcpy(unichar.unibytes_val, delim_mval->str.addr, delim_mval->str.len); s->opcode = OC_SETP1; } ); delimlit = (mint)unichar.unichar_val; delimiter->operand[0] = put_ilit(delimlit); delim1char = TRUE; } } }
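/* The one-character-delimiter optimization above passes the $PIECE delimiter to op_set[z]p1 as a
 * single unsigned int: up to four UTF-8 bytes are copied into a union and reinterpreted as an
 * integer literal. A stand-alone sketch of that packing; as with the union in m_set, the resulting
 * integer's byte order is simply whatever the host uses. */
#include <stdio.h>
#include <string.h>

union
{
	unsigned int	unichar_val;
	unsigned char	unibytes_val[4];
} unichar;

int main(void)
{
	const char *delim = "\xe2\x82\xac";	/* U+20AC EURO SIGN: three UTF-8 bytes */
	size_t len = strlen(delim);

	if (len <= sizeof(unichar.unibytes_val))
	{	/* zero first so unused high bytes are deterministic, then pack the char */
		unichar.unichar_val = 0;
		memcpy(unichar.unibytes_val, delim, len);
		printf("delimiter packed as int literal: %#x\n", unichar.unichar_val);
	}
	return 0;
}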
void preemptive_db_clnup(int preemptive_severe) { sgmnt_addrs *csa; sgm_info *si; gd_region *r_top, *reg; gd_addr *addr_ptr; DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; /* Clear "inctn_opcode" global variable now that any in-progress transaction is aborted at this point. * Not doing so would cause future calls to "t_end" to get confused and skip writing logical jnl recs * and instead incorrectly write an INCTN record (GTM-8425). */ if (bml_save_dollar_tlevel) { assert(!dollar_tlevel); dollar_tlevel = bml_save_dollar_tlevel; bml_save_dollar_tlevel = 0; } assert(!dollar_tlevel || (inctn_invalid_op == inctn_opcode) || (inctn_bmp_mark_free_gtm == inctn_opcode)); assert(dollar_tlevel || update_trans || (inctn_invalid_op == inctn_opcode)); inctn_opcode = inctn_invalid_op; if (!dollar_tlevel && update_trans) { /* It's possible we hit an error in the middle of an update, at which point we have * a valid clue and non-NULL cse. However, this causes problems for subsequent * transactions (see comment in t_begin). In particular we could end up pinning buffers * unnecessarily. So clear the cse of any histories that may have been active during the update. */ CLEAR_CSE(gv_target); if ((NULL != gv_target) && (NULL != gv_target->gd_csa)) { CLEAR_CSE(gv_target->gd_csa->dir_tree); GTMTRIG_ONLY(CLEAR_CSE(gv_target->gd_csa->hasht_tree)); } /* Resetting this is necessary to avoid blowing an assert in t_begin that it is 0 at the start of a transaction. */ update_trans = 0; } if (INVALID_GV_TARGET != reset_gv_target) { if (SUCCESS != preemptive_severe && INFO != preemptive_severe) { /* We know of a few cases in Unix where gv_target and gv_currkey could be out of sync at this point. * a) If we are inside trigger code which in turn does an update that does * reads of ^#t global and ends up in a restart. This restart would * in turn do a rts_error_csa(TPRETRY) which would invoke mdb_condition_handler * that would in turn invoke preemptive_db_clnup which invokes this macro. * In this tp restart case though, it is ok for gv_target and gv_currkey * to be out of sync because they are going to be reset by tp_clean_up anyways. * So skip the dbg-only in-sync check. * b) If we are in gvtr_init reading the ^#t global and detect an error (e.g. TRIGINVCHSET) * gv_target after the reset would be pointing to a regular global whereas gv_currkey * would be pointing to ^#t. It is ok to be out-of-sync since in this case, we expect * mdb_condition_handler to be calling us. That has code to reset gv_currkey (and * cs_addrs/cs_data etc.) to reflect gv_target (i.e. get them back in sync). * Therefore in Unix we pass SKIP_GVT_GVKEY_CHECK to skip the gvtarget/gvcurrkey out-of-sync check * in RESET_GV_TARGET. In VMS we pass DO_GVT_GVKEY_CHECK as we don't yet know of an out-of-sync situation. */ RESET_GV_TARGET(UNIX_ONLY(SKIP_GVT_GVKEY_CHECK) VMS_ONLY(DO_GVT_GVKEY_CHECK)); } } need_kip_incr = FALSE; /* in case we got an error in t_end (e.g. GBLOFLOW), don't want this global variable to get carried over to the next non-TP transaction that this process does (e.g. inside an error trap).
*/ TREF(expand_prev_key) = FALSE; /* reset global (in case it is TRUE) so it does not get carried over to future operations */ if (dollar_tlevel) { for (si = first_sgm_info; si != NULL; si = si->next_sgm_info) { if (NULL != si->kip_csa) { csa = si->tp_csa; assert(si->tp_csa == si->kip_csa); PROBE_DECR_KIP(csa->hdr, csa, si->kip_csa); } } } else if (NULL != kip_csa && (NULL != kip_csa->hdr) && (NULL != kip_csa->nl)) PROBE_DECR_KIP(kip_csa->hdr, kip_csa, kip_csa); if (IS_DSE_IMAGE) { /* Release crit on any region that was obtained for the current erroring DSE operation. * Take care NOT to release crits obtained by a previous CRIT -SEIZE command. */ for (addr_ptr = get_next_gdr(NULL); addr_ptr; addr_ptr = get_next_gdr(addr_ptr)) { for (reg = addr_ptr->regions, r_top = reg + addr_ptr->n_regions; reg < r_top; reg++) { if (reg->open && !reg->was_open) { csa = &FILE_INFO(reg)->s_addrs; assert(csa->hold_onto_crit || !csa->dse_crit_seize_done); assert(!csa->hold_onto_crit || csa->now_crit); if (csa->now_crit && (!csa->hold_onto_crit || !csa->dse_crit_seize_done)) { rel_crit(reg); csa->hold_onto_crit = FALSE; t_abort(reg, csa); /* cancel mini-transaction if any in progress */ } } } } } }
void mu_int_reg(gd_region *reg, boolean_t *return_value) { boolean_t read_only, was_crit; freeze_status status; node_local_ptr_t cnl; sgmnt_addrs *csa; sgmnt_data_ptr_t csd; # ifdef DEBUG boolean_t need_to_wait = FALSE; int trynum; uint4 curr_wbox_seq_num; # endif sgmnt_data *csd_copy_ptr; gd_segment *seg; int gtmcrypt_errno; *return_value = FALSE; UNIX_ONLY(jnlpool_init_needed = TRUE); ESTABLISH(mu_int_reg_ch); if (dba_usr == reg->dyn.addr->acc_meth) { util_out_print("!/Can't integ region !AD; not GDS format", TRUE, REG_LEN_STR(reg)); mu_int_skipreg_cnt++; return; } gv_cur_region = reg; if (reg_cmcheck(reg)) { util_out_print("!/Can't integ region across network", TRUE); mu_int_skipreg_cnt++; return; } gvcst_init(gv_cur_region); if (gv_cur_region->was_open) { /* already open under another name */ gv_cur_region->open = FALSE; return; } change_reg(); csa = &FILE_INFO(gv_cur_region)->s_addrs; cnl = csa->nl; csd = csa->hdr; read_only = gv_cur_region->read_only; assert(NULL != mu_int_master); /* Ensure that we don't see an increase in the file header and master map size compared to its maximum values */ assert(SGMNT_HDR_LEN >= SIZEOF(sgmnt_data) && (MASTER_MAP_SIZE_MAX >= MASTER_MAP_SIZE(csd))); /* ONLINE INTEG if asked for explicitly by specifying -ONLINE is an error if the db has partial V4 blocks. * However, if -ONLINE is not explicitly specified but rather assumed implicitly (as default for -REG) * then turn off ONLINE INTEG for this region and continue as if -NOONLINE was specified */ # ifdef GTM_SNAPSHOT if (!csd->fully_upgraded) { ointeg_this_reg = FALSE; /* Turn off ONLINE INTEG for this region */ if (online_specified) { gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_SSV4NOALLOW, 2, DB_LEN_STR(gv_cur_region)); util_out_print(NO_ONLINE_ERR_MSG, TRUE); mu_int_skipreg_cnt++; return; } } # endif if (!ointeg_this_reg || read_only) { status = region_freeze(gv_cur_region, TRUE, FALSE, TRUE); switch (status) { case REG_ALREADY_FROZEN: UNIX_ONLY(if (csa->read_only_fs) break); util_out_print("!/Database for region !AD is already frozen, not integing", TRUE, REG_LEN_STR(gv_cur_region)); mu_int_skipreg_cnt++; return; case REG_HAS_KIP: /* We have already waited for KIP to reset. This time do not wait for KIP */ status = region_freeze(gv_cur_region, TRUE, FALSE, FALSE); if (REG_ALREADY_FROZEN == status) { UNIX_ONLY(if (csa->read_only_fs) break); util_out_print("!/Database for region !AD is already frozen, not integing", TRUE, REG_LEN_STR(gv_cur_region)); mu_int_skipreg_cnt++; return; } break; case REG_FREEZE_SUCCESS: break; default: assert(FALSE); }
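/* mu_int_reg() freezes in two phases: a first region_freeze() that waits for kills-in-progress
 * (KIP) to drain, then, if REG_HAS_KIP comes back, a second attempt that does not wait again. A
 * sketch of that retry shape with stand-in status codes and a fake region_freeze(); the real
 * routine takes the region and several flags rather than this single parameter. */
#include <stdio.h>

typedef enum { REG_FREEZE_SUCCESS, REG_ALREADY_FROZEN, REG_HAS_KIP } freeze_status;

static freeze_status region_freeze(int wait_for_kip)
{
	static int kip_pending = 1;	/* pretend one kill-in-progress exists at first */

	if (kip_pending && wait_for_kip)
	{
		kip_pending = 0;	/* we waited it out... */
		return REG_HAS_KIP;	/* ...but report that we had to */
	}
	return REG_FREEZE_SUCCESS;
}

int main(void)
{
	freeze_status status = region_freeze(1);	/* phase 1: wait for KIP to reset */

	if (REG_HAS_KIP == status)
		status = region_freeze(0);		/* phase 2: this time do not wait */
	printf("freeze %s\n", REG_FREEZE_SUCCESS == status ? "succeeded" : "skipped");
	return 0;
}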
void gv_rundown(void) { gd_region *r_top, *r_save, *r_local; gd_addr *addr_ptr; sgm_info *si; int4 rundown_status = EXIT_NRM; /* if gds_rundown went smoothly */ # ifdef VMS vms_gds_info *gds_info; # elif UNIX unix_db_info *udi; # endif #if defined(DEBUG) && defined(UNIX) sgmnt_addrs *csa; # endif DCL_THREADGBL_ACCESS; SETUP_THREADGBL_ACCESS; r_save = gv_cur_region; /* Save for possible core dump */ gvcmy_rundown(); ENABLE_AST if (pool_init) rel_lock(jnlpool.jnlpool_dummy_reg); for (addr_ptr = get_next_gdr(NULL); addr_ptr; addr_ptr = get_next_gdr(addr_ptr)) { for (r_local = addr_ptr->regions, r_top = r_local + addr_ptr->n_regions; r_local < r_top; r_local++) { if (r_local->open && !r_local->was_open && dba_cm != r_local->dyn.addr->acc_meth) { /* Rundown has already occurred for GT.CM client regions through gvcmy_rundown() above. * Hence the (dba_cm != ...) check in the if above. Note that for GT.CM client regions, * region->open is TRUE although cs_addrs is NULL. */ # if defined(DEBUG) && defined(UNIX) if (is_jnlpool_creator && ANTICIPATORY_FREEZE_AVAILABLE && TREF(gtm_test_fake_enospc)) { /* Clear ENOSPC faking now that we are running down */ csa = REG2CSA(r_local); if (csa->nl->fake_db_enospc || csa->nl->fake_jnl_enospc) { send_msg_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_TEXT, 2, DB_LEN_STR(r_local), ERR_TEXT, 2, LEN_AND_LIT("Resetting fake_db_enospc and fake_jnl_enospc")); csa->nl->fake_db_enospc = FALSE; csa->nl->fake_jnl_enospc = FALSE; } } # endif gv_cur_region = r_local; tp_change_reg(); UNIX_ONLY(rundown_status |=) gds_rundown(); /* Now that gds_rundown is done, free up the memory associated with the region. * Ideally the following memory freeing code should go to gds_rundown, but * GT.CM calls gds_rundown() and we want to reuse memory for GT.CM. */ if (NULL != cs_addrs) { if (NULL != cs_addrs->dir_tree) FREE_CSA_DIR_TREE(cs_addrs); if (cs_addrs->sgm_info_ptr) { si = cs_addrs->sgm_info_ptr; /* It is possible we got interrupted before initializing all fields of "si" * completely so account for NULL values while freeing/releasing those fields. 
*/ assert((si->tp_csa == cs_addrs) || (NULL == si->tp_csa)); if (si->jnl_tail) { CAREFUL_FREEUP_BUDDY_LIST(si->format_buff_list); CAREFUL_FREEUP_BUDDY_LIST(si->jnl_list); } CAREFUL_FREEUP_BUDDY_LIST(si->recompute_list); CAREFUL_FREEUP_BUDDY_LIST(si->new_buff_list); CAREFUL_FREEUP_BUDDY_LIST(si->tlvl_info_list); CAREFUL_FREEUP_BUDDY_LIST(si->tlvl_cw_set_list); CAREFUL_FREEUP_BUDDY_LIST(si->cw_set_list); if (NULL != si->blks_in_use) { free_hashtab_int4(si->blks_in_use); free(si->blks_in_use); si->blks_in_use = NULL; } if (si->cr_array_size) { assert(NULL != si->cr_array); if (NULL != si->cr_array) free(si->cr_array); } if (NULL != si->first_tp_hist) free(si->first_tp_hist); free(si); } if (cs_addrs->jnl) { assert(&FILE_INFO(cs_addrs->jnl->region)->s_addrs == cs_addrs); if (cs_addrs->jnl->jnllsb) { UNIX_ONLY(assert(FALSE)); free(cs_addrs->jnl->jnllsb); } free(cs_addrs->jnl); } GTMCRYPT_ONLY( if (cs_addrs->encrypted_blk_contents) free(cs_addrs->encrypted_blk_contents); ) } assert(gv_cur_region->dyn.addr->file_cntl->file_info); VMS_ONLY( gds_info = (vms_gds_info *)gv_cur_region->dyn.addr->file_cntl->file_info; if (gds_info->xabpro) free(gds_info->xabpro); if (gds_info->xabfhc) free(gds_info->xabfhc); if (gds_info->nam) { free(gds_info->nam->nam$l_esa); free(gds_info->nam); } if (gds_info->fab) free(gds_info->fab); ) free(gv_cur_region->dyn.addr->file_cntl->file_info); free(gv_cur_region->dyn.addr->file_cntl); } r_local->open = r_local->was_open = FALSE; } }
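/* Both copies of gv_rundown() free the sgm_info fields defensively: the process may have been
 * interrupted before every field was initialized, so each pointer is tested before free() and
 * cleared afterwards. A minimal sketch of that NULL-tolerant teardown; the struct is a stand-in
 * for the real sgm_info. */
#include <stdlib.h>

typedef struct
{
	int	*blks_in_use;
	char	*cr_array;
	char	*first_tp_hist;
} sgm_info_sketch;

static void *careful_free(void *p)
{
	if (NULL != p)
		free(p);
	return NULL;	/* assign back so a later rundown cannot double-free */
}

int main(void)
{
	sgm_info_sketch si = { 0 };	/* interrupted init: most fields never got set up */

	si.cr_array = malloc(16);	/* only one allocation succeeded before the interrupt */
	si.blks_in_use = careful_free(si.blks_in_use);	/* NULL: quietly skipped */
	si.cr_array = careful_free(si.cr_array);	/* freed and cleared */
	si.first_tp_hist = careful_free(si.first_tp_hist);
	return 0;
}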
sm_uc_ptr_t t_qread(block_id blk, sm_int_ptr_t cycle, cache_rec_ptr_ptr_t cr_out) /* cycle is used in t_end to detect if the buffer has been refreshed since the t_qread */ { uint4 status, duint4, blocking_pid; cache_rec_ptr_t cr; bt_rec_ptr_t bt; bool clustered, was_crit; int dummy, lcnt, ocnt; cw_set_element *cse; off_chain chain1; register sgmnt_addrs *csa; register sgmnt_data_ptr_t csd; int4 dummy_errno; boolean_t already_built, is_mm, reset_first_tp_srch_status, set_wc_blocked; error_def(ERR_DBFILERR); error_def(ERR_BUFOWNERSTUCK); first_tp_srch_status = NULL; reset_first_tp_srch_status = FALSE; csa = cs_addrs; csd = csa->hdr; INCR_DB_CSH_COUNTER(csa, n_t_qreads, 1); is_mm = (dba_mm == csd->acc_meth); assert((t_tries < CDB_STAGNATE) || csa->now_crit); if (0 < dollar_tlevel) { assert(sgm_info_ptr); if (0 != sgm_info_ptr->cw_set_depth) { chain1 = *(off_chain *)&blk; if (1 == chain1.flag) { assert(sgm_info_ptr->cw_set_depth); if ((int)chain1.cw_index < sgm_info_ptr->cw_set_depth) tp_get_cw(sgm_info_ptr->first_cw_set, (int)chain1.cw_index, &cse); else { assert(FALSE == csa->now_crit); rdfail_detail = cdb_sc_blknumerr; return (sm_uc_ptr_t)NULL; } } else { first_tp_srch_status = (srch_blk_status *)lookup_hashtab_ent(sgm_info_ptr->blks_in_use, (void *)blk, &duint4); ASSERT_IS_WITHIN_TP_HIST_ARRAY_BOUNDS(first_tp_srch_status, sgm_info_ptr); cse = first_tp_srch_status ? first_tp_srch_status->ptr : NULL; } assert(!cse || !cse->high_tlevel); if (cse) { /* transaction has modified the sought after block */ assert(gds_t_writemap != cse->mode); if (FALSE == cse->done) { /* out of date, so make it current */ already_built = (NULL != cse->new_buff); gvcst_blk_build(cse, (uchar_ptr_t)cse->new_buff, 0); assert(cse->blk_target); if (!already_built && !chain1.flag) { assert(first_tp_srch_status && (is_mm || first_tp_srch_status->cr) && first_tp_srch_status->buffaddr); if (first_tp_srch_status->tn <= ((blk_hdr_ptr_t)(first_tp_srch_status->buffaddr))->tn) { assert(CDB_STAGNATE > t_tries); rdfail_detail = cdb_sc_blkmod; /* should this be something else */ TP_TRACE_HIST_MOD(blk, gv_target, tp_blkmod_t_qread, cs_data, first_tp_srch_status->tn, ((blk_hdr_ptr_t)(first_tp_srch_status->buffaddr))->tn, ((blk_hdr_ptr_t)(first_tp_srch_status->buffaddr))->levl); return (sm_uc_ptr_t)NULL; } if ((!is_mm) && (first_tp_srch_status->cycle != first_tp_srch_status->cr->cycle || first_tp_srch_status->blk_num != first_tp_srch_status->cr->blk)) { assert(CDB_STAGNATE > t_tries); rdfail_detail = cdb_sc_lostcr; /* should this be something else */ return (sm_uc_ptr_t)NULL; } if (certify_all_blocks && FALSE == cert_blk(gv_cur_region, blk, (blk_hdr_ptr_t)cse->new_buff, cse->blk_target->root)) GTMASSERT; } cse->done = TRUE; } *cycle = CYCLE_PVT_COPY; *cr_out = 0; return (sm_uc_ptr_t)cse->new_buff; } assert(!chain1.flag); } else first_tp_srch_status = (srch_blk_status *)lookup_hashtab_ent(sgm_info_ptr->blks_in_use, (void *)blk, &duint4); ASSERT_IS_WITHIN_TP_HIST_ARRAY_BOUNDS(first_tp_srch_status, sgm_info_ptr); if (!is_mm && first_tp_srch_status) { assert(first_tp_srch_status->cr && !first_tp_srch_status->ptr); if (first_tp_srch_status->cycle == first_tp_srch_status->cr->cycle) { *cycle = first_tp_srch_status->cycle; *cr_out = first_tp_srch_status->cr; first_tp_srch_status->cr->refer = TRUE; if (CDB_STAGNATE <= t_tries) /* mu_reorg doesn't use TP else should have an || for that */ CWS_INSERT(blk); return (sm_uc_ptr_t)first_tp_srch_status->buffaddr; } else { /* Block was already part of the read-set of this transaction, but got 
recycled. Allow for recycling. But update the first_tp_srch_status (for this blk) in the si->first_tp_hist array to reflect the new buffer, cycle and cache-record. Since we know those only at the end of t_qread, set a variable here that will enable the update before returning from t_qread(). */ reset_first_tp_srch_status = TRUE; } } } if ((blk >= csa->ti->total_blks) || (blk < 0)) { /* requested block out of range; could occur because of a concurrency conflict */ if ((&FILE_INFO(gv_cur_region)->s_addrs != csa) || (csd != cs_data)) GTMASSERT; assert(FALSE == csa->now_crit); rdfail_detail = cdb_sc_blknumerr; return (sm_uc_ptr_t)NULL; } if (is_mm) { *cycle = CYCLE_SHRD_COPY; *cr_out = 0; return (sm_uc_ptr_t)(mm_read(blk)); } assert(dba_bg == csd->acc_meth); assert(!first_tp_srch_status || !first_tp_srch_status->cr || first_tp_srch_status->cycle != first_tp_srch_status->cr->cycle); if (FALSE == (clustered = csd->clustered)) bt = NULL; was_crit = csa->now_crit; ocnt = 0; set_wc_blocked = FALSE; /* to indicate whether csd->wc_blocked was set to TRUE by us */ do { if (NULL == (cr = db_csh_get(blk))) { /* not in memory */ if (clustered && (NULL != (bt = bt_get(blk))) && (FALSE == bt->flushing)) bt = NULL; if (FALSE == csa->now_crit) { if (NULL != bt) { /* at this point, bt is not NULL only if clustered and flushing - wait no crit */ assert(clustered); wait_for_block_flush(bt, blk); /* try for no other node currently writing the block */ } if (csd->flush_trigger <= csa->nl->wcs_active_lvl && FALSE == gv_cur_region->read_only) JNL_ENSURE_OPEN_WCS_WTSTART(csa, gv_cur_region, 0, dummy_errno); /* a macro that dclast's wcs_wtstart() and checks for errors etc. */ grab_crit(gv_cur_region); cr = db_csh_get(blk); /* in case blk arrived before crit */ } if (clustered && (NULL != (bt = bt_get(blk))) && (TRUE == bt->flushing)) { /* Once crit, need to assure that if clustered, that flushing is [still] complete * If it isn't, we missed an entire WM cycle and have to wait for another node to finish */ wait_for_block_flush(bt, blk); /* ensure no other node currently writing the block */ } if (NULL == cr) { /* really not in memory - must get a new buffer */ assert(csa->now_crit); cr = db_csh_getn(blk); if (CR_NOTVALID == (sm_long_t)cr) { SET_TRACEABLE_VAR(cs_data->wc_blocked, TRUE); BG_TRACE_PRO_ANY(csa, wc_blocked_t_qread_db_csh_getn_invalid_blk); set_wc_blocked = TRUE; break; } assert(0 <= cr->read_in_progress); *cycle = cr->cycle; cr->tn = csa->ti->curr_tn; if (FALSE == was_crit) rel_crit(gv_cur_region); /* read outside of crit may be of a stale block but should be detected by t_end or tp_tend */ assert(0 == cr->dirty); assert(cr->read_in_progress >= 0); INCR_DB_CSH_COUNTER(csa, n_dsk_reads, 1); if (SS_NORMAL != (status = dsk_read(blk, GDS_REL2ABS(cr->buffaddr)))) { RELEASE_BUFF_READ_LOCK(cr); assert(was_crit == csa->now_crit); if (FUTURE_READ == status) { /* in cluster, block can be in the "future" with respect to the local history */ assert(TRUE == clustered); assert(FALSE == csa->now_crit); rdfail_detail = cdb_sc_future_read; /* t_retry forces the history up to date */ return (sm_uc_ptr_t)NULL; } rts_error(VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), status); } assert(0 <= cr->read_in_progress); assert(0 == cr->dirty); cr->r_epid = 0; RELEASE_BUFF_READ_LOCK(cr); assert(-1 <= cr->read_in_progress); *cr_out = cr; assert(was_crit == csa->now_crit); if (reset_first_tp_srch_status) { /* keep the parentheses for the if (although single line) since the following is a macro */
RESET_FIRST_TP_SRCH_STATUS(first_tp_srch_status, cr, *cycle); } return (sm_uc_ptr_t)GDS_REL2ABS(cr->buffaddr); } else if ((FALSE == was_crit) && (BAD_LUCK_ABOUNDS > ocnt)) { assert(TRUE == csa->now_crit); assert(csa->nl->in_crit == process_id); rel_crit(gv_cur_region); } } if (CR_NOTVALID == (sm_long_t)cr) { SET_TRACEABLE_VAR(cs_data->wc_blocked, TRUE); BG_TRACE_PRO_ANY(csa, wc_blocked_t_qread_db_csh_get_invalid_blk); set_wc_blocked = TRUE; break; } for (lcnt = 1; ; lcnt++) { if (0 > cr->read_in_progress) { /* it's not being read */ if (clustered && (0 == cr->bt_index) && (cr->tn < ((th_rec *)((uchar_ptr_t)csa->th_base + csa->th_base->tnque.fl))->tn)) { /* can't rely on the buffer */ cr->cycle++; /* increment cycle whenever blk number changes (tp_hist depends on this) */ cr->blk = CR_BLKEMPTY; break; } *cycle = cr->cycle; *cr_out = cr; VMS_ONLY( /* If we were doing the db_csh_get() above (in t_qread itself) and located the cache-record * which, before coming here and taking a copy of cr->cycle a few lines above, was made an * older twin by another process in bg_update (note this can happen in VMS only) which has * already incremented the cycle, we will end up having a copy of the old cache-record with * its incremented cycle number and hence will succeed in tp_hist validation if we return * this <cr,cycle> combination although we don't want to since this "cr" is not current for * the given block as of now. Note that the "indexmod" optimization in tp_tend() relies on * an accurate intermediate validation by tp_hist() which in turn relies on the <cr,cycle> * value returned by t_qread() to be accurate for a given blk at the current point in time. * We detect the older-twin case by the following check. Note that here we depend on the * fact that bg_update() sets cr->bt_index to 0 before incrementing cr->cycle. * Given that order, cr->bt_index can be guaranteed to be 0 if we read the incremented cycle */ if (cr->twin && (0 == cr->bt_index)) break; ) if (cr->blk != blk) break; if (was_crit != csa->now_crit) rel_crit(gv_cur_region); assert(was_crit == csa->now_crit); if (reset_first_tp_srch_status) { /* keep the parentheses for the if (although single line) since the following is a macro */ RESET_FIRST_TP_SRCH_STATUS(first_tp_srch_status, cr, *cycle); } /* Note that at this point we expect t_qread() to return a <cr,cycle> combination that * corresponds to "blk" passed in. It is crucial to get an accurate value for both the fields * since tp_hist() relies on this for its intermediate validation.
				 */
				return (sm_uc_ptr_t)GDS_ANY_REL2ABS(csa, cr->buffaddr);
			}
			if (blk != cr->blk)
				break;
			if (lcnt >= BUF_OWNER_STUCK && (0 == (lcnt % BUF_OWNER_STUCK)))
			{
				if (FALSE == csa->now_crit)
					grab_crit(gv_cur_region);
				if (cr->read_in_progress < -1)
				{	/* outside of design; clear to a known state */
					BG_TRACE_PRO(t_qread_out_of_design);
					INTERLOCK_INIT(cr);
					assert(0 == cr->r_epid);
					cr->r_epid = 0;
				} else if (cr->read_in_progress >= 0)
				{
					BG_TRACE_PRO(t_qread_buf_owner_stuck);
					if (0 != (blocking_pid = cr->r_epid))
					{
						if (FALSE == is_proc_alive(blocking_pid, cr->image_count))
						{	/* process gone: release that process's lock */
							assert(0 == cr->bt_index);
							if (cr->bt_index)
							{
								SET_TRACEABLE_VAR(csd->wc_blocked, TRUE);
								BG_TRACE_PRO_ANY(csa, wc_blocked_t_qread_bad_bt_index1);
								set_wc_blocked = TRUE;
								break;
							}
							cr->cycle++;	/* increment cycle for blk number changes (for tp_hist) */
							cr->blk = CR_BLKEMPTY;
							RELEASE_BUFF_READ_LOCK(cr);
						} else
						{
							rel_crit(gv_cur_region);
							send_msg(VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region));
							send_msg(VARLSTCNT(9) ERR_BUFOWNERSTUCK, 7, process_id, blocking_pid,
								cr->blk, cr->blk, (lcnt / BUF_OWNER_STUCK),
								cr->read_in_progress, cr->rip_latch.latch_pid);
							if ((4 * BUF_OWNER_STUCK) <= lcnt)
								GTMASSERT;
							/* Kickstart the process taking a long time in case it was suspended */
							UNIX_ONLY(continue_proc(blocking_pid));
						}
					}
				} else
				{	/* process stopped before it could set r_epid */
					assert(0 == cr->bt_index);
					if (cr->bt_index)
					{
						SET_TRACEABLE_VAR(csd->wc_blocked, TRUE);
						BG_TRACE_PRO_ANY(csa, wc_blocked_t_qread_bad_bt_index2);
						set_wc_blocked = TRUE;
						break;
					}
					cr->cycle++;	/* increment cycle for blk number changes (for tp_hist) */
					cr->blk = CR_BLKEMPTY;
					RELEASE_BUFF_READ_LOCK(cr);
					if (cr->read_in_progress < -1)	/* race: the lock was also released since the r_epid check */
						LOCK_BUFF_FOR_READ(cr, dummy);
				}
				if (was_crit != csa->now_crit)
					rel_crit(gv_cur_region);
			} else
				wcs_sleep(lcnt);
		}
		if (set_wc_blocked)	/* cannot use csd->wc_blocked here as we might not necessarily have crit */
			break;
		ocnt++;
		if (BAD_LUCK_ABOUNDS <= ocnt)
		{
			if (BAD_LUCK_ABOUNDS < ocnt || csa->now_crit)
			{
				rel_crit(gv_cur_region);
				GTMASSERT;
			}
			if (FALSE == csa->now_crit)
				grab_crit(gv_cur_region);
		}
	} while (TRUE);
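/* Illustrative sketch (not GT.M source): the loop above implements an optimistic read.
 * t_qread() hands back a <cr,cycle> pair, the caller uses the buffer without necessarily
 * holding crit, and a later validation step (tp_hist()/t_end() in GT.M) compares the saved
 * cycle against the cache-record's current one. Everything below (qread_buf_t, qread_hist_t,
 * sample_block, validate_block) is hypothetical and exists only to show the shape of that
 * protocol.
 */
typedef struct
{
	volatile unsigned int	cycle;		/* bumped whenever the buffer is repointed to a new block */
	volatile int		blk;		/* block number the buffer currently holds */
	char			data[512];	/* block image */
} qread_buf_t;

typedef struct
{
	unsigned int	cycle;	/* cycle observed at read time */
	int		blk;	/* block we believe we read */
} qread_hist_t;

/* Copy the block out and record the version we saw; a failure here means "restart" */
static int sample_block(qread_buf_t *buf, int blk, char *out, qread_hist_t *hist)
{
	int	i;

	if (buf->blk != blk)
		return -1;		/* buffer already recycled for another block */
	hist->cycle = buf->cycle;	/* save the version BEFORE copying the data */
	hist->blk = blk;
	for (i = 0; i < (int)sizeof(buf->data); i++)
		out[i] = buf->data[i];
	return 0;
}

/* Deferred validation: succeeds only if no writer recycled the buffer since sample_block().
 * This relies on the writer-side rule quoted above: the cycle is incremented whenever the
 * buffer is repointed to a different block.
 */
static int validate_block(qread_buf_t *buf, qread_hist_t *hist)
{
	return (buf->blk == hist->blk) && (buf->cycle == hist->cycle);
}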
/* Return the number of regions (including the jnlpool dummy region) on which we hold or are acquiring crit, or are in_wtstart.
 * ** NOTE ** This routine is called from signal handlers and is thus called asynchronously.
 * If the CRIT_IN_COMMIT bit is set, we check if in the middle of a commit (PHASE1 inside crit or PHASE2 outside crit) on some region.
 * If the CRIT_RELEASE bit is set, we release crit on region(s) that:
 *	1) we hold crit on (neither CRIT_IN_COMMIT nor CRIT_NOT_TRANS_REG is specified)
 *	2) are part of the current transaction, except those regions that are marked as valid to have crit in
 *	   by virtue of their crit_check_cycle value being the same as crit_deadlock_check_cycle.
 * Note: CRIT_RELEASE implies CRIT_ALL_REGIONS.
 * If the CRIT_ALL_REGIONS bit is set, go through the entire list of regions.
 */
uint4 have_crit(uint4 crit_state)
{
	gd_region	*r_top, *r_local;
	gd_addr		*addr_ptr;
	sgmnt_addrs	*csa;
	uint4		crit_reg_cnt = 0;

	/* in order to properly release the necessary regions, CRIT_RELEASE implies going through all the regions */
	if (crit_state & CRIT_RELEASE)
	{
		UNIX_ONLY(assert(!jgbl.onlnrlbk));	/* should not request crit to be released if online rollback */
		crit_state |= CRIT_ALL_REGIONS;
	}
	if (0 != crit_count)
	{
		crit_reg_cnt++;
		if (0 == (crit_state & CRIT_ALL_REGIONS))
			return crit_reg_cnt;
	}
	for (addr_ptr = get_next_gdr(NULL); addr_ptr; addr_ptr = get_next_gdr(addr_ptr))
	{
		for (r_local = addr_ptr->regions, r_top = r_local + addr_ptr->n_regions; r_local < r_top; r_local++)
		{
			if (r_local->open && !r_local->was_open)
			{
				csa = &FILE_INFO(r_local)->s_addrs;
				if (NULL != csa)
				{
					if (csa->now_crit)
					{
						crit_reg_cnt++;
						/* It is possible that if DSE has done a CRIT REMOVE and stolen our crit, it
						 * could be given to someone else, which would cause this test to fail. The
						 * current thinking is that the state DSE put this process in is no longer
						 * viable and it should die at the earliest opportunity, there being no way
						 * to know if that is what happened anyway.
						 */
						if (csa->nl->in_crit != process_id)
							GTMASSERT;
						/* If we are releasing (all) regions with critical section, or if special
						 * TP case, release if the cycle number doesn't match, meaning this is a
						 * region we should not hold crit in (even if it is part of tp_reg_list).
						 */
						if ((0 != (crit_state & CRIT_RELEASE))
							&& (0 == (crit_state & CRIT_NOT_TRANS_REG)
								|| crit_deadlock_check_cycle != csa->crit_check_cycle))
						{
							assert(FALSE);
							assert(!csa->hold_onto_crit);
							rel_crit(r_local);
							send_msg(VARLSTCNT(8) ERR_MUTEXRELEASED, 6, process_id, process_id,
								DB_LEN_STR(r_local), dollar_tlevel, t_tries);
						}
						if (0 == (crit_state & CRIT_ALL_REGIONS))
							return crit_reg_cnt;
					}
					/* In-commit crit is defined as the time from when early_tn becomes curr_tn + 1
					 * up to when t_commit_crit is set to FALSE. Note that the first check should be
					 * done only if we hold crit, as otherwise we could see inconsistent values.
					 */
					if ((crit_state & CRIT_IN_COMMIT)
						&& (csa->now_crit && (csa->ti->early_tn != csa->ti->curr_tn) || csa->t_commit_crit))
					{
						crit_reg_cnt++;
						if (0 == (crit_state & CRIT_ALL_REGIONS))
							return crit_reg_cnt;
					}
					if ((crit_state & CRIT_IN_WTSTART) && csa->in_wtstart)
					{
						crit_reg_cnt++;
						if (0 == (crit_state & CRIT_ALL_REGIONS))
							return crit_reg_cnt;
					}
				}
			}
		}
	}
	if (NULL != jnlpool.jnlpool_ctl)
	{
		csa = &FILE_INFO(jnlpool.jnlpool_dummy_reg)->s_addrs;
		if (NULL != csa && csa->now_crit)
		{
			crit_reg_cnt++;
			if (0 != (crit_state & CRIT_RELEASE))
			{
				assert(!csa->hold_onto_crit);
				rel_lock(jnlpool.jnlpool_dummy_reg);
			}
		}
	}
	return crit_reg_cnt;
}
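/* Hedged usage sketch (not GT.M source): one way an exit or signal path might drive
 * have_crit(), following the comment header above. The handler name and the deferral hook
 * (hypothetical_defer_exit) are made up for illustration; the flag combinations come from
 * the function's own code and comments.
 */
extern void hypothetical_defer_exit(void);

void hypothetical_exit_handler(void)
{
	/* Exiting in the middle of a commit could leave shared state inconsistent
	 * (PHASE1 inside crit, PHASE2 outside crit), so check for that first.
	 */
	if (0 != have_crit(CRIT_IN_COMMIT | CRIT_ALL_REGIONS))
	{
		hypothetical_defer_exit();
		return;
	}
	/* Otherwise release any crit this process still holds so a dying process does
	 * not block others; CRIT_RELEASE implies CRIT_ALL_REGIONS inside have_crit().
	 */
	(void)have_crit(CRIT_RELEASE);
}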