Example No. 1
/* mupfndfil.c
 * Description:
 *	For a given region, determine whether the corresponding database file is present.
 * Arguments:
 *	reg: Region's pointer
 *	filestr: Caller-allocated mstr; if non-NULL, the database file's full path is returned in it
 *	Returns: TRUE if region's database file is found
 *		 FALSE, otherwise
 * Side Effects:
 *	reg->dyn.addr->fname_len and reg->dyn.addr->fname are updated
 */
boolean_t mupfndfil(gd_region *reg, mstr *filestr)
{
	char 	filename[MAX_FN_LEN + 1];	/* + 1 leaves room for the terminating null byte copied at function end */
	mstr 	file, def, ret, *retptr;
	uint4	ustatus;

	switch(reg->dyn.addr->acc_meth)
	{
	case dba_mm:
	case dba_bg:
		break;
#	ifdef VMS
	case dba_usr:
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(6) ERR_NOUSERDB, 4, LEN_AND_LIT("specified function"), REG_LEN_STR(reg));
		return FALSE;		/* This is currently a VMS only possibility and has no corresponding test case */
#	endif
	default:
		util_out_print("REGION !AD has an unrecognized access method.", TRUE, REG_LEN_STR(reg));
		return FALSE;
	}
	file.addr = (char *)reg->dyn.addr->fname;
	file.len = reg->dyn.addr->fname_len;
#if defined(UNIX)
	file.addr[file.len] = 0;
	if (is_raw_dev(file.addr))
	{
		def.addr = DEF_NODBEXT;
		def.len = SIZEOF(DEF_NODBEXT) - 1;
	} else
	{
		def.addr = DEF_DBEXT;	/* UNIX needs to pass "*.dat" whereas reg->dyn.addr->defext has "DAT" */
		def.len = SIZEOF(DEF_DBEXT) - 1;
	}
#elif defined(VMS)
	def.addr = (char *)reg->dyn.addr->defext;
	def.len = SIZEOF(reg->dyn.addr->defext);
#endif
	if (NULL == filestr)
	{
		ret.len = SIZEOF(filename);
		ret.addr = filename;
		retptr = &ret;
	} else
		retptr = filestr;
	if (FILE_PRESENT != gtm_file_stat(&file, &def, retptr, FALSE, &ustatus))
	{
		if (!jgbl.mupip_journal)
		{	/* Do not print error messages in case of call from mur_open_files().
			 * Currently we use "jgbl.mupip_journal" to identify a call from mupip_recover code */
			util_out_print("REGION !AD's file !AD cannot be found.", TRUE, REG_LEN_STR(reg), LEN_AND_STR(file.addr));
			gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(1) ustatus);
		}
		return FALSE;
	}
	reg->dyn.addr->fname_len = retptr->len;
	memcpy(reg->dyn.addr->fname, retptr->addr, retptr->len + 1);
	return TRUE;
}
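The routine above reduces to: append a default extension when the file name lacks one, then test for existence. Below is a minimal standalone sketch of that idea using POSIX stat(); it is not GT.M code, and file_present/def_ext are purely illustrative names.

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/* Return 1 if "name" (with "def_ext" appended when it lacks an extension)
 * names an existing file, 0 otherwise; a tiny analogue of the
 * gtm_file_stat() call in mupfndfil().
 */
static int file_present(const char *name, const char *def_ext)
{
	struct stat	sb;
	char		path[4096];

	if (NULL == strrchr(name, '.'))
		snprintf(path, sizeof(path), "%s%s", name, def_ext);
	else
		snprintf(path, sizeof(path), "%s", name);
	return (0 == stat(path, &sb));	/* 1 (TRUE) if the file exists */
}

int main(void)
{
	printf("%d\n", file_present("mumps", ".dat"));	/* checks ./mumps.dat */
	return 0;
}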
Example No. 2
/* Errors out at the server end if gv_currkey is undefined and undef_inhibit is turned OFF; note this routine itself returns no value */
void	gvcmx_increment(mval *increment, mval *result)
{
	unsigned char	buff[MAX_ZWR_KEY_SZ], *end;
	mval		tmpmval;

	error_def(ERR_UNIMPLOP);
	error_def(ERR_TEXT);
	error_def(ERR_GVIS);

	if (!((link_info *)gv_cur_region->dyn.addr->cm_blk->usr)->server_supports_dollar_incr)
	{
		assert(dba_cm == gv_cur_region->dyn.addr->acc_meth); /* we should've covered all other access methods elsewhere */
		end = format_targ_key(buff, MAX_ZWR_KEY_SZ, gv_currkey, TRUE);
		rts_error(VARLSTCNT(14) ERR_UNIMPLOP, 0,
					ERR_TEXT, 2, LEN_AND_LIT("GT.CM server does not support $INCREMENT operation"),
					ERR_GVIS, 2, end - buff, buff,
					ERR_TEXT, 2, REG_LEN_STR(gv_cur_region));
	}
	/* gvcmz_doop() currently accepts only one argument.
	 * It serves as an input argument for SET.
	 * It serves as an output argument for GET etc.
	 * $INCR is unique in that it needs to pass the increment as input and expects the post-increment as output.
	 *
	 * In order to accomplish this without changing the gvcmz_doop() interface, we overload the one argument to
	 *	serve two purposes. It is an input argument until the message is sent to the server and then serves
	 *	as an output argument once the response arrives from the server. ("result" is used for this purpose)
	 * i.e.
	 *	to serve as increment            for client --> server message
	 *	to serve as post-increment value for server --> client message
	 */
	assert(MV_IS_NUMERIC(increment));	/* op_gvincr would have forced it to be a NUMERIC */
	MV_FORCE_STR(increment);		/* convert it to a string before sending it to gvcmz_doop */
	*result = *increment;
	gvcmz_doop(CMMS_Q_INCREMENT, CMMS_R_INCREMENT, result);
}
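The comment above describes overloading a single argument as both input (the increment) and output (the post-increment value). A self-contained sketch of that one-slot in/out convention, with purely illustrative names:

#include <stdio.h>

/* "arg" carries the increment on entry and is overwritten with the
 * post-increment value on return, so the interface needs only one slot.
 */
static void do_incr(long *arg, long *counter)
{
	*counter += *arg;	/* client --> server: apply the increment */
	*arg = *counter;	/* server --> client: return the post-increment value */
}

int main(void)
{
	long	counter = 10, arg = 5;

	do_incr(&arg, &counter);
	printf("%ld\n", arg);	/* prints 15 */
	return 0;
}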
Example No. 3
STATICFNDEF void cleanup_trigger_hash(char *trigvn, int trigvn_len, char **values, uint4 *value_len, stringkey *set_hash,
		stringkey *kill_hash, boolean_t del_kill_hash, int match_index)
{
	sgmnt_addrs		*csa;
	uint4			len;
	char			save_currkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key			*save_gv_currkey;
	gd_region		*save_gv_cur_region;
	gv_namehead		*save_gv_target;
	sgm_info		*save_sgm_info_ptr;
	mstr			trigger_key;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	SAVE_TRIGGER_REGION_INFO;
	SWITCH_TO_DEFAULT_REGION;
	csa = cs_addrs;	/* cache cs_addrs (set up by the region switch) so the error/kill macros below do not read csa uninitialized */
	assert(0 != gv_target->root);
	if (gv_cur_region->read_only)
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_TRIGMODREGNOTRW, 2, REG_LEN_STR(gv_cur_region));
	if (NULL != strchr(values[CMD_SUB], 'S'))
	{
		SEARCH_AND_KILL_BY_HASH(trigvn, trigvn_len, set_hash, match_index, csa);
	}
	if (del_kill_hash)
	{
		SEARCH_AND_KILL_BY_HASH(trigvn, trigvn_len, kill_hash, match_index, csa);
	}
	RESTORE_TRIGGER_REGION_INFO;
}
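SAVE_TRIGGER_REGION_INFO / SWITCH_TO_DEFAULT_REGION / RESTORE_TRIGGER_REGION_INFO implement a save-switch-restore discipline around work that must run in the default region. A generic sketch of that discipline (the real macros also save gv_currkey, gv_target and sgm_info_ptr; all names below are illustrative):

#include <stdio.h>

typedef struct { int region; } context_t;

static context_t current = { 1 };	/* stands in for gv_cur_region et al. */

static void work_in_default_region(void)
{
	context_t	saved = current;	/* SAVE_TRIGGER_REGION_INFO */

	current.region = 0;			/* SWITCH_TO_DEFAULT_REGION */
	printf("working in region %d\n", current.region);
	current = saved;			/* RESTORE_TRIGGER_REGION_INFO: always before return */
}

int main(void)
{
	work_in_default_region();
	printf("back in region %d\n", current.region);	/* prints 1 */
	return 0;
}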
Example No. 4
int main(int argc, char *argv[])
{
	DCL_THREADGBL_ACCESS;

	GTM_THREADGBL_INIT;
	set_blocksig();
	gtm_imagetype_init(DSE_IMAGE);
	gtm_wcswidth_fnptr = gtm_wcswidth;
	gtm_env_init();	/* read in all environment variables */
	licensed = TRUE;
	TREF(transform) = TRUE;
	op_open_ptr = op_open;
	patch_curr_blk = get_dir_root();
	err_init(util_base_ch);
	GTM_ICU_INIT_IF_NEEDED;	/* Note: should be invoked after err_init (since it may error out) and before CLI parsing */
	sig_init(generic_signal_handler, dse_ctrlc_handler, suspsigs_handler);
	atexit(util_exit_handler);
	SET_LATCH_GLOBAL(&defer_latch, LOCK_AVAILABLE);
	get_page_size();
	stp_init(STP_INITSIZE);
	rts_stringpool = stringpool;
	getjobname();
	INVOKE_INIT_SECSHR_ADDRS;
	getzdir();
	prealloc_gt_timers();
	initialize_pattern_table();
	gvinit();
	region_init(FALSE);
	INIT_GBL_ROOT(); /* Needed for GVT initialization */
	getjobnum();
	util_out_print("!/File  !_!AD", TRUE, DB_LEN_STR(gv_cur_region));
	util_out_print("Region!_!AD!/", TRUE, REG_LEN_STR(gv_cur_region));
	cli_lex_setup(argc, argv);
	CREATE_DUMMY_GBLDIR(gd_header, original_header, gv_cur_region, gd_map, gd_map_top);
	gtm_chk_dist(argv[0]);
#	ifdef DEBUG
	if ((gtm_white_box_test_case_enabled && (WBTEST_SEMTOOLONG_STACK_TRACE == gtm_white_box_test_case_number) ))
	{
		sgmnt_addrs     * csa;
		node_local_ptr_t cnl;
		csa = &FILE_INFO(gv_cur_region)->s_addrs;
		cnl = csa->nl;
		cnl->wbox_test_seq_num  = 1; /*Signal the first step and wait here*/
		while (2 != cnl->wbox_test_seq_num) /*Wait for another process to get hold of the semaphore and signal next step*/
			LONG_SLEEP(10);
	}
#	endif
	if (argc < 2)
		display_prompt();
	io_init(TRUE);
	while (1)
	{
		if (!dse_process(argc))
			break;
		display_prompt();
	}
	dse_exit();
	REVERT;
}
Example No. 5
STATICFNDEF int4 update_trigger_name_value(int trigvn_len, char *trig_name, int trig_name_len, int new_trig_index)
{
	sgmnt_addrs		*csa;
	mname_entry		gvent;
	gv_namehead		*hasht_tree;
	int			len;
	char			name_and_index[MAX_MIDENT_LEN + 1 + MAX_DIGITS_IN_INT];
	char			new_trig_name[MAX_TRIGNAME_LEN + 1];
	int			num_len;
	char			*ptr;
	int4			result;
	char			save_currkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key			*save_gv_currkey;
	gd_region		*save_gv_cur_region;
	gv_namehead		*save_gv_target;
	sgm_info		*save_sgm_info_ptr;
	mval			trig_gbl;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	if (MAX_AUTO_TRIGNAME_LEN < trigvn_len)
		return PUT_SUCCESS;
	SAVE_TRIGGER_REGION_INFO;
	SWITCH_TO_DEFAULT_REGION;
	csa = cs_addrs;	/* cache cs_addrs (set up by the region switch) so rts_error_csa below does not read csa uninitialized */
	if (gv_cur_region->read_only)
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_TRIGMODREGNOTRW, 2, REG_LEN_STR(gv_cur_region));
	assert(0 != gv_target->root);
	/* $get(^#t("#TNAME",<trigger name>)) */
	BUILD_HASHT_SUB_SUB_CURRKEY(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME), trig_name, trig_name_len - 1);
	if (!gvcst_get(&trig_gbl))
	{	/* There has to be a #TNAME entry */
		if (CDB_STAGNATE > t_tries)
			t_retry(cdb_sc_triggermod);
		else
		{
			assert(WBTEST_HELPOUT_TRIGDEFBAD == gtm_white_box_test_case_number);
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(6) ERR_TRIGNAMBAD, 4, LEN_AND_LIT("\"#TNAME\""), trig_name_len - 1,
					trig_name);
		}
	}
	len = STRLEN(trig_gbl.str.addr) + 1;
	assert(MAX_MIDENT_LEN >= len);
	memcpy(name_and_index, trig_gbl.str.addr, len);
	ptr = name_and_index + len;
	num_len = 0;
	I2A(ptr, num_len, new_trig_index);
	len += num_len;
	/* set ^#t("#TNAME",<trigger name>)=<gvn>_$C(0)_<new trigger index> */
	SET_TRIGGER_GLOBAL_SUB_SUB_STR(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME), trig_name, trig_name_len - 1,
		name_and_index, len, result);
	RESTORE_TRIGGER_REGION_INFO;
	return result;
}
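The memcpy + I2A sequence above builds the <gvn>_$C(0)_<index> payload stored under ^#t("#TNAME",<name>). A standalone sketch of that payload construction (illustrative names; snprintf stands in for I2A):

#include <stdio.h>
#include <string.h>

/* Build "<gvn>\0<index>" in buf and return the total payload length. */
static size_t build_name_and_index(char *buf, size_t bufsiz, const char *gvn, int index)
{
	size_t	len = strlen(gvn) + 1;	/* + 1 copies the NUL separator too */

	memcpy(buf, gvn, len);
	len += snprintf(buf + len, bufsiz - len, "%d", index);	/* append ASCII index */
	return len;
}

int main(void)
{
	char	buf[64];
	size_t	len = build_name_and_index(buf, sizeof(buf), "a", 3);

	printf("payload length %zu (\"a\" NUL \"3\")\n", len);	/* prints 3 */
	return 0;
}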
Example No. 6
void	op_gvincr(mval *increment, mval *result)
{
	unsigned char	buff[MAX_ZWR_KEY_SZ], *end;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	/* A global name in the reserved ^%Y* namespace is illegal in SET or KILL commands; only GETs are allowed */
	if ((RESERVED_NAMESPACE_LEN <= gv_currkey->end) && (0 == MEMCMP_LIT(gv_currkey->base, RESERVED_NAMESPACE)))
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_PCTYRESERVED);
	if (gv_cur_region->read_only)
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_DBPRIVERR, 2, DB_LEN_STR(gv_cur_region));
	if ((TREF(gv_last_subsc_null) || TREF(gv_some_subsc_null)) && (ALWAYS != gv_cur_region->null_subs))
		sgnl_gvnulsubsc();
	assert(gv_currkey->end + 1 <= gv_cur_region->max_key_size);
	MV_FORCE_NUM(increment);
	switch (gv_cur_region->dyn.addr->acc_meth)
	{
		case dba_bg:
		case dba_mm:
			gvcst_incr(increment, result);
			break;
		case dba_cm:
			gvcmx_increment(increment, result);
			break;
		case dba_usr:
			/* $INCR not supported for DDP/USR access method */
			if (0 == (end = format_targ_key(buff, MAX_ZWR_KEY_SZ, gv_currkey, TRUE)))
				end = &buff[MAX_ZWR_KEY_SZ - 1];
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(14) ERR_UNIMPLOP, 0,
				      ERR_TEXT, 2, LEN_AND_LIT("GTCM DDP server does not support $INCREMENT"),
				      ERR_GVIS, 2, end - buff, buff,
				      ERR_TEXT, 2, REG_LEN_STR(gv_cur_region));
			break;
		default:
			assertpro(FALSE);
	}
	assert(MV_DEFINED(result));
}
Example No. 7
/* Upgrade ^#t global in "reg" region */
void	trigger_upgrade(gd_region *reg)
{
	boolean_t		est_first_pass, do_upgrade, is_defined;
	boolean_t		was_null = FALSE, is_null = FALSE;
	int			seq_num, trig_seq_num;
	int			currlabel;
	mval			tmpmval, xecuteimval, *gvname, *tmpmv, *tmpmv2;
	int4			result, tmpint4;
	uint4			curend, gvname_prev, xecute_curend;
	uint4			hash_code, kill_hash_code;
	int			count, i, xecutei, tncount;
	char			*trigname, *trigindex, *ptr;
	char			name_and_index[MAX_MIDENT_LEN + 1 + MAX_DIGITS_IN_INT];
	char			trigvn[MAX_MIDENT_LEN + 1 + MAX_DIGITS_IN_INT], nullbyte[1];
	uint4			trigname_len, name_index_len;
	int			ilen;
	sgmnt_addrs		*csa;
	jnl_private_control	*jpc;
	uint4			sts;
	int			close_res;
	hash128_state_t		hash_state, kill_hash_state;
	uint4			hash_totlen, kill_hash_totlen;
	int			trig_protected_mval_push_count;
#	ifdef DEBUG
	int			save_dollar_tlevel;
#	endif
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(gv_cur_region == reg);
	assert(!dollar_tlevel);	/* caller should have ensured this. this is needed as otherwise things get complicated. */
	assert(!is_replicator);	/* caller should have ensured this. this is needed so we don't bump jnl_seqno (if replicating) */
	csa = &FILE_INFO(reg)->s_addrs;
	assert(csa->hdr->hasht_upgrade_needed);
	/* If before-image journaling is turned on in this region (it does not matter whether replication is on or not),
	 * once this transaction is done, we need to switch to a new journal file and cut the back link because
	 * otherwise it is possible for backward journal recovery (or rollback) or the source server to encounter
	 * the journal records generated in this ^#t-upgrade-transaction, which they do not know how to handle
	 * properly (e.g. rollback or backward recovery does not know to restore csa->hdr->hasht_upgrade_needed
	 * if it rolls back this transaction). To achieve this, we set hold_onto_crit to TRUE and do the jnl link
	 * cut AFTER the transaction commits but before anyone else can sneak in to do any more updates.
	 * Since we most often expect databases to be journaled, we do this hold_onto_crit even for the non-journaled case.
	 */
	grab_crit(reg);
	csa->hold_onto_crit = TRUE;
	DEBUG_ONLY(save_dollar_tlevel = dollar_tlevel);
	assert(!donot_INVOKE_MUMTSTART);
	DEBUG_ONLY(donot_INVOKE_MUMTSTART = TRUE);
	op_tstart(IMPLICIT_TSTART, TRUE, &literal_batch, 0); /* 0 ==> save no locals but RESTART OK */
	ESTABLISH_NORET(trigger_upgrade_ch, est_first_pass);
	/* On a TP restart anywhere down below, this line is where the restart resumes execution from */
	assert(donot_INVOKE_MUMTSTART);	/* Make sure still set for every try/retry of TP transaction */
	change_reg(); /* TP_CHANGE_REG won't work as we need to set sgm_info_ptr */
	assert(NULL != cs_addrs);
	assert(csa == cs_addrs);
	SET_GVTARGET_TO_HASHT_GBL(csa);	/* sets up gv_target */
	assert(NULL != gv_target);
	INITIAL_HASHT_ROOT_SEARCH_IF_NEEDED;	/* Needed to do every retry in case restart was due to an online rollback.
						 * This also sets up gv_currkey */
	/* Do actual upgrade of ^#t global.
	 *
	 * Below is a sample layout of the label 2 ^#t global
	 * -------------------------------------------------------
	 * ^#t("#TNAME","x")="a"_$C(0)_"1"		(present in DEFAULT only)
	 * ^#t("#TRHASH",89771515,1)="a"_$C(0)_"1"	(present in DEFAULT only)
	 * ^#t("#TRHASH",106937755,1)="a"_$C(0)_"1"	(present in DEFAULT only)
	 * ^#t("a",1,"BHASH")="106937755"
	 * ^#t("a",1,"CHSET")="M"
	 * ^#t("a",1,"CMD")="S"
	 * ^#t("a",1,"LHASH")="89771515"
	 * ^#t("a",1,"TRIGNAME")="x#"
	 * ^#t("a",1,"XECUTE")=" do ^twork"
	 * ^#t("a","#COUNT")="1"
	 * ^#t("a","#CYCLE")="1"
	 * ^#t("a","#LABEL")="2"
	 *
	 * Below is a sample layout of the label 3 ^#t global
	 * -------------------------------------------------------
	 * ^#t("#LABEL")="3"				(present only after upgrade, not regular trigger load)
	 * ^#t("#TNAME","x")="a"_$C(0)_"1"		(present in CURRENT region)
	 * ^#t("a",1,"BHASH")="71945627"
	 * ^#t("a",1,"CHSET")="M"
	 * ^#t("a",1,"CMD")="S"
	 * ^#t("a",1,"LHASH")="71945627"
	 * ^#t("a",1,"TRIGNAME")="x#"
	 * ^#t("a",1,"XECUTE")=" do ^twork"
	 * ^#t("a","#COUNT")="1"
	 * ^#t("a","#CYCLE")="2"
	 * ^#t("a","#LABEL")="3"
	 * ^#t("a","#TRHASH",71945627,1)="a"_$C(0)_"1"
	 *
	 * Key aspects of the format change
	 * ----------------------------------
	 * 1) New ^#t("#LABEL")="3" to indicate the format of the ^#t global. This is in addition to
	 * 	^#t("a","#LABEL") etc. which is already there. This way we have a #LABEL for not just the installed
	 * 	triggers but also for the name information stored in the #TNAME nodes.
	 * 2) The BHASH and LHASH fields use a different hash computation, so there are more chances of BHASH and LHASH
	 * 	matching, in which case we store only one #TRHASH entry (instead of two). So there are fewer ^#t records
	 * 	in the new format in most cases.
	 * 3) ^#t("a","#LABEL") bumps from 2 to 3. Similarly ^#t("a","#CYCLE") bumps by one (to make sure triggers for this
	 *	global get re-read if and when we implement an -ONLINE upgrade).
	 * 4) DEFAULT used to have ^#t("#TNAME",...) nodes corresponding to triggers across ALL regions in the gbldir and
	 * 	other regions used to have NO ^#t("#TNAME",...) nodes, whereas after the upgrade every region has
	 *	^#t("#TNAME",...) nodes corresponding to triggers installed in that region. So it is safer to kill ^#t("#TNAME")
	 *	nodes and add them as needed.
	 * 5) #TRHASH has moved from ^#t() to ^#t(<gbl>). So it is safer to kill ^#t("#TRHASH") nodes and add them as needed.
	 *
	 * Below is a sample layout of the label 4 ^#t global
	 * -------------------------------------------------------
	 * ^#t("#TNAME","x")="a"_$C(0)_"1"		(present in CURRENT region)
	 * ^#t("a",1,"BHASH")="71945627"
	 * ^#t("a",1,"CHSET")="M"
	 * ^#t("a",1,"CMD")="S"
	 * ^#t("a",1,"LHASH")="71945627"
	 * ^#t("a",1,"TRIGNAME")="x#"
	 * ^#t("a",1,"XECUTE")=" do ^twork"
	 * ^#t("a","#COUNT")="1"
	 * ^#t("a","#CYCLE")="2"
	 * ^#t("a","#LABEL")="4"
	 * ^#t("a","#TRHASH",71945627,1)="a"_$C(0)_"1"
	 *
	 * Key aspects of the format change
	 * ----------------------------------
	 * 1) Removed ^#t("#LABEL") as it is redundant information and trigger load does not include it
	 * 2) Multiline triggers were incorrectly processed resulting in incorrect BHASH and LHASH values. Upgrade fixes this
	 * 3) ^#t("a","#LABEL") bumps from 3 to 4. Similarly ^#t("a","#CYCLE") bumps by one (to make sure
	 * 	triggers for this global get re-read if and when we implement an -ONLINE upgrade).
	 */
	tmpmv = &tmpmval;	/* At all points maintain this relationship. The two are used interchangeably below */
	do_upgrade = (0 != gv_target->root);	/* assign unconditionally so do_upgrade is never read uninitialized */
	/* The below logic assumes ^#t global does not have any integrity errors */
	assert(do_upgrade);	/* caller should have not invoked us otherwise */
	if (do_upgrade)
	{	/* kill ^#t("#TRHASH"), ^#t("#TNAME") and ^#t("#LABEL") first. Regenerate each again as we process ^#t(<gbl>,...) */
		csa->incr_db_trigger_cycle = TRUE; /* so that we increment csd->db_trigger_cycle at commit time.
							 * this forces concurrent processes to read upgraded triggers.
							 */
		if (JNL_WRITE_LOGICAL_RECS(csa))
		{	/* Note that the ^#t upgrade is a physical layout change. But it has no logical change (i.e. users
			 * see the same MUPIP TRIGGER -SELECT output as before). So write only a dummy LGTRIG journal
			 * record for this operation. Hence write a string that starts with a trigger comment character ";".
			 */
			assert(!gv_cur_region->read_only);
			jnl_format(JNL_LGTRIG, NULL, (mval *)&literal_trigjnlrec, 0);
		}
		/* KILL ^#t("#LABEL") unconditionally */
		BUILD_HASHT_SUB_CURRKEY(LITERAL_HASHLABEL, STRLEN(LITERAL_HASHLABEL));
		if (0 != gvcst_data())
			gvcst_kill(TRUE);
		/* KILL ^#t("#TNAME") unconditionally and regenerate */
		BUILD_HASHT_SUB_CURRKEY(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME));
		if (0 != gvcst_data())
			gvcst_kill(TRUE);
		/* KILL ^#t("#TRHASH") unconditionally and regenerate */
		BUILD_HASHT_SUB_CURRKEY(LITERAL_HASHTRHASH, STRLEN(LITERAL_HASHTRHASH));
		if (0 != gvcst_data())
			gvcst_kill(TRUE);
		/* Loop through all global names for which ^#t(<gvn>) exists. The only first-level subscripts of ^#t starting
		 * with # are #TNAME and #TRHASH in collation order. So after #TRHASH we expect to find subscripts that are
		 * global names. Hence the HASHTRHASH code is placed AFTER the HASHTNAME code above.
		 */
		TREF(gd_targ_gvnh_reg) = NULL;	/* needed so op_gvorder below goes through gvcst_order (i.e. focuses only
						 * on the current region) and NOT through gvcst_spr_order (which does not
						 * apply anyways in the case of ^#t).
						 */
		nullbyte[0] = '\0';
		trig_protected_mval_push_count = 0;
		INCR_AND_PUSH_MV_STENT(gvname); /* Protect gvname from garbage collection */
		do
		{
			op_gvorder(gvname);
			if (0 == gvname->str.len)
				break;
			assert(ARRAYSIZE(trigvn) > gvname->str.len);
			memcpy(&trigvn[0], gvname->str.addr, gvname->str.len);
			gvname->str.addr = &trigvn[0];	/* point away from stringpool to avoid stp_gcol issues */
			/* Save gv_currkey->prev so it is restored before next call to op_gvorder (which cares about this field).
			 * gv_currkey->prev gets tampered with in the for loop below (e.g. BUILD_HASHT_SUB_CURRKEY macro).
			 * No need to do this for gv_currkey->end since the body of the for loop takes care of restoring it.
			 */
			gvname_prev = gv_currkey->prev;
			BUILD_HASHT_SUB_CURRKEY(gvname->str.addr, gvname->str.len);
			/* At this point, gv_currkey is ^#t(<gvn>) */
			/* Increment ^#t(<gvn>,"#CYCLE") */
			is_defined = gvtr_get_hasht_gblsubs((mval *)&literal_hashcycle, tmpmv);
			assert(is_defined);
			tmpint4 = mval2i(tmpmv);
			tmpint4++;
			i2mval(tmpmv, tmpint4);
			gvtr_set_hasht_gblsubs((mval *)&literal_hashcycle, tmpmv);
			/* Read ^#t(<gvn>,"#COUNT") */
			is_defined = gvtr_get_hasht_gblsubs((mval *)&literal_hashcount, tmpmv);
			if (is_defined)
			{
				tmpint4 = mval2i(tmpmv);
				count = tmpint4;
				/* Get ^#t(<gvn>,"#LABEL"), error out for invalid values. Upgrade disallowed for label 1 triggers */
				is_defined = gvtr_get_hasht_gblsubs((mval *)&literal_hashlabel, tmpmv);
				assert(is_defined);
				currlabel = mval2i(tmpmv);
				if ((V19_HASHT_GBL_LABEL_INT >= currlabel) || (HASHT_GBL_CURLABEL_INT <= currlabel))
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(8) ERR_TRIGUPBADLABEL, 6, currlabel,
							HASHT_GBL_CURLABEL_INT, gvname->str.len, gvname->str.addr,
							REG_LEN_STR(reg));
				/* Set ^#t(<gvn>,"#LABEL")=HASHT_GBL_CURLABEL */
				gvtr_set_hasht_gblsubs((mval *)&literal_hashlabel, (mval *)&literal_curlabel);
			} else
				count = 0;
			/* Kill ^#t(<gvn>,"#TRHASH") unconditionally and regenerate */
			gvtr_kill_hasht_gblsubs((mval *)&literal_hashtrhash, TRUE);
			/* At this point, gv_currkey is ^#t(<gvn>) */
			for (i = 1; i <= count; i++)
			{
				/* At this point, gv_currkey is ^#t(<gvn>) */
				curend = gv_currkey->end; /* note gv_currkey->end before changing it so we can restore it later */
				assert(KEY_DELIMITER == gv_currkey->base[curend]);
				assert(gv_target->gd_csa == cs_addrs);
				i2mval(tmpmv, i);
				COPY_SUBS_TO_GVCURRKEY(tmpmv, gv_cur_region, gv_currkey, was_null, is_null);
				/* At this point, gv_currkey is ^#t(<gvn>,i) */
				/* Compute new LHASH and BHASH hash values.
				 *	LHASH uses : GVSUBS,                        XECUTE
				 *	BHASH uses : GVSUBS, DELIM, ZDELIM, PIECES, XECUTE
				 * So read each of these pieces and compute the hash along the way.
				 */
				STR_PHASH_INIT(hash_state, hash_totlen);
				STR_PHASH_PROCESS(hash_state, hash_totlen, gvname->str.addr, gvname->str.len);
				STR_PHASH_PROCESS(hash_state, hash_totlen, nullbyte, 1);
				/* Read in ^#t(<gvn>,i,"GVSUBS") */
				is_defined = gvtr_get_hasht_gblsubs((mval *)&literal_gvsubs, tmpmv);
				if (is_defined)
				{
					STR_PHASH_PROCESS(hash_state, hash_totlen, tmpmval.str.addr, tmpmval.str.len);
					STR_PHASH_PROCESS(hash_state, hash_totlen, nullbyte, 1);
				}
				/* Copy over SET hash state (2-tuple <state,totlen>) to KILL hash state before adding
				 * the PIECES, DELIM, ZDELIM portions (those are only part of the SET hash).
				 */
				kill_hash_state = hash_state;
				kill_hash_totlen = hash_totlen;
				/* Read in ^#t(<gvn>,i,"PIECES") */
				is_defined = gvtr_get_hasht_gblsubs((mval *)&literal_pieces, tmpmv);
				if (is_defined)
				{
					STR_PHASH_PROCESS(hash_state, hash_totlen, tmpmval.str.addr, tmpmval.str.len);
					STR_PHASH_PROCESS(hash_state, hash_totlen, nullbyte, 1);
				}
				/* Read in ^#t(<gvn>,i,"DELIM") */
				is_defined = gvtr_get_hasht_gblsubs((mval *)&literal_delim, tmpmv);
				if (is_defined)
				{
					STR_PHASH_PROCESS(hash_state, hash_totlen, tmpmval.str.addr, tmpmval.str.len);
					STR_PHASH_PROCESS(hash_state, hash_totlen, nullbyte, 1);
				}
				/* Read in ^#t(<gvn>,i,"ZDELIM") */
				is_defined = gvtr_get_hasht_gblsubs((mval *)&literal_zdelim, tmpmv);
				if (is_defined)
				{
					STR_PHASH_PROCESS(hash_state, hash_totlen, tmpmval.str.addr, tmpmval.str.len);
					STR_PHASH_PROCESS(hash_state, hash_totlen, nullbyte, 1);
				}
				/* Read in ^#t(<gvn>,i,"XECUTE").
				 * Note: The XECUTE portion of the trigger definition is used in SET and KILL hash.
				 * But since we have started maintaining "hash_state" and "kill_hash_state" separately
				 * (due to PIECES, DELIM, ZDELIM) we need to update the hash for both using same input string.
				 */
				is_defined = gvtr_get_hasht_gblsubs((mval *)&literal_xecute, tmpmv);
				if (is_defined)
				{
					STR_PHASH_PROCESS(hash_state, hash_totlen, tmpmval.str.addr, tmpmval.str.len);
					STR_PHASH_PROCESS(kill_hash_state, kill_hash_totlen, tmpmval.str.addr, tmpmval.str.len);
				} else
				{	/* Multi-record XECUTE string */
					/* At this point, gv_currkey is ^#t(<gvn>,i) */
					xecute_curend = gv_currkey->end; /* note gv_currkey->end so we can restore it later */
					assert(KEY_DELIMITER == gv_currkey->base[xecute_curend]);
					tmpmv2 = (mval *)&literal_xecute;
					COPY_SUBS_TO_GVCURRKEY(tmpmv2, gv_cur_region, gv_currkey, was_null, is_null);
					xecutei = 1;
					do
					{
						i2mval(&xecuteimval, xecutei);
						is_defined = gvtr_get_hasht_gblsubs(&xecuteimval, tmpmv);
						if (!is_defined)
							break;
						STR_PHASH_PROCESS(hash_state, hash_totlen, tmpmval.str.addr, tmpmval.str.len);
						STR_PHASH_PROCESS(kill_hash_state, kill_hash_totlen,
									tmpmval.str.addr, tmpmval.str.len);
						xecutei++;
					} while (TRUE);
					/* Restore gv_currkey to ^#t(<gvn>,i) */
					gv_currkey->end = xecute_curend;
					gv_currkey->base[xecute_curend] = KEY_DELIMITER;
				}
				STR_PHASH_RESULT(hash_state, hash_totlen, hash_code);
				STR_PHASH_RESULT(kill_hash_state, kill_hash_totlen, kill_hash_code);
				/* Set ^#t(<gvn>,i,"LHASH") */
				MV_FORCE_UMVAL(tmpmv, kill_hash_code);
				gvtr_set_hasht_gblsubs((mval *)&literal_lhash, tmpmv);
				/* Set ^#t(<gvn>,i,"BHASH") */
				MV_FORCE_UMVAL(tmpmv, hash_code);
				gvtr_set_hasht_gblsubs((mval *)&literal_bhash, tmpmv);
				/* Read in ^#t(<gvn>,i,"TRIGNAME") to determine if #SEQNUM/#TNCOUNT needs to be maintained */
				is_defined = gvtr_get_hasht_gblsubs((mval *)&literal_trigname, tmpmv);
				assert(is_defined);
				assert('#' == tmpmval.str.addr[tmpmval.str.len - 1]);
				tmpmval.str.len--;
				if ((tmpmval.str.len <= ARRAYSIZE(name_and_index)) &&
						(NULL != (ptr = memchr(tmpmval.str.addr, '#', tmpmval.str.len))))
				{	/* Auto-generated name. Need to maintain #SEQNUM/#TNCOUNT */
					/* Take copy of trigger name into non-stringpool location to avoid stp_gcol issues */
					trigname_len = ptr - tmpmval.str.addr;
					ptr++;
					name_index_len = (tmpmval.str.addr + tmpmval.str.len) - ptr;
					assert(ARRAYSIZE(name_and_index) >= (trigname_len + 1 + name_index_len));
					trigname = &name_and_index[0];
					trigindex = ptr;
					memcpy(trigname, tmpmval.str.addr, tmpmval.str.len);
					A2I(ptr, ptr + name_index_len, trig_seq_num);
					/* At this point, gv_currkey is ^#t(<gvn>,i) */
					/* $get(^#t("#TNAME",<trigger name>,"#SEQNUM")) */
					BUILD_HASHT_SUB_SUB_SUB_CURRKEY(LITERAL_HASHTNAME, STR_LIT_LEN(LITERAL_HASHTNAME),
						trigname, trigname_len, LITERAL_HASHSEQNUM, STR_LIT_LEN(LITERAL_HASHSEQNUM));
					seq_num = gvcst_get(tmpmv) ? mval2i(tmpmv) : 0;
					if (trig_seq_num > seq_num)
					{	/* Set ^#t("#TNAME",<trigger name>,"#SEQNUM") = trig_seq_num */
						SET_TRIGGER_GLOBAL_SUB_SUB_SUB_STR(LITERAL_HASHTNAME,
							STR_LIT_LEN(LITERAL_HASHTNAME), trigname, trigname_len,
							LITERAL_HASHSEQNUM, STR_LIT_LEN(LITERAL_HASHSEQNUM),
							trigindex, name_index_len, result);
						assert(PUT_SUCCESS == result);
					}
					/* set ^#t("#TNAME",<trigger name>,"#TNCOUNT")++ */
					BUILD_HASHT_SUB_SUB_SUB_CURRKEY(LITERAL_HASHTNAME, STR_LIT_LEN(LITERAL_HASHTNAME),
						trigname, trigname_len, LITERAL_HASHTNCOUNT, STR_LIT_LEN(LITERAL_HASHTNCOUNT));
					tncount = gvcst_get(tmpmv) ? mval2i(tmpmv) + 1 : 1;
					i2mval(tmpmv, tncount);
					SET_TRIGGER_GLOBAL_SUB_SUB_SUB_MVAL(LITERAL_HASHTNAME, STR_LIT_LEN(LITERAL_HASHTNAME),
						trigname, trigname_len, LITERAL_HASHTNCOUNT, STR_LIT_LEN(LITERAL_HASHTNCOUNT),
						tmpmval, result);
					trigname_len += 1 + name_index_len; /* in preparation for ^#t("#TNAME") set below */
					assert(PUT_SUCCESS == result);
					BUILD_HASHT_SUB_CURRKEY(gvname->str.addr, gvname->str.len);
					/* At this point, gv_currkey is ^#t(<gvn>) */
				} else
				{
					/* Take copy of trigger name into non-stringpool location to avoid stp_gcol issues */
					trigname = &name_and_index[0];  /* in preparation for ^#t("#TNAME") set below */
					trigname_len = MIN(tmpmval.str.len, ARRAYSIZE(name_and_index));
					assert(ARRAYSIZE(name_and_index) >= trigname_len);
					memcpy(trigname, tmpmval.str.addr, trigname_len);
					/* Restore gv_currkey to what it was at beginning of for loop iteration */
					gv_currkey->end = curend;
					gv_currkey->base[curend] = KEY_DELIMITER;
				}
				/* At this point, gv_currkey is ^#t(<gvn>) */
				if (kill_hash_code != hash_code)
					gvtr_set_hashtrhash(gvname->str.addr, gvname->str.len, kill_hash_code, i);
				/* Set ^#t(<gvn>,"#TRHASH",hash_code,i) */
				gvtr_set_hashtrhash(gvname->str.addr, gvname->str.len, hash_code, i);
				/* Set ^#t("#TNAME",<trigname>)=<gvn>_$c(0)_<trigindx> */
				/* The upgrade assumes that the region does not contain two triggers with the same name.
				 * V62000 and earlier could potentially create this out-of-design situation. Once implemented,
				 * the trigger integrity check will warn users about this edge case. */
				ptr = &trigvn[gvname->str.len];
				*ptr++ = '\0';
				ilen = 0;
				I2A(ptr, ilen, i);
				ptr += ilen;
				assert(ptr <= ARRAYTOP(trigvn));
				SET_TRIGGER_GLOBAL_SUB_SUB_STR(LITERAL_HASHTNAME, STR_LIT_LEN(LITERAL_HASHTNAME),
					trigname, trigname_len, trigvn, ptr - gvname->str.addr, result);
				assert(PUT_SUCCESS == result);
				BUILD_HASHT_SUB_CURRKEY(gvname->str.addr, gvname->str.len);
				/* At this point, gv_currkey is ^#t(<gvn>) */
			}
			/* At this point, gv_currkey is ^#t(<gvn>) i.e. gv_currkey->end is correct but gv_currkey->prev
			 * might have been tampered with. Restore it to proper value first.
			 */
			gv_currkey->prev = gvname_prev;
			gvname->mvtype = 0; /* can now be garbage collected in the next iteration */
		} while (TRUE);
	}
	op_tcommit();
	REVERT; /* remove our condition handler */
	DEBUG_ONLY(donot_INVOKE_MUMTSTART = FALSE;)
	if (csa->hold_onto_crit)
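The STR_PHASH_* sequence in the loop above derives two related hashes cheaply: it hashes the fields common to SET and KILL (GVSUBS), copies the partial hash state, and then folds the SET-only fields (PIECES, DELIM, ZDELIM) into one copy only, finishing both with XECUTE. A self-contained sketch of that state-copy trick, using incremental FNV-1a in place of the real 128-bit hash128_state_t (field strings here are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t hash_state_t;	/* stands in for hash128_state_t */

static void phash_init(hash_state_t *st)
{
	*st = 2166136261u;	/* FNV-1a offset basis */
}

static void phash_process(hash_state_t *st, const char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		*st = (*st ^ (unsigned char)buf[i]) * 16777619u;	/* FNV-1a prime */
}

int main(void)
{
	hash_state_t	set_hash, kill_hash;

	phash_init(&set_hash);
	phash_process(&set_hash, "GVSUBS", 6);	/* fields common to both hashes */
	kill_hash = set_hash;			/* branch: copy the partial state */
	phash_process(&set_hash, "PIECES", 6);	/* SET-only fields (PIECES/DELIM/ZDELIM) */
	phash_process(&set_hash, "XECUTE", 6);	/* XECUTE goes into both hashes ... */
	phash_process(&kill_hash, "XECUTE", 6);	/* ... fed to each state separately */
	printf("set=%u kill=%u\n", (unsigned)set_hash, (unsigned)kill_hash);
	return 0;
}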
Example No. 8
boolean_t trigger_delete_name(char *trigger_name, uint4 trigger_name_len, uint4 *trig_stats)
{
	sgmnt_addrs		*csa;
	char			curr_name[MAX_MIDENT_LEN + 1];
	uint4			curr_name_len, orig_name_len;
	mval			mv_curr_nam;
	char			*ptr;
	char			*name_tail_ptr;
	char			save_name[MAX_MIDENT_LEN + 1];
	gv_key			save_currkey[DBKEYALLOC(MAX_KEY_SZ)];
	gd_region		*save_gv_cur_region, *lgtrig_reg;
	gv_namehead		*save_gv_target;
	sgm_info		*save_sgm_info_ptr;
	mval			trig_gbl;
	mval			*trigger_count;
	char			trigvn[MAX_MIDENT_LEN + 1];
	int			trigvn_len;
	int			trig_indx;
	int			badpos;
	boolean_t		wildcard;
	char			utilprefix[1024];
	int			utilprefixlen;
	boolean_t		first_gtmio;
	uint4			triggers_deleted;
	mval			trigjrec;
	boolean_t		jnl_format_done;
	gd_region		*reg, *reg_top;
	char			disp_trigvn[MAX_MIDENT_LEN + SPANREG_REGION_LITLEN + MAX_RN_LEN + 1 + 1];
					/* SPANREG_REGION_LITLEN for " (region ", MAX_RN_LEN for region name,
					 * 1 for ")" and 1 for trailing '\0'.
					 */
	int			disp_trigvn_len;
	int			trig_protected_mval_push_count;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	badpos = 0;
	trigjrec.mvtype = MV_STR;
	trigjrec.str.len = trigger_name_len--;
	trigjrec.str.addr = trigger_name++;
	orig_name_len = trigger_name_len;
	if ((0 == trigger_name_len)
		|| (trigger_name_len != (badpos = validate_input_trigger_name(trigger_name, trigger_name_len, &wildcard))))
	{	/* the input name is not valid */
		CONV_STR_AND_PRINT("Invalid trigger NAME string: ", orig_name_len, trigger_name);
		/* badpos is the string position where the bad character was found, pretty print it */
		trig_stats[STATS_ERROR_TRIGFILE]++;
		return TRIG_FAILURE;
	}
	name_tail_ptr = trigger_name + trigger_name_len - 1;
	if ((TRIGNAME_SEQ_DELIM == *name_tail_ptr) || wildcard)
		trigger_name_len--; /* drop the trailing # sign or wildcard character */
	jnl_format_done = FALSE;
	lgtrig_reg = NULL;
	first_gtmio = TRUE;
	triggers_deleted = 0;
	assert(trigger_name_len < MAX_MIDENT_LEN);
	memcpy(save_name, trigger_name, trigger_name_len);
	save_name[trigger_name_len] = '\0';
	utilprefixlen = ARRAYSIZE(utilprefix);
	trig_protected_mval_push_count = 0;
	INCR_AND_PUSH_MV_STENT(trigger_count); /* Protect trigger_count from garbage collection */
	for (reg = gd_header->regions, reg_top = reg + gd_header->n_regions; reg < reg_top; reg++)
	{
		GVTR_SWITCH_REG_AND_HASHT_BIND_NAME(reg);
		csa = cs_addrs;
		if (NULL == csa)	/* not BG or MM access method */
			continue;
		/* gv_target now points to ^#t in region "reg" */
		/* To write the LGTRIG logical jnl record, choose some region that has journaling enabled */
		if (!reg->read_only && !jnl_format_done && JNL_WRITE_LOGICAL_RECS(csa))
			lgtrig_reg = reg;
		if (!gv_target->root)
			continue;
		memcpy(curr_name, save_name, trigger_name_len);
		curr_name_len = trigger_name_len;
		do {
			/* GVN = $get(^#t("#TNAME",curr_name)) */
			BUILD_HASHT_SUB_SUB_CURRKEY(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME), curr_name, curr_name_len);
			if (gvcst_get(&trig_gbl))
			{
				if (reg->read_only)
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_TRIGMODREGNOTRW, 2, REG_LEN_STR(reg));
				SAVE_TRIGGER_REGION_INFO(save_currkey);
				ptr = trig_gbl.str.addr;
				trigvn_len = MIN(trig_gbl.str.len, MAX_MIDENT_LEN);
				STRNLEN(ptr, trigvn_len, trigvn_len);
				ptr += trigvn_len;
				if ((trig_gbl.str.len == trigvn_len) || ('\0' != *ptr))
				{	/* We expect $c(0) in the middle of ptr. If not found, this is a restartable situation */
					if (CDB_STAGNATE > t_tries)
						t_retry(cdb_sc_triggermod);
					assert(WBTEST_HELPOUT_TRIGDEFBAD == gtm_white_box_test_case_number);
					rts_error_csa(CSA_ARG(NULL) VARLSTCNT(6) ERR_TRIGNAMBAD, 4, LEN_AND_LIT("\"#TNAME\""),
							curr_name_len, curr_name);
				}
				memcpy(trigvn, trig_gbl.str.addr, trigvn_len);
				/* the index is just beyond the length of the GVN string */
				ptr++;
				A2I(ptr, trig_gbl.str.addr + trig_gbl.str.len, trig_indx);
				if (1 > trig_indx)
				{	/* Trigger indexes start from 1 */
					if (CDB_STAGNATE > t_tries)
						t_retry(cdb_sc_triggermod);
					assert(WBTEST_HELPOUT_TRIGDEFBAD == gtm_white_box_test_case_number);
					rts_error_csa(CSA_ARG(NULL) VARLSTCNT(6) ERR_TRIGNAMBAD, 4, LEN_AND_LIT("\"#TNAME\""),
							curr_name_len, curr_name);
				}
				SET_DISP_TRIGVN(reg, disp_trigvn, disp_trigvn_len, trigvn, trigvn_len);
				/* $get(^#t(GVN,"COUNT") */
				BUILD_HASHT_SUB_SUB_CURRKEY(trigvn, trigvn_len, LITERAL_HASHCOUNT, STRLEN(LITERAL_HASHCOUNT));
				if (!gvcst_get(trigger_count))
				{
					UTIL_PRINT_PREFIX_IF_NEEDED(first_gtmio, utilprefix, &utilprefixlen);
					util_out_print_gtmio("Trigger named !AD exists in the lookup table, "
							"but global ^!AD has no triggers",
							FLUSH, curr_name_len, curr_name, disp_trigvn_len, disp_trigvn);
					trig_stats[STATS_ERROR_TRIGFILE]++;
					RETURN_AND_POP_MVALS(TRIG_FAILURE);
				}
				if (!jnl_format_done && JNL_WRITE_LOGICAL_RECS(csa))
				{
					jnl_format(JNL_LGTRIG, NULL, &trigjrec, 0);
					jnl_format_done = TRUE;
				}
				/* kill the target trigger for GVN at index trig_indx */
				if (PUT_SUCCESS != (trigger_delete(trigvn, trigvn_len, trigger_count, trig_indx)))
				{
					UTIL_PRINT_PREFIX_IF_NEEDED(first_gtmio, utilprefix, &utilprefixlen);
					util_out_print_gtmio("Trigger named !AD exists in the lookup table for global ^!AD,"	\
								" but was not deleted!", FLUSH, orig_name_len, trigger_name,
								disp_trigvn_len, disp_trigvn);
					trig_stats[STATS_ERROR_TRIGFILE]++;
					RETURN_AND_POP_MVALS(TRIG_FAILURE);
				} else
				{
					csa->incr_db_trigger_cycle = TRUE;
					trigger_incr_cycle(trigvn, trigvn_len);	/* ^#t records changed, increment cycle */
					if (dollar_ztrigger_invoked)
					{	/* Increment db_dztrigger_cycle so that next gvcst_put/gvcst_kill in this
						 * transaction, on this region, will re-read triggers. See trigger_update.c
						 * for a comment on why it is okay for db_dztrigger_cycle to be incremented
						 * more than once in the same transaction.
						 */
						csa->db_dztrigger_cycle++;
					}
					trig_stats[STATS_DELETED]++;
					if (0 == trig_stats[STATS_ERROR_TRIGFILE])
					{
						UTIL_PRINT_PREFIX_IF_NEEDED(first_gtmio, utilprefix, &utilprefixlen);
						util_out_print_gtmio("Deleted trigger named '!AD' for global ^!AD",
								FLUSH, curr_name_len, curr_name, disp_trigvn_len, disp_trigvn);
					}
				}
				trigger_count->mvtype = 0; /* allow stp_gcol to release the current contents if necessary */
				RESTORE_TRIGGER_REGION_INFO(save_currkey);
				triggers_deleted++;
			}
			if (!wildcard)
				/* not a wild card, don't $order for the next match */
				break;
			op_gvorder(&mv_curr_nam);
			if (0 == mv_curr_nam.str.len)
				break;
			assert(mv_curr_nam.str.len < MAX_MIDENT_LEN);
			memcpy(curr_name, mv_curr_nam.str.addr, mv_curr_nam.str.len);
			curr_name_len = mv_curr_nam.str.len;
			if (0 != memcmp(curr_name, save_name, trigger_name_len))
				/* stop when op_gvorder returns a string that no longer starts with save_name */
				break;
		} while (TRUE);
	}
	DECR_AND_POP_MV_STENT();
	if (!jnl_format_done && (NULL != lgtrig_reg))
	{	/* There was no journaled region that had a ^#t update, but found at least one journaled region
		 * so write a LGTRIG logical jnl record there.
		 */
		GVTR_SWITCH_REG_AND_HASHT_BIND_NAME(lgtrig_reg);
		csa = cs_addrs;
		/* Attach to jnlpool. Normally a SET or KILL of the ^#t records takes care of this, but this could be
		 * a NO-OP trigger operation that won't update any ^#t records while we still want to write a
		 * TLGTRIG/ULGTRIG journal record. Hence the need to do this here.
		 */
		JNLPOOL_INIT_IF_NEEDED(csa, csa->hdr, csa->nl);
		assert(dollar_tlevel);
		/* below is needed to set update_trans TRUE on this region even if NO db updates happen to ^#t nodes */
		T_BEGIN_SETORKILL_NONTP_OR_TP(ERR_TRIGLOADFAIL);
		jnl_format(JNL_LGTRIG, NULL, &trigjrec, 0);
		jnl_format_done = TRUE;
	}
	if (wildcard)
	{
		UTIL_PRINT_PREFIX_IF_NEEDED(first_gtmio, utilprefix, &utilprefixlen);
		if (triggers_deleted)
		{
			trig_stats[STATS_NOERROR_TRIGFILE]++;
			util_out_print_gtmio("All existing triggers named !AD (count = !UL) now deleted",
				FLUSH, orig_name_len, trigger_name, triggers_deleted);
		} else
		{
			trig_stats[STATS_UNCHANGED_TRIGFILE]++;
			util_out_print_gtmio("No matching triggers of the form !AD found for deletion",
				FLUSH, orig_name_len, trigger_name);
		}
	} else if (triggers_deleted)
	{
		/* util_out_print_gtmio of "Deleted trigger named ..." already done so no need to do it again */
		trig_stats[STATS_NOERROR_TRIGFILE]++;
	} else
	{	/* No names match. But treat it as a no-op (i.e. success). */
		UTIL_PRINT_PREFIX_IF_NEEDED(first_gtmio, utilprefix, &utilprefixlen);
		util_out_print_gtmio("Trigger named !AD does not exist", FLUSH, orig_name_len, trigger_name);
		trig_stats[STATS_UNCHANGED_TRIGFILE]++;
	}
	return TRIG_SUCCESS;
}
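The wildcard path above $ORDERs through the sorted #TNAME entries and stops as soon as a returned name no longer starts with the requested prefix. A sketch of that early-exit prefix scan over a sorted C array (illustrative data and names):

#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char	*names[] = { "trg1", "trgA", "trgB", "xyz", NULL };	/* sorted, like $ORDER output */
	const char		*prefix = "trg";
	size_t			plen = strlen(prefix);

	for (int i = 0; NULL != names[i]; i++)
	{
		if (0 != strncmp(names[i], prefix, plen))
			break;	/* sorted order: once one name mismatches, no later name can match */
		printf("deleting trigger %s\n", names[i]);
	}
	return 0;
}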
Example No. 9
void trigger_delete_all(char *trigger_rec, uint4 len, uint4 *trig_stats)
{
	int			count;
	sgmnt_addrs		*csa;
	mval			curr_gbl_name;
	int			cycle;
	mval			*mv_count_ptr;
	mval			*mv_cycle_ptr;
	gd_region		*reg, *reg_top;
	int4			result;
	gd_region		*lgtrig_reg;
	mval			trigger_cycle;
	mval			trigger_count;
	boolean_t		this_db_updated;
	uint4			triggers_deleted;
	mval			trigjrec;
	boolean_t		jnl_format_done;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(0 < dollar_tlevel);
	jnl_format_done = FALSE;
	lgtrig_reg = NULL;
	trigjrec.mvtype = MV_STR;
	trigjrec.str.len = len;
	trigjrec.str.addr = trigger_rec;
	triggers_deleted = 0;
	for (reg = gd_header->regions, reg_top = reg + gd_header->n_regions; reg < reg_top; reg++)
	{
		GVTR_SWITCH_REG_AND_HASHT_BIND_NAME(reg);
		csa = cs_addrs;
		if (NULL == csa)	/* not BG or MM access method */
			continue;
		/* gv_target now points to ^#t in region "reg" */
		/* To write the LGTRIG logical jnl record, choose some region that has journaling enabled */
		if (!reg->read_only && !jnl_format_done && JNL_WRITE_LOGICAL_RECS(csa))
			lgtrig_reg = reg;
		if (!gv_target->root)
			continue;
		/* kill ^#t("#TNAME") */
		BUILD_HASHT_SUB_CURRKEY(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME));
		if (0 != gvcst_data())
		{	/* Issue error if we don't have permissions to touch ^#t global */
			if (reg->read_only)
				rts_error_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_TRIGMODREGNOTRW, 2, REG_LEN_STR(reg));
			gvcst_kill(TRUE);
		}
		/* Kill all descendents of ^#t(trigvn, ...) where trigvn is any global with a trigger,
		 * but skip the ^#t("#...",...) entries. Setup ^#t("$") as the key for op_gvorder
		 */
		BUILD_HASHT_SUB_CURRKEY(LITERAL_MAXHASHVAL, STRLEN(LITERAL_MAXHASHVAL));
		TREF(gv_last_subsc_null) = FALSE; /* We know it's not null, but the prior state is unreliable */
		this_db_updated = FALSE;
		while (TRUE)
		{
			op_gvorder(&curr_gbl_name);
			/* quit:$length(curr_gbl_name)=0 */
			if (0 == curr_gbl_name.str.len)
				break;
			/* $get(^#t(curr_gbl_name,#COUNT)) */
			BUILD_HASHT_SUB_SUB_CURRKEY(curr_gbl_name.str.addr, curr_gbl_name.str.len,
							LITERAL_HASHCOUNT, STRLEN(LITERAL_HASHCOUNT));
			if (gvcst_get(&trigger_count))
			{
				/* Now that we know there is something to kill, check if we have permissions to touch ^#t global */
				if (reg->read_only)
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_TRIGMODREGNOTRW, 2, REG_LEN_STR(reg));
				mv_count_ptr = &trigger_count;
				count = MV_FORCE_UINT(mv_count_ptr);
				/* $get(^#t(curr_gbl_name,#CYCLE)) */
				BUILD_HASHT_SUB_SUB_CURRKEY(curr_gbl_name.str.addr, curr_gbl_name.str.len,
					LITERAL_HASHCYCLE, STRLEN(LITERAL_HASHCYCLE));
				if (!gvcst_get(&trigger_cycle))
				{	/* Found #COUNT, there must be #CYCLE */
					if (CDB_STAGNATE > t_tries)
						t_retry(cdb_sc_triggermod);
					assert(WBTEST_HELPOUT_TRIGDEFBAD == gtm_white_box_test_case_number);
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_TRIGDEFBAD, 6,
							curr_gbl_name.str.len, curr_gbl_name.str.addr,
							curr_gbl_name.str.len, curr_gbl_name.str.addr, LEN_AND_LIT("\"#CYCLE\""),
							ERR_TEXT, 2, RTS_ERROR_TEXT("#CYCLE field is missing"));
				}
				mv_cycle_ptr = &trigger_cycle;
				cycle = MV_FORCE_UINT(mv_cycle_ptr);
				if (!jnl_format_done && JNL_WRITE_LOGICAL_RECS(csa))
				{
					jnl_format(JNL_LGTRIG, NULL, &trigjrec, 0);
					jnl_format_done = TRUE;
				}
				/* kill ^#t(curr_gbl_name) */
				BUILD_HASHT_SUB_CURRKEY(curr_gbl_name.str.addr, curr_gbl_name.str.len);
				gvcst_kill(TRUE);
				/* Note : ^#t(curr_gbl_name,"#TRHASH") is also killed as part of the above */
				cycle++;
				MV_FORCE_MVAL(&trigger_cycle, cycle);
				/* set ^#t(curr_gbl_name,#CYCLE)=trigger_cycle */
				SET_TRIGGER_GLOBAL_SUB_SUB_MVAL(curr_gbl_name.str.addr, curr_gbl_name.str.len,
					LITERAL_HASHCYCLE, STRLEN(LITERAL_HASHCYCLE), trigger_cycle, result);
				assert(PUT_SUCCESS == result);
				this_db_updated = TRUE;
				triggers_deleted += count;
			} /* else there is no #COUNT, hence no triggers; leave #CYCLE alone */
			/* get ready for op_gvorder() call for next trigger under ^#t */
			BUILD_HASHT_SUB_CURRKEY(curr_gbl_name.str.addr, curr_gbl_name.str.len);
		}
		if (this_db_updated)
		{
			csa->incr_db_trigger_cycle = TRUE;
			if (dollar_ztrigger_invoked)
			{	/* increment db_dztrigger_cycle so that next gvcst_put/gvcst_kill in this transaction,
				 * on this region, will re-read. See trigger_update.c for a comment on why it is okay
				 * for db_dztrigger_cycle to be incremented more than once in the same transaction
				 */
				csa->db_dztrigger_cycle++;
			}
		}
	}
	if (!jnl_format_done && (NULL != lgtrig_reg))
	{	/* There was no journaled region that had a ^#t update, but found at least one journaled region
		 * so write a LGTRIG logical jnl record there.
		 */
		GVTR_SWITCH_REG_AND_HASHT_BIND_NAME(lgtrig_reg);
		csa = cs_addrs;
		JNLPOOL_INIT_IF_NEEDED(csa, csa->hdr, csa->nl);	/* see previous usage for comment on why it is needed */
		assert(dollar_tlevel);
		T_BEGIN_SETORKILL_NONTP_OR_TP(ERR_TRIGLOADFAIL);	/* needed to set update_trans TRUE on this region
									 * even if NO db updates happen to ^#t nodes. */
		jnl_format(JNL_LGTRIG, NULL, &trigjrec, 0);
		jnl_format_done = TRUE;
	}
	if (triggers_deleted)
	{
		util_out_print_gtmio("All existing triggers (count = !UL) deleted", FLUSH, triggers_deleted);
		trig_stats[STATS_DELETED] += triggers_deleted;
		trig_stats[STATS_NOERROR_TRIGFILE]++;
	} else
	{
		util_out_print_gtmio("No matching triggers found for deletion", FLUSH);
		trig_stats[STATS_UNCHANGED_TRIGFILE]++;
	}
}
Example No. 10
void	lke_show(void)
{
	bool			locks, all = TRUE, wait = TRUE, interactive = FALSE, match = FALSE, memory = TRUE, nocrit = TRUE;
	boolean_t		exact = FALSE, was_crit;
	int4			pid;
	size_t			ls_len;
	int			n;
	char 			regbuf[MAX_RN_LEN], nodebuf[32], one_lockbuf[MAX_KEY_SZ];
	mlk_ctldata_ptr_t	ctl;
	mstr			reg, node, one_lock;

	error_def(ERR_UNIMPLOP);
	error_def(ERR_TEXT);

	/* Get all command parameters */
	reg.addr = regbuf;
	reg.len = SIZEOF(regbuf);
	node.addr = nodebuf;
	node.len = SIZEOF(nodebuf);
	one_lock.addr = one_lockbuf;
	one_lock.len = SIZEOF(one_lockbuf);

	if (lke_getcli(&all, &wait, &interactive, &pid, &reg, &node, &one_lock, &memory, &nocrit, &exact) == 0)
		return;

	/* Search all regions specified on the command line */
	for (gv_cur_region = gd_header->regions, n = 0; n != gd_header->n_regions; ++gv_cur_region, ++n)
	{
		/* If region matches and is open */
		if ((reg.len == 0
		     || (gv_cur_region->rname_len == reg.len  &&  memcmp(gv_cur_region->rname, reg.addr, reg.len) == 0))
		    && gv_cur_region->open)
		{
			match = TRUE;
			util_out_print("!/!AD!/", NOFLUSH, REG_LEN_STR(gv_cur_region));

			/* If distributed database, the region is located on another node */
			if (gv_cur_region->dyn.addr->acc_meth == dba_cm)
			{
#				if defined(LKE_WORKS_OK_WITH_CM)
				/* Obtain lock info from the remote node */
				locks = gtcmtr_lke_showreq(gv_cur_region->dyn.addr->cm_blk, gv_cur_region->cmx_regnum,
							   all, wait, pid, &node);
#				else
				gtm_putmsg(VARLSTCNT(10) ERR_UNIMPLOP, 0, ERR_TEXT, 2,
						LEN_AND_LIT("GT.CM region - locks must be displayed on the local node"),
						ERR_TEXT, 2, REG_LEN_STR(gv_cur_region));
				continue;
#				endif
			} else if (gv_cur_region->dyn.addr->acc_meth == dba_bg  || gv_cur_region->dyn.addr->acc_meth == dba_mm)
			{	/* Local region */
				cs_addrs = &FILE_INFO(gv_cur_region)->s_addrs;
				ls_len = (size_t)(cs_addrs->lock_addrs[1] - cs_addrs->lock_addrs[0]);
				ctl = (mlk_ctldata_ptr_t)malloc(ls_len);
				/* Prevent any modification of the lock space while we make a local copy of it */
				if (cs_addrs->critical != NULL)
					crash_count = cs_addrs->critical->crashcnt;
				was_crit = cs_addrs->now_crit;
				if (!nocrit && !was_crit)
					grab_crit(gv_cur_region);
				longcpy((uchar_ptr_t)ctl, (uchar_ptr_t)cs_addrs->lock_addrs[0], ls_len);
				if (!nocrit && !was_crit)
					rel_crit(gv_cur_region);
				locks = ctl->blkroot == 0 ?
						FALSE:
						lke_showtree(NULL, (mlk_shrblk_ptr_t)R2A(ctl->blkroot), all, wait, pid,
												one_lock, memory);
				free(ctl);
			} else
			{
				util_out_print(NULL, RESET);
				util_out_print("Region is not BG, MM, or CM", FLUSH);
				locks = TRUE;
			}
			if (!locks)
			{
				util_out_print(NULL, RESET);
				util_out_print("No locks were found in !AD", FLUSH, REG_LEN_STR(gv_cur_region));
			}
		}
	}

	if (!match  &&  reg.len != 0)
		rts_error(VARLSTCNT(4) ERR_NOREGION, 2, reg.len, reg.addr);

}
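lke_show() copies the whole lock space into private memory while holding crit and only then walks the copy, keeping the critical section as short as possible. A minimal pthread analogue of that snapshot-under-lock pattern (illustrative data and names):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t	crit = PTHREAD_MUTEX_INITIALIZER;	/* stands in for the region's crit */
static int		shared_locks[4] = { 1, 2, 3, 4 };	/* stands in for the shared lock space */

int main(void)
{
	int	snapshot[4];

	pthread_mutex_lock(&crit);				/* grab_crit() */
	memcpy(snapshot, shared_locks, sizeof(snapshot));	/* longcpy() of the lock space */
	pthread_mutex_unlock(&crit);				/* rel_crit(): hold crit only for the copy */
	for (int i = 0; i < 4; i++)				/* lke_showtree() over the private copy */
		printf("lock %d\n", snapshot[i]);
	return 0;
}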
Example No. 11
int4 mupip_set_file(int db_fn_len, char *db_fn)
{
	bool			got_standalone;
	boolean_t		bypass_partial_recov, need_standalone = FALSE;
	char			acc_spec[MAX_ACC_METH_LEN], ver_spec[MAX_DB_VER_LEN], exit_stat, *fn;
	unsigned short		acc_spec_len = MAX_ACC_METH_LEN, ver_spec_len = MAX_DB_VER_LEN;
	int			fd, fn_len;
	int4			status;
	int4			status1;
	int			glbl_buff_status, defer_status, rsrvd_bytes_status,
				extn_count_status, lock_space_status, disk_wait_status;
	int4			new_disk_wait, new_cache_size, new_extn_count, new_lock_space, reserved_bytes, defer_time;
	sgmnt_data_ptr_t	csd;
	tp_region		*rptr, single;
	enum db_acc_method	access, access_new;
	enum db_ver		desired_dbver;
	gd_region		*temp_cur_region;
	char			*errptr, *command = "MUPIP SET VERSION";
	int			save_errno;

	error_def(ERR_DBPREMATEOF);
	error_def(ERR_DBRDERR);
	error_def(ERR_DBRDONLY);
	error_def(ERR_INVACCMETHOD);
	error_def(ERR_MUNOACTION);
	error_def(ERR_RBWRNNOTCHG);
	error_def(ERR_WCERRNOTCHG);
	error_def(ERR_WCWRNNOTCHG);
	error_def(ERR_MMNODYNDWNGRD);

	exit_stat = EXIT_NRM;
	defer_status = cli_present("DEFER_TIME");
	if (defer_status)
		need_standalone = TRUE;
	bypass_partial_recov = cli_present("PARTIAL_RECOV_BYPASS") == CLI_PRESENT;
	if (bypass_partial_recov)
		need_standalone = TRUE;
	if (disk_wait_status = cli_present("WAIT_DISK"))
	{
		if (cli_get_int("WAIT_DISK", &new_disk_wait))
		{
			if (new_disk_wait < 0)
			{
				util_out_print("!UL negative, minimum WAIT_DISK allowed is 0.", TRUE, new_disk_wait);
				return (int4)ERR_WCWRNNOTCHG;
			}
			need_standalone = TRUE;
		} else
		{
			util_out_print("Error getting WAIT_DISK qualifier value", TRUE);
			return (int4)ERR_WCWRNNOTCHG;
		}
	}
	if (glbl_buff_status = cli_present("GLOBAL_BUFFERS"))
	{
		if (cli_get_int("GLOBAL_BUFFERS", &new_cache_size))
		{
			if (new_cache_size > WC_MAX_BUFFS)
			{
				util_out_print("!UL too large, maximum write cache buffers allowed is !UL", TRUE, new_cache_size,
						WC_MAX_BUFFS);
				return (int4)ERR_WCWRNNOTCHG;
			}
			if (new_cache_size < WC_MIN_BUFFS)
			{
				util_out_print("!UL too small, minimum cache buffers allowed is !UL", TRUE, new_cache_size,
						WC_MIN_BUFFS);
				return (int4)ERR_WCWRNNOTCHG;
			}
		} else
		{
			util_out_print("Error getting GLOBAL BUFFER qualifier value", TRUE);
			return (int4)ERR_WCWRNNOTCHG;
		}
		need_standalone = TRUE;
	}
	/* EXTENSION_COUNT does not require standalone access and hence need_standalone will not be set to TRUE for this. */
	if (extn_count_status = cli_present("EXTENSION_COUNT"))
	{
		if (cli_get_int("EXTENSION_COUNT", &new_extn_count))
		{
			if (new_extn_count > MAX_EXTN_COUNT)
			{
				util_out_print("!UL too large, maximum extension count allowed is !UL", TRUE, new_extn_count,
						MAX_EXTN_COUNT);
				return (int4)ERR_WCWRNNOTCHG;
			}
			if (new_extn_count < MIN_EXTN_COUNT)
			{
				util_out_print("!UL too small, minimum extension count allowed is !UL", TRUE, new_extn_count,
						MIN_EXTN_COUNT);
				return (int4)ERR_WCWRNNOTCHG;
			}
		} else
		{
			util_out_print("Error getting EXTENSION COUNT qualifier value", TRUE);
			return (int4)ERR_WCWRNNOTCHG;
		}
	}
	if (lock_space_status = cli_present("LOCK_SPACE"))
	{
		if (cli_get_int("LOCK_SPACE", &new_lock_space))
		{
			if (new_lock_space > MAX_LOCK_SPACE)
			{
				util_out_print("!UL too large, maximum lock space allowed is !UL", TRUE,
						new_lock_space, MAX_LOCK_SPACE);
				return (int4)ERR_WCWRNNOTCHG;
			}
			else if (new_lock_space < MIN_LOCK_SPACE)
			{
				util_out_print("!UL too small, minimum lock space allowed is !UL", TRUE,
						new_lock_space, MIN_LOCK_SPACE);
				return (int4)ERR_WCWRNNOTCHG;
			}
		} else
		{
			util_out_print("Error getting LOCK_SPACE qualifier value", TRUE);
			return (int4)ERR_WCWRNNOTCHG;
		}
		need_standalone = TRUE;
	}
	if (rsrvd_bytes_status = cli_present("RESERVED_BYTES"))
	{
		if (!cli_get_int("RESERVED_BYTES", &reserved_bytes))
		{
			util_out_print("Error getting RESERVED BYTES qualifier value", TRUE);
			return (int4)ERR_RBWRNNOTCHG;
		}
		need_standalone = TRUE;
	}
	if (cli_present("ACCESS_METHOD"))
	{
		cli_get_str("ACCESS_METHOD", acc_spec, &acc_spec_len);
		cli_strupper(acc_spec);
		if (0 == memcmp(acc_spec, "MM", acc_spec_len))
			access = dba_mm;
		else  if (0 == memcmp(acc_spec, "BG", acc_spec_len))
			access = dba_bg;
		else
			mupip_exit(ERR_INVACCMETHOD);
		need_standalone = TRUE;
	} else
		access = n_dba;		/* really want to keep current method,
					    which has not yet been read */
	if (cli_present("VERSION"))
	{
		assert(!need_standalone);
		cli_get_str("VERSION", ver_spec, &ver_spec_len);
		cli_strupper(ver_spec);
		if (0 == memcmp(ver_spec, "V4", ver_spec_len))
			desired_dbver = GDSV4;
		else  if (0 == memcmp(ver_spec, "V5", ver_spec_len))
			desired_dbver = GDSV5;
		else
			GTMASSERT;		/* CLI should prevent us ever getting here */
	} else
		desired_dbver = GDSVLAST;	/* really want to keep version, which has not yet been read */
	if (region)
		rptr = grlist;
	else
	{
		rptr = &single;
		memset(&single, 0, sizeof(single));
	}

	csd = (sgmnt_data *)malloc(ROUND_UP(sizeof(sgmnt_data), DISK_BLOCK_SIZE));
	in_backup = FALSE;		/* Only want yes/no from mupfndfil, not an address */
	for (;  rptr != NULL;  rptr = rptr->fPtr)
	{
		if (region)
		{
			if (dba_usr == rptr->reg->dyn.addr->acc_meth)
			{
				util_out_print("!/Region !AD is not a GDS access type", TRUE, REG_LEN_STR(rptr->reg));
				exit_stat |= EXIT_WRN;
				continue;
			}
			if (!mupfndfil(rptr->reg, NULL))
				continue;
			fn = (char *)rptr->reg->dyn.addr->fname;
			fn_len = rptr->reg->dyn.addr->fname_len;
		} else
		{
			fn = db_fn;
			fn_len = db_fn_len;
		}
		mu_gv_cur_reg_init();
		strcpy((char *)gv_cur_region->dyn.addr->fname, fn);
		gv_cur_region->dyn.addr->fname_len = fn_len;
		if (!need_standalone)
		{
			gvcst_init(gv_cur_region);
			change_reg();	/* sets cs_addrs and cs_data */
			if (gv_cur_region->read_only)
			{
				gtm_putmsg(VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region));
				exit_stat |= EXIT_ERR;
				gds_rundown();
				mu_gv_cur_reg_free();
				continue;
			}
			grab_crit(gv_cur_region);
			status = EXIT_NRM;
			access_new = (n_dba == access ? cs_data->acc_meth : access);
							/* recalculate; n_dba is a proxy for no change */
			change_fhead_timer("FLUSH_TIME", cs_data->flush_time,
					   (dba_bg == access_new ? TIM_FLU_MOD_BG : TIM_FLU_MOD_MM),
					   FALSE);
			if (GDSVLAST != desired_dbver)
			{
				if ((dba_mm != access_new) || (GDSV4 != desired_dbver))
					status1 = desired_db_format_set(gv_cur_region, desired_dbver, command);
				else
				{
					status1 = ERR_MMNODYNDWNGRD;
					gtm_putmsg(VARLSTCNT(4) status1, 2, REG_LEN_STR(gv_cur_region));
				}
				if (SS_NORMAL != status1)
				{	/* "desired_db_format_set" would have printed appropriate error messages */
					if (ERR_MUNOACTION != status1)
					{	/* real error occurred while setting the db format. skip to next region */
						status = EXIT_ERR;
					}
				}
			}
			if (EXIT_NRM == status)
			{
				if (extn_count_status)
					cs_data->extension_size = (uint4)new_extn_count;
				wcs_flu(WCSFLU_FLUSH_HDR);
				if (extn_count_status)
					util_out_print("Database file !AD now has extension count !UL",
						TRUE, fn_len, fn, cs_data->extension_size);
				if (GDSVLAST != desired_dbver)
					util_out_print("Database file !AD now has desired DB format !AD", TRUE,
						fn_len, fn, LEN_AND_STR(gtm_dbversion_table[cs_data->desired_db_format]));
			} else
				exit_stat |= status;
			rel_crit(gv_cur_region);
			gds_rundown();
		} else
		{	/* Following part needs standalone access */
			assert(GDSVLAST == desired_dbver);
			got_standalone = mu_rndwn_file(gv_cur_region, TRUE);
			if (FALSE == got_standalone)
				return (int4)ERR_WCERRNOTCHG;
			/* we should open it (for changing) after mu_rndwn_file, since mu_rndwn_file changes the file header too */
			if (-1 == (fd = OPEN(fn, O_RDWR)))
			{
				save_errno = errno;
				errptr = (char *)STRERROR(save_errno);
				util_out_print("open : !AZ", TRUE, errptr);
				exit_stat |= EXIT_ERR;
				db_ipcs_reset(gv_cur_region, FALSE);
				mu_gv_cur_reg_free();
				continue;
			}
			LSEEKREAD(fd, 0, csd, sizeof(sgmnt_data), status);
			if (0 != status)
			{
				save_errno = errno;
				PERROR("Error reading header of file");
				errptr = (char *)STRERROR(save_errno);
				util_out_print("read : !AZ", TRUE, errptr);
				util_out_print("Error reading header of file", TRUE);
				util_out_print("Database file !AD not changed:  ", TRUE, fn_len, fn);
				if (-1 != status)
					rts_error(VARLSTCNT(4) ERR_DBRDERR, 2, fn_len, fn);
				else
					rts_error(VARLSTCNT(4) ERR_DBPREMATEOF, 2, fn_len, fn);
			}
			if (rsrvd_bytes_status)
			{
				if (reserved_bytes > MAX_RESERVE_B(csd))
				{
					util_out_print("!UL too large, maximum reserved bytes allowed is !UL for database file !AD",
							TRUE, reserved_bytes, MAX_RESERVE_B(csd), fn_len, fn);
					close(fd);
					db_ipcs_reset(gv_cur_region, FALSE);
					return (int4)ERR_RBWRNNOTCHG;
				}
				csd->reserved_bytes = reserved_bytes;
			}
			access_new = (n_dba == access ? csd->acc_meth : access);
							/* recalculate; n_dba is a proxy for no change */
			change_fhead_timer("FLUSH_TIME", csd->flush_time,
					   (dba_bg == access_new ? TIM_FLU_MOD_BG : TIM_FLU_MOD_MM),
					   FALSE);
			if ((n_dba != access) && (csd->acc_meth != access))	/* n_dba is a proxy for no change */
			{
				if (dba_mm == access)
					csd->defer_time = 1;			/* defer defaults to 1 */
				csd->acc_meth = access;
				if (0 == csd->n_bts)
				{
					csd->n_bts = WC_DEF_BUFFS;
					csd->bt_buckets = getprime(csd->n_bts);
				}
			}
			if (glbl_buff_status)
			{
				csd->n_bts = BT_FACTOR(new_cache_size);
				csd->bt_buckets = getprime(csd->n_bts);
				csd->n_wrt_per_flu = 7;
				csd->flush_trigger = FLUSH_FACTOR(csd->n_bts);
			}
			if (disk_wait_status)
				csd->wait_disk_space = new_disk_wait;
			if (extn_count_status)
				csd->extension_size = (uint4)new_extn_count;
			if (lock_space_status)
				csd->lock_space_size = (uint4)new_lock_space * OS_PAGELET_SIZE;
			if (bypass_partial_recov)
			{
				csd->file_corrupt = FALSE;
				util_out_print("Database file !AD now has partial recovery flag set to  !UL(FALSE) ",
						TRUE, fn_len, fn, csd->file_corrupt);
			}
			if (dba_mm == access_new)
			{
				if (CLI_NEGATED == defer_status)
					csd->defer_time = 0;
				else  if (CLI_PRESENT == defer_status)
				{
					if (!cli_get_num("DEFER_TIME", &defer_time))
					{
						util_out_print("Error getting DEFER_TIME qualifier value", TRUE);
						db_ipcs_reset(gv_cur_region, FALSE);
						return (int4)ERR_RBWRNNOTCHG;
					}
					if (-1 > defer_time)
					{
						util_out_print("DEFER_TIME cannot take negative values less than -1", TRUE);
						util_out_print("Database file !AD not changed", TRUE, fn_len, fn);
						exit_stat |= EXIT_WRN;
						db_ipcs_reset(gv_cur_region, FALSE);
						mu_gv_cur_reg_free();
						continue;
					}
					csd->defer_time = defer_time;
				}
				if (csd->blks_to_upgrd)
				{
					util_out_print("MM access method cannot be set if there are blocks to upgrade",	TRUE);
					util_out_print("Database file !AD not changed", TRUE, fn_len, fn);
					exit_stat |= EXIT_WRN;
					db_ipcs_reset(gv_cur_region, FALSE);
					mu_gv_cur_reg_free();
					continue;
				}
				if (GDSVCURR != csd->desired_db_format)
				{
					util_out_print("MM access method cannot be set in DB compatibility mode",
						TRUE);
					util_out_print("Database file !AD not changed", TRUE, fn_len, fn);
					exit_stat |= EXIT_WRN;
					db_ipcs_reset(gv_cur_region, FALSE);
					mu_gv_cur_reg_free();
					continue;
				}
				if (JNL_ENABLED(csd) && csd->jnl_before_image)
				{
					util_out_print("MM access method cannot be set with BEFORE image journaling", TRUE);
					util_out_print("Database file !AD not changed", TRUE, fn_len, fn);
					exit_stat |= EXIT_WRN;
					db_ipcs_reset(gv_cur_region, FALSE);
					mu_gv_cur_reg_free();
					continue;
				}
				csd->jnl_before_image = FALSE;
			} else
			{
				if (defer_status)
				{
					util_out_print("DEFER cannot be specified with BG access method.", TRUE);
					util_out_print("Database file !AD not changed", TRUE, fn_len, fn);
					exit_stat |= EXIT_WRN;
					db_ipcs_reset(gv_cur_region, FALSE);
					mu_gv_cur_reg_free();
					continue;
				}
			}
			LSEEKWRITE(fd, 0, csd, sizeof(sgmnt_data), status);
			if (0 != status)
			{
				save_errno = errno;
				errptr = (char *)STRERROR(save_errno);
				util_out_print("write : !AZ", TRUE, errptr);
				util_out_print("Error writing header of file", TRUE);
				util_out_print("Database file !AD not changed: ", TRUE, fn_len, fn);
				rts_error(VARLSTCNT(4) ERR_DBRDERR, 2, fn_len, fn);
			}
			close(fd);
			/* --------------------- report results ------------------------- */
			if (glbl_buff_status)
				util_out_print("Database file !AD now has !UL global buffers",
						TRUE, fn_len, fn, csd->n_bts);
			if (defer_status && (dba_mm == csd->acc_meth))
				util_out_print("Database file !AD now has defer_time set to !SL",
						TRUE, fn_len, fn, csd->defer_time);
			if (rsrvd_bytes_status)
				util_out_print("Database file !AD now has !UL reserved bytes",
						TRUE, fn_len, fn, csd->reserved_bytes);
			if (extn_count_status)
				util_out_print("Database file !AD now has extension count !UL",
						TRUE, fn_len, fn, csd->extension_size);
			if (lock_space_status)
				util_out_print("Database file !AD now has lock space !UL pages",
						TRUE, fn_len, fn, csd->lock_space_size/OS_PAGELET_SIZE);
			if (disk_wait_status)
				util_out_print("Database file !AD now has wait disk set to !UL seconds",
						TRUE, fn_len, fn, csd->wait_disk_space);
			db_ipcs_reset(gv_cur_region, FALSE);
		} /* end of else part if (!need_standalone) */
		mu_gv_cur_reg_free();
	}
	free(csd);
	assert(!(exit_stat & EXIT_INF));
	return (exit_stat & EXIT_ERR ? (int4)ERR_WCERRNOTCHG :
		(exit_stat & EXIT_WRN ? (int4)ERR_WCWRNNOTCHG : SS_NORMAL));
}
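The function above accumulates per-region outcomes into a set of severity bit flags and maps only the worst severity to a return code at the very end. A minimal, self-contained sketch of that idiom follows; the EXIT_* values and the two error-code numbers are illustrative stand-ins, not the GT.M definitions.

#include <stdio.h>

/* Assumed, illustrative severity flags and return codes */
#define EXIT_NRM	0x0
#define EXIT_INF	0x1
#define EXIT_WRN	0x2
#define EXIT_ERR	0x4
#define SS_NORMAL	0
#define ERR_WCWRNNOTCHG	150	/* hypothetical numeric values */
#define ERR_WCERRNOTCHG	151

static int process_region(int i)
{	/* pretend regions 0 and 1 succeed, region 2 warns, region 3 errors */
	return (2 == i) ? EXIT_WRN : ((3 == i) ? EXIT_ERR : EXIT_NRM);
}

int main(void)
{
	int	region, exit_stat = EXIT_NRM;

	for (region = 0; region < 4; region++)
		exit_stat |= process_region(region);	/* OR-ing keeps the worst severity seen */
	/* Error dominates warning; warning dominates normal -- same mapping as the return above */
	printf("%d\n", (exit_stat & EXIT_ERR) ? ERR_WCERRNOTCHG
		: ((exit_stat & EXIT_WRN) ? ERR_WCWRNNOTCHG : SS_NORMAL));
	return 0;
}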
Example No. 12
unsigned char mu_cre_file(void)
{
	unsigned char		*inadr[2], *c, exit_stat;
	enum db_acc_method	temp_acc_meth;
	uint4			lcnt, retadr[2];
	int4			blk_init_size, initial_alq, free_blocks;
	gtm_uint64_t		free_blocks_ll, blocks_for_extension;
	char			buff[GLO_NAME_MAXLEN], fn_buff[MAX_FN_LEN];
	unsigned int		status;
	int			free_space;
	struct FAB		*fcb;
	struct NAM		nam;
	gds_file_id		new_id;
	io_status_block_disk	iosb;
	char			node[16];
	short			len;
	struct {
		short	blen;
		short	code;
		char	*buf;
		short	*len;
		int4	terminator;
	} item = {15, SYI$_NODENAME, &node, &len, 0};
	$DESCRIPTOR(desc, buff);

	exit_stat = EXIT_NRM;
/* The following calculations should duplicate the BT_SIZE macro from GDSBT and the LOCK_BLOCK macro from GDSFHEAD.H,
 * but without using a sgmnt_data which is not yet set up at this point
 */

#ifdef GT_CX_DEF
	/* This section needs serious changes for the fileheader changes in V5 if it is ever resurrected */
	over_head = DIVIDE_ROUND_UP(SIZEOF_FILE_HDR_DFLT
			+ (WC_MAX_BUFFS + getprime(WC_MAX_BUFFS) + 1) * SIZEOF(bt_rec), DISK_BLOCK_SIZE);
	if (gv_cur_region->dyn.addr->acc_meth == dba_bg)
	{
		free_space = over_head - DIVIDE_ROUND_UP(SIZEOF_FILE_HDR_DFLT
			+ (gv_cur_region->dyn.addr->global_buffers + getprime(gv_cur_region->dyn.addr->global_buffers) + 1)
				* SIZEOF(bt_rec), DISK_BLOCK_SIZE);
		over_head += gv_cur_region->dyn.addr->lock_space ? gv_cur_region->dyn.addr->lock_space
								 : DEF_LOCK_SIZE / OS_PAGELET_SIZE;
	} else if (gv_cur_region->dyn.addr->acc_meth == dba_mm)
	{
		free_space = over_head - DIVIDE_ROUND_UP(SIZEOF_FILE_HDR_DFLT, DISK_BLOCK_SIZE);
		if (gv_cur_region->dyn.addr->lock_space)
		{
			over_head += gv_cur_region->dyn.addr->lock_space;
			free_space += gv_cur_region->dyn.addr->lock_space;
		} else
		{
			over_head += DEF_LOCK_SIZE / OS_PAGELET_SIZE;
			free_space += DEF_LOCK_SIZE / OS_PAGELET_SIZE;
		}
	}
	free_space *= DISK_BLOCK_SIZE;
#else
	assert(START_VBN_CURRENT > DIVIDE_ROUND_UP(SIZEOF_FILE_HDR_DFLT, DISK_BLOCK_SIZE));
	free_space = ((START_VBN_CURRENT - 1) * DISK_BLOCK_SIZE) - SIZEOF_FILE_HDR_DFLT;
#endif
	switch (gv_cur_region->dyn.addr->acc_meth)
	{
		case dba_bg:
		case dba_mm:
			mu_cre_vms_structs(gv_cur_region);
			fcb = ((vms_gds_info *)(gv_cur_region->dyn.addr->file_cntl->file_info))->fab;
			cs_addrs = &((vms_gds_info *)(gv_cur_region->dyn.addr->file_cntl->file_info))->s_addrs;

			fcb->fab$b_shr &= FAB$M_NIL;	/* No access to this file while it is created */
			fcb->fab$l_nam = &nam;
			nam = cc$rms_nam;
			/* There are (bplmap - 1) non-bitmap blocks per bitmap, so add (bplmap - 2) to number of non-bitmap blocks
			 * and divide by (bplmap - 1) to get total number of bitmaps for expanded database. (must round up in this
			 * manner as every non-bitmap block must have an associated bitmap)
			*/
			fcb->fab$l_alq += DIVIDE_ROUND_UP(fcb->fab$l_alq, BLKS_PER_LMAP - 1);	/* Bitmaps */
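			/* Worked example (illustrative numbers): with BLKS_PER_LMAP of 512 and a requested
			 * allocation of 1000 blocks, DIVIDE_ROUND_UP(1000, 511) adds 2 bitmap blocks,
			 * giving 1002 GDS blocks in all -- every non-bitmap block is covered by a bitmap.
			 */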
			blk_init_size = fcb->fab$l_alq;
			fcb->fab$l_alq *= BLK_SIZE / DISK_BLOCK_SIZE;
			fcb->fab$l_alq += START_VBN_CURRENT - 1;
			initial_alq = fcb->fab$l_alq;
			fcb->fab$w_mrs = 512;				/* no longer a relevant field to us */
			break;
		case dba_usr:
			util_out_print("Database file for region !AD not created; access method is not GDS.", TRUE,
				REG_LEN_STR(gv_cur_region));
			return EXIT_WRN;
		default:
			gtm_putmsg(VARLSTCNT(1) ERR_BADACCMTHD);
			return EXIT_ERR;
	}
	nam.nam$b_ess = SIZEOF(fn_buff);
	nam.nam$l_esa = fn_buff;
	nam.nam$b_nop |= NAM$M_SYNCHK;
	status = sys$parse(fcb, 0, 0);
	if (RMS$_NORMAL != status)
	{
		gtm_putmsg(VARLSTCNT(8) ERR_DBFILERR, 2, fcb->fab$b_fns, fcb->fab$l_fna, status, 0, fcb->fab$l_stv, 0);
		return EXIT_ERR;
	}
	if (nam.nam$b_node != 0)
	{
		status = sys$getsyiw(EFN$C_ENF, 0, 0, &item, &iosb, 0, 0);
		if (SS$_NORMAL == status)
			status = iosb.cond;
		if (SS$_NORMAL == status)
		{
			if (len == nam.nam$b_node-2 && !memcmp(nam.nam$l_esa, node, len))
			{
				fcb->fab$l_fna = nam.nam$l_esa + nam.nam$b_node;
				fcb->fab$b_fns = nam.nam$b_esl - nam.nam$b_node;
			}
		} else
		{
			util_out_print("Could not get node for !AD.", TRUE, REG_LEN_STR(gv_cur_region));
			exit_stat = EXIT_WRN;
		}
	}
	assert(gv_cur_region->dyn.addr->acc_meth == dba_bg || gv_cur_region->dyn.addr->acc_meth == dba_mm);
	nam.nam$l_esa = NULL;
	nam.nam$b_esl = 0;
	status = sys$create(fcb);
	if (status != RMS$_CREATED && status != RMS$_FILEPURGED)
	{
		switch(status)
		{
			case RMS$_FLK:
		 		util_out_print("Database file for region !AD not created; currently locked by another user.", TRUE,
					REG_LEN_STR(gv_cur_region));
				exit_stat = EXIT_INF;
				break;
			case RMS$_NORMAL:
		 		util_out_print("Database file for region !AD not created; already exists.", TRUE,
					REG_LEN_STR(gv_cur_region));
				exit_stat = EXIT_INF;
				break;
			case RMS$_SUPPORT:
				util_out_print("Database file for region !AD not created; cannot create across network.", TRUE,
					REG_LEN_STR(gv_cur_region));
				exit_stat = EXIT_WRN;
				break;
			case RMS$_FUL:
				send_msg(VARLSTCNT(8) ERR_DBFILERR, 2, fcb->fab$b_fns, fcb->fab$l_fna,
					status, 0, fcb->fab$l_stv, 0);
				/* intentionally falling through */
			default:
				gtm_putmsg(VARLSTCNT(8) ERR_DBFILERR, 2, fcb->fab$b_fns, fcb->fab$l_fna,
					status, 0, fcb->fab$l_stv, 0);
				exit_stat = EXIT_ERR;
		}
		sys$dassgn(fcb->fab$l_stv);
		return exit_stat;
	}

	memcpy(new_id.dvi, nam.nam$t_dvi, SIZEOF(nam.nam$t_dvi));
	memcpy(new_id.did, nam.nam$w_did, SIZEOF(nam.nam$w_did));
	memcpy(new_id.fid, nam.nam$w_fid, SIZEOF(nam.nam$w_fid));
	global_name("GT$S", &new_id, buff);		/* 2nd parm is actually a gds_file_id * in global_name */
	desc.dsc$w_length = buff[0];			/* By definition, a gds_file_id is dvi,fid,did from nam */
	desc.dsc$a_pointer = &buff[1];
	cs_addrs->db_addrs[0] = cs_addrs->db_addrs[1] = inadr[0] = inadr[1] = inadr;	/* used to determine p0 or p1 allocation */
	status = init_sec(cs_addrs->db_addrs, &desc, fcb->fab$l_stv, (START_VBN_CURRENT - 1),
			  SEC$M_DZRO|SEC$M_GBL|SEC$M_WRT|SEC$M_EXPREG);
	if ((SS$_CREATED != status) && (SS$_NORMAL != status))
	{
		gtm_putmsg(VARLSTCNT(8) ERR_DBFILERR, 2, fcb->fab$b_fns, fcb->fab$l_fna, status, 0, fcb->fab$l_stv, 0);
		sys$dassgn(fcb->fab$l_stv);
		return EXIT_ERR;
	}
	cs_data = (sgmnt_data *)cs_addrs->db_addrs[0];
	memset(cs_data, 0, SIZEOF_FILE_HDR_DFLT);
	cs_data->createinprogress = TRUE;
	cs_data->trans_hist.total_blks = (initial_alq - (START_VBN_CURRENT - 1)) / (BLK_SIZE / DISK_BLOCK_SIZE);
	/* assert that total_blks stored in file-header = non-bitmap blocks (initial allocation) + bitmap blocks */
	assert(cs_data->trans_hist.total_blks == gv_cur_region->dyn.addr->allocation +
				DIVIDE_ROUND_UP(gv_cur_region->dyn.addr->allocation, BLKS_PER_LMAP - 1));
	cs_data->start_vbn = START_VBN_CURRENT;
	temp_acc_meth = gv_cur_region->dyn.addr->acc_meth;
	cs_data->acc_meth = gv_cur_region->dyn.addr->acc_meth = dba_bg;
	cs_data->extension_size = gv_cur_region->dyn.addr->ext_blk_count;
	mucregini(blk_init_size);
	cs_addrs->hdr->free_space = free_space;
#ifndef GT_CX_DEF
	cs_addrs->hdr->unbacked_cache = TRUE;
#endif
	cs_data->acc_meth = gv_cur_region->dyn.addr->acc_meth = temp_acc_meth;
	cs_data->createinprogress = FALSE;
	if (SS$_NORMAL == (status = disk_block_available(fcb->fab$l_stv, &free_blocks)))
	{
		blocks_for_extension = (cs_data->blk_size / DISK_BLOCK_SIZE *
				  (DIVIDE_ROUND_UP(EXTEND_WARNING_FACTOR * (gtm_uint64_t)cs_data->extension_size, BLKS_PER_LMAP - 1)
					 + EXTEND_WARNING_FACTOR * (gtm_uint64_t)cs_data->extension_size));
		if ((gtm_uint64_t)free_blocks < blocks_for_extension)
		{
			free_blocks_ll = (gtm_uint64_t)free_blocks;
			gtm_putmsg(VARLSTCNT(8) ERR_LOWSPACECRE, 6, fcb->fab$b_fns, fcb->fab$l_fna, EXTEND_WARNING_FACTOR,
					&blocks_for_extension, DISK_BLOCK_SIZE, &free_blocks_ll);
			send_msg(VARLSTCNT(8) ERR_LOWSPACECRE, 6, fcb->fab$b_fns, fcb->fab$l_fna, EXTEND_WARNING_FACTOR,
					&blocks_for_extension, DISK_BLOCK_SIZE, &free_blocks_ll);
		}
	}
	if (SS$_NORMAL == (status = sys$updsec(((vms_gds_info *)(gv_cur_region->dyn.addr->file_cntl->file_info))->s_addrs.db_addrs,
			NULL, PSL$C_USER, 0, efn_immed_wait, &iosb, NULL, 0)))
	{
		status = sys$synch(efn_immed_wait, &iosb);
		if (SS$_NORMAL == status)
			status = iosb.cond;
	} else  if (SS$_NOTMODIFIED == status)
		status = SS$_NORMAL;
	if (SS$_NORMAL == status)
		status = del_sec(SEC$M_GBL, &desc, 0);
	if (SS$_NORMAL == status)
		status = sys$deltva(cs_addrs->db_addrs, retadr, PSL$C_USER);
	if (SS$_NORMAL == status)
		status = sys$dassgn(fcb->fab$l_stv);
	if (SS$_NORMAL == status)
	{
	 	util_out_print("Database file for region !AD created.", TRUE, REG_LEN_STR(gv_cur_region));
		/* the open and close are an attempt to ensure that the file is available, not under the control of an ACP,
		 * before MUPIP exits */
		fcb->fab$b_shr = FAB$M_SHRPUT | FAB$M_SHRGET | FAB$M_UPI;
		fcb->fab$l_fop = 0;
		for (lcnt = 1;  (60 * MAX_OPEN_RETRY) >= lcnt;  lcnt++)
		{	/* per VMS engineering a delay is expected.  We will wait up to an hour as a
			 * Delete Global Section operation is essentially and inherently asynchronous in nature
			 * and could take an arbitrary amount of time.
			 */
			if (RMS$_FLK != (status = sys$open(fcb, NULL, NULL)))
				break;
			wcs_sleep(lcnt);
		}
		assert(RMS$_NORMAL == status);
		if (RMS$_NORMAL == status)
		{
			status = sys$close(fcb);
			assert(RMS$_NORMAL == status);
		}
		if (RMS$_NORMAL != status)
			exit_stat = EXIT_WRN;
	} else
		exit_stat = EXIT_ERR;
	if (RMS$_NORMAL != status)
		gtm_putmsg(VARLSTCNT(8) ERR_DBFILERR, 2, fcb->fab$b_fns, fcb->fab$l_fna, status, 0, fcb->fab$l_stv, 0);
	if ((MAX_RMS_RECORDSIZE - SIZEOF(shmpool_blk_hdr)) < cs_data->blk_size)
		gtm_putmsg(VARLSTCNT(5) ERR_MUNOSTRMBKUP, 3, fcb->fab$b_fns, fcb->fab$l_fna, 32 * 1024 - DISK_BLOCK_SIZE);
	return exit_stat;
}
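The creation-time allocation arithmetic above (bitmap rounding, the disk-block conversion, and the total-block identity asserted after mucregini()) can be checked in isolation. Below is a minimal sketch; BLKS_PER_LMAP, BLK_SIZE, DISK_BLOCK_SIZE and START_VBN_CURRENT are assumed stand-ins for the real constants.

#include <assert.h>
#include <stdio.h>

#define DIVIDE_ROUND_UP(x, y)	(((x) + (y) - 1) / (y))
#define BLKS_PER_LMAP		512	/* assumed: blocks covered by one local bitmap */
#define BLK_SIZE		4096	/* assumed GDS block size */
#define DISK_BLOCK_SIZE		512
#define START_VBN_CURRENT	513	/* assumed first virtual block of data */

int main(void)
{
	int	alq = 1000;	/* requested data blocks (the "allocation") */
	int	bitmaps = DIVIDE_ROUND_UP(alq, BLKS_PER_LMAP - 1);
	int	blk_init_size = alq + bitmaps;	/* GDS blocks including bitmaps */
	int	disk_alq = blk_init_size * (BLK_SIZE / DISK_BLOCK_SIZE) + (START_VBN_CURRENT - 1);
	int	total_blks = (disk_alq - (START_VBN_CURRENT - 1)) / (BLK_SIZE / DISK_BLOCK_SIZE);

	/* same identity as the assert on trans_hist.total_blks in mu_cre_file() */
	assert(total_blks == alq + DIVIDE_ROUND_UP(alq, BLKS_PER_LMAP - 1));
	printf("%d data + %d bitmap = %d GDS blocks (%d disk blocks)\n",
		alq, bitmaps, total_blks, disk_alq);
	return 0;
}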
Example No. 13
void gds_rundown(void)
{
	bool			is_mm, we_are_last_user, we_are_last_writer;
	boolean_t		ipc_deleted, remove_shm, cancelled_timer, cancelled_dbsync_timer, vermismatch;
	now_t			now;	/* for GET_CUR_TIME macro */
	char			*time_ptr, time_str[CTIME_BEFORE_NL + 2]; /* for GET_CUR_TIME macro */
	gd_region		*reg;
	int			save_errno, status;
	int4			semval, ftok_semval, sopcnt, ftok_sopcnt;
	short			crash_count;
	sm_long_t		munmap_len;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	struct shmid_ds		shm_buf;
	struct sembuf		sop[2], ftok_sop[2];
	uint4           	jnl_status;
	unix_db_info		*udi;
	jnl_private_control	*jpc;
	jnl_buffer_ptr_t	jbp;

	error_def(ERR_CRITSEMFAIL);
	error_def(ERR_DBCCERR);
	error_def(ERR_DBFILERR);
	error_def(ERR_DBRNDWNWRN);
	error_def(ERR_ERRCALL);
	error_def(ERR_GBLOFLOW);
	error_def(ERR_GTMASSERT);
	error_def(ERR_IPCNOTDEL);
	error_def(ERR_JNLFLUSH);
	error_def(ERR_RNDWNSEMFAIL);
	error_def(ERR_TEXT);
	error_def(ERR_WCBLOCKED);

	forced_exit = FALSE;		/* Okay, we're dying already -- let rel_crit live in peace now.
					 * If coming through a DAL, not necessarily dying. what to do then? -- nars -- 8/15/2001
					 */
	grabbed_access_sem = FALSE;
	jnl_status = 0;
	reg = gv_cur_region;			/* Local copy */

	/*
	 * early out for cluster regions
	 * to avoid tripping the assert below.
	 * Note:
	 *	This early out is consistent with VMS.  It has been
	 *	noted that all of the gtcm assignments
	 *	to gv_cur_region should use the TP_CHANGE_REG
	 *	macro.  This would also avoid the assert problem
	 *	and should be done eventually.
	 */
	if (dba_cm == reg->dyn.addr->acc_meth)
		return;

	udi = FILE_INFO(reg);
	csa = &udi->s_addrs;
	csd = csa->hdr;
	assert(csa == cs_addrs && csd == cs_data);
	if ((reg->open) && (dba_usr == csd->acc_meth))
	{
		change_reg();
		gvusr_rundown();
		return;
	}
	ESTABLISH(gds_rundown_ch);
	if (!reg->open)				/* Not open, no point to rundown */
	{
		if (reg->opening)		/* Died partway open, kill rest of way */
		{
			rel_crit(reg);
			mutex_cleanup(reg);
/* revisit this to handle MM properly  SMW 98/12/16
                        if (NULL != csa->nl)
                        {
                                status = shmdt((caddr_t)csa->nl);
                                if (-1 == status)
                                        send_msg(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
                                                ERR_TEXT, 2, LEN_AND_LIT("Error during shmdt"), errno);
                        }
*/
			shmdt((caddr_t)csa->nl);
			csa->nl = NULL;
		}
		REVERT;
		return;
	}
	switch(csd->acc_meth)
	{	/* Pass mm and bg through */
	    case dba_bg:
		is_mm = FALSE;
		break;
	    case dba_mm:
		is_mm = TRUE;
		break;
	    case dba_usr:
		assert(FALSE);
	    default:
		REVERT;
		return;
	}
	/* Cancel any pending flush timer for this region by this task */
	CANCEL_DB_TIMERS(reg, cancelled_timer, cancelled_dbsync_timer);
	we_are_last_user = FALSE;
	if (!csa->persistent_freeze)
		region_freeze(reg, FALSE, FALSE, FALSE);
	assert(!csa->read_lock);
	rel_crit(reg);		/* get locks to known state */
	mutex_cleanup(reg);
	/*
	 * We need to guarantee that none else access database file header when semid/shmid fields are reset.
	 * We already have created ftok semaphore in db_init or, mu_rndwn_file and did not remove it.
	 * So just lock it. We do it in blocking mode.
	 */
	if (!ftok_sem_lock(reg, FALSE, FALSE))
		rts_error(VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
	/*
	 * For mupip_jnl_recover we already have database access control semaphore.
	 * We do not release it. We release it from  mur_close_files.
	 */
	if (!mupip_jnl_recover)
	{
		sop[0].sem_num = 0; sop[0].sem_op = 0;	/* Wait for 0 */
		sop[1].sem_num = 0; sop[1].sem_op = 1;	/* Lock */
		sopcnt = 2;
		sop[0].sem_flg = sop[1].sem_flg = SEM_UNDO | IPC_NOWAIT; /* Don't wait the first time thru */
		SEMOP(udi->semid, sop, sopcnt, status);
		if (-1 == status)			/* We couldn't get it in one shot -- see if we already have it */
		{
			save_errno = errno;
			/* see comment about Linux specific difference in behaviour of semctl() with GETPID in gds_rundown_ch() */
			if (semctl(udi->semid, 0, GETPID) == process_id)
			{
				send_msg(VARLSTCNT(5) MAKE_MSG_INFO(ERR_CRITSEMFAIL), 2,
					DB_LEN_STR(reg),
					ERR_RNDWNSEMFAIL);
				REVERT;
				return;			/* Already in rundown for this region */
			}
			if (EAGAIN != save_errno)
			{
				assert(FALSE);
				rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, RTS_ERROR_TEXT("gds_rundown first semop/semctl"), save_errno);
			}
			sop[0].sem_flg = sop[1].sem_flg = SEM_UNDO;	/* Try again - blocking this time */
			SEMOP(udi->semid, sop, 2, status);
			if (-1 == status)			/* We couldn't get it at all.. */
				rts_error(VARLSTCNT(5) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), errno);
		}
	}
	grabbed_access_sem = TRUE;
	/*
	 * We now have the dbinit/rundown lock, so we are alone in this code for this region
	 * and nobody else can attach.
	 * See if we are all alone in accessing this database shared memory.
	 */
	assert(csa->ref_cnt);	/* decrement private ref_cnt before shared ref_cnt decrement. */
	csa->ref_cnt--;		/* Currently journaling logic in gds_rundown() in VMS relies on this order to detect last writer */
	assert(!csa->ref_cnt);
	--csa->nl->ref_cnt;
	if (memcmp(csa->nl->now_running, gtm_release_name, gtm_release_name_len + 1))
	{	/* VERMISMATCH condition. Possible only if DSE */
		assert(dse_running);
		vermismatch = TRUE;
	} else
		vermismatch = FALSE;
	if (-1 == shmctl(udi->shmid, IPC_STAT, &shm_buf))
	{
		save_errno = errno;
		rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg),
			ERR_TEXT, 2, RTS_ERROR_TEXT("gds_rundown shmctl"), save_errno);
	} else
		we_are_last_user =  (1 == shm_buf.shm_nattch) && !vermismatch;
	assert(!mupip_jnl_recover || we_are_last_user); /* recover => one user */
	if (-1 == (semval = semctl(udi->semid, 1, GETVAL)))
		rts_error(VARLSTCNT(5) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), errno);
	we_are_last_writer = (1 == semval) && (FALSE == reg->read_only) && !vermismatch;/* There's one writer left and I am it */
	assert(!(mupip_jnl_recover && !reg->read_only) || we_are_last_writer); /* recover + R/W region => one writer */
	if (-1 == (ftok_semval = semctl(udi->ftok_semid, 1, GETVAL)))
		rts_error(VARLSTCNT(5) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), errno);
	/* If csa->nl->donotflush_dbjnl is set, it means mupip recover/rollback was interrupted and therefore we should
	 * 	not flush shared memory contents to disk as they might be in an inconsistent state.
	 * In this case, we will go ahead and remove shared memory (without flushing the contents) in this routine.
	 * A reissue of the recover/rollback command will restore the database to a consistent state.
	 * Otherwise, if we have write access to this region, let us perform a few writing tasks.
	 */
	if (csa->nl->donotflush_dbjnl)
		csa->wbuf_dqd = 0;	/* ignore csa->wbuf_dqd status as we do not care about the cache contents */
	else if (!reg->read_only && !vermismatch)
	{	/* If we had an orphaned block and were interrupted, set wc_blocked so we can invoke wcs_recover */
		if (csa->wbuf_dqd)
		{
			grab_crit(reg);
			SET_TRACEABLE_VAR(csd->wc_blocked, TRUE);
			BG_TRACE_PRO_ANY(csa, wcb_gds_rundown);
			send_msg(VARLSTCNT(8) ERR_WCBLOCKED, 6, LEN_AND_LIT("wcb_gds_rundown"),
				process_id, &csa->ti->curr_tn, DB_LEN_STR(reg));
			csa->wbuf_dqd = 0;
			wcs_recover(reg);
			if (is_mm)
			{
				assert(FALSE);
				csd = csa->hdr;
			}
			BG_TRACE_PRO_ANY(csa, lost_block_recovery);
			rel_crit(reg);
		}
		if (JNL_ENABLED(csd) && (GTCM_GNP_SERVER_IMAGE == image_type))
			originator_prc_vec = NULL;
		/* If we are the last writing user, then everything must be flushed */
		if (we_are_last_writer)
		{	/* Time to flush out all of our buffers */
			if (is_mm)
			{
				if (csa->total_blks != csa->ti->total_blks)	/* do remap if file had been extended */
				{
					grab_crit(reg);
					wcs_mm_recover(reg);
					csd = csa->hdr;
					rel_crit(reg);
				}
				csa->nl->remove_shm = TRUE;
			}
			/* Note WCSFLU_SYNC_EPOCH ensures the epoch is synced to the journal and indirectly
			 * also ensures that the db is fsynced. We don't want to use it in the calls to
			 * wcs_flu() from t_end() and tp_tend() since we can defer it to out-of-crit there.
			 * In this case, since we are running down, we don't have any such option.
			 */
			csa->nl->remove_shm = wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_SYNC_EPOCH);
			/* Since we_are_last_writer, we should be guaranteed that wcs_flu() did not change csd, (in
			 * case of MM for potential file extension), even if it did a grab_crit().  Therefore, make
			 * sure that's true.
			 */
			assert(csd == csa->hdr);
			assert(0 == memcmp(csd->label, GDS_LABEL, GDS_LABEL_SZ - 1));
			csd->trans_hist.header_open_tn = csd->trans_hist.curr_tn;
		} else if ((cancelled_timer && (0 > csa->nl->wcs_timers)) || cancelled_dbsync_timer)
		{	/* cancelled pending db or jnl flush timers - flush database and journal buffers to disk */
			grab_crit(reg);
			/* we need to sync the epoch as the fact that there is no active pending flush timer implies
			 * there will be no one else to flush the dirty buffers and EPOCH to disk in a timely fashion
			 */
			wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_SYNC_EPOCH);
			rel_crit(reg);
			assert((dba_mm == cs_data->acc_meth) || (csd == cs_data));
			csd = cs_data;	/* In case this is MM and wcs_flu() remapped an extended database, reset csd */
		}
		/* Do rundown journal processing after buffer flushes since they require jnl to be open */
		if (JNL_ENABLED(csd))
		{	/* the following tp_change_reg() is not needed due to the assert csa == cs_addrs at the beginning
			 * of gds_rundown(), but just to be safe. To be removed by 2002!! --- nars -- 2001/04/25.
			 */
			tp_change_reg();	/* call this because jnl_ensure_open checks cs_addrs rather than gv_cur_region */
			jpc = csa->jnl;
			jbp = jpc->jnl_buff;
			if (jbp->fsync_in_prog_latch.u.parts.latch_pid == process_id)
			{
				assert(FALSE);
				COMPSWAP_UNLOCK(&jbp->fsync_in_prog_latch, process_id, 0, LOCK_AVAILABLE, 0);
			}
			if (jbp->io_in_prog_latch.u.parts.latch_pid == process_id)
			{
				assert(FALSE);
				COMPSWAP_UNLOCK(&jbp->io_in_prog_latch, process_id, 0, LOCK_AVAILABLE, 0);
			}
			if (((NOJNL != jpc->channel) && !JNL_FILE_SWITCHED(jpc))
				|| we_are_last_writer && (0 != csa->nl->jnl_file.u.inode))
			{	/* We need to close the journal file cleanly if we have the latest generation journal file open
				 *	or if we are the last writer and the journal file is open in shared memory (not necessarily
				 *	by ourselves e.g. the only process that opened the journal got shot abnormally)
				 * Note: we should not infer anything from the shared memory value of csa->nl->jnl_file.u.inode
				 * 	if we are not the last writer as it can be concurrently updated.
				 */
				grab_crit(reg);
				if (JNL_ENABLED(csd))
				{
					SET_GBL_JREC_TIME; /* jnl_ensure_open/jnl_put_jrt_pini/pfin/jnl_file_close all need it */
					/* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order
					 * of jnl records. This needs to be done BEFORE the jnl_ensure_open as that could write
					 * journal records (if it decides to switch to a new journal file).
					 */
					ADJUST_GBL_JREC_TIME(jgbl, jbp);
					jnl_status = jnl_ensure_open();
					if (0 == jnl_status)
					{	/* If we_are_last_writer, we would have already done a wcs_flu() which would
						 * have written an epoch record and we are guaranteed no further updates
						 * since we are the last writer. So, just close the journal.
						 * Although we assert pini_addr should be non-zero for last_writer, we
						 * play it safe in PRO and write a PINI record if not written already.
						 */
						assert(!jbp->before_images || is_mm
								|| !we_are_last_writer || 0 != jpc->pini_addr);
						if (we_are_last_writer && 0 == jpc->pini_addr)
							jnl_put_jrt_pini(csa);
						if (0 != jpc->pini_addr)
							jnl_put_jrt_pfin(csa);
						/* If not the last writer and no pending flush timer left, do jnl flush now */
						if (!we_are_last_writer && (0 > csa->nl->wcs_timers))
						{
							if (SS_NORMAL == (jnl_status = jnl_flush(reg)))
							{
								assert(jbp->freeaddr == jbp->dskaddr);
								jnl_fsync(reg, jbp->dskaddr);
								assert(jbp->fsync_dskaddr == jbp->dskaddr);
							} else
							{
								send_msg(VARLSTCNT(9) ERR_JNLFLUSH, 2, JNL_LEN_STR(csd),
									ERR_TEXT, 2,
									RTS_ERROR_TEXT("Error with journal flush in gds_rundown"),
									jnl_status);
								assert(NOJNL == jpc->channel);/* jnl file lost has been triggered */
								/* In this routine, all code that follows from here on does not
								 * assume anything about the journaling characteristics of this
								 * database so it is safe to continue execution even though
								 * journaling got closed in the middle.
								 */
							}
						}
						jnl_file_close(reg, we_are_last_writer, FALSE);
					} else
						send_msg(VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(csd), DB_LEN_STR(reg));
				}
				rel_crit(reg);
			}
		}
		if (we_are_last_writer)			/* Flush the fileheader last and harden the file to disk */
		{
			grab_crit(reg);			/* To satisfy crit requirement in fileheader_sync() */
			memset(csd->machine_name, 0, MAX_MCNAMELEN); /* clear the machine_name field */
			if (!mupip_jnl_recover && we_are_last_user)
			{	/* mupip_jnl_recover will do this after mur_close_file */
				csd->semid = INVALID_SEMID;
				csd->shmid = INVALID_SHMID;
				csd->gt_sem_ctime.ctime = 0;
				csd->gt_shm_ctime.ctime = 0;
			}
			fileheader_sync(reg);
			rel_crit(reg);
			if (FALSE == is_mm)
			{
				if (-1 == fsync(udi->fd))		/* Sync it all */
				{
					rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
						  ERR_TEXT, 2, RTS_ERROR_TEXT("Error during file sync at close"), errno);
				}
			} else
			{	/* Now do final MM file sync before exit */
#if !defined(TARGETED_MSYNC) && !defined(NO_MSYNC)
				if (-1 == fsync(udi->fd))		/* Sync it all */
				{
					rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
						  ERR_TEXT, 2, RTS_ERROR_TEXT("Error during file sync at close"), errno);
				}
#else
				if (-1 == msync((caddr_t)csa->db_addrs[0], (size_t)(csa->db_addrs[1] - csa->db_addrs[0]), MS_SYNC))
				{
					rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
						  ERR_TEXT, 2, RTS_ERROR_TEXT("Error during file msync at close"), errno);
				}
#endif
			}
		}
	} /* end if (!reg->read_only && !csa->nl->donotflush_dbjnl) */
	if (reg->read_only && we_are_last_user && !mupip_jnl_recover)
	{	/* mupip_jnl_recover will do this after mur_close_file */
		db_ipcs.semid = INVALID_SEMID;
		db_ipcs.shmid = INVALID_SHMID;
		db_ipcs.gt_sem_ctime = 0;
		db_ipcs.gt_shm_ctime = 0;
		db_ipcs.fn_len = reg->dyn.addr->fname_len;
		memcpy(db_ipcs.fn, reg->dyn.addr->fname, reg->dyn.addr->fname_len);
		db_ipcs.fn[reg->dyn.addr->fname_len] = 0;
 		/* request gtmsecshr to flush. read_only cannot flush itself */
		if (0 != send_mesg2gtmsecshr(FLUSH_DB_IPCS_INFO, 0, (char *)NULL, 0))
			rts_error(VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg),
				  ERR_TEXT, 2, RTS_ERROR_TEXT("gtmsecshr failed to update database file header"));
	}
	/* Done with file now, close it */
	if (-1 == close(udi->fd))
	{
		rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
			  ERR_TEXT, 2, LEN_AND_LIT("Error during file close"), errno);
	}
	/* Unmap storage if mm mode but only the part that is not the fileheader (so shows up in dumps) */
	if (is_mm)
	{
		munmap_len = (sm_long_t)((csa->db_addrs[1] - csa->db_addrs[0]) - ROUND_UP(SIZEOF_FILE_HDR(csa->hdr),
											 MSYNC_ADDR_INCS));
		if (munmap_len > 0)
		{
			munmap((caddr_t)(csa->db_addrs[0] + ROUND_UP(SIZEOF_FILE_HDR(csa->hdr), MSYNC_ADDR_INCS)),
			       (size_t)(munmap_len));
#ifdef DEBUG_DB64
			rel_mmseg((caddr_t)csa->db_addrs[0]);
#endif
		}
	}
	/* Detach our shared memory while still under lock so reference counts will be
	 * correct for the next process to run down this region.
	 * In the process also get the remove_shm status from node_local before detaching.
	 * If csa->nl->donotflush_dbjnl is TRUE, it means we can safely remove shared memory without compromising data
	 * 	integrity as a reissue of recover will restore the database to a consistent state.
	 */
	remove_shm = !vermismatch && (csa->nl->remove_shm || csa->nl->donotflush_dbjnl);
	status = shmdt((caddr_t)csa->nl);
	csa->nl = NULL; /* dereferencing nl after detach is not right, so we set it to NULL so that we can test before dereference */
	if (-1 == status)
		send_msg(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2, LEN_AND_LIT("Error during shmdt"), errno);
	reg->open = FALSE;

	/* If file is still not in good shape, die here and now before we get rid of our storage */
	if (csa->wbuf_dqd)
		GTMASSERT;
	ipc_deleted = FALSE;
	/* If we are the very last user, remove shared storage id and the semaphores */
	if (we_are_last_user)
	{	/* remove shared storage, only if last writer to rundown did a successful wcs_flu() */
		assert(!vermismatch);
		if (remove_shm)
		{
			ipc_deleted = TRUE;
			if (0 != shm_rmid(udi->shmid))
				rts_error(VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, RTS_ERROR_TEXT("Unable to remove shared memory"));
		} else if (is_src_server || is_updproc)
		{
			gtm_putmsg(VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
			send_msg(VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
		} else
			send_msg(VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
		/*
		 * Don't release semaphore in case of mupip recover/rollback; since it has standalone access.
		 * It will release the semaphore in mur_close_files.
		 */
		if (!mupip_jnl_recover)
		{
			if (0 != sem_rmid(udi->semid))
				rts_error(VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, RTS_ERROR_TEXT("Unable to remove semaphore"));
			grabbed_access_sem = FALSE;
		}
	} else
	{
		assert(!mupip_jnl_recover);
		/* If we were writing, get rid of our writer access count semaphore */
		if (!reg->read_only)
			if (0 != (save_errno = do_semop(udi->semid, 1, -1, SEM_UNDO)))
				rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, RTS_ERROR_TEXT("gds_rundown write semaphore release"), save_errno);
		/* Now remove the rundown lock */
		if (0 != (save_errno = do_semop(udi->semid, 0, -1, SEM_UNDO)))
			rts_error(VARLSTCNT(9) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg),
				ERR_TEXT, 2, RTS_ERROR_TEXT("gds_rundown rundown semaphore release"), save_errno);
		grabbed_access_sem = FALSE;
	}
	if (!ftok_sem_release(reg, !mupip_jnl_recover, FALSE))
			rts_error(VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
	if (!ipc_deleted)
	{
		GET_CUR_TIME;
		if (is_src_server)
			gtm_putmsg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Source server"), REG_LEN_STR(reg));
		if (is_updproc)
			gtm_putmsg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Update process"), REG_LEN_STR(reg));
		if (mupip_jnl_recover)
		{
			gtm_putmsg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Mupip journal process"), REG_LEN_STR(reg));
			send_msg(VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_ptr,
				LEN_AND_LIT("Mupip journal process"), REG_LEN_STR(reg));
		}
	}
	REVERT;
}
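The access-control semaphore acquisition in gds_rundown() uses a classic SysV pattern: a single semop() carrying two operations -- "wait for zero" followed by "increment" -- under SEM_UNDO, tried first with IPC_NOWAIT and, on EAGAIN, retried in blocking mode. Here is a stand-alone sketch of just that pattern; the semaphore key, error handling, and cleanup are simplified, and this is not the GT.M code itself.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	struct sembuf	sop[2];
	int		semid;

	/* private set just for this demo; the real code attaches to an existing semid */
	if (-1 == (semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600)))
		exit(1);
	sop[0].sem_num = 0;	sop[0].sem_op = 0;	/* wait until the value is 0 */
	sop[1].sem_num = 0;	sop[1].sem_op = 1;	/* then take the lock */
	sop[0].sem_flg = sop[1].sem_flg = SEM_UNDO | IPC_NOWAIT;	/* first try: don't block */
	if (-1 == semop(semid, sop, 2))
	{
		if (EAGAIN != errno)
			exit(1);	/* real failure, not just "somebody holds it" */
		sop[0].sem_flg = sop[1].sem_flg = SEM_UNDO;	/* second try: block until free */
		if (-1 == semop(semid, sop, 2))
			exit(1);
	}
	printf("lock held; SEM_UNDO releases it even if the process dies\n");
	semctl(semid, 0, IPC_RMID);	/* demo cleanup */
	return 0;
}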
Example No. 14
void	mu_reorg_upgrd_dwngrd(void)
{
	blk_hdr			new_hdr;
	blk_segment		*bs1, *bs_ptr;
	block_id		*blkid_ptr, curblk, curbmp, start_blk, stop_blk, start_bmp, last_bmp;
	block_id		startblk_input, stopblk_input;
	boolean_t		upgrade, downgrade, safejnl, nosafejnl, region, first_reorg_in_this_db_fmt, reorg_entiredb;
	boolean_t		startblk_specified, stopblk_specified, set_fully_upgraded, db_got_to_v5_once, mark_blk_free;
	cache_rec_ptr_t		cr;
	char			*bml_lcl_buff = NULL, *command, *reorg_command;
	sm_uc_ptr_t		bptr = NULL;
	cw_set_element		*cse;
	enum cdb_sc		cdb_status;
	enum db_ver		new_db_format, ondsk_blkver;
	gd_region		*reg;
	int			cycle;
	int4			blk_seg_cnt, blk_size;	/* needed for BLK_INIT,BLK_SEG and BLK_FINI macros */
	int4			blocks_left, expected_blks2upgrd, actual_blks2upgrd, total_blks, free_blks;
	int4			status, status1, mapsize, lcnt, bml_status;
	reorg_stats_t		reorg_stats;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	sm_uc_ptr_t		blkBase, bml_sm_buff;	/* shared memory pointer to the bitmap global buffer */
	srch_hist		alt_hist;
	srch_blk_status		*blkhist, bmlhist;
	tp_region		*rptr;
	trans_num		curr_tn;
	unsigned char    	save_cw_set_depth;
	uint4			lcl_update_trans;

	region    = (CLI_PRESENT == cli_present("REGION"));
	upgrade   = (CLI_PRESENT == cli_present("UPGRADE"));
	downgrade = (CLI_PRESENT == cli_present("DOWNGRADE"));
	assert(upgrade && !downgrade || !upgrade && downgrade);
	command = upgrade ? "UPGRADE" : "DOWNGRADE";
	reorg_command = upgrade ? "MUPIP REORG UPGRADE" : "MUPIP REORG DOWNGRADE";
	reorg_entiredb = TRUE;	/* unless STARTBLK or STOPBLK is specified we are going to {up,down}grade the entire database */
	startblk_specified = FALSE;
	assert(SIZEOF(block_id) == SIZEOF(uint4));
	if ((CLI_PRESENT == cli_present("STARTBLK")) && (cli_get_hex("STARTBLK", (uint4 *)&startblk_input)))
	{
		reorg_entiredb = FALSE;
		startblk_specified = TRUE;
	}
	stopblk_specified = FALSE;
	assert(SIZEOF(block_id) == SIZEOF(uint4));
	if ((CLI_PRESENT == cli_present("STOPBLK")) && (cli_get_hex("STOPBLK", (uint4 *)&stopblk_input)))
	{
		reorg_entiredb = FALSE;
		stopblk_specified = TRUE;
	}
	mu_reorg_upgrd_dwngrd_in_prog = TRUE;
	mu_reorg_nosafejnl = (CLI_NEGATED == cli_present("SAFEJNL")) ? TRUE : FALSE;

	assert(region);
	status = SS_NORMAL;
	error_mupip = FALSE;
	gvinit();	/* initialize gd_header (needed by the later call to mu_getlst) */
	mu_getlst("REG_NAME", SIZEOF(tp_region));	/* get the parameter corresponding to REGION qualifier */
	if (error_mupip)
	{
		util_out_print("!/MUPIP REORG !AD cannot proceed with above errors!/", TRUE, LEN_AND_STR(command));
		mupip_exit(ERR_MUNOACTION);
	}
	assert(DBKEYSIZE(MAX_KEY_SZ) == gv_keysize);	/* no need to invoke GVKEYSIZE_INIT_IF_NEEDED macro */
	gv_target = targ_alloc(gv_keysize, NULL, NULL);	/* t_begin needs this initialized */
	gv_target_list = NULL;
	memset(&alt_hist, 0, SIZEOF(alt_hist));	/* null-initialize history */
	blkhist = &alt_hist.h[0];
	for (rptr = grlist;  NULL != rptr;  rptr = rptr->fPtr)
	{
		if (mu_ctrly_occurred || mu_ctrlc_occurred)
			break;
		reg = rptr->reg;
		util_out_print("!/Region !AD : MUPIP REORG !AD started", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
		if (reg_cmcheck(reg))
		{
			util_out_print("Region !AD : MUPIP REORG !AD cannot run across network",
				TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			status = ERR_MUNOFINISH;
			continue;
		}
		mu_reorg_process = TRUE;	/* gvcst_init will use this value to apply gtm_poollimit settings. */
		gvcst_init(reg);
		mu_reorg_process = FALSE;
		assert(update_array != NULL);
		/* access method stored in global directory and database file header might be different in which case
		 * the database setting prevails. therefore, the access method check can be done only after opening
		 * the database (i.e. after the gvcst_init)
		 */
		if (dba_bg != REG_ACC_METH(reg))
		{
			util_out_print("Region !AD : MUPIP REORG !AD cannot continue as access method is not BG",
				TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			status = ERR_MUNOFINISH;
			continue;
		}
		/* The mu_getlst call above uses insert_region to create the grlist, which ensures that duplicate regions mapping to
		 * the same db file correspond to only one grlist entry.
		 */
		assert(FALSE == reg->was_open);
		TP_CHANGE_REG(reg);	/* sets gv_cur_region, cs_addrs, cs_data */
		csa = cs_addrs;
		csd = cs_data;
		blk_size = csd->blk_size;	/* "blk_size" is used by the BLK_FINI macro */
		if (reg->read_only)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(reg));
			status = ERR_MUNOFINISH;
			continue;
		}
		assert(GDSVCURR == GDSV6); /* so we trip this assert in case GDSVCURR changes without a change to this module */
		new_db_format = (upgrade ? GDSV6 : GDSV4);
		grab_crit(reg);
		curr_tn = csd->trans_hist.curr_tn;
		/* set the desired db format in the file header to the appropriate version, increment transaction number */
		status1 = desired_db_format_set(reg, new_db_format, reorg_command);
		assert(csa->now_crit);	/* desired_db_format_set() should not have released crit */
		first_reorg_in_this_db_fmt = TRUE;	/* with the current desired_db_format, this is the first reorg */
		if (SS_NORMAL != status1)
		{	/* "desired_db_format_set" would have printed appropriate error messages */
			if (ERR_MUNOACTION != status1)
			{	/* real error occurred while setting the db format. skip to next region */
				status = ERR_MUNOFINISH;
				rel_crit(reg);
				continue;
			}
			util_out_print("Region !AD : Desired DB Format remains at !AD after !AD", TRUE, REG_LEN_STR(reg),
				LEN_AND_STR(gtm_dbversion_table[new_db_format]), LEN_AND_STR(reorg_command));
			if (csd->reorg_db_fmt_start_tn == csd->desired_db_format_tn)
				first_reorg_in_this_db_fmt = FALSE;
		} else
			util_out_print("Region !AD : Desired DB Format set to !AD by !AD", TRUE, REG_LEN_STR(reg),
				LEN_AND_STR(gtm_dbversion_table[new_db_format]), LEN_AND_STR(reorg_command));
		assert(dba_bg == csd->acc_meth);
		/* Check blks_to_upgrd counter to see if upgrade/downgrade is complete */
		total_blks = csd->trans_hist.total_blks;
		free_blks = csd->trans_hist.free_blocks;
		actual_blks2upgrd = csd->blks_to_upgrd;
		/* If MUPIP REORG UPGRADE and there is no block to upgrade in the database as indicated by BOTH
		 * 	"csd->blks_to_upgrd" and "csd->fully_upgraded", then we can skip processing.
		 * If MUPIP REORG UPGRADE and all non-free blocks need to be upgraded then again we can skip processing.
		 */
		if ((upgrade && (0 == actual_blks2upgrd) && csd->fully_upgraded)
			|| (!upgrade && ((total_blks - free_blks) == actual_blks2upgrd)))
		{
			util_out_print("Region !AD : Blocks to Upgrade counter indicates no action needed for MUPIP REORG !AD",
				       TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			util_out_print("Region !AD : Total Blocks = [0x!XL] : Free Blocks = [0x!XL] : "
				       "Blocks to upgrade = [0x!XL]",
				       TRUE, REG_LEN_STR(reg), total_blks, free_blks, actual_blks2upgrd);
			util_out_print("Region !AD : MUPIP REORG !AD finished!/", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			rel_crit(reg);
			continue;
		}
		stop_blk = total_blks;
		if (stopblk_specified && stopblk_input <= stop_blk)
			stop_blk = stopblk_input;
		if (first_reorg_in_this_db_fmt)
		{	/* Note down reorg start tn (in case we are interrupted, future reorg will know to resume) */
			csd->reorg_db_fmt_start_tn = csd->desired_db_format_tn;
			csd->reorg_upgrd_dwngrd_restart_block = 0;
			start_blk = (startblk_specified ? startblk_input : 0);
		} else
		{	/* Either a concurrent MUPIP REORG of the same type ({up,down}grade) is currently running
			 * or a previously running REORG of the same type was interrupted (Ctrl-Ced).
			 * In either case resume processing from whatever restart block number is stored in the fileheader.
			 * The only exception is if "STARTBLK" was specified in the input, in which case we use that unconditionally.
			 */
			start_blk = (startblk_specified ? startblk_input : csd->reorg_upgrd_dwngrd_restart_block);
		}
		if (start_blk > stop_blk)
			start_blk = stop_blk;
		mu_reorg_upgrd_dwngrd_start_tn = csd->reorg_db_fmt_start_tn;
		/* Before releasing crit, flush the file-header and dirty buffers in cache to disk. This is because we are now
		 * going to read each GDS block directly from disk to determine if it needs to be upgraded/downgraded or not.
		 */
		if (!wcs_flu(WCSFLU_FLUSH_HDR))	/* wcs_flu assumes gv_cur_region is set (which it is in this routine) */
		{
			rel_crit(reg);
			gtm_putmsg_csa(CSA_ARG(csa)
				VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT("MUPIP REORG UPGRADE/DOWNGRADE"), DB_LEN_STR(reg));
			status = ERR_MUNOFINISH;
			continue;
		}
		rel_crit(reg);
		/* Loop through entire database one GDS block at a time and upgrade/downgrade each of them */
		status1 = SS_NORMAL;
		start_bmp = ROUND_DOWN2(start_blk, BLKS_PER_LMAP);
		last_bmp  = ROUND_DOWN2(stop_blk - 1, BLKS_PER_LMAP);
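		/* Illustrative: with BLKS_PER_LMAP of 512, start_blk 1000 gives start_bmp 512 and
		 * stop_blk 5000 gives last_bmp ROUND_DOWN2(4999, 512) = 4608, so the loop below
		 * visits bitmaps 512, 1024, ..., 4608.
		 */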
		curblk = start_blk;	/* curblk is the block to be upgraded/downgraded */
		util_out_print("Region !AD : Started processing from block number [0x!XL]", TRUE, REG_LEN_STR(reg), curblk);
		if (NULL != bptr)
		{	/* malloc/free "bptr" for each region as GDS block-size can be different */
			free(bptr);
			bptr = NULL;
		}
		memset(&reorg_stats, 0, SIZEOF(reorg_stats));	/* initialize statistics for this region */
		for (curbmp = start_bmp; curbmp <= last_bmp; curbmp += BLKS_PER_LMAP)
		{
			if (mu_ctrly_occurred || mu_ctrlc_occurred)
			{
				status1 = ERR_MUNOFINISH;
				break;
			}
			/* --------------------------------------------------------------
			 *             Read in current bitmap block
			 * --------------------------------------------------------------
			 */
			assert(!csa->now_crit);
			bml_sm_buff = t_qread(curbmp, (sm_int_ptr_t)&cycle, &cr); /* bring block into the cache outside of crit */
			reorg_stats.blks_read_from_disk_bmp++;
			grab_crit_encr_cycle_sync(reg); /* needed so t_qread does not return NULL below */
			if (mu_reorg_upgrd_dwngrd_start_tn != csd->desired_db_format_tn)
			{	/* csd->desired_db_format changed since reorg started. discontinue the reorg */
				/* see later comment on "csd->reorg_upgrd_dwngrd_restart_block" for why the assignment
				 * of this field should be done only if a db format change did not occur.
				 */
				rel_crit(reg);
				status1 = ERR_MUNOFINISH;
				/* This "start_tn" check is redone after the for-loop and an error message is printed there */
				break;
			} else if (reorg_entiredb)
			{	/* Change "csd->reorg_upgrd_dwngrd_restart_block" only if STARTBLK or STOPBLK was NOT specified */
				assert(csd->reorg_upgrd_dwngrd_restart_block <= MAX(start_blk, curbmp));
				csd->reorg_upgrd_dwngrd_restart_block = curbmp;	/* previous blocks have been upgraded/downgraded */
			}
			/* Check blks_to_upgrd counter to see if upgrade/downgrade is complete.
			 * Repeat check done a few steps earlier outside of this for loop.
			 */
			total_blks = csd->trans_hist.total_blks;
			free_blks = csd->trans_hist.free_blocks;
			actual_blks2upgrd = csd->blks_to_upgrd;
			if ((upgrade && (0 == actual_blks2upgrd) && csd->fully_upgraded)
				|| (!upgrade && ((total_blks - free_blks) == actual_blks2upgrd)))
			{
				rel_crit(reg);
				break;
			}
			bml_sm_buff = t_qread(curbmp, (sm_int_ptr_t)&cycle, &cr); /* now that in crit, note down stable buffer */
			if (NULL == bml_sm_buff)
				rts_error_csa(CSA_ARG(csa) VARLSTCNT(1) ERR_DSEBLKRDFAIL);
			ondsk_blkver = cr->ondsk_blkver;	/* note down db fmt on disk for bitmap block */
			/* Take a copy of the shared memory bitmap buffer into process-private memory before releasing crit.
			 * We are interested in those blocks that are currently marked as USED in the bitmap.
			 * It is possible that once we release crit, concurrent updates change the bitmap state of those blocks.
			 * In that case, those updates will take care of doing the upgrade/downgrade of those blocks in the
			 * format currently set in csd->desired_db_format i.e. accomplishing MUPIP REORG UPGRADE/DOWNGRADE's job.
			 * If the desired_db_format changes concurrently, we will stop doing REORG UPGRADE/DOWNGRADE processing.
			 */
			if (NULL == bml_lcl_buff)
				bml_lcl_buff = malloc(BM_SIZE(BLKS_PER_LMAP));
			memcpy(bml_lcl_buff, (blk_hdr_ptr_t)bml_sm_buff, BM_SIZE(BLKS_PER_LMAP));
			if (FALSE == cert_blk(reg, curbmp, (blk_hdr_ptr_t)bml_lcl_buff, 0, FALSE))
			{	/* certify the block while holding crit as cert_blk uses fields from file-header (shared memory) */
				assert(FALSE);	/* in pro, skip upgrading/downgrading all blks in this unreliable local bitmap */
				rel_crit(reg);
				util_out_print("Region !AD : Bitmap Block [0x!XL] has integrity errors. Skipping this bitmap.",
					TRUE, REG_LEN_STR(reg), curbmp);
				status1 = ERR_MUNOFINISH;
				continue;
			}
			rel_crit(reg);
			/* ------------------------------------------------------------------------
			 *         Upgrade/Downgrade all BUSY blocks in the current bitmap
			 * ------------------------------------------------------------------------
			 */
			curblk = (curbmp == start_bmp) ? start_blk : curbmp;
			mapsize = (curbmp == last_bmp) ? (stop_blk - curbmp) : BLKS_PER_LMAP;
			assert(0 != mapsize);
			assert(mapsize <= BLKS_PER_LMAP);
			db_got_to_v5_once = csd->db_got_to_v5_once;
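			/* Illustrative (continuing the numbers above): for the first bitmap, curbmp 512 and
			 * start_blk 1000 give lcnt starting at 488, scanning blocks 1000..1023; subsequent
			 * bitmaps scan their full 512-block range (the last one only up to stop_blk).
			 */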
			for (lcnt = curblk - curbmp; lcnt < mapsize; lcnt++, curblk++)
			{
				if (mu_ctrly_occurred || mu_ctrlc_occurred)
				{
					status1 = ERR_MUNOFINISH;
					goto stop_reorg_on_this_reg;	/* goto needed because of nested FOR Loop */
				}
				GET_BM_STATUS(bml_lcl_buff, lcnt, bml_status);
				assert(BLK_MAPINVALID != bml_status); /* cert_blk ran clean so we dont expect invalid entries */
				if (BLK_FREE == bml_status)
				{
					reorg_stats.blks_skipped_free++;
					continue;
				}
				/* MUPIP REORG UPGRADE/DOWNGRADE will convert USED & RECYCLED blocks */
				if (db_got_to_v5_once || (BLK_RECYCLED != bml_status))
				{	/* Do NOT read recycled V4 block from disk unless it is guaranteed NOT to be too full */
					if (lcnt)
					{	/* non-bitmap block */
						/* read in block from disk into private buffer. dont pollute the cache yet */
						if (NULL == bptr)
							bptr = (sm_uc_ptr_t)malloc(blk_size);
						status1 = dsk_read(curblk, bptr, &ondsk_blkver, FALSE);
						/* dsk_read on curblk could return an error (DYNUPGRDFAIL) if curblk needs to be
						 * upgraded and if its block size was too big to allow the extra block-header space
						 * requirements for a dynamic upgrade. a MUPIP REORG DOWNGRADE should not error out
						 * in that case as the block is already in the downgraded format.
						 */
						if (SS_NORMAL != status1)
						{
							if (!upgrade && (ERR_DYNUPGRDFAIL == status1))
							{
								assert(GDSV4 == new_db_format);
								ondsk_blkver = new_db_format;
							} else
							{
								gtm_putmsg_csa(CSA_ARG(csa)
									VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(reg), status1);
								util_out_print("Region !AD : Error occurred while reading block "
									"[0x!XL]", TRUE, REG_LEN_STR(reg), curblk);
								status1 = ERR_MUNOFINISH;
								goto stop_reorg_on_this_reg;/* goto needed due to nested FOR Loop */
							}
						}
						reorg_stats.blks_read_from_disk_nonbmp++;
					} /* else bitmap block has been read in crit earlier and ondsk_blkver appropriately set */
					if (new_db_format == ondsk_blkver)
					{
						assert((SS_NORMAL == status1) || (!upgrade && (ERR_DYNUPGRDFAIL == status1)));
						status1 = SS_NORMAL;	/* treat DYNUPGRDFAIL as no error in case of downgrade */
						reorg_stats.blks_skipped_newfmtindisk++;
						continue;	/* current disk version is identical to what is desired */
					}
					assert(SS_NORMAL == status1);
				}
				/* Begin non-TP transaction to upgrade/downgrade the block.
				 * The way we do that is by updating the block using a null update array.
				 * Any update to a block will trigger an automatic upgrade/downgrade of the block based on
				 * 	the current fileheader desired_db_format setting and we use that here.
				 */
				t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
				for (; ;)
				{
					CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
					curr_tn = csd->trans_hist.curr_tn;
					db_got_to_v5_once = csd->db_got_to_v5_once;
					if (db_got_to_v5_once || (BLK_RECYCLED != bml_status))
					{
						blkhist->cse = NULL;	/* start afresh (do not use value from previous retry) */
						blkBase = t_qread(curblk, (sm_int_ptr_t)&blkhist->cycle, &blkhist->cr);
						if (NULL == blkBase)
						{
							t_retry((enum cdb_sc)rdfail_detail);
							continue;
						}
						blkhist->blk_num = curblk;
						blkhist->buffaddr = blkBase;
						ondsk_blkver = blkhist->cr->ondsk_blkver;
						new_hdr = *(blk_hdr_ptr_t)blkBase;
						mu_reorg_upgrd_dwngrd_blktn = new_hdr.tn;
						mark_blk_free = FALSE;
						inctn_opcode = upgrade ? inctn_blkupgrd : inctn_blkdwngrd;
					} else
					{
						mark_blk_free = TRUE;
						inctn_opcode = inctn_blkmarkfree;
					}
					inctn_detail.blknum_struct.blknum = curblk;
					/* t_end assumes that the history it is passed does not contain a bitmap block.
					 * for bitmap block, the history validation information is passed through cse instead.
					 * therefore we need to handle bitmap and non-bitmap cases separately.
					 */
					if (!lcnt)
					{	/* Means a bitmap block.
						 * At this point we can do a "new_db_format != ondsk_blkver" check to determine
						 * if the block got converted since we did the dsk_read (see the non-bitmap case
						 * for a similar check done there), but in that case we will have a transaction
						 * which has read 1 bitmap block and is updating no block. "t_end" currently cannot
						 * handle this case as it expects any bitmap block that needs validation to also
						 * have a corresponding cse which will hold its history. Hence we avoid doing the
						 * new_db_format check. The only disadvantage of this is that we will end up
						 * modifying the bitmap block as part of this transaction (in an attempt to convert
						 * its ondsk_blkver) even though it is already in the right format. Since this
						 * overhead is going to be one per bitmap block and since the block is in the cache
						 * at this point, we should not lose much.
						 */
						assert(!mark_blk_free);
						BLK_ADDR(blkid_ptr, SIZEOF(block_id), block_id);
						*blkid_ptr = 0;
						t_write_map(blkhist, (unsigned char *)blkid_ptr, curr_tn, 0);
						assert(&alt_hist.h[0] == blkhist);
						alt_hist.h[0].blk_num = 0; /* create empty history for bitmap block */
						assert(update_trans);
					} else
					{	/* non-bitmap block. fill in history for validation in t_end */
						assert(curblk);	/* we should never come here for block 0 (bitmap) */
						if (!mark_blk_free)
						{
							assert(blkhist->blk_num == curblk);
							assert(blkhist->buffaddr == blkBase);
							blkhist->tn      = curr_tn;
							alt_hist.h[1].blk_num = 0;
						}
						/* Also need to pass the bitmap as history to detect if any concurrent M-kill
						 * is freeing up the same USED block that we are trying to convert OR if any
						 * concurrent M-set is reusing the same RECYCLED block that we are trying to
						 * convert. Because of t_end currently not being able to validate a bitmap
						 * without that simultaneously having a cse, we need to create a cse for the
						 * bitmap that is used only for bitmap history validation, but should not be
						 * used to update the contents of the bitmap block in bg_update.
						 */
						bmlhist.buffaddr = t_qread(curbmp, (sm_int_ptr_t)&bmlhist.cycle, &bmlhist.cr);
						if (NULL == bmlhist.buffaddr)
						{
							t_retry((enum cdb_sc)rdfail_detail);
							continue;
						}
						bmlhist.blk_num = curbmp;
						bmlhist.tn = curr_tn;
						GET_BM_STATUS(bmlhist.buffaddr, lcnt, bml_status);
						if (BLK_MAPINVALID == bml_status)
						{
							t_retry(cdb_sc_lostbmlcr);
							continue;
						}
						if (!mark_blk_free)
						{
							if ((new_db_format != ondsk_blkver) && (BLK_FREE != bml_status))
							{	/* block still needs to be converted. create cse */
								BLK_INIT(bs_ptr, bs1);
								BLK_SEG(bs_ptr, blkBase + SIZEOF(new_hdr),
									new_hdr.bsiz - SIZEOF(new_hdr));
								BLK_FINI(bs_ptr, bs1);
								t_write(blkhist, (unsigned char *)bs1, 0, 0,
									((blk_hdr_ptr_t)blkBase)->levl, FALSE,
									FALSE, GDS_WRITE_PLAIN);
								/* The directory tree status for now is only used to determine
								 * whether to write the block to the snapshot file (see
								 * t_end_sysops.c). For the reorg upgrade/downgrade process, the
								 * block is updated in a sequential way without changing the
								 * gv_target. In this case, we assume the block is in the directory
								 * tree so as to have it written to the snapshot file.
								 */
								BIT_SET_DIR_TREE(cw_set[cw_set_depth-1].blk_prior_state);
								/* reset update_trans in case previous retry had set it to 0 */
								update_trans = UPDTRNS_DB_UPDATED_MASK;
								if (BLK_RECYCLED == bml_status)
								{	/* If the block we are upgrading is RECYCLED, indicate to
									 * bg_update that blks_to_upgrd counter should NOT be
									 * touched in this case by setting "mode" to a special value
									 */
									assert(cw_set[cw_set_depth-1].mode == gds_t_write);
									cw_set[cw_set_depth-1].mode = gds_t_write_recycled;
									/* we SET block as NOT RECYCLED, otherwise, the mm_update()
									 * or bg_update_phase2 may skip writing it to snapshot file
									 * when its level is 0
									 */
									BIT_CLEAR_RECYCLED(cw_set[cw_set_depth-1].blk_prior_state);
								}
							} else
							{	/* Block got converted by another process since we did the dsk_read.
								 * 	or this block became marked free in the bitmap.
								 * No need to update this block. just call t_end for validation of
								 * 	both the non-bitmap block as well as the bitmap block.
								 * Note down that this transaction is no longer updating any blocks.
								 */
								update_trans = 0;
							}
							/* Need to put bit maps on the end of the cw set for concurrency checking.
							 * We want to simulate t_write_map, except we want to update "cw_map_depth"
							 * instead of "cw_set_depth". Hence the save and restore logic below.
							 * This part of the code is similar to the one in mu_swap_blk.c
							 */
							save_cw_set_depth = cw_set_depth;
							assert(!cw_map_depth);
							t_write_map(&bmlhist, NULL, curr_tn, 0); /* will increment cw_set_depth */
							cw_map_depth = cw_set_depth; /* set cw_map_depth to latest cw_set_depth */
							cw_set_depth = save_cw_set_depth;/* restore cw_set_depth */
							/* t_write_map simulation end */
						} else
						{
							if (BLK_RECYCLED != bml_status)
							{	/* Block was RECYCLED at beginning but no longer so. Retry */
								t_retry(cdb_sc_bmlmod);
								continue;
							}
							/* Mark recycled block as FREE in bitmap */
							assert(lcnt == (curblk - curbmp));
							assert(update_array_ptr == update_array);
							*((block_id *)update_array_ptr) = lcnt;
							update_array_ptr += SIZEOF(block_id);
							/* the following assumes SIZEOF(block_id) == SIZEOF(int) */
							assert(SIZEOF(block_id) == SIZEOF(int));
							*(int *)update_array_ptr = 0;
							t_write_map(&bmlhist, (unsigned char *)update_array, curr_tn, 0);
							update_trans = UPDTRNS_DB_UPDATED_MASK;
						}
					}
					assert(SIZEOF(lcl_update_trans) == SIZEOF(update_trans));
					lcl_update_trans = update_trans;	/* take a copy before t_end modifies it */
					if ((trans_num)0 != t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))
					{	/* In case this is MM and t_end() remapped an extended database, reset csd */
						assert(csd == cs_data);
						if (!lcl_update_trans)
						{
							assert(lcnt);
							assert(!mark_blk_free);
							assert((new_db_format == ondsk_blkver) || (BLK_BUSY != bml_status));
							if (BLK_BUSY != bml_status)
								reorg_stats.blks_skipped_free++;
							else
								reorg_stats.blks_skipped_newfmtincache++;
						} else if (!lcnt)
							reorg_stats.blks_converted_bmp++;
						else
							reorg_stats.blks_converted_nonbmp++;
						break;
					}
					assert(csd == cs_data);
				}
			}
		}
	stop_reorg_on_this_reg:
		/* Even if a Ctrl-C occurred, update file-header fields to record this reorg's progress before exiting */
		grab_crit(reg);
		blocks_left = 0;
		assert(csd->trans_hist.total_blks >= csd->blks_to_upgrd);
		actual_blks2upgrd = csd->blks_to_upgrd;
		total_blks = csd->trans_hist.total_blks;
		free_blks = csd->trans_hist.free_blocks;
		/* Care should be taken not to set "csd->reorg_upgrd_dwngrd_restart_block" in case of a concurrent db fmt
		 * change. Consider a REORG UPGRADE in progress. A concurrent REORG DOWNGRADE would have reset the
		 * "csd->reorg_upgrd_dwngrd_restart_block" field to 0 and, if that reorg is interrupted by a Ctrl-C
		 * (before this reorg gets here), it would have updated "csd->reorg_upgrd_dwngrd_restart_block" to a
		 * non-zero value indicating how many blocks from block 0 have been downgraded. We should not reset this
		 * field to "curblk" as it would be misinterpreted as the number of blocks that have been DOWNgraded.
		 */
		set_fully_upgraded = FALSE;
		if (mu_reorg_upgrd_dwngrd_start_tn != csd->desired_db_format_tn)
		{	/* csd->desired_db_format changed since reorg started. discontinue the reorg */
			util_out_print("Region !AD : Desired DB Format changed during REORG. Stopping REORG.",
				TRUE, REG_LEN_STR(reg));
			status1 = ERR_MUNOFINISH;
		} else if (reorg_entiredb)
		{	/* Change "csd->reorg_upgrd_dwngrd_restart_block" only if STARTBLK or STOPBLK was NOT specified */
			assert(csd->reorg_upgrd_dwngrd_restart_block <= curblk);
			csd->reorg_upgrd_dwngrd_restart_block = curblk;	/* blocks below this have been upgraded/downgraded */
			expected_blks2upgrd = upgrade ? 0 : (total_blks - free_blks);
			blocks_left = upgrade ? actual_blks2upgrd : (expected_blks2upgrd - actual_blks2upgrd);
			/* If this reorg command went through all blocks in the database, then it should have
			 * 	correctly concluded at this point whether the reorg is complete or not.
			 * If this reorg command started from where a previous incomplete reorg left off
			 *	(i.e. first_reorg_in_this_db_fmt is FALSE), it cannot determine whether the initial
			 *	GDS blocks that it skipped are completely {up,down}graded or not.
			 */
			assert((0 == blocks_left) || (SS_NORMAL != status1) || !first_reorg_in_this_db_fmt);
			/* If this is a MUPIP REORG UPGRADE that did go through every block in the database (indicated by
			 * "reorg_entiredb" && "first_reorg_in_this_db_fmt") and the current count of "blks_to_upgrd" is
			 * 0 in the file-header and the desired_db_format did not change since the start of the REORG,
			 * we can be sure that the entire database has been upgraded. Set "csd->fully_upgraded" to TRUE.
			 */
			if ((SS_NORMAL == status1) && first_reorg_in_this_db_fmt && upgrade && (0 == actual_blks2upgrd))
			{
				csd->fully_upgraded = TRUE;
				csd->db_got_to_v5_once = TRUE;
				set_fully_upgraded = TRUE;
			}
			/* flush all changes noted down in the file-header */
			if (!wcs_flu(WCSFLU_FLUSH_HDR))	/* wcs_flu assumes gv_cur_region is set (which it is in this routine) */
			{
				gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4,
					LEN_AND_LIT("MUPIP REORG UPGRADE/DOWNGRADE"), DB_LEN_STR(reg));
				status = ERR_MUNOFINISH;
				rel_crit(reg);
				continue;
			}
		}
		curr_tn = csd->trans_hist.curr_tn;
		rel_crit(reg);
		util_out_print("Region !AD : Stopped processing at block number [0x!XL]", TRUE, REG_LEN_STR(reg), curblk);
		/* Print statistics */
		util_out_print("Region !AD : Statistics : Blocks Read From Disk (Bitmap)     : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_read_from_disk_bmp);
		util_out_print("Region !AD : Statistics : Blocks Skipped (Free)              : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_free);
		util_out_print("Region !AD : Statistics : Blocks Read From Disk (Non-Bitmap) : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_read_from_disk_nonbmp);
		util_out_print("Region !AD : Statistics : Blocks Skipped (new fmt in disk)   : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_newfmtindisk);
		util_out_print("Region !AD : Statistics : Blocks Skipped (new fmt in cache)  : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_newfmtincache);
		util_out_print("Region !AD : Statistics : Blocks Converted (Bitmap)          : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_converted_bmp);
		util_out_print("Region !AD : Statistics : Blocks Converted (Non-Bitmap)      : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_converted_nonbmp);
		if (reorg_entiredb && (SS_NORMAL == status1) && (0 != blocks_left))
		{	/* file-header counter does not match what reorg on the entire database expected to see */
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBBTUWRNG, 2, expected_blks2upgrd, actual_blks2upgrd);
			util_out_print("Region !AD : Run MUPIP INTEG (without FAST qualifier) to fix the counter",
				TRUE, REG_LEN_STR(reg));
			status1 = ERR_MUNOFINISH;
		} else
			util_out_print("Region !AD : Total Blocks = [0x!XL] : Free Blocks = [0x!XL] : "
				       "Blocks to upgrade = [0x!XL]",
				       TRUE, REG_LEN_STR(reg), total_blks, free_blks, actual_blks2upgrd);
		/* Issue success or failure message for this region */
		if (SS_NORMAL == status1)
		{	/* issue success only if REORG did not encounter any error in its processing */
			if (set_fully_upgraded)
				util_out_print("Region !AD : Database is now FULLY UPGRADED", TRUE, REG_LEN_STR(reg));
			util_out_print("Region !AD : MUPIP REORG !AD finished!/", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			send_msg_csa(CSA_ARG(csa) VARLSTCNT(7) ERR_MUREUPDWNGRDEND, 5, REG_LEN_STR(reg),
										process_id, process_id, &curr_tn);
		} else
		{
			assert(ERR_MUNOFINISH == status1);
			assert((SS_NORMAL == status) || (ERR_MUNOFINISH == status));
			util_out_print("Region !AD : MUPIP REORG !AD incomplete. See above messages.!/",
					TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			status = status1;
		}
	}
	if (NULL != bptr)
		free(bptr);
	if (NULL != bml_lcl_buff)
		free(bml_lcl_buff);
	if (mu_ctrly_occurred || mu_ctrlc_occurred)
	{
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_REORGCTRLY);
		status = ERR_MUNOFINISH;
	}
	mupip_exit(status);
}
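
A minimal sketch (not part of the original source) isolating the t_write_map simulation pattern used above: the cw-set entry that t_write_map() creates is moved under cw_map_depth so the bitmap gets concurrency-validated without being counted as a regular update. The helper name is hypothetical; the GT.M transaction globals and types are assumed.

/* Sketch: queue a bitmap block for concurrency validation only. Assumes the
 * usual GT.M transaction globals (cw_set_depth, cw_map_depth) and a filled-in
 * srch_blk_status for the bitmap.
 */
static void queue_bml_for_validation(srch_blk_status *bmlhist, trans_num curr_tn)
{
	int	save_cw_set_depth;

	save_cw_set_depth = cw_set_depth;
	assert(!cw_map_depth);
	t_write_map(bmlhist, NULL, curr_tn, 0);	/* increments cw_set_depth */
	cw_map_depth = cw_set_depth;		/* the bitmap entry is now tracked here */
	cw_set_depth = save_cw_set_depth;	/* restore, as if cw_set_depth never moved */
}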
Example No. 15
void	lke_show(void)
{
	bool			locks, all = TRUE, wait = TRUE, interactive = FALSE, match = FALSE, memory = TRUE, nocrit = TRUE;
	boolean_t		exact = FALSE, was_crit;
	int4			pid;
	size_t			ls_len;
	int			n;
	char 			regbuf[MAX_RN_LEN], nodebuf[32], one_lockbuf[MAX_KEY_SZ];
	mlk_ctldata_ptr_t	ctl;
	mstr			reg, node, one_lock;
	int			shr_sub_len = 0;
	float			ls_free = 0;	/* Free space in bottleneck subspace */
	/* Get all command parameters */
	reg.addr = regbuf;
	reg.len = SIZEOF(regbuf);
	node.addr = nodebuf;
	node.len = SIZEOF(nodebuf);
	one_lock.addr = one_lockbuf;
	one_lock.len = SIZEOF(one_lockbuf);
	if (lke_getcli(&all, &wait, &interactive, &pid, &reg, &node, &one_lock, &memory, &nocrit, &exact) == 0)
		return;

	/* Search all regions specified on the command line */
	for (gv_cur_region = gd_header->regions, n = 0; n != gd_header->n_regions; ++gv_cur_region, ++n)
	{
		/* If region matches and is open */
		if ((reg.len == 0
		     || (gv_cur_region->rname_len == reg.len  &&  memcmp(gv_cur_region->rname, reg.addr, reg.len) == 0))
		    && gv_cur_region->open)
		{
			match = TRUE;
			util_out_print("!/!AD!/", NOFLUSH, REG_LEN_STR(gv_cur_region));

			/* If distributed database, the region is located on another node */
			if (gv_cur_region->dyn.addr->acc_meth == dba_cm)
			{
#				if defined(LKE_WORKS_OK_WITH_CM)
				/* Obtain lock info from the remote node */
				locks = gtcmtr_lke_showreq(gv_cur_region->dyn.addr->cm_blk, gv_cur_region->cmx_regnum,
							   all, wait, pid, &node);
#				else
				gtm_putmsg(VARLSTCNT(10) ERR_UNIMPLOP, 0, ERR_TEXT, 2,
						LEN_AND_LIT("GT.CM region - locks must be displayed on the local node"),
						ERR_TEXT, 2, REG_LEN_STR(gv_cur_region));
				continue;
#				endif
			} else if (gv_cur_region->dyn.addr->acc_meth == dba_bg  || gv_cur_region->dyn.addr->acc_meth == dba_mm)
			{	/* Local region */
				cs_addrs = &FILE_INFO(gv_cur_region)->s_addrs;
				ls_len = (size_t)(cs_addrs->lock_addrs[1] - cs_addrs->lock_addrs[0]);
				ctl = (mlk_ctldata_ptr_t)malloc(ls_len);
				/* Prevent any modification of the lock space while we make a local copy of it */
				if (cs_addrs->critical != NULL)
					crash_count = cs_addrs->critical->crashcnt;
				was_crit = cs_addrs->now_crit;
				if (!nocrit && !was_crit)
					grab_crit(gv_cur_region);
				longcpy((uchar_ptr_t)ctl, (uchar_ptr_t)cs_addrs->lock_addrs[0], ls_len);
				assert((ctl->max_blkcnt > 0) && (ctl->max_prccnt > 0) && ((ctl->subtop - ctl->subbase) > 0));
				if (!nocrit && !was_crit)
					rel_crit(gv_cur_region);
				shr_sub_len = 0;
				locks = (ctl->blkroot == 0)
						? FALSE
						: lke_showtree(NULL, (mlk_shrblk_ptr_t)R2A(ctl->blkroot), all, wait, pid,
							     one_lock, memory, &shr_sub_len);
				/* Lock space usage consists of: control_block + nodes (locks) + processes + substrings.
				 * Any of those subspaces can be the bottleneck, so we report the subspace
				 * which is closest to running out.
				 */
				ls_free = MIN(((float)ctl->blkcnt) / ctl->max_blkcnt, ((float)ctl->prccnt) / ctl->max_prccnt);
				ls_free = MIN(1-(((float)shr_sub_len) / (ctl->subtop - ctl->subbase)), ls_free);
				ls_free *= 100;	/* Scale to [0-100] range. (couldn't do this inside util_out_print) */
				if (ls_free < 1) /* No memory? Notify user. */
					gtm_putmsg(VARLSTCNT(4) ERR_LOCKSPACEFULL, 2, DB_LEN_STR(gv_cur_region));
				if (ls_free < 1 || memory)
				{
					if (ctl->subtop > ctl->subfree)
						gtm_putmsg(VARLSTCNT(10) ERR_LOCKSPACEINFO, 8, REG_LEN_STR(gv_cur_region),
							   (ctl->max_prccnt - ctl->prccnt), ctl->max_prccnt,
							   (ctl->max_blkcnt - ctl->blkcnt), ctl->max_blkcnt, LEN_AND_LIT(" not "));
					else
						gtm_putmsg(VARLSTCNT(10) ERR_LOCKSPACEINFO, 8, REG_LEN_STR(gv_cur_region),
							   (ctl->max_prccnt - ctl->prccnt), ctl->max_prccnt,
							   (ctl->max_blkcnt - ctl->blkcnt), ctl->max_blkcnt, LEN_AND_LIT(" "));
				}
				free(ctl);
			} else
			{
				gtm_putmsg(VARLSTCNT(2) ERR_BADREGION, 0);
				locks = TRUE;
			}
			if (!locks)
			{
				gtm_putmsg(VARLSTCNT(4) ERR_NOLOCKMATCH, 2, REG_LEN_STR(gv_cur_region));
			}
			assert((ls_free <= 100) && (ls_free >= 0));
			gtm_putmsg(VARLSTCNT(4) ERR_LOCKSPACEUSE, 2, ((int)ls_free),
				       cs_addrs->hdr->lock_space_size/OS_PAGELET_SIZE);
		}
	}

	if (!match  &&  reg.len != 0)
		rts_error(VARLSTCNT(4) ERR_NOREGION, 2, reg.len, reg.addr);

}
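
The free-space arithmetic in lke_show() reports the tightest of the three lock-space subpools. The standalone helper below (hypothetical, not in the source) isolates just that computation; the parameters mirror the mlk_ctldata fields used above.

/* Sketch: percentage of lock space still free, taken as the minimum across
 * the block, process and substring subpools. All maxima are assumed positive.
 */
static float lockspace_free_pct(int4 blkcnt, int4 max_blkcnt, int4 prccnt, int4 max_prccnt,
				int shr_sub_len, int sub_size)
{
	float	free_frac;

	free_frac = MIN(((float)blkcnt) / max_blkcnt, ((float)prccnt) / max_prccnt);
	free_frac = MIN(1 - (((float)shr_sub_len) / sub_size), free_frac);
	return free_frac * 100;	/* scale to the [0-100] range */
}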
Example No. 16
bool gtcmtr_bufflush(void)
{
	cm_region_list	*reg_ref;
	mval		v;
	short		n;
	unsigned short	num_trans, data_len;
	unsigned char	buff[MAX_ZWR_KEY_SZ], *end;
	unsigned char	*ptr, regnum, len, cc, prv;
	static readonly gds_file_id file;

	error_def(ERR_KEY2BIG);
	error_def(ERR_REC2BIG);
	error_def(ERR_GVIS);

	ptr = curr_entry->clb_ptr->mbf;
	assert(*ptr == CMMS_B_BUFFLUSH);
	ptr++;
	v.mvtype = MV_STR;
	GET_USHORT(num_trans, ptr);
	ptr += SIZEOF(short);
	for (; num_trans-- > 0;)
	{
		regnum = *ptr++;
		reg_ref = gtcm_find_region(curr_entry, regnum);
		len = *ptr++;
		cc = *ptr++;
		prv = *ptr++;
		assert(len + cc - 1 < gv_currkey->top);
		memcpy(&gv_currkey->base[cc], ptr, len);
		ptr += len;
		gv_currkey->end = len + cc - 1;
		gv_currkey->prev = prv;
		assert(prv < gv_currkey->end);
		if ((n = gv_currkey->end + 1) > gv_cur_region->max_key_size)
		{
			if ((end = format_targ_key(&buff[0], MAX_ZWR_KEY_SZ, gv_currkey, TRUE)) == 0)
				end = &buff[MAX_ZWR_KEY_SZ - 1];
			rts_error(VARLSTCNT(11) ERR_KEY2BIG, 4, n, (int4)gv_cur_region->max_key_size,
				REG_LEN_STR(gv_cur_region), 0, ERR_GVIS, 2, end - buff, buff);
		}
		gtcm_bind_name(reg_ref->reghead, TRUE);
		if (JNL_ENABLED(cs_addrs->hdr))
		{
			cs_addrs->jnl->pini_addr = reg_ref->pini_addr;
			originator_prc_vec = curr_entry->pvec;
		}
		GET_USHORT(data_len, ptr);
		ptr += SIZEOF(short);
		v.str.len = data_len;
		v.str.addr = (char *)ptr;
		if (n + v.str.len + SIZEOF(rec_hdr) > gv_cur_region->max_rec_size)
		{
			if ((end = format_targ_key(&buff[0], MAX_ZWR_KEY_SZ, gv_currkey, TRUE)) == 0)
				end = &buff[MAX_ZWR_KEY_SZ - 1];
			rts_error(VARLSTCNT(11) ERR_REC2BIG, 4, n + v.str.len + SIZEOF(rec_hdr), (int4)gv_cur_region->max_rec_size,
				REG_LEN_STR(gv_cur_region), 0, ERR_GVIS, 2, end - buff, buff);
		}
		gvcst_put(&v);
		if (JNL_ENABLED(cs_addrs->hdr))
			reg_ref->pini_addr = cs_addrs->jnl->pini_addr; /* In case a journal switch occurred */
		ptr += data_len;
	}
	ptr = curr_entry->clb_ptr->mbf;
	*ptr++ = CMMS_C_BUFFLUSH;
	curr_entry->clb_ptr->cbl = S_HDRSIZE;
	return TRUE;
}
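
The pointer arithmetic in gtcmtr_bufflush() implies a packed wire format. The layout below is inferred from the reads above, not taken from a protocol document:

/* Inferred per-record layout consumed by gtcmtr_bufflush():
 *
 *	1 byte		regnum		region number (resolved via gtcm_find_region)
 *	1 byte		len		number of key bytes that follow
 *	1 byte		cc		offset in gv_currkey->base where the key bytes are spliced
 *	1 byte		prv		value for gv_currkey->prev
 *	len bytes	key		copied to &gv_currkey->base[cc]
 *	2 bytes		data_len	value length (read with GET_USHORT)
 *	data_len bytes	value		becomes v.str and is stored via gvcst_put()
 *
 * The message as a whole starts with a CMMS_B_BUFFLUSH byte followed by a
 * 2-byte count of such records; the reply is a lone CMMS_C_BUFFLUSH byte.
 */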
Example No. 17
bool mubinccpy(backup_reg_list *list)
{
	static readonly mval	null_str = {MV_STR, 0, 0, 0, 0, 0};

	int			backup_socket;
	int4                    size, size1, bsize, bm_num, hint, lmsize, save_blks, rsize, match, timeout, outsize;
	uint4                   status, total_blks, bplmap, gds_ratio, blks_per_buff, counter, i, lcnt, read_size;
	uchar_ptr_t		bm_blk_buff, ptr1, ptr1_top, ptr, ptr_top;
	char_ptr_t		outptr, data_ptr;
	unsigned short		rd_iosb[4], port;
	enum db_acc_method	access;
	blk_hdr			*bp, *bptr;
	struct FAB		*fcb, temp_fab, mubincfab;
	struct RAB		temp_rab, mubincrab;
	inc_header		*outbuf;
	mval			val;
	mstr                    *file;
	sgmnt_data_ptr_t        header;
	char			*common, addr[SA_MAXLEN + 1];
	void			(*common_write)();
	void			(*common_close)();
	muinc_blk_hdr_ptr_t	sblkh_p;
	trans_num		blk_tn;
	block_id		blk_num_base, blk_num;
	boolean_t		is_bitmap_blk, backup_this_blk;
	enum db_ver		dummy_odbv;
	int4			blk_bsiz;

	error_def(ERR_BCKUPBUFLUSH);
	error_def(ERR_COMMITWAITSTUCK);
	error_def(ERR_DBCCERR);
	error_def(ERR_ERRCALL);

	assert(list->reg == gv_cur_region);
	assert(incremental);
	/* Make sure inc_header is the same size on all platforms. Some platforms pad 8-byte-aligned structures
	   that end on a 4-byte boundary and some do not. It is critical that this structure is the same size on
	   all platforms as it is sent across TCP connections when doing TCP backup.
	*/
	assert(0 == (SIZEOF(inc_header) % 8));

	/* ================= Initialization and some checks ======================== */

	header  =       list->backup_hdr;
	file    =       &(list->backup_file);

	if (!mubtomag)
		mubmaxblk = BACKUP_TEMPFILE_BUFF_SIZE;
	fcb = ((vms_gds_info *)(gv_cur_region->dyn.addr->file_cntl->file_info))->fab;
	if (list->tn >= header->trans_hist.curr_tn)
	{
		util_out_print("!/TRANSACTION number is greater than or equal to current transaction,", TRUE);
		util_out_print("No blocks backed up from database !AD", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
		return TRUE;
	}

	/* =========== open backup destination and define common_write ================= */
	backup_write_errno = 0;
	backup_close_errno = 0;
	switch(list->backup_to)
	{
		case backup_to_file:
			/* open the file and define the common_write function */
			mubincfab = cc$rms_fab;
			mubincfab.fab$b_fac = FAB$M_PUT;
			mubincfab.fab$l_fop = FAB$M_CBT | FAB$M_MXV | FAB$M_TEF | FAB$M_POS & (~FAB$M_RWC) & (~FAB$M_RWO);
			mubincfab.fab$l_fna = file->addr;
			mubincfab.fab$b_fns = file->len;
			mubincfab.fab$l_alq = cs_addrs->hdr->start_vbn +
				STARTING_BLOCKS * cs_addrs->hdr->blk_size / DISK_BLOCK_SIZE;
			mubincfab.fab$w_mrs = mubmaxblk;
			mubincfab.fab$w_deq = EXTEND_SIZE;
			switch (status = sys$create(&mubincfab))
			{
				case RMS$_NORMAL:
				case RMS$_CREATED:
				case RMS$_SUPERSEDE:
				case RMS$_FILEPURGED:
					break;
				default:
					gtm_putmsg(status, 0, mubincfab.fab$l_stv);
					util_out_print("Error: Cannot create backup file !AD.",
						       TRUE, mubincfab.fab$b_fns, mubincfab.fab$l_fna);
					return FALSE;
			}

			mubincrab = cc$rms_rab;
			mubincrab.rab$l_fab = &mubincfab;
			mubincrab.rab$l_rop = RAB$M_WBH;
			if (RMS$_NORMAL != (status = sys$connect(&mubincrab)))
			{
				gtm_putmsg(status, 0, mubincrab.rab$l_stv);
				util_out_print("Error: Cannot connect to backup file !AD.",
					       TRUE, mubincfab.fab$b_fns, mubincfab.fab$l_fna);
				mubincfab.fab$l_fop |= FAB$M_DLT;
				sys$close(&mubincfab);
				return FALSE;
			}
			common = (char *)(&mubincrab);
			common_write = file_write;
			common_close = file_close;
			break;
		case backup_to_exec:
			util_out_print("Error: Backup to pipe is yet to be implemented.", TRUE);
			util_out_print("Error: Your request to backup database !AD to !AD is currently not valid.", TRUE,
				       fcb->fab$b_fns, fcb->fab$l_fna, file->len, file->addr);
			return FALSE;
		case backup_to_tcp:
			iotcp_fillroutine();
			/* parse it first */
			switch (match = SSCANF(file->addr, "%[^:]:%hu", addr, &port))
			{
				case 1 :
					port = DEFAULT_BKRS_PORT;
					/* no break: fall through and use the defaulted port */
				case 2 :
					break;
				default :
					util_out_print("ERROR: A hostname has to be specified to backup through a TCP connection.",
						       TRUE);
					return FALSE;
			}
			if ((0 == cli_get_int("NETTIMEOUT", &timeout)) || (0 > timeout))
				timeout = DEFAULT_BKRS_TIMEOUT;
			if (0 > (backup_socket = tcp_open(addr, port, timeout, FALSE)))
			{
				util_out_print("ERROR: Cannot open tcp connection due to the above error.", TRUE);
				return FALSE;
			}
			common_write = tcp_write;
			common_close = tcp_close;
			common = (char *)(&backup_socket);
			break;
		default :
			util_out_print("ERROR: Backup format !UL not supported.", TRUE, list->backup_to);
			util_out_print("Error: Your request to backup database !AD to !AD is not valid.", TRUE,
				       fcb->fab$b_fns, fcb->fab$l_fna, file->len, file->addr);
			return FALSE;
	}

	/* ============================= write inc_header =========================================== */

	outptr = malloc(SIZEOF(inc_header));
	outbuf = (inc_header *)outptr;
	MEMCPY_LIT(&outbuf->label[0], INC_HEADER_LABEL);
	stringpool.free = stringpool.base;
	op_horolog(&val);
	stringpool.free = stringpool.base;
	op_fnzdate(&val, &mu_bin_datefmt, &null_str, &null_str, &val);
	memcpy(&outbuf->date[0], val.str.addr, val.str.len);
	memcpy(&outbuf->reg[0], gv_cur_region->rname, MAX_RN_LEN);
	outbuf->start_tn = list->tn;
	outbuf->end_tn = header->trans_hist.curr_tn;
	outbuf->db_total_blks = header->trans_hist.total_blks;
	outbuf->blk_size = header->blk_size;
	outbuf->blks_to_upgrd = header->blks_to_upgrd;
	COMMON_WRITE(common, outptr, SIZEOF(inc_header));
	free(outptr);

	if (mu_ctrly_occurred || mu_ctrlc_occurred)
	{
		error_mupip = TRUE;
		COMMON_CLOSE(common);
		util_out_print("WARNING:  DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
		return FALSE;
	}

	/* ============================ read/write appropriate blocks =============================== */

	bsize		= header->blk_size;
	gds_ratio	= bsize / DISK_BLOCK_SIZE;
	blks_per_buff	= BACKUP_READ_SIZE / bsize;
	read_size	= blks_per_buff * bsize;
	outsize		= SIZEOF(muinc_blk_hdr) + bsize;
	outptr		= (char_ptr_t)malloc(MAX(outsize, mubmaxblk));
	sblkh_p		= (muinc_blk_hdr_ptr_t)outptr;
	data_ptr	= (char_ptr_t)(sblkh_p + 1);
	bp		= (blk_hdr_ptr_t)mubbuf;
	bm_blk_buff	= (uchar_ptr_t)malloc(SIZEOF(blk_hdr) + (BLKS_PER_LMAP * BML_BITS_PER_BLK / BITS_PER_UCHAR));
	mubincrab.rab$l_rbf = outptr;
	save_blks	= 0;
	access = header->acc_meth;
	memset(sblkh_p, 0, SIZEOF(*sblkh_p));

	if (access == dba_bg)
		bp = (blk_hdr_ptr_t)mubbuf;
	else
	{
		ptr = cs_addrs->db_addrs[0] + (cs_addrs->hdr->start_vbn - 1) * DISK_BLOCK_SIZE;
		ptr_top = cs_addrs->db_addrs[1] + 1;
	}

	sblkh_p->use.bkup.ondsk_blkver = GDSNOVER;
	for (blk_num_base = 0; blk_num_base < header->trans_hist.total_blks; blk_num_base += blks_per_buff)
	{
		if (online && (0 != cs_addrs->shmpool_buffer->failed))
			break;
		if (header->trans_hist.total_blks - blk_num_base < blks_per_buff)
		{
			blks_per_buff = header->trans_hist.total_blks - blk_num_base;
			read_size = blks_per_buff * bsize;
		}

		if (access == dba_bg)
		{
			if ((SS$_NORMAL != (status = sys$qiow(EFN$C_ENF, fcb->fab$l_stv, IO$_READVBLK, &rd_iosb, 0, 0, bp,
							      read_size, cs_addrs->hdr->start_vbn + (gds_ratio * blk_num_base),
							      0, 0, 0)))
			    || (SS$_NORMAL != (status = rd_iosb[0])))
			{
				gtm_putmsg(VARLSTCNT(1) status);
				util_out_print("Error reading data from database !AD.", TRUE,
					       fcb->fab$b_fns, fcb->fab$l_fna);
				free(outptr);
				free(bm_blk_buff);
				error_mupip = TRUE;
				COMMON_CLOSE(common);
				return FALSE;
			}
		} else
		{
			assert(dba_mm == access);
			bp = (blk_hdr_ptr_t)(ptr + blk_num_base * bsize);
		}

		bptr = (blk_hdr *)bp;
		/* The blocks we back up will be whatever version they are. There is no implicit conversion in this
		   part of the backup/restore. Since we aren't even looking at the blocks (and indeed some of these blocks
		   could potentially contain uninitialized garbage data), we set the block version to GDSNOVER to signal
		   that the block version is unknown. The above applies to "regular" blocks but not to bitmap blocks which
		   we know are initialized. Because we have to read the bitmap blocks, they will be converted as necessary.
		*/
		for (i = 0;
		     i < blks_per_buff && ((blk_num_base + i) < header->trans_hist.total_blks);
		     i++, bptr = (blk_hdr *)((char *)bptr + bsize))
		{
			blk_num = blk_num_base + i;
			if (mu_ctrly_occurred  ||  mu_ctrlc_occurred)
			{
				free(outptr);
				free(bm_blk_buff);
				error_mupip = TRUE;
				COMMON_CLOSE(common);
				util_out_print("WARNING:  DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
				return FALSE;
			}
			/* Before we check if this block needs backing up, check if this is a new bitmap block or not. If it is,
			   we can fall through and back it up as normal. But if this is NOT a bitmap block, use the
			   existing bitmap to determine if this block has ever been allocated or not. If not, we don't want to
			   even look at this block. It could be uninitialized which will just make things run slower if we
			   go to read it and back it up.
			*/
			if (0 != ((BLKS_PER_LMAP - 1) & blk_num))
			{	/* Not a local bitmap block */
				if (!gvcst_blk_ever_allocated(bm_blk_buff + SIZEOF(blk_hdr),
							      ((blk_num * BML_BITS_PER_BLK)
							       % (BLKS_PER_LMAP * BML_BITS_PER_BLK))))
					continue;		/* Bypass never-set blocks to avoid conversion problems */
				is_bitmap_blk = FALSE;
				if (SIZEOF(v15_blk_hdr) <= (blk_bsiz = ((v15_blk_hdr_ptr_t)bptr)->bsiz))
				{	/* We have either a V4 block or uninitialized garbage */
					if (blk_bsiz > bsize)
						/* This is not a valid V4 block so ignore it */
						continue;
					blk_tn = ((v15_blk_hdr_ptr_t)bptr)->tn;
				} else
				{	/* Assume V5 block */
					if ((blk_bsiz = bptr->bsiz) > bsize)
						/* Not a valid V5 block either */
						continue;
					blk_tn = bptr->tn;
				}
			} else
			{	/* This is a bitmap block so save it into our bitmap block buffer. It is used as the
				   basis of whether or not we have to process a given block or not. We process allocated and
				   recycled blocks leaving free (never used) blocks alone as they have no data worth saving.
				   But after saving it, upgrade it to the current format if necessary.
				*/
				is_bitmap_blk = TRUE;
				memcpy(bm_blk_buff, bptr, BM_SIZE(header->bplmap));
				if (SIZEOF(v15_blk_hdr) <= ((v15_blk_hdr_ptr_t)bm_blk_buff)->bsiz)
				{	/* This is a V4 format block -- needs upgrading */
					status = gds_blk_upgrade(bm_blk_buff, bm_blk_buff, bsize, &dummy_odbv);
					if (SS_NORMAL != status)
					{
						free(outptr);
						free(bm_blk_buff);
						error_mupip = TRUE;
						COMMON_CLOSE(common);
						util_out_print("Error: Block 0x!XL is too large for automatic upgrade", TRUE,
							       sblkh_p->blkid);
						return FALSE;
					}
				}
				assert(BM_SIZE(header->bplmap) == ((blk_hdr_ptr_t)bm_blk_buff)->bsiz);
				assert(LCL_MAP_LEVL == ((blk_hdr_ptr_t)bm_blk_buff)->levl);
				assert(gvcst_blk_is_allocated(bm_blk_buff + SIZEOF(blk_hdr),
							      ((blk_num * BML_BITS_PER_BLK)
							       % (BLKS_PER_LMAP * BML_BITS_PER_BLK))));
				blk_bsiz = BM_SIZE(header->bplmap);
				blk_tn = ((blk_hdr_ptr_t)bm_blk_buff)->tn;
			}
			/* The conditions for backing up a block or ignoring it (in order of evaluation):

			   1) If the block is beyond the size of the db at the time the backup was initiated, ignore it.
			   2) Always back up blocks 0, 1, and 2 as these are the only blocks that can contain data
			      and still have a transaction number of 0.
			   3) For bitmap blocks, if blks_to_upgrd != 0 and the TN is 0 and the block number >=
			      last_blk_at_last_bkup, then back up the block. This way we get the correct version of
			      the bitmap block in the restore (otherwise we have no clue what version to create them in,
			      as bitmaps are created with a TN of 0 when before-image journaling is enabled).
			   4) If the block TN is below our TN threshold, ignore the block.
			   5) Otherwise, back up the block.
			*/
			if (online && (header->trans_hist.curr_tn <= blk_tn))
				backup_this_blk = FALSE;
			else if (3 > blk_num || (is_bitmap_blk && 0 != header->blks_to_upgrd && (trans_num)0 == blk_tn
						 && blk_num >= list->last_blk_at_last_bkup))
				backup_this_blk = TRUE;
			else if (blk_tn < list->tn)
				backup_this_blk = FALSE;
			else
				backup_this_blk = TRUE;
			if (!backup_this_blk)
			{
				if (online)
					cs_addrs->nl->nbb = blk_num;
				continue; /* not applicable */
			}
			sblkh_p->blkid = blk_num;
			memcpy(data_ptr, bptr, blk_bsiz);
			sblkh_p->valid_data = TRUE;	/* Validation marker */
			COMMON_WRITE(common, outptr, outsize);
			if (online)
			{
				if (0 != cs_addrs->shmpool_buffer->failed)
					break;
				cs_addrs->nl->nbb = blk_num;
			}
			save_blks++;
		}
	}

	/* ============================= write saved information for online backup ========================== */

	if (online && (0 == cs_addrs->shmpool_buffer->failed))
	{
		/* -------- make sure everyone involved finishes -------- */
		cs_addrs->nl->nbb = BACKUP_NOT_IN_PROGRESS;
		/* By getting crit here, we ensure that there is no process still in transaction logic that sees
		   (nbb != BACKUP_NOT_IN_PROGRESS). After rel_crit(), any process that enters transaction logic will
		   see (nbb == BACKUP_NOT_IN_PROGRESS) because we just set it to that value. At this point, the backup
		   buffer is complete and there will not be any more new entries in the backup buffer until the next
		   backup.
		*/
		grab_crit(gv_cur_region);
		assert(cs_data == cs_addrs->hdr);
		if (dba_bg == cs_data->acc_meth)
		{	/* Now that we have crit, wait for any pending phase2 updates to finish. Since phase2 updates happen
			 * outside of crit, we don't want them to keep writing to the backup temporary file even after the
			 * backup is complete and the temporary file has been deleted.
			 */
			if (cs_addrs->nl->wcs_phase2_commit_pidcnt && !wcs_phase2_commit_wait(cs_addrs, NULL))
			{
				gtm_putmsg(VARLSTCNT(7) ERR_COMMITWAITSTUCK, 5, process_id, 1,
					cs_addrs->nl->wcs_phase2_commit_pidcnt, DB_LEN_STR(gv_cur_region));
				rel_crit(gv_cur_region);
				free(outptr);
				free(bm_blk_buff);
				error_mupip = TRUE;
				COMMON_CLOSE(common);
				return FALSE;
			}
		}
		if (debug_mupip)
		{
			util_out_print("MUPIP INFO:   Current Transaction # at end of backup is 0x!16@XQ", TRUE,
				&cs_data->trans_hist.curr_tn);
		}
		rel_crit(gv_cur_region);
		counter = 0;
		while (0 != cs_addrs->shmpool_buffer->backup_cnt)
		{
			if (0 != cs_addrs->shmpool_buffer->failed)
			{
				util_out_print("Process !UL encountered the following error.", TRUE,
					       cs_addrs->shmpool_buffer->failed);
				if (0 != cs_addrs->shmpool_buffer->backup_errno)
					gtm_putmsg(VARLSTCNT(1) cs_addrs->shmpool_buffer->backup_errno);
				free(outptr);
				free(bm_blk_buff);
				error_mupip = TRUE;
				COMMON_CLOSE(common);
				return FALSE;
			}
			backup_buffer_flush(gv_cur_region);
			if (++counter > MAX_BACKUP_FLUSH_TRY)
			{
				gtm_putmsg(VARLSTCNT(1) ERR_BCKUPBUFLUSH);
				free(outptr);
				free(bm_blk_buff);
				error_mupip = TRUE;
				COMMON_CLOSE(common);
				return FALSE;
			}
			if (counter & 0xF)
				wcs_sleep(counter);
			else
			{	/* Force shmpool recovery to see if it can find the lost blocks */
				if (!shmpool_lock_hdr(gv_cur_region))
				{
					gtm_putmsg(VARLSTCNT(9) ERR_DBCCERR, 2, REG_LEN_STR(gv_cur_region),
						   ERR_ERRCALL, 3, CALLFROM);
					free(outptr);
					free(bm_blk_buff);
					error_mupip = TRUE;
					COMMON_CLOSE(common);
					assert(FALSE);
					return FALSE;
				}
				shmpool_abandoned_blk_chk(gv_cur_region, TRUE);
				shmpool_unlock_hdr(gv_cur_region);
			}
		}

		/* -------- Open the temporary file -------- */
		temp_fab = cc$rms_fab;
		temp_fab.fab$b_fac = FAB$M_GET;
		temp_fab.fab$l_fna = list->backup_tempfile;
		temp_fab.fab$b_fns = strlen(list->backup_tempfile);
		temp_rab = cc$rms_rab;
		temp_rab.rab$l_fab = &temp_fab;

		for (lcnt = 1;  MAX_OPEN_RETRY >= lcnt;  lcnt++)
		{
			if (RMS$_FLK != (status = sys$open(&temp_fab, NULL, NULL)))
				break;
			wcs_sleep(lcnt);
		}

		if (RMS$_NORMAL != status)
		{
			gtm_putmsg(status, 0, temp_fab.fab$l_stv);
			util_out_print("WARNING:  DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
			free(outptr);
			free(bm_blk_buff);
			error_mupip = TRUE;
			COMMON_CLOSE(common);
			return FALSE;
		}

		if (RMS$_NORMAL != (status = sys$connect(&temp_rab)))
		{
			gtm_putmsg(status, 0, temp_rab.rab$l_stv);
			util_out_print("WARNING:  DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
			free(outptr);
			free(bm_blk_buff);
			error_mupip = TRUE;
			COMMON_CLOSE(common);
			return FALSE;
		}

		/* -------- read and write every record in the temporary file -------- */
		while (1)
		{
			temp_rab.rab$w_usz = outsize;
			temp_rab.rab$l_ubf = outptr;
			status = sys$get(&temp_rab);
			if (RMS$_NORMAL != status)
			{
				if (RMS$_EOF == status)
					status = RMS$_NORMAL;
				break;
			}
			assert(outsize == temp_rab.rab$w_rsz);
			/* Still validly sized blk? */
			assert((outsize - SIZEOF(shmpool_blk_hdr)) >= ((blk_hdr_ptr_t)(outptr + SIZEOF(shmpool_blk_hdr)))->bsiz);
			COMMON_WRITE(common, outptr, temp_rab.rab$w_rsz);
		}

		if (RMS$_NORMAL != status)
		{
			gtm_putmsg(status, 0, temp_rab.rab$l_stv);
			util_out_print("WARNING:  DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
			free(outptr);
			free(bm_blk_buff);
			error_mupip = TRUE;
			COMMON_CLOSE(common);
			return FALSE;
		}

		/* ---------------- Close the temporary file ----------------------- */
		if (RMS$_NORMAL != (status = sys$close(&temp_fab)))
		{
			gtm_putmsg(status, 0, temp_fab.fab$l_stv);
			util_out_print("WARNING:  DB file !AD backup aborted.", TRUE, fcb->fab$b_fns, fcb->fab$l_fna);
			free(outptr);
			free(bm_blk_buff);
			error_mupip = TRUE;
			COMMON_CLOSE(common);
			return FALSE;
		}
	}

	/* ============================= write end_msg and fileheader ======================================= */

	if ((!online) || (0 == cs_addrs->shmpool_buffer->failed))
	{
		MEMCPY_LIT(outptr, END_MSG);
		/* Although the write need only be of length SIZEOF(END_MSG) - 1 for file IO, if the write is going
		   to TCP we have to write all these records with a common length, so just write the "regular" sized
		   buffer. The extra garbage left over from the last write will be ignored as we key only on this
		   end text.
		*/
		COMMON_WRITE(common, outptr, outsize);

		ptr1 = (uchar_ptr_t)header;
		size1 = ROUND_UP(SIZEOF(sgmnt_data), DISK_BLOCK_SIZE);
		ptr1_top = ptr1 + size1;
		for ( ; ptr1 < ptr1_top; ptr1 += size1)
		{
			if ((size1 = ptr1_top - ptr1) > mubmaxblk)
				size1 = (mubmaxblk / DISK_BLOCK_SIZE) * DISK_BLOCK_SIZE;
			COMMON_WRITE(common, ptr1, size1);
		}

		MEMCPY_LIT(outptr, HDR_MSG);
		COMMON_WRITE(common, outptr, SIZEOF(HDR_MSG));
		ptr1 = MM_ADDR(header);
		size1 = ROUND_UP(MASTER_MAP_SIZE(header), DISK_BLOCK_SIZE);
		ptr1_top = ptr1 + size1;
		for ( ; ptr1 < ptr1_top; ptr1 += size1)
		{
			if ((size1 = ptr1_top - ptr1) > mubmaxblk)
				size1 = (mubmaxblk / DISK_BLOCK_SIZE) * DISK_BLOCK_SIZE;
			COMMON_WRITE(common, ptr1, size1);
		}

		MEMCPY_LIT(outptr, MAP_MSG);
		COMMON_WRITE(common, outptr, SIZEOF(MAP_MSG));
	}


	/* ================== close backup destination, output and return ================================== */

	if (online && (0 != cs_addrs->shmpool_buffer->failed))
	{
		util_out_print("Process !UL encountered the following error.", TRUE,
			       cs_addrs->shmpool_buffer->failed);
		if (0 != cs_addrs->shmpool_buffer->backup_errno)
			gtm_putmsg(VARLSTCNT(1) cs_addrs->shmpool_buffer->backup_errno);
		free(outptr);
		free(bm_blk_buff);
		error_mupip = TRUE;
		COMMON_CLOSE(common);
		return FALSE;
	}

	COMMON_CLOSE(common);
	free(outptr);
	free(bm_blk_buff);

	util_out_print("DB file !AD incrementally backed up in !AD", TRUE,
		       fcb->fab$b_fns, fcb->fab$l_fna, file->len, file->addr);
	util_out_print("!UL blocks saved.", TRUE, save_blks);
	util_out_print("Transactions from 0x!16@XQ to 0x!16@XQ are backed up.", TRUE,
		       &cs_addrs->shmpool_buffer->inc_backup_tn, &header->trans_hist.curr_tn);
	cs_addrs->hdr->last_inc_backup = header->trans_hist.curr_tn;
	if (record)
		cs_addrs->hdr->last_rec_backup = header->trans_hist.curr_tn;
	file_backed_up = TRUE;
	return TRUE;
}
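
The backup/ignore decision inside the main loop of mubinccpy() is the heart of the incremental backup. The predicate below paraphrases those ordered rules in isolation; it is hypothetical (it does not exist in the source) and the parameter names are illustrative.

/* Sketch: should this block be written to the incremental backup?
 * Mirrors the ordered rules documented in mubinccpy() above.
 */
static boolean_t want_block(boolean_t online, trans_num db_curr_tn, block_id blk_num,
			    boolean_t is_bitmap_blk, uint4 blks_to_upgrd, trans_num blk_tn,
			    trans_num since_tn, block_id last_blk_at_last_bkup)
{
	if (online && (db_curr_tn <= blk_tn))
		return FALSE;	/* updated after the backup started; the online logic covers it */
	if (3 > blk_num)
		return TRUE;	/* blocks 0-2 can hold data yet still carry a TN of 0 */
	if (is_bitmap_blk && (0 != blks_to_upgrd) && ((trans_num)0 == blk_tn)
			&& (blk_num >= last_blk_at_last_bkup))
		return TRUE;	/* new bitmap; its version matters at restore time */
	if (blk_tn < since_tn)
		return FALSE;	/* unchanged since the reference transaction */
	return TRUE;
}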
Example No. 18
void mu_int_reg(gd_region *reg, boolean_t *return_value)
{
	boolean_t		read_only, was_crit;
	freeze_status		status;
	node_local_ptr_t	cnl;
	sgmnt_addrs     	*csa;
	sgmnt_data_ptr_t	csd;
#	ifdef DEBUG
	boolean_t		need_to_wait = FALSE;
	int			trynum;
	uint4			curr_wbox_seq_num;
#	endif
	sgmnt_data		*csd_copy_ptr;
	gd_segment		*seg;
	int			gtmcrypt_errno;
	*return_value = FALSE;
	UNIX_ONLY(jnlpool_init_needed = TRUE);
	ESTABLISH(mu_int_reg_ch);
	if (dba_usr == reg->dyn.addr->acc_meth)
	{
		util_out_print("!/Can't integ region !AD; not GDS format", TRUE,  REG_LEN_STR(reg));
		mu_int_skipreg_cnt++;
		return;
	}
	gv_cur_region = reg;
	if (reg_cmcheck(reg))
	{
		util_out_print("!/Can't integ region across network", TRUE);
		mu_int_skipreg_cnt++;
		return;
	}
	gvcst_init(gv_cur_region);
	if (gv_cur_region->was_open)
	{	/* already open under another name */
		gv_cur_region->open = FALSE;
		return;
	}
	change_reg();
	csa = &FILE_INFO(gv_cur_region)->s_addrs;
	cnl = csa->nl;
	csd = csa->hdr;
	read_only = gv_cur_region->read_only;
	assert(NULL != mu_int_master);
	/* Ensure that we don't see an increase in the file header and master map size compared to their maximum values */
	assert(SGMNT_HDR_LEN >= SIZEOF(sgmnt_data) && (MASTER_MAP_SIZE_MAX >= MASTER_MAP_SIZE(csd)));
	/* ONLINE INTEG, if asked for explicitly by specifying -ONLINE, is an error if the db has partial V4 blocks.
	 * However, if -ONLINE is not explicitly specified but rather assumed implicitly (as the default for -REG),
	 * then turn off ONLINE INTEG for this region and continue as if -NOONLINE was specified.
	 */
#	ifdef GTM_SNAPSHOT
	if (!csd->fully_upgraded)
	{
		ointeg_this_reg = FALSE; /* Turn off ONLINE INTEG for this region */
		if (online_specified)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_SSV4NOALLOW, 2, DB_LEN_STR(gv_cur_region));
			util_out_print(NO_ONLINE_ERR_MSG, TRUE);
			mu_int_skipreg_cnt++;
			return;
		}
	}
#	endif
	if (!ointeg_this_reg || read_only)
	{
		status = region_freeze(gv_cur_region, TRUE, FALSE, TRUE);
		switch (status)
		{
			case REG_ALREADY_FROZEN:
				UNIX_ONLY(if (csa->read_only_fs) break);
				util_out_print("!/Database for region !AD is already frozen, not integing",
					TRUE, REG_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_HAS_KIP:
				/* We have already waited for KIP to reset. This time do not wait for KIP */
				status = region_freeze(gv_cur_region, TRUE, FALSE, FALSE);
				if (REG_ALREADY_FROZEN == status)
				{
					UNIX_ONLY(if (csa->read_only_fs) break);
					util_out_print("!/Database for region !AD is already frozen, not integing",
						TRUE, REG_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				}
				break;
			case REG_FREEZE_SUCCESS:
				break;
			default:
				assert(FALSE);
		}
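
The REG_HAS_KIP branch above encodes a two-step freeze: wait once for kills-in-progress to clear, then retry without waiting. A sketch of that shape on its own (the argument order of region_freeze is taken from the calls above; the helper itself is hypothetical):

/* Sketch: freeze a region for integ, tolerating kills-in-progress once.
 * Returns FALSE if some other process already holds the freeze, in which
 * case the caller skips the region.
 */
static boolean_t freeze_for_integ(gd_region *reg)
{
	freeze_status	status;

	status = region_freeze(reg, TRUE, FALSE, TRUE);		/* wait for KIP to clear */
	if (REG_HAS_KIP == status)
		status = region_freeze(reg, TRUE, FALSE, FALSE);	/* retry, no KIP wait */
	return (REG_ALREADY_FROZEN != status);
}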
Example No. 19
int4 gds_rundown(void)
{
	boolean_t		canceled_dbsync_timer, canceled_flush_timer, ok_to_write_pfin;
	boolean_t		have_standalone_access, ipc_deleted, err_caught;
	boolean_t		is_cur_process_ss_initiator, remove_shm, vermismatch, we_are_last_user, we_are_last_writer, is_mm;
	boolean_t		unsafe_last_writer;
	char			time_str[CTIME_BEFORE_NL + 2]; /* for GET_CUR_TIME macro */
	gd_region		*reg;
	int			save_errno, status, rc;
	int4			semval, ftok_semval, sopcnt, ftok_sopcnt;
	short			crash_count;
	sm_long_t		munmap_len;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	node_local_ptr_t	cnl;
	struct shmid_ds		shm_buf;
	struct sembuf		sop[2], ftok_sop[2];
	uint4           	jnl_status;
	unix_db_info		*udi;
	jnl_private_control	*jpc;
	jnl_buffer_ptr_t	jbp;
	shm_snapshot_t		*ss_shm_ptr;
	uint4			ss_pid, onln_rlbk_pid, holder_pid;
	boolean_t		was_crit;
	boolean_t		safe_mode; /* Do not flush or take down shared memory. */
	boolean_t		bypassed_ftok = FALSE, bypassed_access = FALSE, may_bypass_ftok, inst_is_frozen,
				ftok_counter_halted,
				access_counter_halted;
	int			secshrstat;
	intrpt_state_t		prev_intrpt_state;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	jnl_status = 0;
	reg = gv_cur_region;			/* Local copy */

	/* Early out for cluster regions to avoid tripping the assert below.
	 * Note: This early out is consistent with VMS. It has been noted that all of the gtcm assignments
	 *	to gv_cur_region should use the TP_CHANGE_REG macro. This would also avoid the assert problem
	 *	and should be done eventually.
	 */
	if (dba_cm == reg->dyn.addr->acc_meth)
		return EXIT_NRM;

	udi = FILE_INFO(reg);
	csa = &udi->s_addrs;
	csd = csa->hdr;
	assert(csa == cs_addrs && csd == cs_data);
	if ((reg->open) && (dba_usr == csd->acc_meth))
	{
		change_reg();
		gvusr_rundown();
		return EXIT_NRM;
	}
	/* If the process has standalone access, it has udi->grabbed_access_sem set to TRUE at this point. Note that down in a
	 * local variable since udi->grabbed_access_sem is set to TRUE even for non-standalone access below, and hence we can't
	 * rely on it later to determine whether the process had standalone access when it entered this function. We need to
	 * guarantee that no one else accesses the database file header while the semid/shmid fields are reset. We have already
	 * created the ftok semaphore in db_init or mu_rndwn_file and did not remove it, so just lock it, in blocking mode.
	 */
	have_standalone_access = udi->grabbed_access_sem; /* process holds standalone access */
	DEFER_INTERRUPTS(INTRPT_IN_GDS_RUNDOWN, prev_intrpt_state);
	ESTABLISH_NORET(gds_rundown_ch, err_caught);
	if (err_caught)
	{
		REVERT;
		WITH_CH(gds_rundown_ch, gds_rundown_err_cleanup(have_standalone_access), 0);
		ENABLE_INTERRUPTS(INTRPT_IN_GDS_RUNDOWN, prev_intrpt_state);
		DEBUG_ONLY(ok_to_UNWIND_in_exit_handling = FALSE);
		return EXIT_ERR;
	}
	assert(reg->open);			/* if we failed to open, dbinit_ch should have taken care of proper clean up */
	assert(!reg->opening);			/* see comment above */
	assert((dba_bg == csd->acc_meth) || (dba_mm == csd->acc_meth));
	is_mm = (dba_bg != csd->acc_meth);
	assert(!csa->hold_onto_crit || (csa->now_crit && jgbl.onlnrlbk));
	/* If we are online rollback, we should already be holding crit and should release it only at the end of this module.
	 * This is usually done by noting down csa->now_crit in a local variable (was_crit) and using it whenever we are about
	 * to grab_crit. But there are instances (like mupip_set_journal.c) where we grab_crit but invoke gds_rundown without
	 * any preceding rel_crit. Such code relies on the fact that gds_rundown does rel_crit unconditionally (to get locks to
	 * a known state). So, augment csa->now_crit with jgbl.onlnrlbk to track whether we can rel_crit unconditionally or not
	 * in gds_rundown.
	 */
	was_crit = (csa->now_crit && jgbl.onlnrlbk);
	/* Cancel any pending flush timer for this region by this task */
	canceled_flush_timer = FALSE;
	canceled_dbsync_timer = FALSE;
	CANCEL_DB_TIMERS(reg, csa, canceled_flush_timer, canceled_dbsync_timer);
	we_are_last_user = FALSE;
	inst_is_frozen = IS_REPL_INST_FROZEN && REPL_ALLOWED(csa->hdr);
	if (!csa->persistent_freeze)
		region_freeze(reg, FALSE, FALSE, FALSE);
	if (!was_crit)
	{
		rel_crit(reg);		/* get locks to known state */
		mutex_cleanup(reg);
	}
	/* The only process that can invoke gds_rundown while holding the access control semaphore is RECOVER/ROLLBACK. All the
	 * others (like MUPIP SET -FILE/MUPIP EXTEND) would have invoked db_ipcs_reset() before invoking gds_rundown (from
	 * mupip_exit_handler). The only exception is when these processes encounter a terminate signal and they reach
	 * mupip_exit_handler while holding the access control semaphore. Assert accordingly.
	 */
	assert(!have_standalone_access || mupip_jnl_recover || process_exiting);
	/* If we have standalone access, then ensure that a concurrent online rollback cannot be running at the same time as it
	 * needs the access control lock as well. The only exception is when we are online rollback and are currently running
	 * down.
	 */
	cnl = csa->nl;
	onln_rlbk_pid = cnl->onln_rlbk_pid;
	assert(!have_standalone_access || mupip_jnl_recover || !onln_rlbk_pid || !is_proc_alive(onln_rlbk_pid, 0));
	if (!have_standalone_access)
	{
		if (-1 == (ftok_semval = semctl(udi->ftok_semid, DB_COUNTER_SEM, GETVAL))) /* Check # of procs counted on FTOK */
		{
			save_errno = errno;
			assert(FALSE);
			rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_SYSCALL, 5,
				  RTS_ERROR_TEXT("gds_rundown SEMCTL failed to get ftok_semval"), CALLFROM, errno);
		}
		may_bypass_ftok = CAN_BYPASS(ftok_semval, csd, inst_is_frozen); /* Do we need a blocking wait? */
		/* We need to guarantee that no one else accesses the database file header while the semid/shmid fields are
		 * reset. We have already created the ftok semaphore in db_init or mu_rndwn_file and did not remove it, so
		 * just lock it.
		 */
		if (!ftok_sem_lock(reg, may_bypass_ftok))
		{
			if (may_bypass_ftok)
			{	/* We did a non-blocking wait. It's ok to proceed without locking */
				bypassed_ftok = TRUE;
				holder_pid = semctl(udi->ftok_semid, DB_CONTROL_SEM, GETPID);
				if ((uint4)-1 == holder_pid)
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg),
							ERR_SYSCALL, 5,
							RTS_ERROR_TEXT("gds_rundown SEMCTL failed to get holder_pid"),
							CALLFROM, errno);
				if (!IS_GTM_IMAGE) /* MUMPS processes should not flood syslog with bypass messages. */
				{
					send_msg_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_RESRCINTRLCKBYPAS, 10,
						 LEN_AND_STR(gtmImageNames[image_type].imageName), process_id, LEN_AND_LIT("FTOK"),
						 REG_LEN_STR(reg), DB_LEN_STR(reg), holder_pid);
					send_msg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_TEXT, 2,
							LEN_AND_LIT("FTOK bypassed at rundown"));
				}
			} else
			{	/* We did a blocking wait but something bad happened. */
				FTOK_TRACE(csa, csa->ti->curr_tn, ftok_ops_lock, process_id);
				rts_error_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
			}
		}
		sop[0].sem_num = DB_CONTROL_SEM; sop[0].sem_op = 0;	/* Wait for 0 */
		sop[1].sem_num = DB_CONTROL_SEM; sop[1].sem_op = 1;	/* Lock */
		sopcnt = 2;
		sop[0].sem_flg = sop[1].sem_flg = SEM_UNDO | IPC_NOWAIT; /* Don't wait the first time thru */
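		/* The two-operation array implements "grab the control semaphore" atomically: op[0] waits for the
		 * semaphore value to reach 0 and op[1] then increments it. IPC_NOWAIT makes this first attempt fail
		 * (EAGAIN) rather than block if someone else holds it; SEM_UNDO has the kernel release our increment
		 * if we die while holding the semaphore.
		 */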
		SEMOP(udi->semid, sop, sopcnt, status, NO_WAIT);
		if (0 != status)
		{
			save_errno = errno;
			/* Check # of processes counted on access sem. */
			if (-1 == (semval = semctl(udi->semid, DB_COUNTER_SEM, GETVAL)))
			{
				assert(FALSE);
				rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_SYSCALL, 5,
					  RTS_ERROR_TEXT("gds_rundown SEMCTL failed to get semval"), CALLFROM, errno);
			}
			bypassed_access = CAN_BYPASS(semval, csd, inst_is_frozen) || onln_rlbk_pid || csd->file_corrupt;
			/* Before attempting again in the blocking mode, see if the holding process is an online rollback.
			 * If so, it is likely we won't get the access control semaphore anytime soon. In that case, we
			 * are better off skipping rundown and continuing with sanity cleanup and exit.
			 */
			holder_pid = semctl(udi->semid, DB_CONTROL_SEM, GETPID);
			if ((uint4)-1 == holder_pid)
				rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_SYSCALL, 5,
					  RTS_ERROR_TEXT("gds_rundown SEMCTL failed to get holder_pid"), CALLFROM, errno);
			if (!bypassed_access)
			{	/* We couldn't get it in one shot-- see if we already have it */
				if (holder_pid == process_id)
				{
					send_msg_csa(CSA_ARG(csa) VARLSTCNT(5) MAKE_MSG_INFO(ERR_CRITSEMFAIL), 2, DB_LEN_STR(reg),
							ERR_RNDWNSEMFAIL);
					REVERT;
					ENABLE_INTERRUPTS(INTRPT_IN_GDS_RUNDOWN, prev_intrpt_state);
					assert(FALSE);
					return EXIT_ERR;
				}
				if (EAGAIN != save_errno)
				{
					assert(FALSE);
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg),
							ERR_SYSCALL, 5,
							RTS_ERROR_TEXT("gds_rundown SEMOP on access control semaphore"),
							CALLFROM, save_errno);
				}
				sop[0].sem_flg = sop[1].sem_flg = SEM_UNDO;	/* Try again - blocking this time */
				SEMOP(udi->semid, sop, 2, status, FORCED_WAIT);
				if (-1 == status)			/* We couldn't get it at all.. */
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg),
							ERR_SYSCALL, 5,
							RTS_ERROR_TEXT("gds_rundown SEMOP on access control semaphore"),
							CALLFROM, errno);
			} else if (!IS_GTM_IMAGE)
			{
				send_msg_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_RESRCINTRLCKBYPAS, 10,
						LEN_AND_STR(gtmImageNames[image_type].imageName), process_id,
						LEN_AND_LIT("access control"), REG_LEN_STR(reg), DB_LEN_STR(reg), holder_pid);
				send_msg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_TEXT, 2,
						LEN_AND_LIT("Access control bypassed at rundown"));
			}
			udi->grabbed_access_sem = !bypassed_access;
		}
	} /* else we hold the access control semaphore and therefore have standalone access. We do not release it now - we
	   * release it later in mupip_exit_handler.c. Since we already hold the access control semaphore, we don't need the
	   * ftok semaphore, and trying for it could cause a deadlock.
	   */
	/* Note that in the case of online rollback, "udi->grabbed_access_sem" (and in turn "have_standalone_access") is TRUE.
	 * But there could be other processes still having the database open so we cannot safely reset the halted fields.
	 */
	if (have_standalone_access && !jgbl.onlnrlbk)
		csd->ftok_counter_halted = csd->access_counter_halted = FALSE;
	ftok_counter_halted = csd->ftok_counter_halted;
	access_counter_halted = csd->access_counter_halted;
	/* If we bypassed any of the semaphores, activate safe mode.
	 * Also, if the replication instance is frozen and this db has replication turned on (which means
	 * no flushes of dirty buffers to this db can happen while the instance is frozen) activate safe mode.
	 */
	ok_to_write_pfin = !(bypassed_access || bypassed_ftok || inst_is_frozen);
	safe_mode = !ok_to_write_pfin || ftok_counter_halted || access_counter_halted;
	/* At this point we are guaranteed no one else is doing a db_init/rundown as we hold the access control semaphore */
	assert(csa->ref_cnt);	/* decrement private ref_cnt before shared ref_cnt decrement. */
	csa->ref_cnt--;		/* Currently journaling logic in gds_rundown() in VMS relies on this order to detect last writer */
	assert(!csa->ref_cnt);
	--cnl->ref_cnt;
	if (memcmp(cnl->now_running, gtm_release_name, gtm_release_name_len + 1))
	{	/* VERMISMATCH condition. Possible only if DSE */
		assert(dse_running);
		vermismatch = TRUE;
	} else
		vermismatch = FALSE;
	if (-1 == shmctl(udi->shmid, IPC_STAT, &shm_buf))
	{
		save_errno = errno;
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_SYSCALL, 5,
				RTS_ERROR_TEXT("gds_rundown shmctl"), CALLFROM, save_errno);
	} else
		we_are_last_user =  (1 == shm_buf.shm_nattch) && !vermismatch && !safe_mode;
	/* recover => one user except ONLINE ROLLBACK, or standalone with frozen instance */
	assert(!have_standalone_access || we_are_last_user || jgbl.onlnrlbk || inst_is_frozen);
	if (-1 == (semval = semctl(udi->semid, DB_COUNTER_SEM, GETVAL)))
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_SYSCALL, 5,
			  RTS_ERROR_TEXT("gds_rundown SEMCTL failed to get semval"), CALLFROM, errno);
	/* There's one writer left and I am it */
	assert(reg->read_only || semval >= 0);
	unsafe_last_writer = (DB_COUNTER_SEM_INCR == semval) && (FALSE == reg->read_only) && !vermismatch;
	we_are_last_writer = unsafe_last_writer && !safe_mode;
	assert(!we_are_last_writer || !safe_mode);
	assert(!we_are_last_user || !safe_mode);
	/* recover + R/W region => one writer except ONLINE ROLLBACK, or standalone with frozen instance, leading to safe_mode */
	assert(!(have_standalone_access && !reg->read_only) || we_are_last_writer || jgbl.onlnrlbk || inst_is_frozen);
	GTM_WHITE_BOX_TEST(WBTEST_ANTIFREEZE_JNLCLOSE, we_are_last_writer, 1); /* Assume we are the last writer to invoke wcs_flu */
	if (!have_standalone_access && (-1 == (ftok_semval = semctl(udi->ftok_semid, DB_COUNTER_SEM, GETVAL))))
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg), ERR_SYSCALL, 5,
			  RTS_ERROR_TEXT("gds_rundown SEMCTL failed to get ftok_semval"), CALLFROM, errno);
	if (NULL != csa->ss_ctx)
		ss_destroy_context(csa->ss_ctx);
	/* SS_MULTI: If multiple snapshots are supported, then we have to run through each of the snapshots */
	assert(1 == MAX_SNAPSHOTS);
	ss_shm_ptr = (shm_snapshot_ptr_t)SS_GETSTARTPTR(csa);
	ss_pid = ss_shm_ptr->ss_info.ss_pid;
	is_cur_process_ss_initiator = (process_id == ss_pid);
	if (ss_pid && (is_cur_process_ss_initiator || we_are_last_user))
	{
		/* Try getting snapshot crit latch. If we don't get latch, we won't hang for eternity and will skip
		 * doing the orphaned snapshot cleanup. It will be cleaned up eventually either by subsequent MUPIP
		 * INTEG or by a MUPIP RUNDOWN.
		 */
		if (ss_get_lock_nowait(reg) && (ss_pid == ss_shm_ptr->ss_info.ss_pid)
			&& (is_cur_process_ss_initiator || !is_proc_alive(ss_pid, 0)))
		{
			ss_release(NULL);
			ss_release_lock(reg);
		}
	}
	/* If cnl->donotflush_dbjnl is set, it means mupip recover/rollback was interrupted and therefore we need not flush
	 * shared memory contents to disk as they might be in an inconsistent state. Moreover, any more flushing will only cause
	 * future rollback to undo more journal records (PBLKs). In this case, we will go ahead and remove shared memory (without
	 * flushing the contents) in this routine. A reissue of the recover/rollback command will restore the database to a
	 * consistent state.
	 */
	if (!cnl->donotflush_dbjnl && !reg->read_only && !vermismatch)
	{	/* If we had an orphaned block and were interrupted, set wc_blocked so we can invoke wcs_recover. Do it ONLY
		 * if there is NO concurrent online rollback running (as we need crit to set wc_blocked)
		 */
		if (csa->wbuf_dqd && !is_mm)
		{	/* If we had an orphaned block and were interrupted, mupip_exit_handler will invoke secshr_db_clnup which
			 * will clear this field and so we should never come to gds_rundown with a non-zero wbuf_dqd. The only
			 * exception is if we are recover/rollback in which case gds_rundown (from mur_close_files) is invoked
			 * BEFORE secshr_db_clnup in mur_close_files.
			 * Note: It is NOT possible for online rollback to reach here with wbuf_dqd being non-zero. This is because
			 * the moment we apply the first PBLK, we stop all interrupts and hence can never be interrupted in
			 * wcs_wtstart or wcs_get_space. Assert accordingly.
			 */
			assert(mupip_jnl_recover && !jgbl.onlnrlbk && !safe_mode);
			if (!was_crit)
				grab_crit(reg);
			SET_TRACEABLE_VAR(cnl->wc_blocked, TRUE);
			BG_TRACE_PRO_ANY(csa, wcb_gds_rundown);
			send_msg_csa(CSA_ARG(csa) VARLSTCNT(8) ERR_WCBLOCKED, 6, LEN_AND_LIT("wcb_gds_rundown"),
				process_id, &csa->ti->curr_tn, DB_LEN_STR(reg));
			csa->wbuf_dqd = 0;
			wcs_recover(reg);
			BG_TRACE_PRO_ANY(csa, lost_block_recovery);
			if (!was_crit)
				rel_crit(reg);
		}
		if (JNL_ENABLED(csd) && IS_GTCM_GNP_SERVER_IMAGE)
			originator_prc_vec = NULL;
		/* If we are the last writing user, then everything must be flushed */
		if (we_are_last_writer)
		{	/* Time to flush out all of our buffers */
			assert(!safe_mode);
			if (is_mm)
			{
				MM_DBFILEXT_REMAP_IF_NEEDED(csa, reg);
				cnl->remove_shm = TRUE;
			}
			if (cnl->wc_blocked && jgbl.onlnrlbk)
			{	/* if the last update done by online rollback was not committed in the normal code-path but was
				 * completed by secshr_db_clnup, wc_blocked will be set to TRUE. But, since online rollback never
				 * invokes grab_crit (since csa->hold_onto_crit is set to TRUE), wcs_recover is never invoked. This
				 * could result in the last update never getting flushed to the disk and if online rollback happened
				 * to be the last writer then the shared memory will be flushed and removed and the last update will
				 * be lost. So, force wcs_recover if we find ourselves in such a situation. But, wc_blocked is
				 * possible only if phase1 or phase2 errors are induced using white box test cases
				 */
				assert(WB_COMMIT_ERR_ENABLED);
				wcs_recover(reg);
			}
			/* Note WCSFLU_SYNC_EPOCH ensures the epoch is synced to the journal and indirectly
			 * also ensures that the db is fsynced. We don't want to use it in the calls to
			 * wcs_flu() from t_end() and tp_tend() since we can defer it to out-of-crit there.
			 * In this case, since we are running down, we don't have any such option.
			 */
			cnl->remove_shm = wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_SYNC_EPOCH);
			/* Since we_are_last_writer, we should be guaranteed that wcs_flu() did not change csd, (in
			 * case of MM for potential file extension), even if it did a grab_crit().  Therefore, make
			 * sure that's true.
			 */
			assert(csd == csa->hdr);
			assert(0 == memcmp(csd->label, GDS_LABEL, GDS_LABEL_SZ - 1));
		} else if (((canceled_flush_timer && (0 > cnl->wcs_timers)) || canceled_dbsync_timer) && !inst_is_frozen)
		{	/* canceled pending db or jnl flush timers - flush database and journal buffers to disk */
			if (!was_crit)
				grab_crit(reg);
			/* We need to sync the epoch since the absence of an active pending flush timer implies
			 * no one else will flush the dirty buffers and EPOCH to disk in a timely fashion.
			 */
			wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_SYNC_EPOCH);
			if (!was_crit)
				rel_crit(reg);
			assert((dba_mm == cs_data->acc_meth) || (csd == cs_data));
			csd = cs_data;	/* In case this is MM and wcs_flu() remapped an extended database, reset csd */
		}
		/* Do rundown journal processing after buffer flushes since they require jnl to be open */
		if (JNL_ENABLED(csd))
		{	/* the following tp_change_reg() is not needed due to the assert csa == cs_addrs at the beginning
			 * of gds_rundown(), but just to be safe. To be removed by 2002!! --- nars -- 2001/04/25.
			 */
			tp_change_reg();	/* call this because jnl_ensure_open checks cs_addrs rather than gv_cur_region */
			jpc = csa->jnl;
			jbp = jpc->jnl_buff;
			if (jbp->fsync_in_prog_latch.u.parts.latch_pid == process_id)
			{
				assert(FALSE);
				COMPSWAP_UNLOCK(&jbp->fsync_in_prog_latch, process_id, 0, LOCK_AVAILABLE, 0);
			}
			if (jbp->io_in_prog_latch.u.parts.latch_pid == process_id)
			{
				assert(FALSE);
				COMPSWAP_UNLOCK(&jbp->io_in_prog_latch, process_id, 0, LOCK_AVAILABLE, 0);
			}
			if ((((NOJNL != jpc->channel) && !JNL_FILE_SWITCHED(jpc))
				|| (we_are_last_writer && (0 != cnl->jnl_file.u.inode))) && ok_to_write_pfin)
			{	/* We need to close the journal file cleanly if we have the latest generation journal file open
				 *	or if we are the last writer and the journal file is open in shared memory (not necessarily
				 *	by ourselves e.g. the only process that opened the journal got shot abnormally)
				 * Note: we should not infer anything from the shared memory value of cnl->jnl_file.u.inode
				 * 	if we are not the last writer as it can be concurrently updated.
				 */
				if (!was_crit)
					grab_crit(reg);
				if (JNL_ENABLED(csd))
				{
					SET_GBL_JREC_TIME; /* jnl_ensure_open/jnl_put_jrt_pini/pfin/jnl_file_close all need it */
					/* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order
					 * of jnl records. This needs to be done BEFORE the jnl_ensure_open as that could write
					 * journal records (if it decides to switch to a new journal file).
					 */
					ADJUST_GBL_JREC_TIME(jgbl, jbp);
					jnl_status = jnl_ensure_open();
					if (0 == jnl_status)
					{	/* If we_are_last_writer, we would have already done a wcs_flu() which would
						 * have written an epoch record and we are guaranteed no further updates
						 * since we are the last writer. So, just close the journal.
						 * If the freeaddr == post_epoch_freeaddr, wcs_flu may have skipped writing
						 * a pini, so allow for that.
						 */
						assert(!jbp->before_images || is_mm
						    || !we_are_last_writer || (0 != jpc->pini_addr) || jgbl.mur_extract
						    || (jpc->jnl_buff->freeaddr == jpc->jnl_buff->post_epoch_freeaddr));
						/* If we haven't written a pini, let jnl_file_close write the pini/pfin. */
						if (!jgbl.mur_extract && (0 != jpc->pini_addr))
							jnl_put_jrt_pfin(csa);
						/* If not the last writer and no pending flush timer left, do jnl flush now */
						if (!we_are_last_writer && (0 > cnl->wcs_timers))
						{
							if (SS_NORMAL == (jnl_status = jnl_flush(reg)))
							{
								assert(jbp->freeaddr == jbp->dskaddr);
								jnl_fsync(reg, jbp->dskaddr);
								assert(jbp->fsync_dskaddr == jbp->dskaddr);
							} else
							{
								send_msg_csa(CSA_ARG(csa) VARLSTCNT(9) ERR_JNLFLUSH, 2,
									JNL_LEN_STR(csd), ERR_TEXT, 2,
									RTS_ERROR_TEXT("Error with journal flush in gds_rundown"),
									jnl_status);
								assert(NOJNL == jpc->channel);/* jnl file lost has been triggered */
								/* In this routine, all code that follows from here on does not
								 * assume anything about the journaling characteristics of this
								 * database so it is safe to continue execution even though
								 * journaling got closed in the middle.
								 */
							}
						}
						jnl_file_close(reg, we_are_last_writer, FALSE);
					} else
						send_msg_csa(CSA_ARG(csa) VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(csd),
								DB_LEN_STR(reg));
				}
				if (!was_crit)
					rel_crit(reg);
			}
		}
		if (we_are_last_writer)			/* Flush the fileheader last and harden the file to disk */
		{
			if (!was_crit)
				grab_crit(reg);			/* To satisfy crit requirement in fileheader_sync() */
			memset(csd->machine_name, 0, MAX_MCNAMELEN); /* clear the machine_name field */
			if (!have_standalone_access && we_are_last_user)
			{	/* mupip_exit_handler will do this after mur_close_file */
				csd->semid = INVALID_SEMID;
				csd->shmid = INVALID_SHMID;
				csd->gt_sem_ctime.ctime = 0;
				csd->gt_shm_ctime.ctime = 0;
			}
			fileheader_sync(reg);
			if (!was_crit)
				rel_crit(reg);
			if (!is_mm)
			{
				GTM_DB_FSYNC(csa, udi->fd, rc);		/* Sync it all */
				if (-1 == rc)
				{
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
						  ERR_TEXT, 2, RTS_ERROR_TEXT("Error during file sync at close"), errno);
				}
			} else
			{	/* Now do final MM file sync before exit */
				assert(csa->ti->total_blks == csa->total_blks);
				#ifdef _AIX
				GTM_DB_FSYNC(csa, udi->fd, rc);
				if (-1 == rc)
				#else
				if (-1 == MSYNC((caddr_t)csa->db_addrs[0], (caddr_t)csa->db_addrs[1]))
				#endif
				{
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
						  ERR_TEXT, 2, RTS_ERROR_TEXT("Error during file sync at close"), errno);
				}
			}
		} else if (unsafe_last_writer && !cnl->lastwriterbypas_msg_issued)
		{
			send_msg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_LASTWRITERBYPAS, 2, DB_LEN_STR(reg));
			cnl->lastwriterbypas_msg_issued = TRUE;
		}
	} /* end if (!reg->read_only && !cnl->donotflush_dbjnl) */
	/* We had canceled all db timers at start of rundown. In case as part of rundown (wcs_flu above), we had started
	 * any timers, cancel them BEFORE setting reg->open to FALSE (assert in wcs_clean_dbsync relies on this).
	 */
	CANCEL_DB_TIMERS(reg, csa, canceled_flush_timer, canceled_dbsync_timer);
	if (reg->read_only && we_are_last_user && !have_standalone_access && cnl->remove_shm)
	{	/* mupip_exit_handler will do this after mur_close_file */
		db_ipcs.semid = INVALID_SEMID;
		db_ipcs.shmid = INVALID_SHMID;
		db_ipcs.gt_sem_ctime = 0;
		db_ipcs.gt_shm_ctime = 0;
		db_ipcs.fn_len = reg->dyn.addr->fname_len;
		memcpy(db_ipcs.fn, reg->dyn.addr->fname, reg->dyn.addr->fname_len);
		db_ipcs.fn[reg->dyn.addr->fname_len] = 0;
		/* Request gtmsecshr to flush; a read-only process cannot flush the file header itself. */
		WAIT_FOR_REPL_INST_UNFREEZE_SAFE(csa);
		if (!csa->read_only_fs)
		{
			secshrstat = send_mesg2gtmsecshr(FLUSH_DB_IPCS_INFO, 0, (char *)NULL, 0);
			if (0 != secshrstat)
				rts_error_csa(CSA_ARG(csa) VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					  ERR_TEXT, 2, RTS_ERROR_TEXT("gtmsecshr failed to update database file header"));
		}
	}
	/* Done with file now, close it */
	CLOSEFILE_RESET(udi->fd, rc);	/* resets "udi->fd" to FD_INVALID */
	if (-1 == rc)
	{
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
			  ERR_TEXT, 2, LEN_AND_LIT("Error during file close"), errno);
	}
	/* Unmap storage if mm mode but only the part that is not the fileheader (so shows up in dumps) */
#	if !defined(_AIX)
	if (is_mm && (NULL != csa->db_addrs[0]))
	{
		assert(csa->db_addrs[1] > csa->db_addrs[0]);
		munmap_len = (sm_long_t)(csa->db_addrs[1] - csa->db_addrs[0]);
		if (0 < munmap_len)
			munmap((caddr_t)(csa->db_addrs[0]), (size_t)(munmap_len));
	}
#	endif
	/* Detach our shared memory while still under lock so reference counts will be correct for the next process to run down
	 * this region. In the process also get the remove_shm status from node_local before detaching.
	 * If cnl->donotflush_dbjnl is TRUE, it means we can safely remove shared memory without compromising data
	 * integrity as a reissue of recover will restore the database to a consistent state.
	 */
	remove_shm = !vermismatch && (cnl->remove_shm || cnl->donotflush_dbjnl);
	/* We are done with online rollback on this region. Indicate to other processes by setting the onln_rlbk_pid to 0.
	 * Do it before releasing crit (t_end relies on this ordering when accessing cnl->onln_rlbk_pid).
	 */
	if (jgbl.onlnrlbk)
		cnl->onln_rlbk_pid = 0;
	rel_crit(reg); /* Since we are about to detach from the shared memory, release crit and reset onln_rlbk_pid */
	/* If we had skipped flushing journal and database buffers due to a concurrent online rollback, increment the counter
	 * indicating that in the shared memory so that online rollback can report the # of such processes when it shuts down.
	 * The same thing is done for both FTOK and access control semaphores when there are too many MUMPS processes.
	 */
	if (safe_mode) /* indicates flushing was skipped */
	{
		if (bypassed_access)
			cnl->dbrndwn_access_skip++; /* Access semaphore can be bypassed during online rollback */
		if (bypassed_ftok)
			cnl->dbrndwn_ftok_skip++;
	}
	if (jgbl.onlnrlbk)
		csa->hold_onto_crit = FALSE;
	GTM_WHITE_BOX_TEST(WBTEST_HOLD_SEM_BYPASS, cnl->wbox_test_seq_num, 0);
	status = shmdt((caddr_t)cnl);
	csa->nl = NULL;	/* dereferencing nl after detach is not right, so set it to NULL and test before any dereference */
	/* Note that although csa->nl is NULL, we use CSA_ARG(csa) below (not CSA_ARG(NULL)) to be consistent with similar
	 * usages before csa->nl became NULL. The "is_anticipatory_freeze_needed" function (which is in turn called by the
	 * CHECK_IF_FREEZE_ON_ERROR_NEEDED macro) does a check of csa->nl before dereferencing shared memory contents so
	 * we are safe passing "csa".
	 */
	if (-1 == status)
		send_msg_csa(CSA_ARG(csa) VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg), ERR_TEXT, 2,
				LEN_AND_LIT("Error during shmdt"), errno);
	REMOVE_CSA_FROM_CSADDRSLIST(csa);	/* remove "csa" from list of open regions (cs_addrs_list) */
	reg->open = FALSE;
	/* If file is still not in good shape, die here and now before we get rid of our storage */
	assertpro(0 == csa->wbuf_dqd);
	ipc_deleted = FALSE;
	/* If we are the very last user, remove shared storage id and the semaphores */
	if (we_are_last_user)
	{	/* remove shared storage, only if last writer to rundown did a successful wcs_flu() */
		assert(!vermismatch);
		if (remove_shm)
		{
			ipc_deleted = TRUE;
			if (0 != shm_rmid(udi->shmid))
				rts_error_csa(CSA_ARG(csa) VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, RTS_ERROR_TEXT("Unable to remove shared memory"));
			/* Note that we no longer have a new shared memory. Currently only used/usable for standalone rollback. */
			udi->new_shm = FALSE;
			/* mupip recover/rollback don't release the semaphore here, but do it later in db_ipcs_reset (invoked from
			 * mur_close_files())
			 */
			if (!have_standalone_access)
			{
				if (0 != sem_rmid(udi->semid))
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(8) ERR_DBFILERR, 2, DB_LEN_STR(reg),
						      ERR_TEXT, 2, RTS_ERROR_TEXT("Unable to remove semaphore"));
				udi->new_sem = FALSE;			/* Note that we no longer have a new semaphore */
				udi->grabbed_access_sem = FALSE;
				udi->counter_acc_incremented = FALSE;
			}
		} else if (is_src_server || is_updproc)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
			send_msg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
		} else
			send_msg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_DBRNDWNWRN, 4, DB_LEN_STR(reg), process_id, process_id);
	} else
	{
		assert(!have_standalone_access || jgbl.onlnrlbk || safe_mode);
		if (!jgbl.onlnrlbk && !have_standalone_access)
		{ 	/* If we were writing, get rid of our writer access count semaphore */
			if (!reg->read_only)
			{
				if (!access_counter_halted)
				{
					save_errno = do_semop(udi->semid, DB_COUNTER_SEM, -DB_COUNTER_SEM_INCR, SEM_UNDO);
					if (0 != save_errno)
						rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg),
								ERR_SYSCALL, 5,
								RTS_ERROR_TEXT("gds_rundown access control semaphore decrement"),
								CALLFROM, save_errno);
				}
				udi->counter_acc_incremented = FALSE;
			}
			assert(safe_mode || !bypassed_access);
			/* Now remove the rundown lock */
			if (!bypassed_access)
			{
				if (0 != (save_errno = do_semop(udi->semid, DB_CONTROL_SEM, -1, SEM_UNDO)))
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(12) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg),
							ERR_SYSCALL, 5,
							RTS_ERROR_TEXT("gds_rundown access control semaphore release"),
							CALLFROM, save_errno);
				udi->grabbed_access_sem = FALSE;
			}
		} /* else access control semaphore will be released in db_ipcs_reset */
	}
	if (!have_standalone_access)
	{
		if (bypassed_ftok)
		{
			if (!ftok_counter_halted)
				if (0 != (save_errno = do_semop(udi->ftok_semid, DB_COUNTER_SEM, -DB_COUNTER_SEM_INCR, SEM_UNDO)))
					rts_error_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
		} else if (!ftok_sem_release(reg, !ftok_counter_halted, FALSE))
		{
			FTOK_TRACE(csa, csa->ti->curr_tn, ftok_ops_release, process_id);
			rts_error_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
		}
		udi->grabbed_ftok_sem = FALSE;
		udi->counter_ftok_incremented = FALSE;
	}
	ENABLE_INTERRUPTS(INTRPT_IN_GDS_RUNDOWN, prev_intrpt_state);
	if (!ipc_deleted)
	{
		GET_CUR_TIME(time_str);
		if (is_src_server)
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_str,
				LEN_AND_LIT("Source server"), REG_LEN_STR(reg));
		if (is_updproc)
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_str,
				LEN_AND_LIT("Update process"), REG_LEN_STR(reg));
		if (mupip_jnl_recover && (!jgbl.onlnrlbk || !we_are_last_user))
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_str,
				LEN_AND_LIT("Mupip journal process"), REG_LEN_STR(reg));
			send_msg_csa(CSA_ARG(csa) VARLSTCNT(8) ERR_IPCNOTDEL, 6, CTIME_BEFORE_NL, time_str,
				LEN_AND_LIT("Mupip journal process"), REG_LEN_STR(reg));
		}
	}
	REVERT;
	return EXIT_NRM;
}
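The rundown above leans on System V counter semaphores: every attached process holds an increment taken with SEM_UNDO (so the kernel backs it out even on abnormal death), and whichever process brings the count to zero removes the shared memory and semaphore set. Below is a minimal POSIX sketch of that last-user pattern; the two-slot layout and names are assumptions echoing the excerpt, not the actual GT.M layout, and the GETVAL/IPC_RMID pair is racy on its own (the real code serializes it under the access-control semaphore).

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/shm.h>

#define DB_CONTROL_SEM	0	/* hypothetical: rundown/access control slot */
#define DB_COUNTER_SEM	1	/* hypothetical: attached-process counter slot */

/* Drop our counter increment, detach, and remove the IPCs if we were last.
 * SEM_UNDO means the kernel releases the increment even if we are killed. */
static int rundown_ipcs_sketch(int semid, int shmid, void *shmaddr)
{
	struct sembuf	op = { DB_COUNTER_SEM, -1, SEM_UNDO | IPC_NOWAIT };

	if (-1 == semop(semid, &op, 1))
		return -1;				/* counter decrement failed */
	if (-1 == shmdt(shmaddr))
		return -1;				/* detach before any removal */
	if (0 == semctl(semid, DB_COUNTER_SEM, GETVAL))
	{	/* last user out: remove shared memory and the semaphore set */
		if (-1 == shmctl(shmid, IPC_RMID, NULL))
			return -1;
		if (-1 == semctl(semid, 0, IPC_RMID))
			return -1;
	}
	return 0;
}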
Ejemplo n.º 20
0
void dse_f_reg(void)
{
	char rn[MAX_RN_LEN];
	unsigned short rnlen;
	int i;
	bool found;
	gd_region *ptr;
	gd_addr *temp_gdaddr;
	gd_binding *map;

	temp_gdaddr = gd_header;
	gd_header = original_header;
	rnlen = SIZEOF(rn);
	if (!cli_get_str("REGION",rn,&rnlen))
	{
		gd_header = temp_gdaddr;
		return;
	}
	if (rn[0] == '*' && rnlen == 1)
	{
		util_out_print("List of global directory:!_!AD!/",TRUE,dollar_zgbldir.str.len,dollar_zgbldir.str.addr);
		for (i = 0, ptr = gd_header->regions; i < gd_header->n_regions; i++, ptr++)
		{
			util_out_print("!/File  !_!AD", TRUE, ptr->dyn.addr->fname_len, &ptr->dyn.addr->fname[0]);
			util_out_print("Region!_!AD", TRUE, REG_LEN_STR(ptr));
		}
		gd_header = temp_gdaddr;
		return;
	}
	assert(rn[0]);
	found = FALSE;
	for (i = 0, ptr = gd_header->regions; i < gd_header->n_regions; i++, ptr++)
	{
		if (found = !memcmp(&ptr->rname[0], &rn[0], MAX_RN_LEN))
			break;
	}
	if (!found)
	{
		util_out_print("Error:  region not found.",TRUE);
		gd_header = temp_gdaddr;
		return;
	}
	if (ptr == gv_cur_region)
	{
		util_out_print("Error:  already in region: !AD",TRUE,REG_LEN_STR(gv_cur_region));
		gd_header = temp_gdaddr;
		return;
	}
	if (ptr->dyn.addr->acc_meth == dba_cm)
	{
		util_out_print("Error:  Cannot edit an GT.CM database file.",TRUE);
		gd_header = temp_gdaddr;
		return;
	}
	if (ptr->dyn.addr->acc_meth == dba_usr)
	{
		util_out_print("Error:  Cannot edit a non-GDS format database file.",TRUE);
		gd_header = temp_gdaddr;
		return;
	}
	if (!ptr->open)
	{
		util_out_print("Error:  that region was not opened because it is not bound to any namespace.",TRUE);
		gd_header = temp_gdaddr;
		return;
	}
	if (TRUE == cs_addrs->now_crit)
	{
		util_out_print("Warning:  now leaving region in critical section: !AD",TRUE, gv_cur_region->rname_len,
				gv_cur_region->rname);
	}
	gv_cur_region = ptr;
	gv_target = NULL;	/* to prevent out-of-sync situations between gv_target and cs_addrs */
	gv_currkey->base[0] = '\0';	/* prevent fast-path from op_gvname from being taken as region has been switched
					 * and gv_target has been reset to NULL.
					 */
	gv_currkey->end = 0;	/* clear end so it is in sync with base[0] */
	switch (gv_cur_region->dyn.addr->acc_meth)
	{
	case dba_mm:
	case dba_bg:
		cs_addrs = &FILE_INFO(gv_cur_region)->s_addrs;
		cs_data = cs_addrs->hdr;
		break;
	default:
		GTMASSERT;
	}
	if (cs_addrs && cs_addrs->critical)
		crash_count = cs_addrs->critical->crashcnt;
	util_out_print("!/File  !_!AD",TRUE, DB_LEN_STR(gv_cur_region));
	util_out_print("Region!_!AD!/",TRUE, REG_LEN_STR(gv_cur_region));
	patch_curr_blk = get_dir_root();
	gv_init_reg(gv_cur_region);
	GET_SAVED_GDADDR(gd_header, temp_gdaddr, map, gv_cur_region);
	return;
}
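Note that the region search above memcmp()s the full MAX_RN_LEN bytes rather than the typed length; that only matches because both rname and the CLI buffer are fixed-width, padded fields. A self-contained sketch of the same fixed-width compare follows; the width and space padding are assumptions for illustration.

#include <string.h>

#define MAX_RN_LEN	16	/* assumed width for the sketch */

/* Pad a user-supplied name to the fixed region-name width before memcmp. */
static int region_name_match(const char *rname_fixed, const char *user, size_t user_len)
{
	char	padded[MAX_RN_LEN];

	if (user_len > MAX_RN_LEN)
		return 0;
	memset(padded, ' ', MAX_RN_LEN);	/* fixed-width fields are blank padded */
	memcpy(padded, user, user_len);
	return 0 == memcmp(rname_fixed, padded, MAX_RN_LEN);
}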
Ejemplo n.º 21
0
void mu_int_reg(gd_region *reg, boolean_t *return_value, boolean_t return_after_open)
{
	boolean_t		read_only, was_crit;
	freeze_status		status;
	node_local_ptr_t	cnl;
	sgmnt_addrs     	*csa;
	sgmnt_data_ptr_t	csd;
	sgmnt_data		*csd_copy_ptr;
	gd_segment		*seg;
	int			gtmcrypt_errno;
#	ifdef DEBUG
	boolean_t		need_to_wait = FALSE;
	int			trynum;
	uint4			curr_wbox_seq_num;
#	endif

	*return_value = FALSE;
	jnlpool_init_needed = TRUE;
	ESTABLISH(mu_int_reg_ch);
	if (dba_usr == reg->dyn.addr->acc_meth)
	{
		util_out_print("!/Can't integ region !AD; not GDS format", TRUE,  REG_LEN_STR(reg));
		mu_int_skipreg_cnt++;
		return;
	}
	gv_cur_region = reg;
	if (reg_cmcheck(reg))
	{
		util_out_print("!/Can't integ region across network", TRUE);
		mu_int_skipreg_cnt++;
		return;
	}
	gvcst_init(gv_cur_region);
	if (gv_cur_region->was_open)
	{	/* already open under another name */
		gv_cur_region->open = FALSE;
		return;
	}
	if (return_after_open)
	{
		*return_value = TRUE;
		return;
	}
	change_reg();
	csa = &FILE_INFO(gv_cur_region)->s_addrs;
	cnl = csa->nl;
	csd = csa->hdr;
	read_only = gv_cur_region->read_only;
	assert(NULL != mu_int_master);
	/* Ensure that we don't see an increase in the file header and master map sizes beyond their maximum values */
	assert(SGMNT_HDR_LEN >= SIZEOF(sgmnt_data) && (MASTER_MAP_SIZE_MAX >= MASTER_MAP_SIZE(csd)));
	/* ONLINE INTEG if asked for explicitly by specifying -ONLINE is an error if the db has partial V4 blocks.
	 * However, if -ONLINE is not explicitly specified but rather assumed implicitly (as default for -REG)
	 * then turn off ONLINE INTEG for this region and continue as if -NOONLINE was specified
	 */
	if (!csd->fully_upgraded)
	{
		ointeg_this_reg = FALSE; /* Turn off ONLINE INTEG for this region */
		if (online_specified)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_SSV4NOALLOW, 2, DB_LEN_STR(gv_cur_region));
			util_out_print(NO_ONLINE_ERR_MSG, TRUE);
			mu_int_skipreg_cnt++;
			return;
		}
	}
	if (!ointeg_this_reg || read_only)
	{
		status = region_freeze(gv_cur_region, TRUE, FALSE, TRUE, FALSE, !read_only);
		switch (status)
		{
			case REG_ALREADY_FROZEN:
				if (csa->read_only_fs)
					break;
				util_out_print("!/Database for region !AD is already frozen, not integing",
					TRUE, REG_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_FLUSH_ERROR:
				gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT(MUPIP_INTEG),
					DB_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_HAS_KIP:
				/* We have already waited for KIP to reset. This time do not wait for KIP */
				status = region_freeze(gv_cur_region, TRUE, FALSE, FALSE, FALSE, !read_only);
				if (REG_ALREADY_FROZEN == status)
				{
					if (csa->read_only_fs)
						break;
					util_out_print("!/Database for region !AD is already frozen, not integing",
						TRUE, REG_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				} else if (REG_FLUSH_ERROR == status)
				{
					gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT(MUPIP_INTEG),
						DB_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				}
				assert(REG_FREEZE_SUCCESS == status);
				/* no break */
			case REG_FREEZE_SUCCESS:
				break;
			default:
				assert(FALSE);
				/* no break */
		}
		if (read_only && (dba_bg == csa->hdr->acc_meth) && !mu_int_wait_rdonly(csa, MUPIP_INTEG))
		{
			mu_int_skipreg_cnt++;
			return;
		}
	}
	if (!ointeg_this_reg)
	{	/* Take a copy of the file-header. To ensure it is consistent, do it while holding crit. */
		was_crit = csa->now_crit;
		if (!was_crit)
			grab_crit(gv_cur_region);
		memcpy((uchar_ptr_t)&mu_int_data, (uchar_ptr_t)csd, SIZEOF(sgmnt_data));
		if (!was_crit)
			rel_crit(gv_cur_region);
		memcpy(mu_int_master, MM_ADDR(csd), MASTER_MAP_SIZE(csd));
		csd_copy_ptr = &mu_int_data;
	} else
	{
		if (!ss_initiate(gv_cur_region, util_ss_ptr, &csa->ss_ctx, preserve_snapshot, MUPIP_INTEG))
		{
			mu_int_skipreg_cnt++;
			assert(NULL != csa->ss_ctx);
			ss_release(&csa->ss_ctx);
			ointeg_this_reg = FALSE; /* Turn off ONLINE INTEG for this region */
			assert(process_id != cnl->in_crit); /* Ensure ss_initiate released the crit before returning */
			assert(!FROZEN_HARD(csd)); /* Ensure region is unfrozen before returning from ss_initiate */
			assert(INTRPT_IN_SS_INITIATE != intrpt_ok_state); /* Ensure ss_initiate released intrpt_ok_state */
			return;
		}
		assert(process_id != cnl->in_crit); /* Ensure ss_initiate released the crit before returning */
		assert(INTRPT_IN_SS_INITIATE != intrpt_ok_state); /* Ensure ss_initiate released intrpt_ok_state */
		csd_copy_ptr = &csa->ss_ctx->ss_shm_ptr->shadow_file_header;
#		if defined(DEBUG)
		curr_wbox_seq_num = 1;
		cnl->wbox_test_seq_num = curr_wbox_seq_num; /* indicate we took the next step */
		GTM_WHITE_BOX_TEST(WBTEST_OINTEG_WAIT_ON_START, need_to_wait, TRUE);
		if (need_to_wait) /* wait for them to take next step */
		{
			trynum = 30; /* given 30 cycles to tell you to go */
			while ((curr_wbox_seq_num == cnl->wbox_test_seq_num) && trynum--)
				LONG_SLEEP(1);
			cnl->wbox_test_seq_num++; /* let them know we took the next step */
			assert(trynum);
		}
#		endif
	}
	if (USES_ANY_KEY(csd_copy_ptr))
	{ 	/* Initialize mu_int_encrypt_key_handle to be used in mu_int_read */
		seg = gv_cur_region->dyn.addr;
		INIT_DB_OR_JNL_ENCRYPTION(&mu_int_encr_handles, csd_copy_ptr, seg->fname_len, (char *)seg->fname, gtmcrypt_errno);
		if (0 != gtmcrypt_errno)
		{
			GTMCRYPT_REPORT_ERROR(gtmcrypt_errno, gtm_putmsg, seg->fname_len, seg->fname);
			mu_int_skipreg_cnt++;
			return;
		}
	}
	*return_value = mu_int_fhead();
	REVERT;
	return;
}
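The !ointeg_this_reg branch above shows a pattern worth isolating: hold crit only long enough to memcpy the file header so the copy is internally consistent, then run the long verification against the private snapshot. A generic pthread sketch of the idea; hdr_t is a stand-in, not the real sgmnt_data.

#include <pthread.h>
#include <string.h>

typedef struct
{
	unsigned long	total_blks;
	unsigned long	free_blocks;
	/* ... many more fields ... */
} hdr_t;

/* Copy the shared header while holding the lock so the copy is internally
 * consistent; all later (slow) checks run against the private snapshot. */
static void snapshot_hdr(pthread_mutex_t *crit, const hdr_t *shared, hdr_t *snap)
{
	pthread_mutex_lock(crit);
	memcpy(snap, shared, sizeof(hdr_t));
	pthread_mutex_unlock(crit);
}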
Ejemplo n.º 22
0
void recover_truncate(sgmnt_addrs *csa, sgmnt_data_ptr_t csd, gd_region* reg)
{
	char			*err_msg;
	uint4			old_total, cur_total, new_total;
	off_t			old_size, cur_size, new_size;
	int			ftrunc_status, status;
	unix_db_info    	*udi;
	int			semval;

	if (NULL != csa->nl && csa->nl->trunc_pid && !is_proc_alive(csa->nl->trunc_pid, 0))
		csa->nl->trunc_pid = 0;
	if (!csd->before_trunc_total_blks)
		return;
	assert((GDSVCURR == csd->desired_db_format) && (csd->blks_to_upgrd == 0) && (dba_mm != csd->acc_meth));
	/* If called from db_init, ensure we've grabbed the access semaphore and are the only process attached to the database.
	 * Otherwise, we should have crit when called from wcs_recover. */
	udi = FILE_INFO(reg);
	assert((udi->grabbed_access_sem && (1 == (semval = semctl(udi->semid, 1, GETVAL)))) || csa->now_crit);
	/* Interrupted truncate scenario */
	if (NULL != csa->nl)
		csa->nl->root_search_cycle++;
	old_total = csd->before_trunc_total_blks;					/* Pre-truncate total_blks */
	old_size = (off_t)SIZEOF_FILE_HDR(csd)						/* Pre-truncate file size (in bytes) */
			+ (off_t)old_total * csd->blk_size + DISK_BLOCK_SIZE;
	cur_total = csa->ti->total_blks;						/* Actual total_blks right now */
	cur_size = (off_t)gds_file_size(reg->dyn.addr->file_cntl) * DISK_BLOCK_SIZE;	/* Actual file size right now (in bytes) */
	new_total = csd->after_trunc_total_blks;					/* Post-truncate total_blks */
	new_size = old_size - (off_t)(old_total - new_total) * csd->blk_size;		/* Post-truncate file size (in bytes) */
	/* We don't expect FTRUNCATE to leave the file size in an 'in between' state, hence the assert below. */
	assert(old_size == cur_size || new_size == cur_size);
	if (new_total == cur_total && old_size == cur_size)
	{ /* Crash after reducing total_blks, before successful FTRUNCATE. Complete the FTRUNCATE here. */
		DBGEHND((stdout, "DBG:: recover_truncate() -- completing truncate, old_total = [%lu], new_total = [%lu]\n",
			old_total, new_total));
		assert(csd->before_trunc_free_blocks >= DELTA_FREE_BLOCKS(old_total, new_total));
		csa->ti->free_blocks = csd->before_trunc_free_blocks - DELTA_FREE_BLOCKS(old_total, new_total);
		clear_cache_array(csa, csd, reg, new_total, old_total);
		WRITE_EOF_BLOCK(reg, csd, new_total, status);
		if (status != 0)
		{
			err_msg = (char *)STRERROR(errno);
			rts_error(VARLSTCNT(6) ERR_MUTRUNCERROR, 4, REG_LEN_STR(reg), LEN_AND_STR(err_msg));
			return;
		}
		FTRUNCATE(FILE_INFO(reg)->fd, new_size, ftrunc_status);
		if (ftrunc_status != 0)
		{
			err_msg = (char *)STRERROR(errno);
			rts_error(VARLSTCNT(6) ERR_MUTRUNCERROR, 4, REG_LEN_STR(reg), LEN_AND_STR(err_msg));
			return;
		}
	} else
	{
		/* Crash before even changing csa->ti->total_blks OR after successful FTRUNCATE */
		/* In either case, the db file is in a consistent state, so no need to do anything further */
		assert((old_total == cur_total && old_size == cur_size) || (new_total == cur_total && new_size == cur_size));
		if (!((old_total == cur_total && old_size == cur_size) || (new_total == cur_total && new_size == cur_size)))
		{
			rts_error(VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
		}
	}
	csd->before_trunc_total_blks = 0; /* indicate CONSISTENT */
}
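recover_truncate() is deliberately idempotent: it recomputes the pre- and post-truncate file sizes from the header, classifies the actual on-disk size as one of the two, and redoes the ftruncate() only when the file is still at the old size. A stripped-down POSIX sketch of that classification; the sizes are passed in precomputed, whereas the real code derives them from header fields.

#include <sys/stat.h>
#include <unistd.h>

/* Re-run an interrupted truncate if and only if the file is still at the
 * old size; if it already matches the new size there is nothing to do. */
static int recover_truncate_sketch(int fd, off_t old_size, off_t new_size)
{
	struct stat	sb;

	if (-1 == fstat(fd, &sb))
		return -1;
	if (sb.st_size == new_size)
		return 0;			/* truncate already completed */
	if (sb.st_size != old_size)
		return -1;			/* unexpected in-between size */
	if (-1 == ftruncate(fd, new_size))
		return -1;
	return fsync(fd);			/* harden the new length */
}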
Ejemplo n.º 23
0
bool	gtcmtr_increment(void)
{
	cm_region_list	*reg_ref;
	mval		incr_delta, post_incr;
	unsigned char	buff[MAX_ZWR_KEY_SZ], *end;
	unsigned char	*ptr, regnum;
	short		n;
	unsigned short	top, len, temp_short;
	static readonly	gds_file_id file;

	error_def(ERR_KEY2BIG);
	error_def(ERR_GVIS);
	error_def(ERR_DBPRIVERR);

	ptr = curr_entry->clb_ptr->mbf;
	assert(*ptr == CMMS_Q_INCREMENT);
	ptr++;
	GET_USHORT(len, ptr);
	ptr += SIZEOF(unsigned short);
	regnum = *ptr++;
	reg_ref = gtcm_find_region(curr_entry,regnum);
	len--; /* subtract size of regnum */
	CM_GET_GVCURRKEY(ptr, len);
	gtcm_bind_name(reg_ref->reghead, TRUE);
	if (gv_cur_region->read_only)
		rts_error(VARLSTCNT(4) ERR_DBPRIVERR, 2, DB_LEN_STR(gv_cur_region));
	if (JNL_ALLOWED(cs_addrs))
	{	/* we need to copy client's specific prc_vec into the global variable in order that the gvcst* routines
		 *	do the right job. actually we need to do this only if JNL_ENABLED(cs_addrs), but since it is not
		 *	easy to re-execute the following two assignments in case gvcst_incr's call to t_end encounters a
		 *	cdb_sc_jnlstatemod retry code, we choose the easier approach of executing the following segment
		 *	if JNL_ALLOWED(cs_addrs) is TRUE instead of checking for JNL_ENABLED(cs_addrs) to be TRUE.
		 * this approach has the overhead that we will be doing the following assignments even though JNL_ENABLED
		 * 	might not be TRUE but since the following two are just pointer copies, it is not considered a big overhead.
		 * this approach ensures that the jnl_put_jrt_pini gets the appropriate prc_vec for writing into the
		 * 	journal record in case JNL_ENABLED turns out to be TRUE in t_end time.
		 * note that the value of JNL_ALLOWED(cs_addrs) cannot be changed on the fly without obtaining standalone access
		 * 	and hence the correctness of prc_vec (whenever it turns out necessary) is guaranteed.
		 */
		originator_prc_vec = curr_entry->pvec;
		cs_addrs->jnl->pini_addr = reg_ref->pini_addr;
	}
	GET_USHORT(len, ptr);
	ptr += SIZEOF(unsigned short);
	incr_delta.mvtype = MV_STR;
	incr_delta.str.len = len;
	incr_delta.str.addr = (char *)ptr;
	if ((n = gv_currkey->end + 1) > gv_cur_region->max_key_size)
	{
		if ((end = format_targ_key(&buff[0], MAX_ZWR_KEY_SZ, gv_currkey, TRUE)) == 0)
			end = &buff[MAX_ZWR_KEY_SZ - 1];
		rts_error(VARLSTCNT(11) ERR_KEY2BIG, 4, n, (int4)gv_cur_region->max_key_size,
			REG_LEN_STR(gv_cur_region), 0, ERR_GVIS, 2, end - buff, buff);
	}
	MV_FORCE_NUMD(&incr_delta);
	gvcst_incr(&incr_delta, &post_incr);
	if (JNL_ALLOWED(cs_addrs))
		reg_ref->pini_addr = cs_addrs->jnl->pini_addr; /* In case journal switch occurred */
	ptr = curr_entry->clb_ptr->mbf;
	if (MV_DEFINED(&post_incr))
	{
		temp_short = (unsigned short)post_incr.str.len;
		assert((int4)temp_short == post_incr.str.len); /* ushort <- int4 assignment lossy? */
		if (curr_entry->clb_ptr->mbl < 1 +			/* msg header */
					       SIZEOF(temp_short) +	/* size of length of $INCR return value */
					       temp_short) 		/* length of $INCR return value */
		{	/* resize buffer */
			cmi_realloc_mbf(curr_entry->clb_ptr, 1 + SIZEOF(temp_short) + temp_short);
			ptr = curr_entry->clb_ptr->mbf;
		}
		*ptr++ = CMMS_R_INCREMENT;
		PUT_USHORT(ptr, temp_short);
		ptr += SIZEOF(unsigned short);
		memcpy(ptr, post_incr.str.addr, temp_short);
		ptr += temp_short;
	} else
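The reply assembly in gtcmtr_increment() above is ordinary length-prefixed packing: a one-byte message type, a two-byte length, then the payload, with PUT_USHORT/GET_USHORT doing unaligned two-byte copies. A self-contained sketch of the same wire format; the opcode value and the host-byte-order assumption are illustrative simplifications.

#include <string.h>

/* Pack a reply as [1-byte opcode][2-byte length][payload]. memcpy is used
 * for the length so unaligned buffers are safe, which is what the
 * PUT_USHORT/GET_USHORT macros in the excerpt accomplish. Host byte order
 * is assumed, a simplification over the real protocol. */
static size_t pack_reply(unsigned char *buf, unsigned char opcode,
			 const char *payload, unsigned short len)
{
	unsigned char	*ptr = buf;

	*ptr++ = opcode;
	memcpy(ptr, &len, sizeof(len));
	ptr += sizeof(len);
	memcpy(ptr, payload, len);
	return (size_t)((ptr + len) - buf);
}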
Ejemplo n.º 24
0
/*
 * This function reads command line parameters and forms a configuration for mupip size invocation.
 * It later executes mupip size on each global based on the configuration
 *
 * MUPIP SIZE interface is described in GTM-7292
 */
void mupip_size(void)
{
	boolean_t		restrict_reg = FALSE;
	char 			buff[MAX_LINE], cli_buff[MAX_LINE];
	char 			*p_end;						/* used for strtol validation */
	glist			exclude_gl_head, gl_head, *gl_ptr;
	int4			reg_max_rec, reg_max_key, reg_max_blk;
	mupip_size_cfg_t	mupip_size_cfg = { impsample, 1000, 1, 0 };	/* configuration default values */
	uint4			status = EXIT_NRM;
	unsigned short		BUFF_LEN = SIZEOF(buff), n_len;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	mu_outofband_setup();
	error_mupip = FALSE;
	memset(mu_int_adj, 0, SIZEOF(mu_int_adj));
	memset(mu_int_adj_prev, 0, SIZEOF(mu_int_adj_prev));
	/* Region qualifier */
	grlist = NULL;
	if (CLI_PRESENT == cli_present("REGION"))
	{
		restrict_reg = TRUE;
		gvinit();							/* init gd_header (needed to call mu_getlst) */
		mu_getlst("REGION", SIZEOF(tp_region));
	}
	mupip_size_check_error();
	/* SELECT qualifier */
	memset(cli_buff, 0, SIZEOF(cli_buff));
	n_len = SIZEOF(cli_buff);
	if (CLI_PRESENT != cli_present("SELECT"))
	{
		n_len = 1;
		cli_buff[0] = '*';
	}
	else if (FALSE == cli_get_str("SELECT", cli_buff, &n_len))
	{
		n_len = 1;
		cli_buff[0] = '*';
	}
	/* gv_select will select globals for this clause */
	gv_select(cli_buff, n_len, FALSE, "SELECT", &gl_head, &reg_max_rec, &reg_max_key, &reg_max_blk, restrict_reg);
	if (!gl_head.next)
	{
		error_mupip = TRUE;
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_NOSELECT);
	}
	mupip_size_check_error();
	if (CLI_PRESENT == cli_present("ADJACENCY"))
	{
		assert(SIZEOF(muint_adj) == SIZEOF(int4));
		if (0 == cli_get_int("ADJACENCY", (int4 *)&muint_adj))
		{
			error_mupip = TRUE;
			gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_MUPCLIERR);
		}
	} else
		muint_adj = DEFAULT_ADJACENCY;
	/* HEURISTIC qualifier */
	if (cli_present("HEURISTIC.SCAN") == CLI_PRESENT)
	{
		mupip_size_cfg.heuristic = scan;
		if (cli_present("HEURISTIC.LEVEL"))
		{
			boolean_t valid = TRUE;

			if (cli_get_str("HEURISTIC.LEVEL", buff, &BUFF_LEN))
			{
				mupip_size_cfg.level = strtol(buff, &p_end, 10);
				valid = ('\0' == *p_end);
			} else
				valid = FALSE;
			if (!valid || mupip_size_cfg.level <= -MAX_BT_DEPTH || MAX_BT_DEPTH <= mupip_size_cfg.level)
			{
				error_mupip = TRUE;
				gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_MUSIZEINVARG, 2, LEN_AND_LIT("HEURISTIC.LEVEL"));
			}
		}
		/* else level is already initialized with default value */
	} else if (cli_present("HEURISTIC.ARSAMPLE") == CLI_PRESENT || cli_present("HEURISTIC.IMPSAMPLE") == CLI_PRESENT)
	{
		if (cli_present("HEURISTIC.ARSAMPLE") == CLI_PRESENT)
			mupip_size_cfg.heuristic = arsample;
		else if (cli_present("HEURISTIC.IMPSAMPLE") == CLI_PRESENT)
			mupip_size_cfg.heuristic = impsample;
		if (cli_present("HEURISTIC.SAMPLES"))
		{
			boolean_t valid = cli_get_int("HEURISTIC.SAMPLES", &(mupip_size_cfg.samples));
			if (!valid || mupip_size_cfg.samples <= 0){
				error_mupip = TRUE;
				gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_MUSIZEINVARG, 2, LEN_AND_LIT("HEURISTIC.SAMPLES"));
			}
		}
		/* else samples is already initialized with default value */
		/* undocumented SEED parameter used for testing sampling method */
		if (cli_present("HEURISTIC.SEED"))
		{
			boolean_t valid = cli_get_int("HEURISTIC.SEED", &(mupip_size_cfg.seed));
			if (!valid){
				error_mupip = TRUE;
				gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_MUSIZEINVARG, 2, LEN_AND_LIT("HEURISTIC.SEED"));
			}
		}
		/* else seed will be based on the time */
	}
	mupip_size_check_error();
	/* run mupip size on each global */
	for (gl_ptr = gl_head.next; gl_ptr; gl_ptr = gl_ptr->next)
	{
		util_out_print("!/Global: !AD (region !AD)", FLUSH,
			GNAME(gl_ptr).len, GNAME(gl_ptr).addr, REG_LEN_STR(gl_ptr->reg));
		switch (mupip_size_cfg.heuristic)
		{
		case scan:
			status |= mu_size_scan(gl_ptr, mupip_size_cfg.level);
			break;
		case arsample:
			status |= mu_size_arsample(gl_ptr, mupip_size_cfg.samples, mupip_size_cfg.seed);
			break;
		case impsample:
			status |= mu_size_impsample(gl_ptr, mupip_size_cfg.samples, mupip_size_cfg.seed);
			break;
		default:
			assertpro(FALSE && mupip_size_cfg.heuristic);
			break;
		}
		if (mu_ctrlc_occurred || mu_ctrly_occurred)
			mupip_exit(ERR_MUNOFINISH);
	}
	mupip_exit(EXIT_NRM == status ? SS_NORMAL : ERR_MUNOFINISH);
}
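The HEURISTIC.LEVEL parsing above uses the standard strtol() end-pointer idiom: the value is valid only if parsing consumed the whole string. Below is a hedged standalone version with an overflow check added; the bounds are caller-supplied, unlike the MAX_BT_DEPTH test in the excerpt.

#include <errno.h>
#include <stdlib.h>

/* Parse a decimal integer strictly: the whole string must be consumed and
 * the value must lie within [lo, hi]. Returns 0 on success, -1 otherwise. */
static int parse_int_strict(const char *s, long lo, long hi, long *out)
{
	char	*p_end;
	long	val;

	errno = 0;
	val = strtol(s, &p_end, 10);
	if ((p_end == s) || ('\0' != *p_end))
		return -1;		/* empty string or trailing junk */
	if ((ERANGE == errno) || (val < lo) || (hi < val))
		return -1;		/* overflow or out of range */
	*out = val;
	return 0;
}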
Ejemplo n.º 25
0
void	grab_crit(gd_region *reg)
{
	unsigned short		cycle_count, cycle;
	ccp_action_aux_value	msg;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	node_local_ptr_t	cnl;
	enum cdb_sc		status;

	csa = &FILE_INFO(reg)->s_addrs;
	csd = csa->hdr;
	cnl = csa->nl;

	assert(!lib$ast_in_prog());

	if (!csa->now_crit)
	{
		assert(0 == crit_count);
		crit_count++;
		if (csd->clustered)
		{
			/* For an explanation of the code dealing with clusters, see CCP_EXITWM_ATTEMPT.C.
			   Please do not change this code without updating the comments in that file. */
			cycle = cnl->ccp_cycle;
			while (!CCP_SEGMENT_STATE(cnl, CCST_MASK_WRITE_MODE))
			{
				(void)ccp_sendmsg(CCTR_WRITEDB, &FILE_INFO(reg)->file_id);
				(void)ccp_userwait(reg, CCST_MASK_WRITE_MODE, 0, cycle);
				cycle = cnl->ccp_cycle;
			}
		}

		if (cdb_sc_normal !=
			(status = MUTEX_LOCKW(csa->critical, crash_count, &csa->now_crit, &csd->mutex_spin_parms)))
		{
			crit_count = 0;
			switch (status)
			{
			case cdb_sc_critreset:
				rts_error(ERR_CRITRESET, 2, REG_LEN_STR(reg));
			case cdb_sc_dbccerr:
				rts_error(ERR_DBCCERR, 2, REG_LEN_STR(reg));
			default:
				GTMASSERT;
			}
			return;
		}

		assert(cnl->in_crit == 0);
		cnl->in_crit = process_id;

		CRIT_TRACE(crit_ops_gw);		/* see gdsbt.h for comment on placement */

		if (csd->clustered)
		{
			cycle = cnl->ccp_cycle;
			if (cnl->ccp_crit_blocked)
			{
				msg.exreq.fid = FILE_INFO(reg)->file_id;
				msg.exreq.cycle = cycle;
				(void)ccp_sendmsg(CCTR_EXITWM, &msg);
				(void)ccp_userwait(reg, ~(CCST_MASK_WRITE_MODE), 0, msg.exreq.cycle);
				while ((cnl->ccp_crit_blocked  &&  (cnl->ccp_cycle == msg.exreq.cycle))
				       || !CCP_SEGMENT_STATE(cnl, CCST_MASK_WRITE_MODE))
				{
					cycle = cnl->ccp_cycle;
					(void)ccp_sendmsg(CCTR_WRITEDB, &FILE_INFO(reg)->file_id);
					(void)ccp_userwait(reg, CCST_MASK_WRITE_MODE, 0, cycle);
				}
			}
		}
		crit_count = 0;
	}
	if (cnl->wc_blocked)
		wcs_recover(reg);
}
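grab_crit()'s crash_count and cdb_sc_critreset machinery solves the same problem POSIX robust mutexes address: noticing that a previous lock holder died mid-update and repairing the protected state (the excerpt's wcs_recover()) before continuing. A hedged pthread analogue, assuming the mutex was initialized with the PTHREAD_MUTEX_ROBUST and PTHREAD_PROCESS_SHARED attributes:

#include <errno.h>
#include <pthread.h>

/* Acquire a process-shared robust mutex; if the previous owner died while
 * holding it, repair the protected state and mark the mutex consistent. */
static int grab_crit_sketch(pthread_mutex_t *crit, void (*recover)(void))
{
	int	rc;

	rc = pthread_mutex_lock(crit);
	if (EOWNERDEAD == rc)
	{	/* prior holder died: state may be half-updated, run recovery */
		recover();
		rc = pthread_mutex_consistent(crit);
	}
	return rc;	/* 0 on success; nonzero if the lock or repair failed */
}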
Ejemplo n.º 26
0
File: dse.c Project: mihawk/fis-gtm
int main(int argc, char *argv[])
{
	DCL_THREADGBL_ACCESS;

	GTM_THREADGBL_INIT;
	common_startup_init(DSE_IMAGE);
	licensed = TRUE;
	TREF(transform) = TRUE;
	TREF(no_spangbls) = TRUE;	/* dse operates on a per-region basis irrespective of global mapping in gld */
	TREF(skip_file_corrupt_check) = TRUE;	/* do not let csd->file_corrupt flag cause errors in dse */
	op_open_ptr = op_open;
	patch_curr_blk = get_dir_root();
	err_init(util_base_ch);
	UNICODE_ONLY(gtm_strToTitle_ptr = &gtm_strToTitle);
	GTM_ICU_INIT_IF_NEEDED;	/* Note: should be invoked after err_init (since it may error out) and before CLI parsing */
	sig_init(generic_signal_handler, dse_ctrlc_handler, suspsigs_handler, continue_handler);
	atexit(util_exit_handler);
	SET_LATCH_GLOBAL(&defer_latch, LOCK_AVAILABLE);
	stp_init(STP_INITSIZE);
	rts_stringpool = stringpool;
	getjobname();
	INVOKE_INIT_SECSHR_ADDRS;
	io_init(TRUE);
	getzdir();
	gtm_chk_dist(argv[0]);
	prealloc_gt_timers();
	gt_timers_add_safe_hndlrs();
	initialize_pattern_table();
	gvinit();
	region_init(FALSE);
	util_out_print("!/File  !_!AD", TRUE, DB_LEN_STR(gv_cur_region));
	util_out_print("Region!_!AD!/", TRUE, REG_LEN_STR(gv_cur_region));
	cli_lex_setup(argc, argv);
	/* Since DSE operates on a region-by-region basis (for the most part), do not use a global directory at all from now on */
	original_header = gd_header;
	gd_header = NULL;
	OPERATOR_LOG_MSG;
#	ifdef DEBUG
	if (gtm_white_box_test_case_enabled && (WBTEST_SEMTOOLONG_STACK_TRACE == gtm_white_box_test_case_number))
	{
		sgmnt_addrs		*csa;
		node_local_ptr_t	cnl;

		csa = &FILE_INFO(gv_cur_region)->s_addrs;
		cnl = csa->nl;
		cnl->wbox_test_seq_num = 1;	/* signal the first step and wait here */
		/* The signal to the shell: MUPIP must not start BEFORE DSE */
		util_out_print("DSE is ready. MUPIP can start. Note: This message is a part of WBTEST_SEMTOOLONG_STACK_TRACE test. "
			       "It will not appear in PRO version.", TRUE);
		while (2 != cnl->wbox_test_seq_num)	/* wait for another process to grab the semaphore and signal the next step */
			LONG_SLEEP(1);
	}
	}
#	endif
	if (argc < 2)
		display_prompt();
	while (1)
	{
		if (!dse_process(argc))
			break;
		display_prompt();
	}
	dse_exit();
	REVERT;
	return 0;
}
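The white-box block in main() coordinates DSE with a concurrently started MUPIP through a sequence number in shared memory: one side publishes a step, the other polls with one-second sleeps. A minimal sketch of that handshake; like the excerpt, it relies on polling plus eventual visibility rather than formal atomics, which is acceptable only for test scaffolding.

#include <unistd.h>

/* One process bumps *seq to publish a step; the peer polls until it sees
 * the value it is waiting for, sleeping between probes as DSE does. */
static void wait_for_step(volatile unsigned int *seq, unsigned int want)
{
	while (want != *seq)
		sleep(1);
}

static void publish_step(volatile unsigned int *seq, unsigned int step)
{
	*seq = step;	/* counter lives in shared memory */
}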
Ejemplo n.º 27
0
boolean_t mu_truncate(int4 truncate_percent)
{
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t 	csd;
	int			num_local_maps;
	int 			lmap_num, lmap_blk_num;
	int			bml_status, sigkill;
	int			save_errno;
	int			ftrunc_status;
	uint4			jnl_status;
	uint4			old_total, new_total;
	uint4			old_free, new_free;
	uint4			end_blocks;
	int4			blks_in_lmap, blk;
	gtm_uint64_t		before_trunc_file_size;
	off_t			trunc_file_size;
	off_t			padding;
	uchar_ptr_t		lmap_addr;
	boolean_t		was_crit;
	uint4			found_busy_blk;
	srch_blk_status		bmphist;
	srch_blk_status 	*blkhist;
	srch_hist		alt_hist;
	trans_num		curr_tn;
	blk_hdr_ptr_t		lmap_blk_hdr;
	block_id		*blkid_ptr;
	unix_db_info    	*udi;
	jnl_private_control	*jpc;
	jnl_buffer_ptr_t	jbp;
	char			*err_msg;
	intrpt_state_t		prev_intrpt_state;
	off_t			offset;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	csa = cs_addrs;
	csd = cs_data;
	if (dba_mm == csd->acc_meth)
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCNOTBG, 2, REG_LEN_STR(gv_cur_region));
		return TRUE;
	}
	if ((GDSVCURR != csd->desired_db_format) || (csd->blks_to_upgrd != 0))
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCNOV4, 2, REG_LEN_STR(gv_cur_region));
		return TRUE;
	}
	if (csa->ti->free_blocks < (truncate_percent * csa->ti->total_blks / 100))
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_MUTRUNCNOSPACE, 3, REG_LEN_STR(gv_cur_region), truncate_percent);
		return TRUE;
	}
	/* already checked for parallel truncates on this region --- see mupip_reorg.c */
	gv_target = NULL;
	assert(csa->nl->trunc_pid == process_id);
	assert(dba_mm != csd->acc_meth);
	old_total = csa->ti->total_blks;
	old_free = csa->ti->free_blocks;
	sigkill = 0;
	found_busy_blk = 0;
	memset(&alt_hist, 0, SIZEOF(alt_hist)); /* null-initialize history */
	assert(csd->bplmap == BLKS_PER_LMAP);
	end_blocks = old_total % BLKS_PER_LMAP; /* blocks in the last lmap (first one we start scanning) */
	if (0 == end_blocks)
		end_blocks = BLKS_PER_LMAP;
	num_local_maps = DIVIDE_ROUND_UP(old_total, BLKS_PER_LMAP);
	/* ======================================== PHASE 1 ======================================== */
	for (lmap_num = num_local_maps - 1; (lmap_num > 0 && !found_busy_blk); lmap_num--)
	{
		if (mu_ctrly_occurred || mu_ctrlc_occurred)
			return TRUE;
		assert(csa->ti->total_blks >= old_total); /* otherwise, a concurrent truncate happened... */
		if (csa->ti->total_blks != old_total) /* Extend (likely called by mupip extend) -- don't truncate */
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_MUTRUNCNOSPACE, 3, REG_LEN_STR(gv_cur_region),
					truncate_percent);
			return TRUE;
		}
		lmap_blk_num = lmap_num * BLKS_PER_LMAP;
		if (csa->nl->highest_lbm_with_busy_blk >= lmap_blk_num)
		{
			found_busy_blk = lmap_blk_num;
			break;
		}
		blks_in_lmap = (lmap_num == num_local_maps - 1) ? end_blocks : BLKS_PER_LMAP;
		/* Loop through non-bitmap blocks of this lmap, do recycled2free */
		DBGEHND((stdout, "DBG:: lmap_num = [%lu], lmap_blk_num = [%lu], blks_in_lmap = [%lu]\n",
			lmap_num, lmap_blk_num, blks_in_lmap));
		for (blk = 1; blk < blks_in_lmap && blk != -1 && !found_busy_blk;)
		{
			t_begin(ERR_MUTRUNCFAIL, UPDTRNS_DB_UPDATED_MASK);
			for (;;) /* retry loop for recycled to free transactions */
			{
				curr_tn = csd->trans_hist.curr_tn;
				/* Read the nth local bitmap into memory */
				bmphist.blk_num = lmap_blk_num;
				bmphist.buffaddr = t_qread(bmphist.blk_num, &bmphist.cycle, &bmphist.cr);
				lmap_blk_hdr = (blk_hdr_ptr_t)bmphist.buffaddr;
				if (!(bmphist.buffaddr) || (BM_SIZE(BLKS_PER_LMAP) != lmap_blk_hdr->bsiz))
				{ /* Could not read the block successfully. Retry. */
					t_retry((enum cdb_sc)rdfail_detail);
					continue;
				}
				lmap_addr = bmphist.buffaddr + SIZEOF(blk_hdr);
				/* starting from the hint (blk itself), find the first busy or recycled block */
				blk = bml_find_busy_recycled(blk, lmap_addr, blks_in_lmap, &bml_status);
				assert(blk < BLKS_PER_LMAP);
				if (blk == -1 || blk >= blks_in_lmap)
				{ /* done with this lmap, continue to next */
					t_abort(gv_cur_region, csa);
					break;
				}
				else if (BLK_BUSY == bml_status || csa->nl->highest_lbm_with_busy_blk >= lmap_blk_num)
				{ /* stop processing blocks... skip ahead to phase 2 */
					found_busy_blk = lmap_blk_num;
					t_abort(gv_cur_region, csa);
					break;
				}
				else if (BLK_RECYCLED == bml_status)
				{ /* Write PBLK records for recycled blocks only if before_image journaling is
				   * enabled. t_end() takes care of checking if journaling is enabled and
				   * writing PBLK record. We have to at least mark the recycled block as free.
				   */
					RESET_UPDATE_ARRAY;
					update_trans = UPDTRNS_DB_UPDATED_MASK;
					*((block_id *)update_array_ptr) = blk;
					update_array_ptr += SIZEOF(block_id);
					*(int *)update_array_ptr = 0;
					alt_hist.h[1].blk_num = 0;
					alt_hist.h[0].level = 0;
					alt_hist.h[0].cse = NULL;
					alt_hist.h[0].tn = curr_tn;
					alt_hist.h[0].blk_num = lmap_blk_num + blk;
					alt_hist.h[0].buffaddr = t_qread(alt_hist.h[0].blk_num,
							&alt_hist.h[0].cycle, &alt_hist.h[0].cr);
					if (!alt_hist.h[0].buffaddr)
					{
						t_retry((enum cdb_sc)rdfail_detail);
						continue;
					}
					if (!t_recycled2free(&alt_hist.h[0]))
					{
						t_retry(cdb_sc_lostbmlcr);
						continue;
					}
					t_write_map(&bmphist, (unsigned char *)update_array, curr_tn, 0);
					/* Set the opcode for INCTN record written by t_end() */
					inctn_opcode = inctn_blkmarkfree;
					if ((trans_num)0 == t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))
						continue;
					/* block processed, scan from the next one */
					blk++;
					break;
				} else
				{
					assert(t_tries < CDB_STAGNATE);
					t_retry(cdb_sc_badbitmap);
					continue;
				}
			} /* END recycled2free retry loop */
		} /* END scanning blocks of this particular lmap */
		/* Write PBLK for the bitmap block, in case it hasn't been written i.e. t_end() was never called above */
		/* Do a transaction that just increments the bitmap block's tn so that t_end() can do its thing */
		DBGEHND((stdout, "DBG:: bitmap block inctn -- lmap_blk_num = [%lu]\n", lmap_blk_num));
		t_begin(ERR_MUTRUNCFAIL, UPDTRNS_DB_UPDATED_MASK);
		for (;;)
		{
			RESET_UPDATE_ARRAY;
			BLK_ADDR(blkid_ptr, SIZEOF(block_id), block_id);
			*blkid_ptr = 0;
			update_trans = UPDTRNS_DB_UPDATED_MASK;
			inctn_opcode = inctn_mu_reorg; /* inctn_mu_truncate */
			curr_tn = csd->trans_hist.curr_tn;
			blkhist = &alt_hist.h[0];
			blkhist->blk_num = lmap_blk_num;
			blkhist->tn = curr_tn;
			blkhist->cse = NULL; /* start afresh (do not use value from previous retry) */
			/* Read the nth local bitmap into memory */
			blkhist->buffaddr = t_qread(lmap_blk_num, (sm_int_ptr_t)&blkhist->cycle, &blkhist->cr);
			lmap_blk_hdr = (blk_hdr_ptr_t)blkhist->buffaddr;
			if (!(blkhist->buffaddr) || (BM_SIZE(BLKS_PER_LMAP) != lmap_blk_hdr->bsiz))
			{ /* Could not read the block successfully. Retry. */
				t_retry((enum cdb_sc)rdfail_detail);
				continue;
			}
			t_write_map(blkhist, (unsigned char *)blkid_ptr, curr_tn, 0);
			blkhist->blk_num = 0; /* create empty history for bitmap block */
			if ((trans_num)0 == t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))
				continue;
			break;
		}
	} /* END scanning lmaps */
	/* ======================================== PHASE 2 ======================================== */
	assert(!csa->now_crit);
	for (;;)
	{ /* wait for FREEZE, we don't want to truncate a frozen database */
		grab_crit(gv_cur_region);
		if (FROZEN_CHILLED(cs_data))
			DO_CHILLED_AUTORELEASE(csa, cs_data);
		if (!FROZEN(cs_data) && !IS_REPL_INST_FROZEN)
			break;
		rel_crit(gv_cur_region);
		while (FROZEN(cs_data) || IS_REPL_INST_FROZEN)
		{
			hiber_start(1000);
			if (FROZEN_CHILLED(cs_data) && CHILLED_AUTORELEASE(cs_data))
				break;
		}
	}
	assert(csa->nl->trunc_pid == process_id);
	/* Flush pending updates to disk. If this is not done, old updates can be flushed AFTER ftruncate, extending the file. */
	if (!wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_MSYNC_DB))
	{
		assert(FALSE);
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT("MUPIP REORG TRUNCATE"),
				DB_LEN_STR(gv_cur_region));
		rel_crit(gv_cur_region);
		return FALSE;
	}
	csa->nl->highest_lbm_with_busy_blk = MAX(found_busy_blk, csa->nl->highest_lbm_with_busy_blk);
	assert(IS_BITMAP_BLK(csa->nl->highest_lbm_with_busy_blk));
	new_total = MIN(old_total, csa->nl->highest_lbm_with_busy_blk + BLKS_PER_LMAP);
	if (mu_ctrly_occurred || mu_ctrlc_occurred)
	{
		rel_crit(gv_cur_region);
		return TRUE;
	} else if (csa->ti->total_blks != old_total || new_total == old_total)
	{
		assert(csa->ti->total_blks >= old_total); /* Better have been an extend, not a truncate... */
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_MUTRUNCNOSPACE, 3, REG_LEN_STR(gv_cur_region), truncate_percent);
		rel_crit(gv_cur_region);
		return TRUE;
	} else if (GDSVCURR != csd->desired_db_format || csd->blks_to_upgrd != 0 || !csd->fully_upgraded)
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCNOV4, 2, REG_LEN_STR(gv_cur_region));
		rel_crit(gv_cur_region);
		return TRUE;
	} else if (SNAPSHOTS_IN_PROG(csa->nl))
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCSSINPROG, 2, REG_LEN_STR(gv_cur_region));
		rel_crit(gv_cur_region);
		return TRUE;
	} else if (BACKUP_NOT_IN_PROGRESS != cs_addrs->nl->nbb)
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCBACKINPROG, 2, REG_LEN_STR(gv_cur_region));
		rel_crit(gv_cur_region);
		return TRUE;
	}
	DEFER_INTERRUPTS(INTRPT_IN_TRUNC, prev_intrpt_state);
	if (JNL_ENABLED(csa))
	{ /* Write JRT_TRUNC and INCTN records */
		if (!jgbl.dont_reset_gbl_jrec_time)
			SET_GBL_JREC_TIME;	/* needed before jnl_ensure_open as that can write jnl records */
		jpc = csa->jnl;
		jbp = jpc->jnl_buff;
		/* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order
		 * of jnl records. This needs to be done BEFORE the jnl_ensure_open as that could write
		 * journal records (if it decides to switch to a new journal file).
		 */
		ADJUST_GBL_JREC_TIME(jgbl, jbp);
		jnl_status = jnl_ensure_open(gv_cur_region, csa);
		if (SS_NORMAL != jnl_status)
			send_msg_csa(CSA_ARG(csa) VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(csd), DB_LEN_STR(gv_cur_region));
		else
		{
			if (0 == jpc->pini_addr)
				jnl_put_jrt_pini(csa);
			jnl_write_trunc_rec(csa, old_total, csa->ti->free_blocks, new_total);
			inctn_opcode = inctn_mu_reorg;
			jnl_write_inctn_rec(csa);
			jnl_status = jnl_flush(gv_cur_region);
			if (SS_NORMAL != jnl_status)
			{
				send_msg_csa(CSA_ARG(csa) VARLSTCNT(9) ERR_JNLFLUSH, 2, JNL_LEN_STR(csd),
					ERR_TEXT, 2, RTS_ERROR_TEXT("Error with journal flush during mu_truncate"),
					jnl_status);
				assert(NOJNL == jpc->channel); /* jnl file lost has been triggered */
			}
		}
	}
	/* Good to go ahead and REALLY truncate (reduce total_blks, clear cache_array, FTRUNCATE) */
	curr_tn = csa->ti->curr_tn;
	CHECK_TN(csa, csd, curr_tn);
	udi = FILE_INFO(gv_cur_region);
	/* Information used by recover_truncate to check if the file size and csa->ti->total_blks are INCONSISTENT */
	trunc_file_size = BLK_ZERO_OFF(csd->start_vbn) + ((off_t)csd->blk_size * (new_total + 1));
	csd->after_trunc_total_blks = new_total;
	csd->before_trunc_free_blocks = csa->ti->free_blocks;
	csd->before_trunc_total_blks = old_total; /* Flags interrupted truncate for recover_truncate */
	/* file size and total blocks: INCONSISTENT */
	csa->ti->total_blks = new_total;
	/* past the point of no return -- shared memory intact */
	assert(csa->ti->free_blocks >= DELTA_FREE_BLOCKS(old_total, new_total));
	csa->ti->free_blocks -= DELTA_FREE_BLOCKS(old_total, new_total);
	new_free = csa->ti->free_blocks;
	KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_1); /* 55 : Issue a kill -9 before 1st fsync */
	fileheader_sync(gv_cur_region);
	DB_FSYNC(gv_cur_region, udi, csa, db_fsync_in_prog, save_errno);
	CHECK_DBSYNC(gv_cur_region, save_errno);
	/* past the point of no return -- shared memory deleted */
	KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_2); /* 56 : Issue a kill -9 after 1st fsync */
	clear_cache_array(csa, csd, gv_cur_region, new_total, old_total);
	offset = (off_t)BLK_ZERO_OFF(csd->start_vbn) + (off_t)new_total * csd->blk_size;
	save_errno = db_write_eof_block(udi, udi->fd, csd->blk_size, offset, &(TREF(dio_buff)));
	if (0 != save_errno)
	{
		err_msg = (char *)STRERROR(errno);
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_MUTRUNCERROR, 4, REG_LEN_STR(gv_cur_region), LEN_AND_STR(err_msg));
		return FALSE;
	}
	KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_3); /* 57 : Issue a kill -9 after reducing csa->ti->total_blks, before FTRUNCATE */
	/* Execute an ftruncate() and truncate the DB file
	 * ftruncate() is a SYSTEM CALL on almost all platforms (except SunOS)
	 * It ignores kill -9 signal till its operation is completed.
	 * So we can safely assume that the result of ftruncate() will be complete.
	 */
	FTRUNCATE(FILE_INFO(gv_cur_region)->fd, trunc_file_size, ftrunc_status);
	if (0 != ftrunc_status)
	{
		err_msg = (char *)STRERROR(errno);
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_MUTRUNCERROR, 4, REG_LEN_STR(gv_cur_region), LEN_AND_STR(err_msg));
		/* should go through recover_truncate now, which will again try to FTRUNCATE */
		return FALSE;
	}
	/* file size and total blocks: CONSISTENT (shrunk) */
	KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_4); /* 58 : Issue a kill -9 after FTRUNCATE, before 2nd fsync */
	csa->nl->root_search_cycle++;	/* Force concurrent processes to restart in t_end/tp_tend to make sure no one
					 * tries to commit updates past the end of the file. Bitmap validations together
					 * with highest_lbm_with_busy_blk should actually be sufficient, so this is
					 * just to be safe.
					 */
	csd->before_trunc_total_blks = 0; /* indicate CONSISTENT */
	/* Increment TN */
	assert(csa->ti->early_tn == csa->ti->curr_tn);
	csd->trans_hist.early_tn = csd->trans_hist.curr_tn + 1;
	INCREMENT_CURR_TN(csd);
	fileheader_sync(gv_cur_region);
	DB_FSYNC(gv_cur_region, udi, csa, db_fsync_in_prog, save_errno);
	KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_5); /* 59 : Issue a kill -9 after 2nd fsync */
	CHECK_DBSYNC(gv_cur_region, save_errno);
	ENABLE_INTERRUPTS(INTRPT_IN_TRUNC, prev_intrpt_state);
	curr_tn = csa->ti->curr_tn;
	rel_crit(gv_cur_region);
	send_msg_csa(CSA_ARG(csa) VARLSTCNT(7) ERR_MUTRUNCSUCCESS, 5, DB_LEN_STR(gv_cur_region), old_total, new_total, &curr_tn);
	util_out_print("Truncated region: !AD. Reduced total blocks from [!UL] to [!UL]. Reduced free blocks from [!UL] to [!UL].",
					FLUSH, REG_LEN_STR(gv_cur_region), old_total, new_total, old_free, new_free);
	return TRUE;
} /* END of mu_truncate() */
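Phase 2 above follows a strict durability order: flag the truncate as in progress in the header, fsync, write the new EOF block, ftruncate, clear the flag, fsync again, so a kill -9 at any of the WBTEST points leaves a state recover_truncate() can classify. A condensed POSIX sketch of the barrier ordering; write_hdr_flag is a hypothetical helper standing in for fileheader_sync() plus the before_trunc_total_blks flag.

#include <unistd.h>

/* Barrier ordering sketch: each fsync() makes the preceding writes durable
 * before the next step, and the header flag tells crash recovery whether
 * the ftruncate() may still be outstanding. */
static int truncate_with_barriers(int fd, off_t new_size,
				  int (*write_hdr_flag)(int fd, int in_progress))
{
	if (-1 == write_hdr_flag(fd, 1))	/* mark truncate in progress */
		return -1;
	if (-1 == fsync(fd))			/* flag durable before shrink */
		return -1;
	if (-1 == ftruncate(fd, new_size))	/* shrink the file */
		return -1;
	if (-1 == write_hdr_flag(fd, 0))	/* mark consistent again */
		return -1;
	return fsync(fd);			/* clear is durable as well */
}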
Ejemplo n.º 28
0
void	lke_clear(void)
{
	bool		locks, all = TRUE, wait = FALSE, interactive = TRUE, match = FALSE, memory = FALSE, nocrit = FALSE;
	boolean_t	exact = TRUE, was_crit;
	int4		pid;
	int		n;
	char		regbuf[MAX_RN_LEN], nodebuf[32], one_lockbuf[MAX_KEY_SZ];
	mlk_ctldata_ptr_t	ctl;
	mstr		reg, node, one_lock;

	/* Get all command parameters */
	reg.addr = regbuf;
	reg.len = SIZEOF(regbuf);
	node.addr = nodebuf;
	node.len = SIZEOF(nodebuf);
	one_lock.addr = one_lockbuf;
	one_lock.len = SIZEOF(one_lockbuf);

	if (lke_getcli(&all, &wait, &interactive, &pid, &reg, &node, &one_lock, &memory, &nocrit, &exact) == 0)
		return;

	/* Search all regions specified on the command line */
	for (gv_cur_region = gd_header->regions, n = 0;
	     n != gd_header->n_regions;
	     ++gv_cur_region, ++n)
	{	/* If region matches and is open */
		if (((reg.len == 0)
		     || ((gv_cur_region->rname_len == reg.len) && (0 == memcmp(gv_cur_region->rname, reg.addr, reg.len))))
		    && gv_cur_region->open)
		{
			match = TRUE;
			util_out_print("!/!AD!/", NOFLUSH, REG_LEN_STR(gv_cur_region));
			/* If distributed database, the region is located on another node */
			if (gv_cur_region->dyn.addr->acc_meth == dba_cm)
			{
#				if defined(LKE_WORKS_OK_WITH_CM)
				/* Remote lock clears are not supported, so the LKE CLEAR -EXACT qualifier
				 * will not be supported on GT.CM. */
				locks = gtcmtr_lke_clearreq(gv_cur_region->dyn.addr->cm_blk, gv_cur_region->cmx_regnum,
							    all, interactive, pid, &node);
#				else
				gtm_putmsg(VARLSTCNT(10) ERR_UNIMPLOP, 0, ERR_TEXT, 2,
						LEN_AND_LIT("GT.CM region - locks must be cleared on the local node"),
						ERR_TEXT, 2, REG_LEN_STR(gv_cur_region));
				continue;
#				endif
			} else if ((dba_bg == gv_cur_region->dyn.addr->acc_meth) || (dba_mm == gv_cur_region->dyn.addr->acc_meth))
			{	/* Local region */
				cs_addrs = &FILE_INFO(gv_cur_region)->s_addrs;
				ctl = (mlk_ctldata_ptr_t)cs_addrs->lock_addrs[0];
				/* Prevent any modifications of locks while we are clearing */
				if (cs_addrs->critical != NULL)
					crash_count = cs_addrs->critical->crashcnt;
				was_crit = cs_addrs->now_crit;
				if (!was_crit)
					grab_crit(gv_cur_region);
				locks = ctl->blkroot == 0 ? FALSE
							  : lke_cleartree(gv_cur_region, NULL, ctl,
									 (mlk_shrblk_ptr_t)R2A(ctl->blkroot),
									  all, interactive, pid, one_lock, exact);
				if (!was_crit)
					rel_crit(gv_cur_region);
			} else
			{
				gtm_putmsg(VARLSTCNT(2) ERR_BADREGION, 0);
				locks = TRUE;
			}

			if (!locks)
			{
				gtm_putmsg(VARLSTCNT(4) ERR_NOLOCKMATCH, 2, REG_LEN_STR(gv_cur_region));
			}
		}
	}

	if (!match  &&  reg.len != 0)
		rts_error(VARLSTCNT(4) ERR_NOREGION, 2, reg.len, reg.addr);

}
Ejemplo n.º 29
0
trans_num gvcst_bmp_mark_free(kill_set *ks)
{
	block_id		bit_map, next_bm, *updptr;
	blk_ident		*blk, *blk_top, *nextblk;
	trans_num		ctn, start_db_fmt_tn;
	unsigned int		len;
#	if defined(UNIX) && defined(DEBUG)
	unsigned int		lcl_t_tries;
#	endif
	int4			blk_prev_version;
	srch_hist		alt_hist;
	trans_num		ret_tn = 0;
	boolean_t		visit_blks;
	srch_blk_status		bmphist;
	cache_rec_ptr_t		cr;
	enum db_ver		ondsk_blkver;
	enum cdb_sc		status;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	TREF(in_gvcst_bmp_mark_free) = TRUE;
	assert(inctn_bmp_mark_free_gtm == inctn_opcode || inctn_bmp_mark_free_mu_reorg == inctn_opcode);
	/* Note down the desired_db_format_tn before you start relying on cs_data->fully_upgraded.
	 * If the db is fully_upgraded, take the optimal path that does not need to read each block being freed.
	 * But in order to detect concurrent desired_db_format changes, note down the tn (when the last format change occurred)
	 * 	before the fully_upgraded check	and after having noted down the database current_tn.
	 * If they are the same, then we are guaranteed no concurrent desired_db_format change occurred.
	 * If they are not, then fall through to the non-optimal path where each to-be-killed block has to be visited.
	 * The reason we need to visit every block in case desired_db_format changes is to take care of the case where
	 *	MUPIP REORG DOWNGRADE concurrently changes a block that we are about to free.
	 */
	start_db_fmt_tn = cs_data->desired_db_format_tn;
	visit_blks = (!cs_data->fully_upgraded);	/* Local evaluation */
	assert(!visit_blks || (visit_blks && dba_bg == cs_addrs->hdr->acc_meth)); /* must have blks_to_upgrd == 0 for non-BG */
	assert(!dollar_tlevel); 			/* Should NOT be in TP now */
	blk = &ks->blk[0];
	blk_top = &ks->blk[ks->used];
	if (!visit_blks)
	{	/* Database has been completely upgraded. Free all blocks in one bitmap as part of one transaction. */
		assert(cs_data->db_got_to_v5_once); /* assert all V4 fmt blocks (including RECYCLED) have space for V5 upgrade */
		inctn_detail.blknum_struct.blknum = 0; /* to indicate no adjustment to "blks_to_upgrd" necessary */
		/* If any of the mini transactions below restarts because of an online rollback, we don't want the application
		 * refresh to happen (like $ZONLNRLBK++ or rts_error(DBROLLEDBACK)). This is because, although we are currently
		 * in non-TP (dollar_tlevel == 0), we could actually be in a TP transaction and have faked dollar_tlevel. In
		 * such a case, we should NOT be issuing a DBROLLEDBACK error as TP transactions are supposed to just restart in
		 * case of an online rollback. So, set the global variable that gtm_onln_rlbk_clnup can check to skip doing the
		 * application refresh, but still reset the clues. The next update will see the cycle mismatch and will
		 * accordingly take the right action.
		 */
		for ( ; blk < blk_top;  blk = nextblk)
		{
			if (0 != blk->flag)
			{
				nextblk = blk + 1;
				continue;
			}
			assert(0 < blk->block);
			assert((int4)blk->block < cs_addrs->ti->total_blks);
			bit_map = ROUND_DOWN2((int)blk->block, BLKS_PER_LMAP);
			next_bm = bit_map + BLKS_PER_LMAP;
			CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
			/* Scan for the next local bitmap */
			updptr = (block_id *)update_array_ptr;
			for (nextblk = blk;
				(0 == nextblk->flag) && (nextblk < blk_top) && ((block_id)nextblk->block < next_bm);
				++nextblk)
			{
				assert((block_id)nextblk->block - bit_map);
				*updptr++ = (block_id)nextblk->block - bit_map;
			}
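			/* len: byte count of the kill_set entries consumed for this local bitmap */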
			len = (unsigned int)((char *)nextblk - (char *)blk);
			update_array_ptr = (char *)updptr;
			alt_hist.h[0].blk_num = 0;			/* need for calls to T_END for bitmaps */
			alt_hist.h[0].blk_target = NULL;		/* need to initialize for calls to T_END */
			/* the following assumes SIZEOF(blk_ident) == SIZEOF(int) */
			assert(SIZEOF(blk_ident) == SIZEOF(int));
			*(int *)update_array_ptr = 0;
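			/* Standard non-TP retry pattern: t_begin opens the mini-transaction; the loop below
			 * re-reads the bitmap and retries t_end until it commits (non-zero tn), restarts via
			 * t_retry, or a concurrent desired_db_format change forces the per-block path.
			 */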
			t_begin(ERR_GVKILLFAIL, UPDTRNS_DB_UPDATED_MASK);
			for (;;)
			{
				ctn = cs_addrs->ti->curr_tn;
				/* Need a read fence before reading fields from cs_data as we are reading outside
				 * of crit and relying on this value to detect desired db format state change.
				 */
				SHM_READ_MEMORY_BARRIER;
				if (start_db_fmt_tn != cs_data->desired_db_format_tn)
				{	/* Concurrent db format change has occurred. Need to visit every block to be killed
					 * to determine its block format. Fall through to the non-optimal path below
					 */
					ret_tn = 0;
					break;
				}
				bmphist.blk_num = bit_map;
				if (NULL == (bmphist.buffaddr = t_qread(bmphist.blk_num, (sm_int_ptr_t)&bmphist.cycle,
									&bmphist.cr)))
				{
					t_retry((enum cdb_sc)rdfail_detail);
					continue;
				}
				t_write_map(&bmphist, (uchar_ptr_t)update_array, ctn, -(int4)(nextblk - blk));
				UNIX_ONLY(DEBUG_ONLY(lcl_t_tries = t_tries));
				if ((trans_num)0 == (ret_tn = t_end(&alt_hist, NULL, TN_NOT_SPECIFIED)))
				{
#					ifdef UNIX
					assert((CDB_STAGNATE == t_tries) || (lcl_t_tries == t_tries - 1));
					status = LAST_RESTART_CODE;
					if ((cdb_sc_onln_rlbk1 == status) || (cdb_sc_onln_rlbk2 == status)
						|| TREF(rlbk_during_redo_root))
					{	/* t_end restarted due to online rollback. Discard the bitmap free-up and return
						 * control to the application. But, before that, reset in_gvcst_bmp_mark_free to FALSE.
						 */
						TREF(in_gvcst_bmp_mark_free) = FALSE;
						send_msg(VARLSTCNT(6) ERR_IGNBMPMRKFREE, 4, REG_LEN_STR(gv_cur_region),
								DB_LEN_STR(gv_cur_region));
						t_abort(gv_cur_region, cs_addrs);
						return ret_tn; /* actually 0 */
					}
#					endif
					continue;
				}
				break;
			}
			if (0 == ret_tn) /* db format change occurred. Fall through to below for loop to visit each block */
			{
				/* Abort any active transaction to get rid of lingering Non-TP artifacts */
				t_abort(gv_cur_region, cs_addrs);
				break;
			}
		}
	}	/* for all blocks in the kill_set */
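
The grouping logic above rounds each block number down to the start of its local bitmap and records every freed block as an offset within that map. A self-contained sketch of the arithmetic, assuming the conventional 512-blocks-per-local-map value for BLKS_PER_LMAP and a power-of-two rounding macro:

#include <stdio.h>

#define BLKS_PER_LMAP	512				/* assumed local-map size; a power of two */
#define ROUND_DOWN2(x, y)	((x) & ~((y) - 1))	/* round x down to a multiple of y */

int main(void)
{
	long	block = 1537;
	long	bit_map = ROUND_DOWN2(block, BLKS_PER_LMAP);	/* 1536: block number of the local map */
	long	next_bm = bit_map + BLKS_PER_LMAP;		/* 2048: first block of the next map */
	long	offset = block - bit_map;			/* 1: value stored in the update array */

	printf("block %ld -> lmap %ld, next %ld, offset %ld\n", block, bit_map, next_bm, offset);
	return 0;
}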
Example No. 30
void dse_save(void)
{
	block_id	blk;
	unsigned	i, j, util_len;
	unsigned short	buff_len;
	bool		was_block, was_crit;
	char		buff[100], *ptr, util_buff[MAX_UTIL_LEN];
	sm_uc_ptr_t	bp;
	int4		dummy_int, nocrit_present;
	cache_rec_ptr_t dummy_cr;

	error_def(ERR_DSEBLKRDFAIL);

	memset(util_buff, 0, MAX_UTIL_LEN);

	if (was_block = (cli_present("BLOCK") == CLI_PRESENT))
	{
		if (!cli_get_hex("BLOCK", &blk))
			return;
		if (blk < 0 || blk >= cs_addrs->ti->total_blks)
		{
			util_out_print("Error: invalid block number.", TRUE);
			return;
		}
		patch_curr_blk = blk;
	} else
		blk = patch_curr_blk;
	if (cli_present("LIST") == CLI_PRESENT)
	{
		if (was_block)
		{
			util_len = sizeof("!/Saved versions of block ");
			memcpy(util_buff, "!/Saved versions of block ", util_len);
			util_len += i2hex_nofill(blk, (uchar_ptr_t)&util_buff[util_len-1], 8);
			util_buff[util_len-1] = 0;
			util_out_print(util_buff, TRUE);
			for (i = j = 0;  i < patch_save_count;  i++)
				if (patch_save_set[i].blk == blk)
				{
					j++;

					if (*patch_save_set[i].comment)
						util_out_print("Version !UL  Region !AD  Comment: !AD!/", TRUE,
							patch_save_set[i].ver, REG_LEN_STR(patch_save_set[i].region),
							LEN_AND_STR(patch_save_set[i].comment));
					else
						util_out_print("Version !UL  Region !AD!/", TRUE, patch_save_set[i].ver,
							REG_LEN_STR(patch_save_set[i].region));
				}
			if (!j)
				util_out_print("None.!/", TRUE);
			return;
		}
		util_out_print("!/Save history:!/", TRUE);
		for (i = j = 0;  i < patch_save_count;  i++)
		{
			util_len = sizeof("Block ");
			memcpy(util_buff, "Block ", util_len);
			util_len += i2hex_nofill(patch_save_set[i].blk, (uchar_ptr_t)&util_buff[util_len-1], 8);
			util_buff[util_len-1] = 0;
			util_out_print(util_buff, TRUE);
			j++;
			if (*patch_save_set[i].comment)
			{
				util_out_print("Version !UL  Region !AD  Comment: !AD!/", TRUE,
					patch_save_set[i].ver, REG_LEN_STR(patch_save_set[i].region),
					LEN_AND_STR(patch_save_set[i].comment));

			} else
			{
				util_out_print("Version !UL  Region !AD!/", TRUE, patch_save_set[i].ver,
					REG_LEN_STR(patch_save_set[i].region));
			}
		}
		if (!j)
			util_out_print("  None.!/", TRUE);
		return;
	}
	j = 1;
	for (i = 0;  i < patch_save_count;  i++)
		if (patch_save_set[i].blk == blk && patch_save_set[i].region == gv_cur_region
			&& patch_save_set[i].ver >= j)
			j = patch_save_set[i].ver + 1;
	util_len = sizeof("!/Saving version !UL of block ");
	memcpy(util_buff, "!/Saving version !UL of block ", util_len);
	util_len += i2hex_nofill(blk, (uchar_ptr_t)&util_buff[util_len-1], 8);
	util_buff[util_len-1] = 0;
	util_out_print(util_buff, TRUE, j);
	patch_save_set[patch_save_count].ver = j;
	patch_save_set[patch_save_count].blk = blk;
	patch_save_set[patch_save_count].region = gv_cur_region;
	patch_save_set[patch_save_count].bp = (char *)malloc(cs_addrs->hdr->blk_size);
	if (blk >= cs_addrs->ti->total_blks)
		rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL);
	was_crit = cs_addrs->now_crit;
	nocrit_present = (CLI_NEGATED == cli_present("CRIT"));

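	/* With CRIT negated (-NOCRIT), DSE merely marks crit as held (now_crit = TRUE) instead of
	 * acquiring it; this diagnostic override lets the read proceed even when crit is damaged.
	 */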
	if (!was_crit)
	{
		if (nocrit_present)
			cs_addrs->now_crit = TRUE;
		else
			grab_crit(gv_cur_region);
	}

	if (!(bp = t_qread(blk, &dummy_int, &dummy_cr)))
		rts_error(VARLSTCNT(1) ERR_DSEBLKRDFAIL);
	memcpy(patch_save_set[patch_save_count].bp, bp, cs_addrs->hdr->blk_size);
	if (!was_crit)
	{
		if (nocrit_present)
			cs_addrs->now_crit = FALSE;
		else
			rel_crit(gv_cur_region);
	}
	buff_len = sizeof(buff);
	if ((cli_present("COMMENT") == CLI_PRESENT) && cli_get_str("COMMENT", buff, &buff_len))
	{
		ptr = &buff[buff_len];
		*ptr = 0;
		j = ptr - &buff[0] + 1;
		patch_save_set[patch_save_count].comment = (char *)malloc(j);
		memcpy(patch_save_set[patch_save_count].comment, &buff[0], j);
	} else
		patch_save_set[patch_save_count].comment = "";
	patch_save_count++;
	return;
}
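
The version-selection loop above assigns one more than the highest version already saved for the same block and region. A minimal standalone sketch of that scan, with the hypothetical save_entry array standing in for patch_save_set:

#include <stdio.h>

/* Hypothetical stand-ins for the DSE save-set bookkeeping. */
typedef struct
{
	long		blk;	/* saved block number */
	int		region;	/* region identifier (simplified from gd_region *) */
	unsigned	ver;	/* version number of this saved copy */
} save_entry;

/* Next free version for (blk, region): one past the highest already saved. */
unsigned next_version(const save_entry *set, unsigned count, long blk, int region)
{
	unsigned	i, ver = 1;

	for (i = 0; i < count; i++)
		if (set[i].blk == blk && set[i].region == region && set[i].ver >= ver)
			ver = set[i].ver + 1;
	return ver;
}

int main(void)
{
	save_entry	set[] = { {10, 1, 1}, {10, 1, 2}, {7, 1, 1} };

	printf("next version for block 10: %u\n", next_version(set, 3, 10, 1));	/* prints 3 */
	printf("next version for block 7:  %u\n", next_version(set, 3, 7, 1));	/* prints 2 */
	return 0;
}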