Example #1
STATICFNDEF void cleanup_trigger_hash(char *trigvn, int trigvn_len, char **values, uint4 *value_len, stringkey *set_hash,
		stringkey *kill_hash, boolean_t del_kill_hash, int match_index)
{
	sgmnt_addrs		*csa;
	uint4			len;
	char			save_currkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key			*save_gv_currkey;
	gd_region		*save_gv_cur_region;
	gv_namehead		*save_gv_target;
	sgm_info		*save_sgm_info_ptr;
	mstr			trigger_key;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	SAVE_TRIGGER_REGION_INFO;
	SWITCH_TO_DEFAULT_REGION;
	assert(0 != gv_target->root);
	if (NULL != strchr(values[CMD_SUB], 'S'))
	{
		SEARCH_AND_KILL_BY_HASH(trigvn, trigvn_len, set_hash, match_index);
	}
	if (del_kill_hash)
	{
		SEARCH_AND_KILL_BY_HASH(trigvn, trigvn_len, kill_hash, match_index);
	}
	RESTORE_TRIGGER_REGION_INFO;
}
Example #2
STATICFNDEF int4 update_trigger_name_value(int trigvn_len, char *trig_name, int trig_name_len, int new_trig_index)
{
	sgmnt_addrs		*csa;
	mname_entry		gvent;
	gv_namehead		*hasht_tree;
	int			len;
	char			name_and_index[MAX_MIDENT_LEN + 1 + MAX_DIGITS_IN_INT];
	char			new_trig_name[MAX_TRIGNAME_LEN + 1];
	int			num_len;
	char			*ptr;
	int4			result;
	char			save_currkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key			*save_gv_currkey;
	gd_region		*save_gv_cur_region;
	gv_namehead		*save_gv_target;
	sgm_info		*save_sgm_info_ptr;
	mval			trig_gbl;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	if (MAX_AUTO_TRIGNAME_LEN < trigvn_len)
		return PUT_SUCCESS;
	SAVE_TRIGGER_REGION_INFO;
	SWITCH_TO_DEFAULT_REGION;
	if (gv_cur_region->read_only)
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_TRIGMODREGNOTRW, 2, REG_LEN_STR(gv_cur_region));
	assert(0 != gv_target->root);
	/* $get(^#t("#TNAME",^#t(GVN,index,"#TRIGNAME")) */
	BUILD_HASHT_SUB_SUB_CURRKEY(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME), trig_name, trig_name_len - 1);
	if (!gvcst_get(&trig_gbl))
	{	/* There has to be a #TNAME entry */
		if (CDB_STAGNATE > t_tries)
			t_retry(cdb_sc_triggermod);
		else
		{
			assert(WBTEST_HELPOUT_TRIGDEFBAD == gtm_white_box_test_case_number);
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(6) ERR_TRIGNAMBAD, 4, LEN_AND_LIT("\"#TNAME\""), trig_name_len - 1,
					trig_name);
		}
	}
	len = STRLEN(trig_gbl.str.addr) + 1;
	assert(MAX_MIDENT_LEN >= len);
	memcpy(name_and_index, trig_gbl.str.addr, len);
	ptr = name_and_index + len;
	num_len = 0;
	I2A(ptr, num_len, new_trig_index);
	len += num_len;
	/* set ^#t(GVN,index,"#TRIGNAME")=trig_name $C(0) new_trig_index */
	SET_TRIGGER_GLOBAL_SUB_SUB_STR(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME), trig_name, trig_name_len - 1,
		name_and_index, len, result);
	RESTORE_TRIGGER_REGION_INFO;
	return result;
}
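A small worked illustration (not from the original source) of the value layout that update_trigger_name_value() writes under ^#t("#TNAME",<trig_name>); the global name MYGBL, trigger name trg1, and index 3 are hypothetical:
/* Hypothetical example: for trigger name "trg1" on global ^MYGBL with new_trig_index = 3,
 * name_and_index is laid out as
 *	"MYGBL" $C(0) "3"
 * i.e. the GVN, its terminating null byte, then the new index in decimal digits, so that
 * ^#t("#TNAME","trg1") once again points at trigger slot ^#t("MYGBL",3).
 */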
Example #3
void gv_init_reg (gd_region *reg)
{
	gv_namehead	*g;
	sgmnt_addrs	*csa;
#ifdef	NOLICENSE
	licensed = TRUE;
#else
	CRYPT_CHKSYSTEM;
#endif
	switch (reg->dyn.addr->acc_meth)
	{
	case dba_usr:
		gvusr_init (reg, &gv_cur_region, &gv_currkey, &gv_altkey);
		break;
		/* we may be left in dba_cm state for GT.CM if we have run down the db and then accessed
		   the db again without quitting out of gtm */
	case dba_cm:
	case dba_mm:
	case dba_bg:
		if (!reg->open)
			gvcst_init(reg);
		break;
	default:
		GTMASSERT;
	}
	assert(reg->open);
	GVKEYSIZE_INCREASE_IF_NEEDED(DBKEYSIZE(reg->max_key_size));
	if (reg->dyn.addr->acc_meth == dba_bg || reg->dyn.addr->acc_meth == dba_mm)
	{
		if (!reg->was_open)
		{
			csa = (sgmnt_addrs*)&FILE_INFO(reg)->s_addrs;
			g = csa->dir_tree;
			if (NULL != g)
			{	/* It is possible that dir_tree has already been targ_alloc'ed. This is because GT.CM or VMS DAL
				 * calls can run down regions without the process halting out. We don't want to double malloc.
				 */
				g->clue.end = 0;
			}
			SET_CSA_DIR_TREE(csa, reg->max_key_size, reg);
		}
	}
	return;
}
Example #4
boolean_t gvcst_queryget(mval *val)
{
	bool		found, is_hidden, is_dummy = FALSE, sn_tpwrapped;
	boolean_t	est_first_pass;
	char		save_currkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key		*save_gv_currkey;
	int		save_dollar_tlevel;

	DEBUG_ONLY(save_dollar_tlevel = dollar_tlevel);
	found = gvcst_queryget2(val, NULL);
#	ifdef UNIX
	assert(save_dollar_tlevel == dollar_tlevel);
	CHECK_HIDDEN_SUBSCRIPT(gv_altkey, is_hidden);
	if (found && IS_SN_DUMMY(val->str.len, val->str.addr))
		is_dummy = TRUE;
	if (!found || (!is_dummy && !is_hidden))
		return found;
	IF_SN_DISALLOWED_AND_NO_SPAN_IN_DB(return found);
	SAVE_GV_CURRKEY;
	if (!dollar_tlevel)
	{
		sn_tpwrapped = TRUE;
		op_tstart((IMPLICIT_TSTART), TRUE, &literal_batch, 0);
		ESTABLISH_NORET(gvcst_queryget_ch, est_first_pass);
		GVCST_ROOT_SEARCH_AND_PREP(est_first_pass);
	} else
		sn_tpwrapped = FALSE;
	found = gvcst_query();
	COPY_KEY(gv_currkey, gv_altkey); /* set gv_currkey to gv_altkey */
	found = gvcst_get(val);
	INCR_GVSTATS_COUNTER(cs_addrs, cs_addrs->nl, n_get, (gtm_uint64_t) -1); /* only counted externally as one get */
	INCR_GVSTATS_COUNTER(cs_addrs, cs_addrs->nl, n_query, (gtm_uint64_t) -1);
	if (sn_tpwrapped)
	{
		op_tcommit();
		REVERT; /* remove our condition handler */
	}
	RESTORE_GV_CURRKEY;
	assert(save_dollar_tlevel == dollar_tlevel);
#	endif
	return found;
}
Example #5
void trigger_delete_all(void)
{
	int			count;
	char			count_str[MAX_DIGITS_IN_INT + 1];
	sgmnt_addrs		*csa;
	mval			curr_gbl_name;
	int			cycle;
	mstr			gbl_name;
	mname_entry		gvent;
	gv_namehead		*hasht_tree, *gvt;
	mval			*mv_count_ptr;
	mval			*mv_cycle_ptr;
	mval			mv_indx;
	gd_region		*reg;
	int			reg_indx;
	int4			result;
	char			save_currkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key			*save_gv_currkey;
	gd_region		*save_gv_cur_region;
	gv_namehead		*save_gv_target;
	sgm_info		*save_sgm_info_ptr;
	int			trig_indx;
	mval			trigger_cycle;
	mval			trigger_count;
	mval			val;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(0 < dollar_tlevel);
	/* Before we delete any triggers, verify that none of the triggers have been fired in this transaction. If they have,
	 * this creates an uncommittable transaction that will end in a TPFAIL error. Since that error indicates database
	 * damage, we'd rather detect this avoidable condition and give a descriptive error instead (TRIGMODINTP).
	 */
	for (gvt = gv_target_list; NULL != gvt; gvt = gvt->next_gvnh)
	{
		if (gvt->trig_local_tn == local_tn)
			rts_error(VARLSTCNT(1) ERR_TRIGMODINTP);
	}
	SWITCH_TO_DEFAULT_REGION;
	INITIAL_HASHT_ROOT_SEARCH_IF_NEEDED;
	if (0 != gv_target->root)
	{
		/* kill ^#t("#TRHASH") */
		BUILD_HASHT_SUB_CURRKEY(LITERAL_HASHTRHASH, STRLEN(LITERAL_HASHTRHASH));
		gvcst_kill(TRUE);
		/* kill ^#t("#TNAME") */
		BUILD_HASHT_SUB_CURRKEY(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME));
		gvcst_kill(TRUE);
	}
	for (reg_indx = 0, reg = gd_header->regions; reg_indx < gd_header->n_regions; reg_indx++, reg++)
	{
		if (!reg->open)
			gv_init_reg(reg);
		if (!reg->read_only)
		{
			gv_cur_region = reg;
			change_reg();
			csa = cs_addrs;
			SETUP_TRIGGER_GLOBAL;
			INITIAL_HASHT_ROOT_SEARCH_IF_NEEDED;
			/* There might not be any ^#t in this region, so check */
			if (0 != gv_target->root)
			{	/* Kill all descendants of ^#t(trigvn, indx) where trigvn is any global with a trigger,
				 * but skip the "#XYZ" entries. Set up ^#t(trigvn,"$") as the PREV key for op_gvorder.
				 */
				BUILD_HASHT_SUB_CURRKEY(LITERAL_MAXHASHVAL, STRLEN(LITERAL_MAXHASHVAL));
				TREF(gv_last_subsc_null) = FALSE; /* We know it's not null, but prior state is unreliable */
				while (TRUE)
				{
					op_gvorder(&curr_gbl_name);
					/* quit:$length(curr_gbl_name)=0 */
					if (0 == curr_gbl_name.str.len)
						break;
					/* $get(^#t(curr_gbl_name,#COUNT)) */
					BUILD_HASHT_SUB_SUB_CURRKEY(curr_gbl_name.str.addr, curr_gbl_name.str.len,
						LITERAL_HASHCOUNT, STRLEN(LITERAL_HASHCOUNT));
					if (gvcst_get(&trigger_count))
					{
						mv_count_ptr = &trigger_count;
						count = MV_FORCE_INT(mv_count_ptr);
						/* $get(^#t(curr_gbl_name,#CYCLE)) */
						BUILD_HASHT_SUB_SUB_CURRKEY(curr_gbl_name.str.addr, curr_gbl_name.str.len,
							LITERAL_HASHCYCLE, STRLEN(LITERAL_HASHCYCLE));
						if (!gvcst_get(&trigger_cycle))
							assert(FALSE); /* Found #COUNT, there must be #CYCLE */
						mv_cycle_ptr = &trigger_cycle;
						cycle = MV_FORCE_INT(mv_cycle_ptr);
						/* kill ^#t(curr_gbl_name) */
						BUILD_HASHT_SUB_CURRKEY(curr_gbl_name.str.addr, curr_gbl_name.str.len);
						gvcst_kill(TRUE);
						cycle++;
						MV_FORCE_MVAL(&trigger_cycle, cycle);
						/* set ^#t(curr_gbl_name,#CYCLE)=trigger_cycle */
						SET_TRIGGER_GLOBAL_SUB_SUB_MVAL(curr_gbl_name.str.addr, curr_gbl_name.str.len,
							LITERAL_HASHCYCLE, STRLEN(LITERAL_HASHCYCLE), trigger_cycle, result);
						assert(PUT_SUCCESS == result);
					} /* else there is no #COUNT, hence no triggers; leave #CYCLE alone */
					/* get ready for op_gvorder() call for next trigger under ^#t */
					BUILD_HASHT_SUB_CURRKEY(curr_gbl_name.str.addr, curr_gbl_name.str.len);
				}
				csa->incr_db_trigger_cycle = TRUE;
				if (dollar_ztrigger_invoked)
				{	/* increment db_dztrigger_cycle so that next gvcst_put/gvcst_kill in this transaction,
					 * on this region, will re-read. See trigger_update.c for a comment on why it is okay
					 * for db_dztrigger_cycle to be incremented more than once in the same transaction
					 */
					csa->db_dztrigger_cycle++;
				}
			}
		}
	}
	util_out_print_gtmio("All existing triggers deleted", FLUSH);
}
Example #6
int4 trigger_delete(char *trigvn, int trigvn_len, mval *trigger_count, int index)
{
	int			count;
	mval			*mv_cnt_ptr;
	mval			mv_val;
	mval			*mv_val_ptr;
	int			num_len;
	char			*ptr1;
	int4			result;
	int4			retval;
	char			save_currkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key			*save_gv_currkey;
	stringkey		kill_hash, set_hash;
	int			sub_indx;
	char			tmp_trig_str[MAX_BUFF_SIZE];
	int4			trig_len;
	char			trig_name[MAX_TRIGNAME_LEN];
	int			trig_name_len;
	int			tmp_len;
	char			*tt_val[NUM_SUBS];
	uint4			tt_val_len[NUM_SUBS];
	mval			trigger_value;
	mval			trigger_index;
	mval			xecute_index;
	uint4			xecute_idx;
	uint4			used_trigvn_len;
	mval			val;
	char			val_str[MAX_DIGITS_IN_INT + 1];
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	mv_val_ptr = &mv_val;
	MV_FORCE_MVAL(&trigger_index, index);
	count = MV_FORCE_INT(trigger_count);
	/* build up array of values - needed for comparison in hash stuff */
	ptr1 = tmp_trig_str;
	memcpy(ptr1, trigvn, trigvn_len);
	ptr1 += trigvn_len;
	*ptr1++ = '\0';
	tmp_len = trigvn_len + 1;
	for (sub_indx = 0; sub_indx < NUM_SUBS; sub_indx++)
	{
		BUILD_HASHT_SUB_MSUB_SUB_CURRKEY(trigvn, trigvn_len, trigger_index, trigger_subs[sub_indx],
						 STRLEN(trigger_subs[sub_indx]));
		trig_len = gvcst_get(&trigger_value) ? trigger_value.str.len : 0;
		if (0 == trig_len)
		{
			tt_val[sub_indx] = NULL;
			tt_val_len[sub_indx] = 0;
			continue;
		}
		if (TRIGNAME_SUB == sub_indx)
		{
			trig_name_len = trig_len;
			assert(MAX_TRIGNAME_LEN >= trig_len);
			memcpy(trig_name, trigger_value.str.addr, trig_name_len);
			tt_val[sub_indx] = NULL;
			tt_val_len[sub_indx] = 0;
			continue;
		}
		tt_val[sub_indx] = ptr1;
		tt_val_len[sub_indx] = trig_len;
		tmp_len += trig_len;
		if (0 < trig_len)
		{
			if (MAX_BUFF_SIZE <= tmp_len)
				return VAL_TOO_LONG;
			memcpy(ptr1, trigger_value.str.addr, trig_len);
			ptr1 += trig_len;
		}
		*ptr1++ = '\0';
		tmp_len++;
	}
	/* Get trigger name, set hash value, and kill hash values from trigger before we delete it.
	 * The values will be used in clean ups associated with the deletion
	 */
	/* $get(^#t(GVN,trigger_index,"LHASH") for deletion in  cleanup_trigger_hash */
	BUILD_HASHT_SUB_MSUB_SUB_CURRKEY(trigvn, trigvn_len, trigger_index, trigger_subs[LHASH_SUB],
		STRLEN(trigger_subs[LHASH_SUB]));
	if (gvcst_get(mv_val_ptr))
		kill_hash.hash_code = (uint4)MV_FORCE_INT(mv_val_ptr);
	else {
		util_out_print_gtmio("The LHASH for global ^!AD does not exist", FLUSH, trigvn_len, trigvn);
		kill_hash.hash_code = 0;
	}
	/* $get(^#t(GVN,trigger_index,"BHASH") for deletion in  cleanup_trigger_hash */
	BUILD_HASHT_SUB_MSUB_SUB_CURRKEY(trigvn, trigvn_len, trigger_index, trigger_subs[BHASH_SUB],
		STRLEN(trigger_subs[BHASH_SUB]));
	if (gvcst_get(mv_val_ptr))
		set_hash.hash_code = (uint4)MV_FORCE_INT(mv_val_ptr);
	else {
		util_out_print_gtmio("The BHASH for global ^!AD does not exist", FLUSH, trigvn_len, trigvn);
		set_hash.hash_code = 0;
	}
	/* kill ^#t(GVN,trigger_index) */
	BUILD_HASHT_SUB_MSUB_CURRKEY(trigvn, trigvn_len, trigger_index);
	gvcst_kill(TRUE);
	assert(0 == gvcst_data());
	if (1 == count)
	{ /* This is the last trigger for "trigvn" - clean up trigger name, remove #LABEL and #COUNT */
		assert(1 == index);
		BUILD_HASHT_SUB_SUB_CURRKEY(trigvn, trigvn_len, LITERAL_HASHLABEL, STRLEN(LITERAL_HASHLABEL));
		gvcst_kill(TRUE);
		BUILD_HASHT_SUB_SUB_CURRKEY(trigvn, trigvn_len, LITERAL_HASHCOUNT, STRLEN(LITERAL_HASHCOUNT));
		gvcst_kill(TRUE);
		cleanup_trigger_name(trigvn, trigvn_len, trig_name, trig_name_len);
		cleanup_trigger_hash(trigvn, trigvn_len, tt_val, tt_val_len, &set_hash, &kill_hash, TRUE, 0);
	} else
	{
		cleanup_trigger_hash(trigvn, trigvn_len, tt_val, tt_val_len, &set_hash, &kill_hash, TRUE, index);
		cleanup_trigger_name(trigvn, trigvn_len, trig_name, trig_name_len);
		if (index != count)
		{	/* Shift the last trigger (index is #COUNT value) to the just deleted trigger's index.
			 * This way count is always accurate and can still be used as the index for new triggers.
			 * Note - there is no dependence on the trigger order, or this technique wouldn't work.
			 */
			ptr1 = tmp_trig_str;
			memcpy(ptr1, trigvn, trigvn_len);
			ptr1 += trigvn_len;
			*ptr1++ = '\0';
			for (sub_indx = 0; sub_indx < NUM_TOTAL_SUBS; sub_indx++)
			{
				/* $get(^#t(GVN,trigger_count,sub_indx)) */
				BUILD_HASHT_SUB_MSUB_SUB_CURRKEY(trigvn, trigvn_len, *trigger_count, trigger_subs[sub_indx],
								STRLEN(trigger_subs[sub_indx]));
				if (gvcst_get(&trigger_value))
				{
					trig_len = trigger_value.str.len;
					/* set ^#t(GVN,trigger_index,sub_indx)=^#t(GVN,trigger_count,sub_indx) */
					SET_TRIGGER_GLOBAL_SUB_MSUB_SUB_MVAL(trigvn, trigvn_len, trigger_index,
						trigger_subs[sub_indx], STRLEN(trigger_subs[sub_indx]), trigger_value, result);
					assert(PUT_SUCCESS == result);
				} else if (XECUTE_SUB == sub_indx)
				{ /* multi-line trigger, broken up because it exceeds the record size */
					for (xecute_idx = 0; ; xecute_idx++)
					{
						i2mval(&xecute_index, xecute_idx);
						BUILD_HASHT_SUB_MSUB_SUB_MSUB_CURRKEY(trigvn, trigvn_len, *trigger_count,
							trigger_subs[sub_indx], STRLEN(trigger_subs[sub_indx]), xecute_index);
						if (!gvcst_get(&trigger_value))
							break;
						SET_TRIGGER_GLOBAL_SUB_MSUB_SUB_MSUB_MVAL(trigvn, trigvn_len, trigger_index,
							trigger_subs[sub_indx], STRLEN(trigger_subs[sub_indx]), xecute_index,
							trigger_value, result);
						assert(PUT_SUCCESS == result);
					}
					assert (xecute_idx >= 2); /* multi-line trigger, indices 0, 1 and 2 MUST be defined */
				} else
				{
					/* in PRO this is a nasty case that will result in an access violation
					 * because data that should be present is not. In the next go around
					 * with trigger installation this case should be handled better */
					assert(!((TRIGNAME_SUB == sub_indx) || (CMD_SUB == sub_indx) ||
						 (CHSET_SUB == sub_indx))); /* these should not be zero length */
					trig_len = 0;
				}
				if (NUM_SUBS > sub_indx)
				{
					tt_val[sub_indx] = ptr1;
					tt_val_len[sub_indx] = trig_len;
					if (0 < trig_len)
					{
						memcpy(ptr1, trigger_value.str.addr, trig_len);
						ptr1 += trig_len;
					}
					*ptr1++ = '\0';
				}
			}
			/* $get(^#t(GVN,trigger_count,"LHASH") for update_trigger_hash_value */
			BUILD_HASHT_SUB_MSUB_SUB_CURRKEY(trigvn, trigvn_len, *trigger_count, trigger_subs[LHASH_SUB],
							 STRLEN(trigger_subs[LHASH_SUB]));
			if (!gvcst_get(mv_val_ptr))
				return PUT_SUCCESS;
			kill_hash.hash_code = (uint4)MV_FORCE_INT(mv_val_ptr);
			/* $get(^#t(GVN,trigger_count,"BHASH") for update_trigger_hash_value */
			BUILD_HASHT_SUB_MSUB_SUB_CURRKEY(trigvn, trigvn_len, *trigger_count, trigger_subs[BHASH_SUB],
							 STRLEN(trigger_subs[BHASH_SUB]));
			if (!gvcst_get(mv_val_ptr))
				return PUT_SUCCESS;
			set_hash.hash_code = (uint4)MV_FORCE_INT(mv_val_ptr);
			/* update hash values from above */
			if (VAL_TOO_LONG == (retval = update_trigger_hash_value(trigvn, trigvn_len, tt_val, tt_val_len,
					&set_hash, &kill_hash, count, index)))
				return VAL_TOO_LONG;
			/* fix the value ^#t("#TNAME",^#t(GVN,index,"#TRIGNAME")) to point to the correct "index" */
			if (VAL_TOO_LONG == (retval = update_trigger_name_value(trigvn_len, tt_val[TRIGNAME_SUB],
					tt_val_len[TRIGNAME_SUB], index)))
				return VAL_TOO_LONG;
			/* kill ^#t(GVN,COUNT) which was just shifted to trigger_index */
			BUILD_HASHT_SUB_MSUB_CURRKEY(trigvn, trigvn_len, *trigger_count);
			gvcst_kill(TRUE);
		}
		/* Update #COUNT */
		count--;
		MV_FORCE_MVAL(trigger_count, count);
		SET_TRIGGER_GLOBAL_SUB_SUB_MVAL(trigvn, trigvn_len, LITERAL_HASHCOUNT, STRLEN(LITERAL_HASHCOUNT), *trigger_count,
			result);
		assert(PUT_SUCCESS == result);		/* Size of count can only get shorter or stay the same */
	}
	trigger_incr_cycle(trigvn, trigvn_len);
	return PUT_SUCCESS;
}
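A small worked illustration (not from the original source) of the index shift trigger_delete() performs when the deleted trigger is not the last one; the global name MYGBL and the counts are hypothetical:
/* Hypothetical example: ^MYGBL has 4 triggers (#COUNT=4) and trigger slot 2 is deleted.
 *	1. kill ^#t("MYGBL",2)                 - the target slot (killed before the shift)
 *	2. copy every ^#t("MYGBL",4,<sub>) node to ^#t("MYGBL",2,<sub>)
 *	3. kill ^#t("MYGBL",4)                 - the old last slot
 *	4. set ^#t("MYGBL","#COUNT")=3         - indices stay dense, 1..#COUNT
 * The ^#t("#TNAME",...) and ^#t("#TRHASH",...) entries of the shifted trigger are then
 * repointed to index 2 via update_trigger_name_value() and update_trigger_hash_value().
 */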
Example #7
boolean_t trigger_delete_name(char *trigger_name, uint4 trigger_name_len, uint4 *trig_stats)
{
	sgmnt_addrs		*csa;
	char			curr_name[MAX_MIDENT_LEN + 1];
	uint4			curr_name_len, orig_name_len;
	mstr			gbl_name;
	mname_entry		gvent;
	gv_namehead		*hasht_tree;
	int			len;
	mval			mv_curr_nam;
	boolean_t		name_found;
	char			*ptr;
	char			*name_tail_ptr;
	char			save_currkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key			*save_gv_currkey;
	gd_region		*save_gv_cur_region;
	gv_namehead		*save_gv_target;
	char			save_name[MAX_MIDENT_LEN + 1];
	sgm_info		*save_sgm_info_ptr;
	mval			trig_gbl;
	mval			trig_value;
	mval			trigger_count;
	char			trigvn[MAX_MIDENT_LEN + 1];
	int			trigvn_len;
	int			trig_indx;
	int			badpos;
	boolean_t		wildcard;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	badpos = 0;
	orig_name_len = trigger_name_len;
	if ((0 == trigger_name_len) || (trigger_name_len !=
			(badpos = validate_input_trigger_name(trigger_name, trigger_name_len, &wildcard))))
	{	/* is the input name valid */
		CONV_STR_AND_PRINT("Invalid trigger NAME string: ", orig_name_len, trigger_name);
		/* badpos is the string position where the bad character was found, pretty print it */
		return TRIG_FAILURE;
	}
	name_tail_ptr = trigger_name + trigger_name_len - 1;
	if (TRIGNAME_SEQ_DELIM == *name_tail_ptr || wildcard )
		/* drop the trailing # sign or wildcard */
		trigger_name_len--;
	/* $data(^#t) */
	SWITCH_TO_DEFAULT_REGION;
	INITIAL_HASHT_ROOT_SEARCH_IF_NEEDED;
	if (0 == gv_target->root)
	{
		util_out_print_gtmio("Trigger named !AD does not exist", FLUSH, orig_name_len, trigger_name);
		return TRIG_FAILURE;
	}
	name_found = FALSE;
	assert(trigger_name_len < MAX_MIDENT_LEN);
	memcpy(save_name, trigger_name, trigger_name_len);
	save_name[trigger_name_len] = '\0';
	memcpy(curr_name, save_name, trigger_name_len);
	curr_name_len = trigger_name_len;
	STR2MVAL(mv_curr_nam, trigger_name, trigger_name_len);
	do {
		/* GVN = $get(^#t("#TNAME",curr_name) */
		BUILD_HASHT_SUB_SUB_CURRKEY(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME), curr_name, curr_name_len);
		if (gvcst_get(&trig_gbl))
		{
			SAVE_TRIGGER_REGION_INFO;
			ptr = trig_gbl.str.addr;
			trigvn_len = STRLEN(trig_gbl.str.addr);
			assert(MAX_MIDENT_LEN >= trigvn_len);
			memcpy(trigvn, ptr, trigvn_len);
			ptr += trigvn_len + 1;
			/* the index is just beyond the length of the GVN string */
			A2I(ptr, trig_gbl.str.addr + trig_gbl.str.len, trig_indx);
			gbl_name.addr = trigvn;
			gbl_name.len = trigvn_len;
			GV_BIND_NAME_ONLY(gd_header, &gbl_name);
			csa = gv_target->gd_csa;
			SETUP_TRIGGER_GLOBAL;
			INITIAL_HASHT_ROOT_SEARCH_IF_NEEDED;
			/* $get(^#t(GVN,"COUNT") */
			BUILD_HASHT_SUB_SUB_CURRKEY(trigvn, trigvn_len, LITERAL_HASHCOUNT, STRLEN(LITERAL_HASHCOUNT));
			/* if it does not exist, return false */
			if (!gvcst_get(&trigger_count))
			{
				util_out_print_gtmio("Trigger named !AD exists in the lookup table, "
						"but global ^!AD has no triggers",
						FLUSH, curr_name_len, curr_name, trigvn_len, trigvn);
				return TRIG_FAILURE;
			}
			/* kill the target trigger for GVN at index trig_indx */
			if (PUT_SUCCESS != (trigger_delete(trigvn, trigvn_len, &trigger_count, trig_indx)))
			{
				util_out_print_gtmio("Trigger named !AD exists in the lookup table, but was not deleted!",
						FLUSH, orig_name_len, trigger_name);
			} else
			{
				csa->incr_db_trigger_cycle = TRUE;
				if (dollar_ztrigger_invoked)
				{	/* increment db_dztrigger_cycle so that next gvcst_put/gvcst_kill in this transaction,
					 * on this region, will re-read triggers. See trigger_update.c for a comment on why
					 * it is okay for db_dztrigger_cycle to be incremented more than once in the same
					 * transaction
					 */
					csa->db_dztrigger_cycle++;
				}
				trig_stats[STATS_DELETED]++;
				if (0 == trig_stats[STATS_ERROR])
					util_out_print_gtmio("Deleted trigger named '!AD' for global ^!AD",
							FLUSH, curr_name_len, curr_name, trigvn_len, trigvn);
			}
			RESTORE_TRIGGER_REGION_INFO;
			name_found = TRUE;
		} else
		{ /* no names match, if !wildcard report an error */
			if (!wildcard)
			{
				util_out_print_gtmio("Trigger named !AD does not exist",
						FLUSH, orig_name_len, trigger_name);
				return TRIG_FAILURE;
			}
		}
		if (!wildcard)
			/* not a wild card, don't $order for the next match */
			break;
		op_gvorder(&mv_curr_nam);
		if (0 == mv_curr_nam.str.len)
			break;
		assert(mv_curr_nam.str.len < MAX_MIDENT_LEN);
		memcpy(curr_name, mv_curr_nam.str.addr, mv_curr_nam.str.len);
		curr_name_len = mv_curr_nam.str.len;
		if (0 != memcmp(curr_name, save_name, trigger_name_len))
			/* stop when op_gvorder returns a string that no longer starts with save_name */
			break;
	} while (wildcard);
	if (name_found)
		return TRIG_SUCCESS;
	util_out_print_gtmio("Trigger named !AD does not exist", FLUSH, orig_name_len, trigger_name);
	return TRIG_FAILURE;
}
Example #8
STATICFNDEF int4 update_trigger_hash_value(char *trigvn, int trigvn_len, char **values, uint4 *value_len, stringkey *set_hash,
					   stringkey *kill_hash, int old_trig_index, int new_trig_index)
{
	sgmnt_addrs		*csa;
	int			hash_index;
	mval			key_val;
	uint4			len;
	mval			mv_hash;
	mval			mv_hash_indx;
	int			num_len;
	char			*ptr;
	int4			result;
	char			save_currkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key			*save_gv_currkey;
	gd_region		*save_gv_cur_region;
	gv_namehead		*save_gv_target;
	sgm_info		*save_sgm_info_ptr;
	char			tmp_str[MAX_MIDENT_LEN + 1 + MAX_DIGITS_IN_INT];
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	SAVE_TRIGGER_REGION_INFO;
	SWITCH_TO_DEFAULT_REGION;
	assert(0 != gv_target->root);
	if (NULL != strchr(values[CMD_SUB], 'S'))
	{
		if (search_trigger_hash(trigvn, trigvn_len, set_hash, old_trig_index, &hash_index))
		{
			MV_FORCE_UMVAL(&mv_hash, set_hash->hash_code);
			MV_FORCE_MVAL(&mv_hash_indx, hash_index);
			BUILD_HASHT_SUB_MSUB_MSUB_CURRKEY(LITERAL_HASHTRHASH, STRLEN(LITERAL_HASHTRHASH), mv_hash, mv_hash_indx);
			if (!gvcst_get(&key_val))
			{	/* There has to be a #TRHASH entry */
				assert(FALSE);
				rts_error(VARLSTCNT(8) ERR_TRIGDEFBAD, 6, trigvn_len, trigvn, LEN_AND_LIT("\"#TRHASH\""),
					set_hash->str.len, set_hash->str.addr);
			}
			assert((MAX_MIDENT_LEN + 1 + MAX_DIGITS_IN_INT) >= key_val.str.len);
			len = STRLEN(key_val.str.addr);
			memcpy(tmp_str, key_val.str.addr, len);
			ptr = tmp_str + len;
			*ptr++ = '\0';
			num_len = 0;
			I2A(ptr, num_len, new_trig_index);
			len += num_len + 1;
			SET_TRIGGER_GLOBAL_SUB_MSUB_MSUB_STR(LITERAL_HASHTRHASH, STRLEN(LITERAL_HASHTRHASH), mv_hash, mv_hash_indx,
				tmp_str, len, result);
			if (PUT_SUCCESS != result)
			{
				RESTORE_TRIGGER_REGION_INFO;
				return result;
			}
		} else
		{	/* There has to be an entry with the old hash value, we just looked it up */
			assert(FALSE);
			rts_error(VARLSTCNT(8) ERR_TRIGDEFBAD, 6, trigvn_len, trigvn, LEN_AND_LIT("\"#TRHASH\""),
				set_hash->str.len, set_hash->str.addr);
		}
	}
	if (search_trigger_hash(trigvn, trigvn_len, kill_hash, old_trig_index, &hash_index))
	{
		MV_FORCE_UMVAL(&mv_hash, kill_hash->hash_code);
		MV_FORCE_MVAL(&mv_hash_indx, hash_index);
		BUILD_HASHT_SUB_MSUB_MSUB_CURRKEY(LITERAL_HASHTRHASH, STRLEN(LITERAL_HASHTRHASH), mv_hash, mv_hash_indx);
		if (!gvcst_get(&key_val))
		{	/* There has to be a #TRHASH entry */
			assert(FALSE);
			rts_error(VARLSTCNT(8) ERR_TRIGDEFBAD, 6, trigvn_len, trigvn, LEN_AND_LIT("\"#TRHASH\""),
				kill_hash->str.len, kill_hash->str.addr);
		}
		assert((MAX_MIDENT_LEN + 1 + MAX_DIGITS_IN_INT) >= key_val.str.len);
		len = STRLEN(key_val.str.addr);
		memcpy(tmp_str, key_val.str.addr, len);
		ptr = tmp_str + len;
		*ptr++ = '\0';
		num_len = 0;
		I2A(ptr, num_len, new_trig_index);
		len += num_len + 1;
		SET_TRIGGER_GLOBAL_SUB_MSUB_MSUB_STR(LITERAL_HASHTRHASH, STRLEN(LITERAL_HASHTRHASH), mv_hash, mv_hash_indx,
			tmp_str, len, result);
		if (PUT_SUCCESS != result)
		{
			RESTORE_TRIGGER_REGION_INFO;
			return result;
		}
	} else
	{	/* There has to be an entry with the old hash value, we just looked it up */
		assert(FALSE);
		rts_error(VARLSTCNT(8) ERR_TRIGDEFBAD, 6, trigvn_len, trigvn, LEN_AND_LIT("\"#TRHASH\""), kill_hash->str.len,
			kill_hash->str.addr);
	}
	RESTORE_TRIGGER_REGION_INFO;
	return PUT_SUCCESS;
}
Example #9
STATICFNDEF void cleanup_trigger_name(char *trigvn, int trigvn_len, char *trigger_name, int trigger_name_len)
{
	sgmnt_addrs		*csa;
	mname_entry		gvent;
	gv_namehead		*hasht_tree;
	int4			result;
	char			save_currkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	char			save_altkey[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key			*save_gv_altkey;
	gv_key			*save_gv_currkey;
	gd_region		*save_gv_cur_region;
	gv_namehead		*save_gv_target;
	gv_namehead		*save_gvtarget;
	sgm_info		*save_sgm_info_ptr;
	char			trunc_name[MAX_TRIGNAME_LEN + 1];
	uint4			used_trigvn_len;
	mval			val;
	mval			*val_ptr;
	char			val_str[MAX_DIGITS_IN_INT + 1];
	int			var_count;
	boolean_t		is_auto_name;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	/* assume user defined name or auto gen name whose GVN < 21 chars */
	is_auto_name = FALSE;
	if (!trigger_user_name(trigger_name, trigger_name_len))
	{	/* auto gen name uses #TNCOUNT and #SEQNO under #TNAME */
		is_auto_name = TRUE;
		used_trigvn_len = MIN(trigvn_len, MAX_AUTO_TRIGNAME_LEN);
		memcpy(trunc_name, trigvn, used_trigvn_len);
	}
	SAVE_TRIGGER_REGION_INFO;
	SWITCH_TO_DEFAULT_REGION;
	if (0 != gv_target->root)
	{
		if (is_auto_name)
		{
			/* $get(^#t("#TNAME",<trunc_name>,"#TNCOUNT")) */
			BUILD_HASHT_SUB_SUB_SUB_CURRKEY(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME), trunc_name,
							used_trigvn_len, LITERAL_HASHTNCOUNT, STRLEN(LITERAL_HASHTNCOUNT));
			if (gvcst_get(&val))
			{	 /* only long autogenerated names have a #TNCOUNT entry */
				val_ptr = &val;
				var_count = MV_FORCE_INT(val_ptr);
				if (1 == var_count)
				{
					/* kill ^#t("#TNAME",<trunc_name>) to kill #TNCOUNT and #SEQNO */
					BUILD_HASHT_SUB_SUB_CURRKEY(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME), trunc_name,
								    used_trigvn_len);
					gvcst_kill(TRUE);
				} else
				{
					var_count--;
					MV_FORCE_MVAL(&val, var_count);
					/* set ^#t("#TNAME",GVN,"#TNCOUNT")=var_count */
					SET_TRIGGER_GLOBAL_SUB_SUB_SUB_MVAL(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME),
									trunc_name, used_trigvn_len, LITERAL_HASHTNCOUNT,
									STRLEN(LITERAL_HASHTNCOUNT), val, result);
					assert(PUT_SUCCESS == result);		/* The count size can only decrease */
				}
			}
		}
		/* kill ^#t("#TNAME",<trigger_name>,:) or zkill ^#t("#TNAME",<trigger_name>) if is_auto_name==FALSE */
		BUILD_HASHT_SUB_SUB_CURRKEY(LITERAL_HASHTNAME, STRLEN(LITERAL_HASHTNAME), trigger_name,
					    trigger_name_len - 1);
		gvcst_kill(is_auto_name);
	}
	RESTORE_TRIGGER_REGION_INFO;
}
Example #10
/*
 * -----------------------------------------------
 * gvn2gds()
 * Converts a global variable name (GVN) into its internal database representation (GDS)
 *
 * Arguments:
 *	gvn	      - Pointer to source name string mval. Must be in GVN form.
 *	gvkey	      - Pointer to a gv_key buffer large enough to fit the whole GDS form of the passed-in GVN.
 *	act	      - Collation (alternate collation type) number.
 * Return:
 *	unsigned char - Pointer to the end of the GDS key written to gvkey.
 * -----------------------------------------------
 */
unsigned char *gvn2gds(mval *gvn, gv_key *gvkey, int act)
{
	boolean_t	est_first_pass, retn;
	collseq 	*csp;
	gd_region	tmpreg;
	gv_namehead	temp_gv_target;
	unsigned char 	*key, *key_top, *key_start;
	int		subscript, i, contains_env;
	int		*start, *stop;
	gv_name_and_subscripts 	start_buff, stop_buff;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	/* determine which buffer to use */
	DETERMINE_BUFFER(gvn, start_buff, stop_buff, start, stop);
	if (0 != act)
	{
		csp = ready_collseq(act);
		if (NULL == csp)
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(3) ERR_COLLATIONUNDEF, 1, act);
	} else
		csp = NULL;	/* Do not issue COLLATIONUNDEF for 0 collation */
	retn = TRUE;
	assert(MV_IS_STRING(gvn));
	key_start = &gvkey->base[0];
	key = key_start;
	gvkey->prev = 0;
	gvkey->top = DBKEYSIZE(MAX_KEY_SZ);
	key_top = key_start + gvkey->top;
	/* We will parse all of the components up front. */
	if (!parse_gv_name_and_subscripts(gvn, &subscript, start, stop, &contains_env))
		NOCANONICNAME_ERROR(gvn);
	if (stop[contains_env] - start[contains_env] > gvkey->top)
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_GVSUBOFLOW);
	memcpy(key, gvn->str.addr + start[contains_env], stop[contains_env] - start[contains_env]);
	key += stop[contains_env] - start[contains_env];
	*key++ = KEY_DELIMITER;
	gvkey->end = key - key_start;
	/* Temporarily repoint global variables "gv_cur_region", "gv_target" and "transform".
	 * They are needed by mval2subsc for the following
	 *	"transform", "gv_target->nct", "gv_target->collseq" and "gv_cur_region->std_null_coll"
	 * Note that transform is usually ON, specifying that collation transformation is "enabled",
	 * and is only shut off for minor periods when something is being critically formatted (like
	 * we're doing here). Note that mval2subsc could issue an rts_error, so we establish a
	 * condition handler to restore the above.
	 */
	save_transform = TREF(transform);
	assert(save_transform);
	TREF(transform) = TRUE;
	reset_gv_target = gv_target;
	gv_target = &temp_gv_target;
	memset(gv_target, 0, SIZEOF(gv_namehead));
	gv_target->collseq = csp;
	memset(&tmpreg, 0, SIZEOF(gd_region));
	/* Assign "gv_cur_region" only after tmpreg has been fully initialized or timer interrupts can look at inconsistent copy */
	save_gv_cur_region = gv_cur_region;
	gv_cur_region = &tmpreg;
	gv_cur_region->std_null_coll = TRUE;
	ESTABLISH_NORET(gvn2gds_ch, est_first_pass);
	/* we know the number of subscripts, so we convert them all */
	for (i = 1 + contains_env; i <= contains_env + subscript; ++i)
	{
		if (!(retn = convert_key_to_db(gvn, start[i], stop[i], gvkey, &key)))
			break;
	}
	REVERT;
	RESTORE_GBL_VARS_BEFORE_FUN_RETURN;
	if (!retn || !CAN_APPEND_HIDDEN_SUBS(gvkey))
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_GVSUBOFLOW);
	*key++ = KEY_DELIMITER;	/* add double terminating null byte */
	assert(key <= key_top);
	return key;
}
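A minimal, hypothetical caller sketch (not part of the original source) showing the calling convention documented above; the GVN literal, the stack buffer layout, and the use of collation 0 (no alternate collation) are assumptions:
STATICFNDEF void example_gvn2gds_call(void)	/* hypothetical helper, for illustration only */
{
	char		keybuff[SIZEOF(gv_key) + DBKEYSIZE(MAX_KEY_SZ)];
	gv_key		*gvkey = (gv_key *)&keybuff[0];
	mval		gvn;
	unsigned char	*key_end;

	gvn.mvtype = MV_STRING;
	gvn.str.addr = "^MYGBL(\"sub1\",3)";	/* assumed-valid GVN form */
	gvn.str.len = STRLEN(gvn.str.addr);
	key_end = gvn2gds(&gvn, gvkey, 0);	/* act = 0 requests the default collation */
	/* gvkey->base[0] through key_end now holds the GDS key image, double-null terminated */
	assert(key_end > &gvkey->base[0]);
}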
Example #11
/******************************************************************************************
Input Parameters:
	level: level of working block
	dest_blk_id: last destination used for swap
Output Parameters:
	kill_set_ptr: Kill set to be freed
	*exclude_glist_ptr: List of globals not to be moved for a swap destination
Input/Output Parameters:
	gv_target : as the working block's history
	reorg_gv_target->hist : as the destination block's history
 ******************************************************************************************/
enum cdb_sc mu_swap_blk(int level, block_id *pdest_blk_id, kill_set *kill_set_ptr, glist *exclude_glist_ptr)
{
	unsigned char		x_blk_lmap;
	unsigned short		temp_ushort;
	int			rec_size1, rec_size2;
	int			wlevel, nslevel, dest_blk_level;
	int			piece_len1, piece_len2, first_offset, second_offset,
				work_blk_size, work_parent_size, dest_blk_size, dest_parent_size;
	int			dest_child_cycle;
	int			blk_seg_cnt, blk_size;
	trans_num		ctn;
	int			key_len, key_len_dir;
	block_id		dest_blk_id, work_blk_id, child1, child2;
	enum cdb_sc		status;
	srch_hist 		*dest_hist_ptr, *dir_hist_ptr;
	cache_rec_ptr_t		dest_child_cr;
	blk_segment		*bs1, *bs_ptr;
	sm_uc_ptr_t		saved_blk, work_blk_ptr, work_parent_ptr, dest_parent_ptr, dest_blk_ptr,
				bn_ptr, bmp_buff, tblk_ptr, rec_base, rPtr1;
	boolean_t		gbl_target_was_set, blk_was_free, deleted;
	gv_namehead		*save_targ;
	srch_blk_status		bmlhist, destblkhist, *hist_ptr;
	unsigned char    	save_cw_set_depth;
	cw_set_element		*tmpcse;
	jnl_buffer_ptr_t	jbbp; /* jbbp is non-NULL only if before-image journaling */
	unsigned int		bsiz;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	dest_blk_id = *pdest_blk_id;
	CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
	if (NULL == TREF(gv_reorgkey))
		GVKEY_INIT(TREF(gv_reorgkey), DBKEYSIZE(MAX_KEY_SZ));
	dest_hist_ptr = &(reorg_gv_target->hist);
	dir_hist_ptr = reorg_gv_target->alt_hist;
	blk_size = cs_data->blk_size;
	work_parent_ptr = gv_target->hist.h[level+1].buffaddr;
	work_parent_size = ((blk_hdr_ptr_t)work_parent_ptr)->bsiz;
	work_blk_ptr = gv_target->hist.h[level].buffaddr;
	work_blk_size = ((blk_hdr_ptr_t)work_blk_ptr)->bsiz;
	work_blk_id = gv_target->hist.h[level].blk_num;
	if (blk_size < work_blk_size)
	{
		assert(t_tries < CDB_STAGNATE);
		return cdb_sc_blkmod;
	}
	cws_reorg_remove_index = 0;
	/*===== Infinite loop to find the destination block =====*/
	for ( ; ; )
	{
		blk_was_free = FALSE;
		INCR_BLK_NUM(dest_blk_id);
		/* A pre-order traversal should not cause a child block to go to its parent.
		 * However, in case it does happen (because the organization was already like that, or for any other reason), skip the swap.
		 * If we ever decide to allow such a swap, the code below should be changed to handle that special case.
		 * A grandchild can still go to its grandparent; this is rare and the following code can handle it.
		 */
		if (dest_blk_id == gv_target->hist.h[level+1].blk_num)
			continue;
		if (cs_data->trans_hist.total_blks <= dest_blk_id || dest_blk_id == work_blk_id)
		{
			*pdest_blk_id = dest_blk_id;
			return cdb_sc_oprnotneeded;
		}
		ctn = cs_addrs->ti->curr_tn;
		/* We need to save the block numbers that were NEWLY ADDED (since entering this function "mu_swap_blk")
		 * through the CWS_INSERT macro (in db_csh_get/db_csh_getn which can be called by t_qread or gvcst_search below).
		 * This is so that we can delete these blocks from the "cw_stagnate" hashtable in case we determine the need to
		 * choose a different "dest_blk_id" in this for loop (i.e. come to the next iteration). If these blocks are not
		 * deleted, then the hashtable will keep growing (a good example will be if -EXCLUDE qualifier is specified and
		 * a lot of prospective dest_blk_ids get skipped because they contain EXCLUDEd global variables) and very soon
		 * the hashtable will contain more entries than there are global buffers and at that point db_csh_getn will not
		 * be able to get a free global buffer for a new block (since it checks the "cw_stagnate" hashtable before reusing
		 * a buffer in case of MUPIP REORG). To delete these previous iteration blocks, we use the "cws_reorg_remove_array"
		 * variable. This array should have enough entries to accommodate the maximum number of blocks that can be t_qread
		 * in one iteration down below. And that number is the sum of
		 *	+     MAX_BT_DEPTH : for the t_qread while loop down the tree done below
		 *	+ 2 * MAX_BT_DEPTH : for the two calls to gvcst_search done below
		 *	+ 2                : 1 for the t_qread of dest_blk_id and 1 more for the t_qread of a
		 *			     bitmap block done inside the call to get_lmap below
		 *	= 3 * MAX_BT_DEPTH + 2
		 * To be safe, we give a buffer of MAX_BT_DEPTH elements i.e. (4 * MAX_BT_DEPTH) + 2.
		 * This is defined in the macro CWS_REMOVE_ARRAYSIZE in cws_insert.h
		 */
		/* reset whatever blocks the previous iteration of this for loop had filled in the cw_stagnate hashtable */
		for ( ; cws_reorg_remove_index > 0; cws_reorg_remove_index--)
		{
			deleted = delete_hashtab_int4(&cw_stagnate, (uint4 *)&cws_reorg_remove_array[cws_reorg_remove_index]);
			assert(deleted);
		}
		/* Read the corresponding bitmap block before attempting to read the destination block.
		 * If the bitmap indicates the block is free, we will not read the destination block.
		 */
		bmp_buff = get_lmap(dest_blk_id, &x_blk_lmap, (sm_int_ptr_t)&bmlhist.cycle, &bmlhist.cr);
		if (!bmp_buff || BLK_MAPINVALID == x_blk_lmap ||
			((blk_hdr_ptr_t)bmp_buff)->bsiz != BM_SIZE(BLKS_PER_LMAP) ||
			((blk_hdr_ptr_t)bmp_buff)->levl != LCL_MAP_LEVL)
		{
			assert(CDB_STAGNATE > t_tries);
			return cdb_sc_badbitmap;
		}
		if (BLK_FREE != x_blk_lmap)
		{	/* x_blk_lmap is either BLK_BUSY or BLK_RECYCLED. In either case, we need to read destination block
			 * in case we later detect that the before-image needs to be written.
			 */
			if (!(dest_blk_ptr = t_qread(dest_blk_id, (sm_int_ptr_t)&destblkhist.cycle, &destblkhist.cr)))
			{
				assert(t_tries < CDB_STAGNATE);
				return (enum cdb_sc)rdfail_detail;
			}
			destblkhist.blk_num = dest_blk_id;
			destblkhist.buffaddr = dest_blk_ptr;
			destblkhist.level = dest_blk_level = ((blk_hdr_ptr_t)dest_blk_ptr)->levl;
		}
		if (BLK_BUSY != x_blk_lmap)
		{	/* x_blk_map is either BLK_FREE or BLK_RECYCLED both of which mean the block is not used in the bitmap */
			blk_was_free = TRUE;
			break;
		}
		/* dest_blk_id might contain a *-record only.
		 * So follow the pointer to go to the data/index block, which has a non-* key to search.
		 */
		nslevel = dest_blk_level;
		if (MAX_BT_DEPTH <= nslevel)
		{
			assert(CDB_STAGNATE > t_tries);
			return cdb_sc_maxlvl;
		}
		rec_base = dest_blk_ptr + SIZEOF(blk_hdr);
		GET_RSIZ(rec_size1, rec_base);
		tblk_ptr = dest_blk_ptr;
		while ((BSTAR_REC_SIZE == rec_size1) && (0 != nslevel))
		{
			GET_LONG(child1, (rec_base + SIZEOF(rec_hdr)));
			if (0 == child1 || child1 > cs_data->trans_hist.total_blks - 1)
			{
				assert(t_tries < CDB_STAGNATE);
				return cdb_sc_rdfail;
			}
			if (!(tblk_ptr = t_qread(child1, (sm_int_ptr_t)&dest_child_cycle, &dest_child_cr)))
			{
				assert(t_tries < CDB_STAGNATE);
				return (enum cdb_sc)rdfail_detail;
			}
			/* the leaf of a killed GVT can have a block header only; skip those blocks */
			if (SIZEOF(blk_hdr) >= ((blk_hdr_ptr_t)tblk_ptr)->bsiz)
				break;
			nslevel--;
			rec_base = tblk_ptr + SIZEOF(blk_hdr);
			GET_RSIZ(rec_size1, rec_base);
		}
		/* the leaf of a killed GVT can have a block header only; skip those blocks */
		if (SIZEOF(blk_hdr) >= ((blk_hdr_ptr_t)tblk_ptr)->bsiz)
			continue;
		/* get length of global variable name (do not read subscript) for dest_blk_id */
		GET_GBLNAME_LEN(key_len_dir, rec_base + SIZEOF(rec_hdr));
		/* key_len = length of 1st key value (including subscript) for dest_blk_id */
		GET_KEY_LEN(key_len, rec_base + SIZEOF(rec_hdr));
		if ((1 >= key_len_dir || MAX_MIDENT_LEN + 1 < key_len_dir) || (2 >= key_len || MAX_KEY_SZ < key_len))
		{	/* We used to restart here unconditionally, but dest_blk_id can be a block
			 * that was just killed and is still marked busy. Skip it if we are in the last retry.
			 */
			if (CDB_STAGNATE <= t_tries)
				continue;
			else
				return cdb_sc_blkmod;
		}
		memcpy(&((TREF(gv_reorgkey))->base[0]), rec_base + SIZEOF(rec_hdr), key_len_dir);
		(TREF(gv_reorgkey))->base[key_len_dir] = 0;
		(TREF(gv_reorgkey))->end = key_len_dir;
		if (exclude_glist_ptr->next)
		{	/* exclude blocks for globals in the list of EXCLUDE option */
			if  (in_exclude_list(&((TREF(gv_reorgkey))->base[0]), key_len_dir - 1, exclude_glist_ptr))
				continue;
		}
		save_targ = gv_target;
		if (INVALID_GV_TARGET != reset_gv_target)
			gbl_target_was_set = TRUE;
		else
		{
			gbl_target_was_set = FALSE;
			reset_gv_target = save_targ;
		}
		gv_target = reorg_gv_target;
		gv_target->root = cs_addrs->dir_tree->root;
		gv_target->clue.end = 0;
		/* assign Directory tree path to find dest_blk_id in dir_hist_ptr */
		status = gvcst_search(TREF(gv_reorgkey), dir_hist_ptr);
		if (cdb_sc_normal != status)
		{
			assert(t_tries < CDB_STAGNATE);
			RESET_GV_TARGET_LCL_AND_CLR_GBL(save_targ, DO_GVT_GVKEY_CHECK);
			return status;
		}
		if (dir_hist_ptr->h[0].curr_rec.match != (TREF(gv_reorgkey))->end + 1)
		{	/* may be in a kill_set of another process */
			RESET_GV_TARGET_LCL_AND_CLR_GBL(save_targ, DO_GVT_GVKEY_CHECK);
			continue;
		}
		for (wlevel = 0; wlevel <= dir_hist_ptr->depth &&
			dir_hist_ptr->h[wlevel].blk_num != dest_blk_id; wlevel++);
		if (dir_hist_ptr->h[wlevel].blk_num == dest_blk_id)
		{	/* do not swap a dir_tree block */
			RESET_GV_TARGET_LCL_AND_CLR_GBL(save_targ, DO_GVT_GVKEY_CHECK);
			continue;
		}
		/* gv_reorgkey will now have the first key from dest_blk_id,
		 * or, from a descendant of dest_blk_id (in case it had a *-key only).
		 */
		memcpy(&((TREF(gv_reorgkey))->base[0]), rec_base + SIZEOF(rec_hdr), key_len);
		(TREF(gv_reorgkey))->end = key_len - 1;
		GET_KEY_LEN(key_len_dir, dir_hist_ptr->h[0].buffaddr + dir_hist_ptr->h[0].curr_rec.offset + SIZEOF(rec_hdr));
		/* Get root of GVT for dest_blk_id */
		GET_LONG(gv_target->root,
			dir_hist_ptr->h[0].buffaddr + dir_hist_ptr->h[0].curr_rec.offset + SIZEOF(rec_hdr) + key_len_dir);
		if ((0 == gv_target->root) || (gv_target->root > (cs_data->trans_hist.total_blks - 1)))
		{
			assert(t_tries < CDB_STAGNATE);
			RESET_GV_TARGET_LCL_AND_CLR_GBL(save_targ, DO_GVT_GVKEY_CHECK);
			return cdb_sc_blkmod;
		}
		/* Assign Global Variable Tree path to find dest_blk_id in dest_hist_ptr */
		gv_target->clue.end = 0;
		status = gvcst_search(TREF(gv_reorgkey), dest_hist_ptr);
		RESET_GV_TARGET_LCL_AND_CLR_GBL(save_targ, DO_GVT_GVKEY_CHECK);
		if (dest_blk_level >= dest_hist_ptr->depth || /* do not swap in root level */
			dest_hist_ptr->h[dest_blk_level].blk_num != dest_blk_id) /* must be in a kill set of another process. */
			continue;
		if ((cdb_sc_normal != status) || (dest_hist_ptr->h[nslevel].curr_rec.match != ((TREF(gv_reorgkey))->end + 1)))
		{
			assert(t_tries < CDB_STAGNATE);
			return (cdb_sc_normal != status ? status : cdb_sc_blkmod);
		}
		for (wlevel = nslevel; wlevel <= dest_blk_level; wlevel++)
			dest_hist_ptr->h[wlevel].tn = ctn;
		dest_blk_ptr = dest_hist_ptr->h[dest_blk_level].buffaddr;
		dest_blk_size = ((blk_hdr_ptr_t)dest_blk_ptr)->bsiz;
		dest_parent_ptr = dest_hist_ptr->h[dest_blk_level+1].buffaddr;
		dest_parent_size = ((blk_hdr_ptr_t)dest_parent_ptr)->bsiz;
		break;
	}
	/*===== End of infinite loop to find the destination block =====*/
	/*-----------------------------------------------------
	   Now modify blocks for swapping. Maximum of 4 blocks.
	   -----------------------------------------------------*/
	if (!blk_was_free)
	{	/* 1: dest_blk_id into work_blk_id */
		BLK_INIT(bs_ptr, bs1);
		BLK_SEG(bs_ptr, dest_blk_ptr + SIZEOF(blk_hdr), dest_blk_size - SIZEOF(blk_hdr));
		if (!BLK_FINI (bs_ptr,bs1))
		{
			assert(t_tries < CDB_STAGNATE);
			return cdb_sc_blkmod;
		}
		assert(gv_target->hist.h[level].blk_num == work_blk_id);
		assert(gv_target->hist.h[level].buffaddr == work_blk_ptr);
		t_write(&gv_target->hist.h[level], (unsigned char *)bs1, 0, 0, dest_blk_level, TRUE, TRUE, GDS_WRITE_KILLTN);
	}
	/* 2: work_blk_id into dest_blk_id */
	if (!blk_was_free && work_blk_id == dest_hist_ptr->h[dest_blk_level+1].blk_num)
	{	/* work_blk_id will be swapped with its child.
		 * This is the only vertical swap; here the working block goes to its child.
		 * The working block cannot go to its parent because of the traversal order.
		 */
		if (dest_blk_level + 1 != level || dest_parent_size != work_blk_size)
		{
			assert(t_tries < CDB_STAGNATE);
			return cdb_sc_blkmod;
		}
		BLK_INIT(bs_ptr, bs1);
		BLK_ADDR(saved_blk, dest_parent_size, unsigned char);
		memcpy(saved_blk, dest_parent_ptr, dest_parent_size);
		first_offset = dest_hist_ptr->h[dest_blk_level+1].curr_rec.offset;
		GET_RSIZ(rec_size1, saved_blk + first_offset);
		if (work_blk_size < first_offset + rec_size1)
		{
			assert(t_tries < CDB_STAGNATE);
			return cdb_sc_blkmod;
		}
		piece_len1 =  first_offset + rec_size1;
		BLK_SEG(bs_ptr, saved_blk + SIZEOF(blk_hdr), piece_len1 - SIZEOF(block_id) - SIZEOF(blk_hdr));
		BLK_ADDR(bn_ptr, SIZEOF(block_id), unsigned char);
		PUT_LONG(bn_ptr, work_blk_id); /* since work_blk_id will now be the child of dest_blk_id */
		BLK_SEG(bs_ptr, bn_ptr, SIZEOF(block_id));
		BLK_SEG(bs_ptr, saved_blk + piece_len1, dest_parent_size - piece_len1);
		if (!BLK_FINI(bs_ptr, bs1))
		{
			assert(t_tries < CDB_STAGNATE);
			return cdb_sc_blkmod;
		}
		assert(dest_blk_id == dest_hist_ptr->h[dest_blk_level].blk_num);
		assert(dest_blk_ptr == dest_hist_ptr->h[dest_blk_level].buffaddr);
		t_write(&dest_hist_ptr->h[dest_blk_level], (unsigned char *)bs1, 0, 0, level, TRUE, TRUE, GDS_WRITE_KILLTN);
	} else /* free block or, when working block does not move vertically (swap with parent/child) */
	{
		BLK_INIT(bs_ptr, bs1);
		BLK_ADDR(saved_blk, work_blk_size, unsigned char);
		memcpy(saved_blk, work_blk_ptr, work_blk_size);
		BLK_SEG(bs_ptr, saved_blk + SIZEOF(blk_hdr), work_blk_size - SIZEOF(blk_hdr));
		if (!BLK_FINI(bs_ptr, bs1))
		{
			assert(t_tries < CDB_STAGNATE);
			return cdb_sc_blkmod;
		}
		if (blk_was_free)
		{
			tmpcse = &cw_set[cw_set_depth];
			t_create(dest_blk_id, (unsigned char *)bs1, 0, 0, level);
			/* Although we invoked t_create, we do not want t_end to allocate the block (i.e. change mode
			 * from gds_t_create to gds_t_acquired). Instead we do that and a little more (that t_end does) all here.
			 */
			assert(dest_blk_id == tmpcse->blk);
			tmpcse->mode = gds_t_acquired;
			/* If snapshots are in progress, we might want to read the before images of the FREE blocks also.
			 * Since mu_swap_blk mimics a small part of t_end, it sets cse->mode to gds_t_acquired and hence
			 * will not read the before images of the FREE blocks in t_end. To workaround this, set
			 * cse->was_free to TRUE so that in t_end, this condition can be used to read the before images of
			 * the FREE blocks if needed.
			 */
			(BLK_FREE == x_blk_lmap) ? SET_FREE(tmpcse) : SET_NFREE(tmpcse);
			/* No need to write before-image in case the block is FREE. In case the database had never been fully
			 * upgraded from V4 to V5 format (after the MUPIP UPGRADE), all RECYCLED blocks can basically be considered
			 * FREE (i.e. no need to write before-images since backward journal recovery will never be expected
			 * to take the database to a point BEFORE the mupip upgrade).
			 */
			if ((BLK_FREE == x_blk_lmap) || !cs_data->db_got_to_v5_once)
				tmpcse->old_block = NULL;
			else
			{	/* Destination is a recycled block that needs a before image */
				tmpcse->old_block = destblkhist.buffaddr;
				/* Record cr,cycle. This is used later in t_end to determine if checksums need to be recomputed */
				tmpcse->cr = destblkhist.cr;
				tmpcse->cycle = destblkhist.cycle;
				jbbp = (JNL_ENABLED(cs_addrs) && cs_addrs->jnl_before_image) ? cs_addrs->jnl->jnl_buff : NULL;
				if ((NULL != jbbp) && (((blk_hdr_ptr_t)tmpcse->old_block)->tn < jbbp->epoch_tn))
				{	/* Compute CHECKSUM for writing PBLK record before getting crit.
					 * It is possible that we are reading a block that is actually marked free in
					 * the bitmap (due to concurrency issues at this point). Therefore we might be
					 * actually reading uninitialized block headers and in turn a bad value of
					 * "old_block->bsiz". Restart if we ever access a buffer whose size is greater
					 * than the db block size.
					 */
					bsiz = ((blk_hdr_ptr_t)(tmpcse->old_block))->bsiz;
					if (bsiz > blk_size)
					{
						assert(CDB_STAGNATE > t_tries);
						return cdb_sc_lostbmlcr;
					}
					JNL_GET_CHECKSUM_ACQUIRED_BLK(tmpcse, cs_data, cs_addrs, tmpcse->old_block, bsiz);
				}
			}
			assert(GDSVCURR == tmpcse->ondsk_blkver);	/* should have been set by t_create above */
		} else
		{
			hist_ptr = &dest_hist_ptr->h[dest_blk_level];
			assert(dest_blk_id == hist_ptr->blk_num);
			assert(dest_blk_ptr == hist_ptr->buffaddr);
			t_write(hist_ptr, (unsigned char *)bs1, 0, 0, level, TRUE, TRUE, GDS_WRITE_KILLTN);
		}
	}
	if (!blk_was_free)
	{	/* 3: Parent of destination block (may be parent of working block too) */
		if (gv_target->hist.h[level+1].blk_num == dest_hist_ptr->h[dest_blk_level+1].blk_num)
		{	/* dest parent == work_blk parent */
			BLK_INIT(bs_ptr, bs1);
			/* Interchange pointer to dest_blk_id and work_blk_id */
			if (level != dest_blk_level ||
				gv_target->hist.h[level+1].curr_rec.offset == dest_hist_ptr->h[level+1].curr_rec.offset)
			{
				assert(t_tries < CDB_STAGNATE);
				return cdb_sc_blkmod;
			}
			if (gv_target->hist.h[level+1].curr_rec.offset < dest_hist_ptr->h[level+1].curr_rec.offset)
			{
				first_offset = gv_target->hist.h[level+1].curr_rec.offset;
				second_offset = dest_hist_ptr->h[level+1].curr_rec.offset;
			} else
			{
				first_offset = dest_hist_ptr->h[level+1].curr_rec.offset;
				second_offset = gv_target->hist.h[level+1].curr_rec.offset;
			}
			GET_RSIZ(rec_size1, dest_parent_ptr + first_offset);
			GET_RSIZ(rec_size2, dest_parent_ptr + second_offset);
			if (dest_parent_size < first_offset + rec_size1 ||
				dest_parent_size < second_offset + rec_size2 ||
				BSTAR_REC_SIZE >= rec_size1 || BSTAR_REC_SIZE > rec_size2)
			{
				assert(t_tries < CDB_STAGNATE);
				return cdb_sc_blkmod;
			}
			piece_len1 =  first_offset + rec_size1 - SIZEOF(block_id);
			piece_len2 =  second_offset + rec_size2 - SIZEOF(block_id);
			GET_LONG(child1, dest_parent_ptr + piece_len1);
			GET_LONG(child2, dest_parent_ptr + piece_len2);
			BLK_SEG(bs_ptr, dest_parent_ptr + SIZEOF(blk_hdr), piece_len1 - SIZEOF(blk_hdr));
			BLK_ADDR(bn_ptr, SIZEOF(block_id), unsigned char);
			PUT_LONG(bn_ptr, child2);
			BLK_SEG(bs_ptr, bn_ptr, SIZEOF(block_id));
			BLK_SEG(bs_ptr, dest_parent_ptr + first_offset + rec_size1,
				second_offset + rec_size2 - SIZEOF(block_id) - first_offset - rec_size1);
			BLK_ADDR(bn_ptr, SIZEOF(block_id), unsigned char);
			PUT_LONG(bn_ptr, child1);
			BLK_SEG(bs_ptr, bn_ptr, SIZEOF(block_id));
			BLK_SEG(bs_ptr, dest_parent_ptr + second_offset + rec_size2,
				dest_parent_size - second_offset - rec_size2);
			if (!BLK_FINI(bs_ptr,bs1))
			{
				assert(t_tries < CDB_STAGNATE);
				return cdb_sc_blkmod;
			}
			assert(level == dest_blk_level);
			assert(dest_parent_ptr == dest_hist_ptr->h[level+1].buffaddr);
			t_write(&dest_hist_ptr->h[level+1], (unsigned char *)bs1, 0, 0, level+1, FALSE, TRUE, GDS_WRITE_KILLTN);
		} else if (work_blk_id != dest_hist_ptr->h[dest_blk_level+1].blk_num)
		{	/* Destination block moved in the position of working block.
			 * So destination block's parent's pointer should be changed to work_blk_id
			 */
			BLK_INIT(bs_ptr, bs1);
			GET_RSIZ(rec_size1, dest_parent_ptr + dest_hist_ptr->h[dest_blk_level+1].curr_rec.offset);
			if (dest_parent_size < rec_size1 +  dest_hist_ptr->h[dest_blk_level+1].curr_rec.offset ||
				BSTAR_REC_SIZE > rec_size1)
			{
				assert(t_tries < CDB_STAGNATE);
				return cdb_sc_blkmod;
			}
			BLK_SEG (bs_ptr, dest_parent_ptr + SIZEOF(blk_hdr),
			    dest_hist_ptr->h[dest_blk_level+1].curr_rec.offset + rec_size1 - SIZEOF(blk_hdr) - SIZEOF(block_id));
			BLK_ADDR(bn_ptr, SIZEOF(block_id), unsigned char);
			PUT_LONG(bn_ptr, work_blk_id);
			BLK_SEG(bs_ptr, bn_ptr, SIZEOF(block_id));
			BLK_SEG(bs_ptr, dest_parent_ptr + dest_hist_ptr->h[dest_blk_level+1].curr_rec.offset + rec_size1,
				dest_parent_size - dest_hist_ptr->h[dest_blk_level+1].curr_rec.offset  - rec_size1);
			if (!BLK_FINI(bs_ptr,bs1))
			{
				assert(t_tries < CDB_STAGNATE);
				return cdb_sc_blkmod;
			}
			assert(dest_parent_ptr == dest_hist_ptr->h[dest_blk_level+1].buffaddr);
			t_write(&dest_hist_ptr->h[dest_blk_level+1], (unsigned char *)bs1, 0, 0, dest_blk_level+1,
				FALSE, TRUE, GDS_WRITE_KILLTN);
		}
	}
	/* 4: Parent of working block, if different than destination's parent or, destination was a free block */
	if (blk_was_free || gv_target->hist.h[level+1].blk_num != dest_hist_ptr->h[dest_blk_level+1].blk_num)
	{	/* The parent of the working block should correctly point to the working block, which has now moved to dest_blk_id */
		GET_RSIZ(rec_size1, (work_parent_ptr + gv_target->hist.h[level+1].curr_rec.offset));
		if (work_parent_size < rec_size1 +  gv_target->hist.h[level+1].curr_rec.offset || BSTAR_REC_SIZE > rec_size1)
		{
			assert(t_tries < CDB_STAGNATE);
			return cdb_sc_blkmod;
		}
		BLK_INIT(bs_ptr, bs1);
		BLK_SEG(bs_ptr, work_parent_ptr + SIZEOF(blk_hdr),
			gv_target->hist.h[level+1].curr_rec.offset + rec_size1 - SIZEOF(blk_hdr) - SIZEOF(block_id));
		BLK_ADDR(bn_ptr, SIZEOF(block_id), unsigned char);
		PUT_LONG(bn_ptr, dest_blk_id);
		BLK_SEG(bs_ptr, bn_ptr, SIZEOF(block_id));
		BLK_SEG(bs_ptr, work_parent_ptr + gv_target->hist.h[level+1].curr_rec.offset + rec_size1,
			work_parent_size - gv_target->hist.h[level+1].curr_rec.offset - rec_size1);
		if (!BLK_FINI(bs_ptr, bs1))
		{
			assert(t_tries < CDB_STAGNATE);
			return cdb_sc_blkmod;
		}
		assert(gv_target->hist.h[level+1].buffaddr == work_parent_ptr);
		t_write(&gv_target->hist.h[level+1], (unsigned char *)bs1, 0, 0, level+1, FALSE, TRUE, GDS_WRITE_KILLTN);
	}
	/* else already taken care of, when dest_blk_id moved */
	if (blk_was_free)
	{	/* A free/recycled block will become a busy block, so the local bitmap must be updated.
		 * The local bitmap block is added to the update array for the concurrency check, and
		 * a cw_set element is created for that bitmap update (the free/recycled block becomes BLK_USED).
		 * kill_set_ptr saves the working block, which will become free.
		 */
		child1 = ROUND_DOWN2(dest_blk_id, BLKS_PER_LMAP); /* bit map block */
		bmlhist.buffaddr = bmp_buff;
		bmlhist.blk_num = child1;
		child1 = dest_blk_id - child1;
		assert(child1);
		PUT_LONG(update_array_ptr, child1);
		/* Need to put bit maps on the end of the cw set for concurrency checking.
		 * We want to simulate t_write_map, except we want to update "cw_map_depth" instead of "cw_set_depth".
		 * Hence the save and restore logic (for "cw_set_depth") below.
		 */
		save_cw_set_depth = cw_set_depth;
		assert(!cw_map_depth);
		t_write_map(&bmlhist, (uchar_ptr_t)update_array_ptr, ctn, 1);	/* will increment cw_set_depth */
		cw_map_depth = cw_set_depth;		/* set cw_map_depth to the latest cw_set_depth */
		cw_set_depth = save_cw_set_depth;	/* restore cw_set_depth */
		/* t_write_map simulation end */
		update_array_ptr += SIZEOF(block_id);
		child1 = 0;
		PUT_LONG(update_array_ptr, child1);
		update_array_ptr += SIZEOF(block_id);
		assert(1 == cw_set[cw_map_depth - 1].reference_cnt);	/* 1 free block is now becoming BLK_USED in the bitmap */
		/* working block will be removed */
		kill_set_ptr->blk[kill_set_ptr->used].flag = 0;
		kill_set_ptr->blk[kill_set_ptr->used].level = 0;
		kill_set_ptr->blk[kill_set_ptr->used++].block = work_blk_id;
	}
	*pdest_blk_id = dest_blk_id;
	return cdb_sc_normal;
}
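A rough, hypothetical driver sketch (not the actual MUPIP REORG caller): it only illustrates that *pdest_blk_id is a cursor carried across calls, so the search for the next destination block resumes where the previous call stopped, and that cdb_sc_oprnotneeded means no further destination is available. It assumes gv_target->hist already holds the working block's path (see the parameter description above) and that the usual t_begin/t_retry transaction framework surrounds it.
STATICFNDEF void example_mu_swap_blk_call(int level)	/* hypothetical helper, for illustration only */
{
	block_id	dest_blk_id = 0;	/* cursor; advanced by mu_swap_blk on every call */
	kill_set	kill_set_list;		/* receives the working block, which becomes free */
	glist		exclude_glist;		/* empty -EXCLUDE list for this sketch */
	enum cdb_sc	status;

	memset(&kill_set_list, 0, SIZEOF(kill_set));
	memset(&exclude_glist, 0, SIZEOF(glist));
	status = mu_swap_blk(level, &dest_blk_id, &kill_set_list, &exclude_glist);
	if (cdb_sc_oprnotneeded == status)
		return;		/* no usable destination block at or beyond dest_blk_id */
	if (cdb_sc_normal != status)
		return;		/* the real caller would typically do t_retry(status) and restart */
	/* on cdb_sc_normal, dest_blk_id identifies the block the working block was swapped into */
}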
Example #12
void	mu_reorg_upgrd_dwngrd(void)
{
	blk_hdr			new_hdr;
	blk_segment		*bs1, *bs_ptr;
	block_id		*blkid_ptr, curblk, curbmp, start_blk, stop_blk, start_bmp, last_bmp;
	block_id		startblk_input, stopblk_input;
	boolean_t		upgrade, downgrade, safejnl, nosafejnl, region, first_reorg_in_this_db_fmt, reorg_entiredb;
	boolean_t		startblk_specified, stopblk_specified, set_fully_upgraded, db_got_to_v5_once, mark_blk_free;
	cache_rec_ptr_t		cr;
	char			*bml_lcl_buff = NULL, *command, *reorg_command;
	sm_uc_ptr_t		bptr = NULL;
	cw_set_element		*cse;
	enum cdb_sc		cdb_status;
	enum db_ver		new_db_format, ondsk_blkver;
	gd_region		*reg;
	int			cycle;
	int4			blk_seg_cnt, blk_size;	/* needed for BLK_INIT,BLK_SEG and BLK_FINI macros */
	int4			blocks_left, expected_blks2upgrd, actual_blks2upgrd, total_blks, free_blks;
	int4			status, status1, mapsize, lcnt, bml_status;
	reorg_stats_t		reorg_stats;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	sm_uc_ptr_t		blkBase, bml_sm_buff;	/* shared memory pointer to the bitmap global buffer */
	srch_hist		alt_hist;
	srch_blk_status		*blkhist, bmlhist;
	tp_region		*rptr;
	trans_num		curr_tn;
	unsigned char    	save_cw_set_depth;
	uint4			lcl_update_trans;

	region    = (CLI_PRESENT == cli_present("REGION"));
	upgrade   = (CLI_PRESENT == cli_present("UPGRADE"));
	downgrade = (CLI_PRESENT == cli_present("DOWNGRADE"));
	assert(upgrade && !downgrade || !upgrade && downgrade);
	command = upgrade ? "UPGRADE" : "DOWNGRADE";
	reorg_command = upgrade ? "MUPIP REORG UPGRADE" : "MUPIP REORG DOWNGRADE";
	reorg_entiredb = TRUE;	/* unless STARTBLK or STOPBLK is specified we are going to {up,down}grade the entire database */
	startblk_specified = FALSE;
	assert(SIZEOF(block_id) == SIZEOF(uint4));
	if ((CLI_PRESENT == cli_present("STARTBLK")) && (cli_get_hex("STARTBLK", (uint4 *)&startblk_input)))
	{
		reorg_entiredb = FALSE;
		startblk_specified = TRUE;
	}
	stopblk_specified = FALSE;
	assert(SIZEOF(block_id) == SIZEOF(uint4));
	if ((CLI_PRESENT == cli_present("STOPBLK")) && (cli_get_hex("STOPBLK", (uint4 *)&stopblk_input)))
	{
		reorg_entiredb = FALSE;
		stopblk_specified = TRUE;
	}
	mu_reorg_upgrd_dwngrd_in_prog = TRUE;
	mu_reorg_nosafejnl = (CLI_NEGATED == cli_present("SAFEJNL")) ? TRUE : FALSE;

	assert(region);
	status = SS_NORMAL;
	error_mupip = FALSE;
	gvinit();	/* initialize gd_header (needed by the later call to mu_getlst) */
	mu_getlst("REG_NAME", SIZEOF(tp_region));	/* get the parameter corresponding to REGION qualifier */
	if (error_mupip)
	{
		util_out_print("!/MUPIP REORG !AD cannot proceed with above errors!/", TRUE, LEN_AND_STR(command));
		mupip_exit(ERR_MUNOACTION);
	}
	assert(DBKEYSIZE(MAX_KEY_SZ) == gv_keysize);	/* no need to invoke GVKEYSIZE_INIT_IF_NEEDED macro */
	gv_target = targ_alloc(gv_keysize, NULL, NULL);	/* t_begin needs this initialized */
	gv_target_list = NULL;
	memset(&alt_hist, 0, SIZEOF(alt_hist));	/* null-initialize history */
	blkhist = &alt_hist.h[0];
	for (rptr = grlist;  NULL != rptr;  rptr = rptr->fPtr)
	{
		if (mu_ctrly_occurred || mu_ctrlc_occurred)
			break;
		reg = rptr->reg;
		util_out_print("!/Region !AD : MUPIP REORG !AD started", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
		if (reg_cmcheck(reg))
		{
			util_out_print("Region !AD : MUPIP REORG !AD cannot run across network",
				TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			status = ERR_MUNOFINISH;
			continue;
		}
		mu_reorg_process = TRUE;	/* gvcst_init will use this value to use gtm_poollimit settings. */
		gvcst_init(reg);
		mu_reorg_process = FALSE;
		assert(update_array != NULL);
		/* The access method stored in the global directory and the one in the database file header might differ,
		 * in which case the database setting prevails. Therefore, the access method check can be done only after
		 * opening the database (i.e. after the gvcst_init).
		 */
		if (dba_bg != REG_ACC_METH(reg))
		{
			util_out_print("Region !AD : MUPIP REORG !AD cannot continue as access method is not BG",
				TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			status = ERR_MUNOFINISH;
			continue;
		}
		/* The mu_getlst call above uses insert_region to create the grlist, which ensures that duplicate regions mapping to
		 * the same db file correspond to only one grlist entry.
		 */
		assert(FALSE == reg->was_open);
		TP_CHANGE_REG(reg);	/* sets gv_cur_region, cs_addrs, cs_data */
		csa = cs_addrs;
		csd = cs_data;
		blk_size = csd->blk_size;	/* "blk_size" is used by the BLK_FINI macro */
		if (reg->read_only)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(reg));
			status = ERR_MUNOFINISH;
			continue;
		}
		assert(GDSVCURR == GDSV6); /* so we trip this assert in case GDSVCURR changes without a change to this module */
		new_db_format = (upgrade ? GDSV6 : GDSV4);
		grab_crit(reg);
		curr_tn = csd->trans_hist.curr_tn;
		/* set the desired db format in the file header to the appropriate version, increment transaction number */
		status1 = desired_db_format_set(reg, new_db_format, reorg_command);
		assert(csa->now_crit);	/* desired_db_format_set() should not have released crit */
		first_reorg_in_this_db_fmt = TRUE;	/* with the current desired_db_format, this is the first reorg */
		if (SS_NORMAL != status1)
		{	/* "desired_db_format_set" would have printed appropriate error messages */
			if (ERR_MUNOACTION != status1)
			{	/* real error occurred while setting the db format. skip to next region */
				status = ERR_MUNOFINISH;
				rel_crit(reg);
				continue;
			}
			util_out_print("Region !AD : Desired DB Format remains at !AD after !AD", TRUE, REG_LEN_STR(reg),
				LEN_AND_STR(gtm_dbversion_table[new_db_format]), LEN_AND_STR(reorg_command));
			if (csd->reorg_db_fmt_start_tn == csd->desired_db_format_tn)
				first_reorg_in_this_db_fmt = FALSE;
		} else
			util_out_print("Region !AD : Desired DB Format set to !AD by !AD", TRUE, REG_LEN_STR(reg),
				LEN_AND_STR(gtm_dbversion_table[new_db_format]), LEN_AND_STR(reorg_command));
		assert(dba_bg == csd->acc_meth);
		/* Check blks_to_upgrd counter to see if upgrade/downgrade is complete */
		total_blks = csd->trans_hist.total_blks;
		free_blks = csd->trans_hist.free_blocks;
		actual_blks2upgrd = csd->blks_to_upgrd;
		/* If MUPIP REORG UPGRADE and there is no block to upgrade in the database as indicated by BOTH
		 * 	"csd->blks_to_upgrd" and "csd->fully_upgraded", then we can skip processing.
		 * If MUPIP REORG UPGRADE and all non-free blocks need to be upgraded then again we can skip processing.
		 */
		if ((upgrade && (0 == actual_blks2upgrd) && csd->fully_upgraded)
			|| (!upgrade && ((total_blks - free_blks) == actual_blks2upgrd)))
		{
			util_out_print("Region !AD : Blocks to Upgrade counter indicates no action needed for MUPIP REORG !AD",
				       TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			util_out_print("Region !AD : Total Blocks = [0x!XL] : Free Blocks = [0x!XL] : "
				       "Blocks to upgrade = [0x!XL]",
				       TRUE, REG_LEN_STR(reg), total_blks, free_blks, actual_blks2upgrd);
			util_out_print("Region !AD : MUPIP REORG !AD finished!/", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			rel_crit(reg);
			continue;
		}
		stop_blk = total_blks;
		if (stopblk_specified && stopblk_input <= stop_blk)
			stop_blk = stopblk_input;
		if (first_reorg_in_this_db_fmt)
		{	/* Note down reorg start tn (in case we are interrupted, future reorg will know to resume) */
			csd->reorg_db_fmt_start_tn = csd->desired_db_format_tn;
			csd->reorg_upgrd_dwngrd_restart_block = 0;
			start_blk = (startblk_specified ? startblk_input : 0);
		} else
		{	/* Either a concurrent MUPIP REORG of the same type ({up,down}grade) is currently running
			 * or a previously running REORG of the same type was interrupted (Ctrl-Ced).
			 * In either case, resume processing from whatever restart block number is stored in the file
			 * header. The only exception is if "STARTBLK" was specified in the input, in which case use
			 * that value unconditionally.
			 */
			start_blk = (startblk_specified ? startblk_input : csd->reorg_upgrd_dwngrd_restart_block);
		}
		if (start_blk > stop_blk)
			start_blk = stop_blk;
		mu_reorg_upgrd_dwngrd_start_tn = csd->reorg_db_fmt_start_tn;
		/* Before releasing crit, flush the file-header and dirty buffers in cache to disk. This is because we are now
		 * going to read each GDS block directly from disk to determine if it needs to be upgraded/downgraded or not.
		 */
		if (!wcs_flu(WCSFLU_FLUSH_HDR))	/* wcs_flu assumes gv_cur_region is set (which it is in this routine) */
		{
			rel_crit(reg);
			gtm_putmsg_csa(CSA_ARG(csa)
				VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT("MUPIP REORG UPGRADE/DOWNGRADE"), DB_LEN_STR(reg));
			status = ERR_MUNOFINISH;
			continue;
		}
		rel_crit(reg);
		/* Loop through entire database one GDS block at a time and upgrade/downgrade each of them */
		status1 = SS_NORMAL;
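		/* Each local bit map covers BLKS_PER_LMAP blocks; process the bitmaps from the one containing
		 * start_blk through the one containing (stop_blk - 1).
		 */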
		start_bmp = ROUND_DOWN2(start_blk, BLKS_PER_LMAP);
		last_bmp  = ROUND_DOWN2(stop_blk - 1, BLKS_PER_LMAP);
		curblk = start_blk;	/* curblk is the block to be upgraded/downgraded */
		util_out_print("Region !AD : Started processing from block number [0x!XL]", TRUE, REG_LEN_STR(reg), curblk);
		if (NULL != bptr)
		{	/* malloc/free "bptr" for each region as GDS block-size can be different */
			free(bptr);
			bptr = NULL;
		}
		memset(&reorg_stats, 0, SIZEOF(reorg_stats));	/* initialize statistics for this region */
		for (curbmp = start_bmp; curbmp <= last_bmp; curbmp += BLKS_PER_LMAP)
		{
			if (mu_ctrly_occurred || mu_ctrlc_occurred)
			{
				status1 = ERR_MUNOFINISH;
				break;
			}
			/* --------------------------------------------------------------
			 *             Read in current bitmap block
			 * --------------------------------------------------------------
			 */
			assert(!csa->now_crit);
			bml_sm_buff = t_qread(curbmp, (sm_int_ptr_t)&cycle, &cr); /* bring block into the cache outside of crit */
			reorg_stats.blks_read_from_disk_bmp++;
			grab_crit_encr_cycle_sync(reg); /* needed so t_qread does not return NULL below */
			if (mu_reorg_upgrd_dwngrd_start_tn != csd->desired_db_format_tn)
			{	/* csd->desired_db_format changed since reorg started. discontinue the reorg */
				/* see later comment on "csd->reorg_upgrd_dwngrd_restart_block" for why the assignment
				 * of this field should be done only if a db format change did not occur.
				 */
				rel_crit(reg);
				status1 = ERR_MUNOFINISH;
				/* This "start_tn" check is redone after the for-loop and an error message is printed there */
				break;
			} else if (reorg_entiredb)
			{	/* Change "csd->reorg_upgrd_dwngrd_restart_block" only if STARTBLK or STOPBLK was NOT specified */
				assert(csd->reorg_upgrd_dwngrd_restart_block <= MAX(start_blk, curbmp));
				csd->reorg_upgrd_dwngrd_restart_block = curbmp;	/* previous blocks have been upgraded/downgraded */
			}
			/* Check blks_to_upgrd counter to see if upgrade/downgrade is complete.
			 * Repeat check done a few steps earlier outside of this for loop.
			 */
			total_blks = csd->trans_hist.total_blks;
			free_blks = csd->trans_hist.free_blocks;
			actual_blks2upgrd = csd->blks_to_upgrd;
			if ((upgrade && (0 == actual_blks2upgrd) && csd->fully_upgraded)
				|| (!upgrade && ((total_blks - free_blks) == actual_blks2upgrd)))
			{
				rel_crit(reg);
				break;
			}
			bml_sm_buff = t_qread(curbmp, (sm_int_ptr_t)&cycle, &cr); /* now that in crit, note down stable buffer */
			if (NULL == bml_sm_buff)
				rts_error_csa(CSA_ARG(csa) VARLSTCNT(1) ERR_DSEBLKRDFAIL);
			ondsk_blkver = cr->ondsk_blkver;	/* note down db fmt on disk for bitmap block */
			/* Take a copy of the shared memory bitmap buffer into process-private memory before releasing crit.
			 * We are interested in those blocks that are currently marked as USED in the bitmap.
			 * It is possible that once we release crit, concurrent updates change the bitmap state of those blocks.
			 * In that case, those updates will take care of doing the upgrade/downgrade of those blocks in the
			 * format currently set in csd->desired_db_format i.e. accomplishing MUPIP REORG UPGRADE/DOWNGRADE's job.
			 * If the desired_db_format changes concurrently, we will stop doing REORG UPGRADE/DOWNGRADE processing.
			 */
			if (NULL == bml_lcl_buff)
				bml_lcl_buff = malloc(BM_SIZE(BLKS_PER_LMAP));
			memcpy(bml_lcl_buff, (blk_hdr_ptr_t)bml_sm_buff, BM_SIZE(BLKS_PER_LMAP));
			if (FALSE == cert_blk(reg, curbmp, (blk_hdr_ptr_t)bml_lcl_buff, 0, FALSE))
			{	/* certify the block while holding crit as cert_blk uses fields from file-header (shared memory) */
				assert(FALSE);	/* in pro, skip upgrading/downgrading all blks in this unreliable local bitmap */
				rel_crit(reg);
				util_out_print("Region !AD : Bitmap Block [0x!XL] has integrity errors. Skipping this bitmap.",
					TRUE, REG_LEN_STR(reg), curbmp);
				status1 = ERR_MUNOFINISH;
				continue;
			}
			rel_crit(reg);
			/* ------------------------------------------------------------------------
			 *         Upgrade/Downgrade all BUSY blocks in the current bitmap
			 * ------------------------------------------------------------------------
			 */
			curblk = (curbmp == start_bmp) ? start_blk : curbmp;
			mapsize = (curbmp == last_bmp) ? (stop_blk - curbmp) : BLKS_PER_LMAP;
			assert(0 != mapsize);
			assert(mapsize <= BLKS_PER_LMAP);
			db_got_to_v5_once = csd->db_got_to_v5_once;
			for (lcnt = curblk - curbmp; lcnt < mapsize; lcnt++, curblk++)
			{
				if (mu_ctrly_occurred || mu_ctrlc_occurred)
				{
					status1 = ERR_MUNOFINISH;
					goto stop_reorg_on_this_reg;	/* goto needed because of nested FOR Loop */
				}
				GET_BM_STATUS(bml_lcl_buff, lcnt, bml_status);
				assert(BLK_MAPINVALID != bml_status); /* cert_blk ran clean so we don't expect invalid entries */
				if (BLK_FREE == bml_status)
				{
					reorg_stats.blks_skipped_free++;
					continue;
				}
				/* MUPIP REORG UPGRADE/DOWNGRADE will convert USED & RECYCLED blocks */
				if (db_got_to_v5_once || (BLK_RECYCLED != bml_status))
				{	/* Do NOT read recycled V4 block from disk unless it is guaranteed NOT to be too full */
					if (lcnt)
					{	/* non-bitmap block */
						/* read in block from disk into private buffer. don't pollute the cache yet */
						if (NULL == bptr)
							bptr = (sm_uc_ptr_t)malloc(blk_size);
						status1 = dsk_read(curblk, bptr, &ondsk_blkver, FALSE);
						/* dsk_read on curblk could return an error (DYNUPGRDFAIL) if curblk needs to be
						 * upgraded and if its block size was too big to allow the extra block-header space
						 * requirements for a dynamic upgrade. a MUPIP REORG DOWNGRADE should not error out
						 * in that case as the block is already in the downgraded format.
						 */
						if (SS_NORMAL != status1)
						{
							if (!upgrade && (ERR_DYNUPGRDFAIL == status1))
							{
								assert(GDSV4 == new_db_format);
								ondsk_blkver = new_db_format;
							} else
							{
								gtm_putmsg_csa(CSA_ARG(csa)
									VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(reg), status1);
								util_out_print("Region !AD : Error occurred while reading block "
									"[0x!XL]", TRUE, REG_LEN_STR(reg), curblk);
								status1 = ERR_MUNOFINISH;
								goto stop_reorg_on_this_reg;/* goto needed due to nested FOR Loop */
							}
						}
						reorg_stats.blks_read_from_disk_nonbmp++;
					} /* else bitmap block has been read in crit earlier and ondsk_blkver appropriately set */
					if (new_db_format == ondsk_blkver)
					{
						assert((SS_NORMAL == status1) || (!upgrade && (ERR_DYNUPGRDFAIL == status1)));
						status1 = SS_NORMAL;	/* treat DYNUPGRDFAIL as no error in case of downgrade */
						reorg_stats.blks_skipped_newfmtindisk++;
						continue;	/* current disk version is identical to what is desired */
					}
					assert(SS_NORMAL == status1);
				}
				/* Begin non-TP transaction to upgrade/downgrade the block.
				 * The way we do that is by updating the block using a null update array.
				 * Any update to a block will trigger an automatic upgrade/downgrade of the block based on
				 * 	the current fileheader desired_db_format setting and we use that here.
				 */
				t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
				for (; ;)
				{
					CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
					curr_tn = csd->trans_hist.curr_tn;
					db_got_to_v5_once = csd->db_got_to_v5_once;
					if (db_got_to_v5_once || (BLK_RECYCLED != bml_status))
					{
						blkhist->cse = NULL;	/* start afresh (do not use value from previous retry) */
						blkBase = t_qread(curblk, (sm_int_ptr_t)&blkhist->cycle, &blkhist->cr);
						if (NULL == blkBase)
						{
							t_retry((enum cdb_sc)rdfail_detail);
							continue;
						}
						blkhist->blk_num = curblk;
						blkhist->buffaddr = blkBase;
						ondsk_blkver = blkhist->cr->ondsk_blkver;
						new_hdr = *(blk_hdr_ptr_t)blkBase;
						mu_reorg_upgrd_dwngrd_blktn = new_hdr.tn;
						mark_blk_free = FALSE;
						inctn_opcode = upgrade ? inctn_blkupgrd : inctn_blkdwngrd;
					} else
					{
						mark_blk_free = TRUE;
						inctn_opcode = inctn_blkmarkfree;
					}
					inctn_detail.blknum_struct.blknum = curblk;
					/* t_end assumes that the history it is passed does not contain a bitmap block.
					 * for bitmap block, the history validation information is passed through cse instead.
					 * therefore we need to handle bitmap and non-bitmap cases separately.
					 */
					if (!lcnt)
					{	/* Means a bitmap block.
						 * At this point we can do a "new_db_format != ondsk_blkver" check to determine
						 * if the block got converted since we did the dsk_read (see the non-bitmap case
						 * for a similar check done there), but in that case we will have a transaction
						 * which has read 1 bitmap block and is updating no block. "t_end" currently cannot
						 * handle this case as it expects any bitmap block that needs validation to also
						 * have a corresponding cse which will hold its history. Hence we avoid doing the
						 * new_db_format check. The only disadvantage of this is that we will end up
						 * modifying the bitmap block as part of this transaction (in an attempt to convert
						 * its ondsk_blkver) even though it is already in the right format. Since this
						 * overhead is going to be one per bitmap block and since the block is in the cache
						 * at this point, we should not lose much.
						 */
						assert(!mark_blk_free);
						BLK_ADDR(blkid_ptr, SIZEOF(block_id), block_id);
						*blkid_ptr = 0;
						t_write_map(blkhist, (unsigned char *)blkid_ptr, curr_tn, 0);
						assert(&alt_hist.h[0] == blkhist);
						alt_hist.h[0].blk_num = 0; /* create empty history for bitmap block */
						assert(update_trans);
					} else
					{	/* non-bitmap block. fill in history for validation in t_end */
						assert(curblk);	/* we should never come here for block 0 (bitmap) */
						if (!mark_blk_free)
						{
							assert(blkhist->blk_num == curblk);
							assert(blkhist->buffaddr == blkBase);
							blkhist->tn      = curr_tn;
							alt_hist.h[1].blk_num = 0;
						}
						/* Also need to pass the bitmap as history to detect if any concurrent M-kill
						 * is freeing up the same USED block that we are trying to convert OR if any
						 * concurrent M-set is reusing the same RECYCLED block that we are trying to
						 * convert. Because of t_end currently not being able to validate a bitmap
						 * without that simultaneously having a cse, we need to create a cse for the
						 * bitmap that is used only for bitmap history validation, but should not be
						 * used to update the contents of the bitmap block in bg_update.
						 */
						bmlhist.buffaddr = t_qread(curbmp, (sm_int_ptr_t)&bmlhist.cycle, &bmlhist.cr);
						if (NULL == bmlhist.buffaddr)
						{
							t_retry((enum cdb_sc)rdfail_detail);
							continue;
						}
						bmlhist.blk_num = curbmp;
						bmlhist.tn = curr_tn;
						GET_BM_STATUS(bmlhist.buffaddr, lcnt, bml_status);
						if (BLK_MAPINVALID == bml_status)
						{
							t_retry(cdb_sc_lostbmlcr);
							continue;
						}
						if (!mark_blk_free)
						{
							if ((new_db_format != ondsk_blkver) && (BLK_FREE != bml_status))
							{	/* block still needs to be converted. create cse */
								BLK_INIT(bs_ptr, bs1);
								BLK_SEG(bs_ptr, blkBase + SIZEOF(new_hdr),
									new_hdr.bsiz - SIZEOF(new_hdr));
								BLK_FINI(bs_ptr, bs1);
								t_write(blkhist, (unsigned char *)bs1, 0, 0,
									((blk_hdr_ptr_t)blkBase)->levl, FALSE,
									FALSE, GDS_WRITE_PLAIN);
								/* The directory tree status is, for now, only used to determine
								 * whether to write the block to the snapshot file (see
								 * t_end_sysops.c). For the reorg upgrade/downgrade process, the
								 * block is updated sequentially without changing gv_target. In
								 * this case, we assume the block is in the directory tree so as
								 * to have it written to the snapshot file.
								 */
								BIT_SET_DIR_TREE(cw_set[cw_set_depth-1].blk_prior_state);
								/* reset update_trans in case previous retry had set it to 0 */
								update_trans = UPDTRNS_DB_UPDATED_MASK;
								if (BLK_RECYCLED == bml_status)
								{	/* If the block that we are upgrading is RECYCLED, indicate to
									 * bg_update that blks_to_upgrd counter should NOT be
									 * touched in this case by setting "mode" to a special value
									 */
									assert(cw_set[cw_set_depth-1].mode == gds_t_write);
									cw_set[cw_set_depth-1].mode = gds_t_write_recycled;
									/* we SET block as NOT RECYCLED, otherwise, the mm_update()
									 * or bg_update_phase2 may skip writing it to snapshot file
									 * when its level is 0
									 */
									BIT_CLEAR_RECYCLED(cw_set[cw_set_depth-1].blk_prior_state);
								}
							} else
							{	/* Block got converted by another process since we did the dsk_read.
								 * 	or this block became marked free in the bitmap.
								 * No need to update this block. just call t_end for validation of
								 * 	both the non-bitmap block as well as the bitmap block.
								 * Note down that this transaction is no longer updating any blocks.
								 */
								update_trans = 0;
							}
							/* Need to put bit maps on the end of the cw set for concurrency checking.
							 * We want to simulate t_write_map, except we want to update "cw_map_depth"
							 * instead of "cw_set_depth". Hence the save and restore logic below.
							 * This part of the code is similar to the one in mu_swap_blk.c
							 */
							save_cw_set_depth = cw_set_depth;
							assert(!cw_map_depth);
							t_write_map(&bmlhist, NULL, curr_tn, 0); /* will increment cw_set_depth */
							cw_map_depth = cw_set_depth; /* set cw_map_depth to latest cw_set_depth */
							cw_set_depth = save_cw_set_depth;/* restore cw_set_depth */
							/* t_write_map simulation end */
						} else
						{
							if (BLK_RECYCLED != bml_status)
							{	/* Block was RECYCLED at beginning but no longer so. Retry */
								t_retry(cdb_sc_bmlmod);
								continue;
							}
							/* Mark recycled block as FREE in bitmap */
							assert(lcnt == (curblk - curbmp));
							assert(update_array_ptr == update_array);
							*((block_id *)update_array_ptr) = lcnt;
							update_array_ptr += SIZEOF(block_id);
							/* the following assumes SIZEOF(block_id) == SIZEOF(int) */
							assert(SIZEOF(block_id) == SIZEOF(int));
							*(int *)update_array_ptr = 0;
							t_write_map(&bmlhist, (unsigned char *)update_array, curr_tn, 0);
							update_trans = UPDTRNS_DB_UPDATED_MASK;
						}
					}
					assert(SIZEOF(lcl_update_trans) == SIZEOF(update_trans));
					lcl_update_trans = update_trans;	/* take a copy before t_end modifies it */
					if ((trans_num)0 != t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))
					{	/* In case this is MM and t_end() remapped an extended database, reset csd */
						assert(csd == cs_data);
						if (!lcl_update_trans)
						{
							assert(lcnt);
							assert(!mark_blk_free);
							assert((new_db_format == ondsk_blkver) || (BLK_BUSY != bml_status));
							if (BLK_BUSY != bml_status)
								reorg_stats.blks_skipped_free++;
							else
								reorg_stats.blks_skipped_newfmtincache++;
						} else if (!lcnt)
							reorg_stats.blks_converted_bmp++;
						else
							reorg_stats.blks_converted_nonbmp++;
						break;
					}
					assert(csd == cs_data);
				}
			}
		}
	stop_reorg_on_this_reg:
		/* even though ctrl-c occurred, update file-header fields to store reorg's progress before exiting */
		grab_crit(reg);
		blocks_left = 0;
		assert(csd->trans_hist.total_blks >= csd->blks_to_upgrd);
		actual_blks2upgrd = csd->blks_to_upgrd;
		total_blks = csd->trans_hist.total_blks;
		free_blks = csd->trans_hist.free_blocks;
		/* Care should be taken not to set "csd->reorg_upgrd_dwngrd_restart_block" in case of a concurrent db fmt
		 * change. This is because let us say we are doing REORG UPGRADE. A concurrent REORG DOWNGRADE would
		 * have reset "csd->reorg_upgrd_dwngrd_restart_block" field to 0 and if that reorg is interrupted by a
		 * Ctrl-C (before this reorg came here) it would have updated "csd->reorg_upgrd_dwngrd_restart_block" to
		 * a non-zero value indicating how many blocks from 0 have been downgraded. We should not reset this
		 * field to "curblk" as it will be mis-interpreted as the number of blocks that have been DOWNgraded.
		 */
		set_fully_upgraded = FALSE;
		if (mu_reorg_upgrd_dwngrd_start_tn != csd->desired_db_format_tn)
		{	/* csd->desired_db_format changed since reorg started. discontinue the reorg */
			util_out_print("Region !AD : Desired DB Format changed during REORG. Stopping REORG.",
				TRUE, REG_LEN_STR(reg));
			status1 = ERR_MUNOFINISH;
		} else if (reorg_entiredb)
		{	/* Change "csd->reorg_upgrd_dwngrd_restart_block" only if STARTBLK or STOPBLK was NOT specified */
			assert(csd->reorg_upgrd_dwngrd_restart_block <= curblk);
			csd->reorg_upgrd_dwngrd_restart_block = curblk;	/* blocks less than this have been upgraded/downgraded */
			expected_blks2upgrd = upgrade ? 0 : (total_blks - free_blks);
			blocks_left = upgrade ? actual_blks2upgrd : (expected_blks2upgrd - actual_blks2upgrd);
			/* If this reorg command went through all blocks in the database, then it should have
			 * 	correctly concluded at this point whether the reorg is complete or not.
			 * If this reorg command started from where a previous incomplete reorg left
			 *	(i.e. first_reorg_in_this_db_fmt is FALSE), it cannot determine if the initial
			 *	GDS blocks that it skipped are completely {up,down}graded or not.
			 */
			assert((0 == blocks_left) || (SS_NORMAL != status1) || !first_reorg_in_this_db_fmt);
			/* If this is a MUPIP REORG UPGRADE that did go through every block in the database (indicated by
			 * "reorg_entiredb" && "first_reorg_in_this_db_fmt") and the current count of "blks_to_upgrd" is
			 * 0 in the file-header and the desired_db_format did not change since the start of the REORG,
			 * we can be sure that the entire database has been upgraded. Set "csd->fully_upgraded" to TRUE.
			 */
			if ((SS_NORMAL == status1) && first_reorg_in_this_db_fmt && upgrade && (0 == actual_blks2upgrd))
			{
				csd->fully_upgraded = TRUE;
				csd->db_got_to_v5_once = TRUE;
				set_fully_upgraded = TRUE;
			}
			/* flush all changes noted down in the file-header */
			if (!wcs_flu(WCSFLU_FLUSH_HDR))	/* wcs_flu assumes gv_cur_region is set (which it is in this routine) */
			{
				gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4,
					LEN_AND_LIT("MUPIP REORG UPGRADE/DOWNGRADE"), DB_LEN_STR(reg));
				status = ERR_MUNOFINISH;
				rel_crit(reg);
				continue;
			}
		}
		curr_tn = csd->trans_hist.curr_tn;
		rel_crit(reg);
		util_out_print("Region !AD : Stopped processing at block number [0x!XL]", TRUE, REG_LEN_STR(reg), curblk);
		/* Print statistics */
		util_out_print("Region !AD : Statistics : Blocks Read From Disk (Bitmap)     : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_read_from_disk_bmp);
		util_out_print("Region !AD : Statistics : Blocks Skipped (Free)              : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_free);
		util_out_print("Region !AD : Statistics : Blocks Read From Disk (Non-Bitmap) : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_read_from_disk_nonbmp);
		util_out_print("Region !AD : Statistics : Blocks Skipped (new fmt in disk)   : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_newfmtindisk);
		util_out_print("Region !AD : Statistics : Blocks Skipped (new fmt in cache)  : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_newfmtincache);
		util_out_print("Region !AD : Statistics : Blocks Converted (Bitmap)          : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_converted_bmp);
		util_out_print("Region !AD : Statistics : Blocks Converted (Non-Bitmap)      : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_converted_nonbmp);
		if (reorg_entiredb && (SS_NORMAL == status1) && (0 != blocks_left))
		{	/* file-header counter does not match what reorg on the entire database expected to see */
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBBTUWRNG, 2, expected_blks2upgrd, actual_blks2upgrd);
			util_out_print("Region !AD : Run MUPIP INTEG (without FAST qualifier) to fix the counter",
				TRUE, REG_LEN_STR(reg));
			status1 = ERR_MUNOFINISH;
		} else
			util_out_print("Region !AD : Total Blocks = [0x!XL] : Free Blocks = [0x!XL] : "
				       "Blocks to upgrade = [0x!XL]",
				       TRUE, REG_LEN_STR(reg), total_blks, free_blks, actual_blks2upgrd);
		/* Issue success or failure message for this region */
		if (SS_NORMAL == status1)
		{	/* issue success only if REORG did not encounter any error in its processing */
			if (set_fully_upgraded)
				util_out_print("Region !AD : Database is now FULLY UPGRADED", TRUE, REG_LEN_STR(reg));
			util_out_print("Region !AD : MUPIP REORG !AD finished!/", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			send_msg_csa(CSA_ARG(csa) VARLSTCNT(7) ERR_MUREUPDWNGRDEND, 5, REG_LEN_STR(reg),
										process_id, process_id, &curr_tn);
		} else
		{
			assert(ERR_MUNOFINISH == status1);
			assert((SS_NORMAL == status) || (ERR_MUNOFINISH == status));
			util_out_print("Region !AD : MUPIP REORG !AD incomplete. See above messages.!/",
					TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			status = status1;
		}
	}
	if (NULL != bptr)
		free(bptr);
	if (NULL != bml_lcl_buff)
		free(bml_lcl_buff);
	if (mu_ctrly_occurred || mu_ctrlc_occurred)
	{
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_REORGCTRLY);
		status = ERR_MUNOFINISH;
	}
	mupip_exit(status);
}
Example #13
0
short rc_fnd_file(rc_xdsid *xdsid)
{
	gv_namehead	*g;
	short		dsid, node;
	gd_binding	*map;
	char		buff[1024], *cp, *cp1;
	mstr		fpath1, fpath2;
	mval		v;
	int		i, keysize;
	int             len, node2;

	GET_SHORT(dsid, &xdsid->dsid.value);
	GET_SHORT(node, &xdsid->node.value);
	if (!dsid_list)
	{
		/*	open special database, set up entry */
		dsid_list = (rc_dsid_list *)malloc(SIZEOF(rc_dsid_list));
		dsid_list->dsid = RC_NSPACE_DSID;
		dsid_list->next = NULL;
		fpath1.addr = RC_NSPACE_PATH;
		fpath1.len = SIZEOF(RC_NSPACE_PATH);
		if (SS_NORMAL != TRANS_LOG_NAME(&fpath1, &fpath2, buff, SIZEOF(buff), do_sendmsg_on_log2long))
		{
			char msg[256];
			SPRINTF(msg, "Invalid DB filename, \"%s\"", fpath1.addr);
			gtcm_rep_err(msg, errno);
			return RC_BADFILESPEC;
		}
		if (fpath2.len > MAX_FN_LEN)
			return RC_BADFILESPEC;
		dsid_list->fname = (char *)malloc(fpath2.len + 1);
		memcpy(dsid_list->fname, fpath2.addr, fpath2.len);
		*((char*)(dsid_list->fname + fpath2.len)) = 0;
		gv_cur_region = (gd_region *)malloc(SIZEOF(gd_region));
		memset(gv_cur_region, 0, SIZEOF(gd_region));
		gv_cur_region->dyn.addr = (gd_segment *)malloc(SIZEOF(gd_segment));
		memset(gv_cur_region->dyn.addr, 0, SIZEOF(gd_segment));
		memcpy(gv_cur_region->dyn.addr->fname, fpath2.addr, fpath2.len);
		gv_cur_region->dyn.addr->fname_len = fpath2.len;
		gv_cur_region->dyn.addr->acc_meth = dba_bg;
		ESTABLISH_RET(rc_fnd_file_ch1, RC_SUCCESS);
		gvcst_init(gv_cur_region);
		REVERT;
		change_reg();
		/* check to see if this DB has the reserved bytes field set
		 * correctly.  Global pages must always have some extra unused
		 * space left in them (RC_RESERVED bytes) so that the page
		 * will fit into the client buffer when unpacked by the
		 * client.
		 */
		if (cs_data->reserved_bytes < RC_RESERVED)
		{
			OMI_DBG((omi_debug,
			"Unable to access database file:  \"%s\"\nReserved_bytes field in the file header is too small for GT.CM\n",
			fpath2.addr));
			free(dsid_list->fname);
			dsid_list->fname = NULL;
			free(dsid_list);
			dsid_list = NULL;
			free(gv_cur_region->dyn.addr);
			gv_cur_region->dyn.addr = NULL;
			free(gv_cur_region);
			gv_cur_region = NULL;
			return RC_FILEACCESS;
		}
		gv_keysize = DBKEYSIZE(gv_cur_region->max_key_size);
		GVKEY_INIT(gv_currkey, gv_keysize);
		GVKEY_INIT(gv_altkey, gv_keysize);
		cs_addrs->dir_tree = (gv_namehead *)malloc(SIZEOF(gv_namehead) + 2 * SIZEOF(gv_key) + 3 * (gv_keysize - 1));
		g = cs_addrs->dir_tree;
		g->first_rec = (gv_key*)(g->clue.base + gv_keysize);
		g->last_rec = (gv_key*)(g->first_rec->base + gv_keysize);
		g->clue.top = g->last_rec->top = g->first_rec->top = gv_keysize;
		g->clue.prev = g->clue.end = 0;
		g->root = DIR_ROOT;
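		/* Build a minimal global directory for the namespace database: 3 map entries, with the '%' entry
		 * and the terminating entry (name filled with 0xFF bytes) both pointing at the region just opened.
		 */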
		dsid_list->gda = (gd_addr*)malloc(SIZEOF(gd_addr) + 3 * SIZEOF(gd_binding));
		dsid_list->gda->n_maps = 3;
		dsid_list->gda->n_regions = 1;
		dsid_list->gda->n_segments = 1;
		dsid_list->gda->maps = (gd_binding*)((char*)dsid_list->gda + SIZEOF(gd_addr));
		dsid_list->gda->max_rec_size = gv_cur_region->max_rec_size;
		map = dsid_list->gda->maps;
		map ++;
		memset(map->name, 0, SIZEOF(map->name));
		map->name[0] = '%';
		map->reg.addr = gv_cur_region;
		map++;
		map->reg.addr = gv_cur_region;
		memset(map->name, -1, SIZEOF(map->name));
		dsid_list->gda->tab_ptr = (hash_table_mname *)malloc(SIZEOF(hash_table_mname));
		init_hashtab_mname(dsid_list->gda->tab_ptr, 0, HASHTAB_NO_COMPACT, HASHTAB_NO_SPARE_TABLE);
		change_reg();
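		/* Ensure the RC overflow buffer is at least one database block in size for this region, growing it
		 * (and the recorded maximum size) as needed.
		 */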
		if (rc_overflow->top < cs_addrs->hdr->blk_size)
		{
			if (rc_overflow->buff)
				free(rc_overflow->buff);
			rc_overflow->top = cs_addrs->hdr->blk_size;
			rc_overflow->buff = (char*)malloc(rc_overflow->top);
			if (rc_overflow_size < rc_overflow->top)
				rc_overflow_size = rc_overflow->top;
		}
	}
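	/* Look up the requested dsid among the datasets already opened */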
	for (fdi_ptr = dsid_list; fdi_ptr && (fdi_ptr->dsid != dsid); fdi_ptr = fdi_ptr->next)
		;
	if (!fdi_ptr)
	{	/*	need to open new database, add to list, set fdi_ptr */
		gd_header = dsid_list->gda;
		gv_currkey->end = 0;
		v.mvtype = MV_STR;
		v.str.len = RC_NSPACE_GLOB_LEN-1;
		v.str.addr = RC_NSPACE_GLOB;
		GV_BIND_NAME_AND_ROOT_SEARCH(gd_header, &v.str);
		if (!gv_target->root)	/* No namespace global */
			return RC_UNDEFNAMSPC;
		v.mvtype = MV_STR;
		v.str.len = SIZEOF(RC_NSPACE_DSI_SUB)-1;
		v.str.addr = RC_NSPACE_DSI_SUB;
		mval2subsc(&v,gv_currkey);
		node2 = node;
		MV_FORCE_MVAL(&v,node2);
		mval2subsc(&v,gv_currkey);
		i = dsid / 256;
		MV_FORCE_MVAL(&v,i);
		mval2subsc(&v,gv_currkey);
		if (gvcst_get(&v))
			return RC_UNDEFNAMSPC;
		for (cp = v.str.addr, i = 1; i < RC_FILESPEC_PIECE; i++)
			for (; *cp++ != RC_FILESPEC_DELIM; )
				;
		for (cp1 = cp; *cp1++ != RC_FILESPEC_DELIM; )
			;
		cp1--;
		len = (int)(cp1 - cp);
		if (len > MAX_FN_LEN)
			return RC_BADFILESPEC;
		fdi_ptr = (rc_dsid_list *)malloc(SIZEOF(rc_dsid_list));
		fdi_ptr->fname = (char *)malloc(len+1);
		fdi_ptr->dsid = dsid;
		memcpy(fdi_ptr->fname, cp, len);
		*(fdi_ptr->fname + (len)) = 0;
		gv_cur_region = (gd_region *)malloc(SIZEOF(gd_region));
		memset(gv_cur_region, 0, SIZEOF(gd_region));
		gv_cur_region->dyn.addr = (gd_segment *)malloc(SIZEOF(gd_segment));
		memset(gv_cur_region->dyn.addr, 0, SIZEOF(gd_segment));
		memcpy(gv_cur_region->dyn.addr->fname, cp, len);
		gv_cur_region->dyn.addr->fname_len = len;
		gv_cur_region->dyn.addr->acc_meth = dba_bg;
		ESTABLISH_RET(rc_fnd_file_ch2, RC_SUCCESS);
		gvcst_init(gv_cur_region);
		REVERT;
		change_reg();
		/* check to see if this DB has the reserved bytes field set
		 * correctly.  Global pages must always have some extra unused
		 * space left in them (RC_RESERVED bytes) so that the page
		 * will fit into the client buffer when unpacked by the
		 * client.
		 */
		if (cs_data->reserved_bytes < RC_RESERVED)
		{
			OMI_DBG((omi_debug,
			"Unable to access database file:  \"%s\"\nReserved_bytes field in the file header is too small for GT.CM\n",
			fdi_ptr->fname));
			free(dsid_list->fname);
			dsid_list->fname = NULL;
			free(dsid_list);
			dsid_list = NULL;
			free(gv_cur_region->dyn.addr);
			gv_cur_region->dyn.addr = NULL;
			free(gv_cur_region);
			gv_cur_region = NULL;
			return RC_FILEACCESS;
		}
		assert(!cs_addrs->hold_onto_crit);	/* this ensures we can safely do unconditional grab_crit and rel_crit */
		grab_crit(gv_cur_region);
		cs_data->rc_srv_cnt++;
		if (!cs_data->dsid)
		{
			cs_data->dsid = dsid;
			cs_data->rc_node = node;
		} else if (cs_data->dsid != dsid || cs_data->rc_node != node)
		{
			cs_data->rc_srv_cnt--;
			rel_crit(gv_cur_region);
			OMI_DBG((omi_debug, "Dataset ID/RC node mismatch"));
			OMI_DBG((omi_debug, "DB file:  \"%s\"\n", dsid_list->fname));
			OMI_DBG((omi_debug, "Stored DSID:  %d\tRC Node:  %d\n", cs_data->dsid, cs_data->rc_node));
			OMI_DBG((omi_debug, "RC Rq DSID:  %d\tRC Node:  %d\n", dsid,node));
			free(fdi_ptr->fname);
			fdi_ptr->fname = NULL;
			free(fdi_ptr);
			fdi_ptr = NULL;
			free(gv_cur_region->dyn.addr);
			gv_cur_region->dyn.addr = NULL;
			free(gv_cur_region);
			gv_cur_region = NULL;
			return RC_FILEACCESS;
		}
		rel_crit(gv_cur_region);
		keysize = DBKEYSIZE(gv_cur_region->max_key_size);
		GVKEYSIZE_INCREASE_IF_NEEDED(keysize);
		cs_addrs->dir_tree = (gv_namehead *)malloc(SIZEOF(gv_namehead) + 2 * SIZEOF(gv_key) + 3 * (keysize - 1));
		g = cs_addrs->dir_tree;
		g->first_rec = (gv_key*)(g->clue.base + keysize);
		g->last_rec = (gv_key*)(g->first_rec->base + keysize);
		g->clue.top = g->last_rec->top = g->first_rec->top = keysize;
		g->clue.prev = g->clue.end = 0;
		g->root = DIR_ROOT;
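		/* Build the same minimal 3-entry name map for this dataset as was done for the namespace database
		 * above, again binding the '%' and terminating entries to the newly opened region.
		 */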
		fdi_ptr->gda = (gd_addr*)malloc(SIZEOF(gd_addr) + 3 * SIZEOF(gd_binding));
		fdi_ptr->gda->n_maps = 3;
		fdi_ptr->gda->n_regions = 1;
		fdi_ptr->gda->n_segments = 1;
		fdi_ptr->gda->maps = (gd_binding*)((char*)fdi_ptr->gda + SIZEOF(gd_addr));
		fdi_ptr->gda->max_rec_size = gv_cur_region->max_rec_size;
		map = fdi_ptr->gda->maps;
		map ++;
		memset(map->name, 0, SIZEOF(map->name));
		map->name[0] = '%';
		map->reg.addr = gv_cur_region;
		map++;
		map->reg.addr = gv_cur_region;
		memset(map->name, -1, SIZEOF(map->name));
		fdi_ptr->gda->tab_ptr = (hash_table_mname *)malloc(SIZEOF(hash_table_mname));
		init_hashtab_mname(fdi_ptr->gda->tab_ptr, 0, HASHTAB_NO_COMPACT, HASHTAB_NO_SPARE_TABLE);
		fdi_ptr->next = dsid_list->next;
		dsid_list->next = fdi_ptr;
	}
	gv_cur_region = fdi_ptr->gda->maps[1].reg.addr;
	change_reg();
	if (rc_overflow->top < cs_addrs->hdr->blk_size)
	{
		if (rc_overflow->buff)
			free(rc_overflow->buff);
		rc_overflow->top = cs_addrs->hdr->blk_size;
		rc_overflow->buff = (char*)malloc(rc_overflow->top);
		if (rc_overflow_size < rc_overflow->top)
			rc_overflow_size = rc_overflow->top;
	}
	if (!rc_overflow -> top)
	{
		rc_overflow -> top = rc_overflow_size;
		rc_overflow->buff = (char *)malloc(rc_overflow->top);
	}
	gd_header = fdi_ptr->gda;
	return RC_SUCCESS;
}
Example #14
0
uint4	mur_forward(jnl_tm_t min_broken_time, seq_num min_broken_seqno, seq_num losttn_seqno)
{
	boolean_t		added, this_reg_stuck;
	boolean_t		is_set_kill_zkill_ztworm, is_set_kill_zkill;
	jnl_record		*rec;
	enum jnl_record_type	rectype;
	enum rec_fence_type	rec_fence;
	enum broken_type	recstat;
	jnl_tm_t		rec_time;
	int4			rec_image_count = 0;	/* This is a dummy variable for UNIX */
	uint4			status, regcnt_stuck, num_partners;
	mval			mv;
	reg_ctl_list		*rctl, *rctl_top, *prev_rctl;
	jnl_ctl_list		*jctl;
	gd_region		*reg;
	sgmnt_addrs		*csa;
	seq_num 		rec_token_seq;
	forw_multi_struct	*forw_multi;
	multi_struct 		*multi;

	error_def(ERR_JNLREADEOF);
	error_def(ERR_BLKCNTEDITFAIL);

	skip_dbtriggers = TRUE;	/* do not want to invoke any triggers for updates done by journal recovery */
	murgbl.extr_buff = (char *)malloc(murgbl.max_extr_record_length);
	for (recstat = (enum broken_type)0; recstat < TOT_EXTR_TYPES; recstat++)
		murgbl.extr_file_create[recstat] = TRUE;
	jgbl.dont_reset_gbl_jrec_time = jgbl.forw_phase_recovery = TRUE;
	assert(NULL == jgbl.mur_pini_addr_reset_fnptr);
	jgbl.mur_pini_addr_reset_fnptr = (pini_addr_reset_fnptr)mur_pini_addr_reset;
	gv_keysize = DBKEYSIZE(MAX_KEY_SZ);
	mu_gv_stack_init();
	murgbl.consist_jnl_seqno = 0;
	/* Note down passed in values in murgbl global so "mur_forward_play_cur_jrec" function can see it as well */
	murgbl.min_broken_time = min_broken_time;
	murgbl.min_broken_seqno = min_broken_seqno;
	murgbl.losttn_seqno = losttn_seqno;
	assert(!mur_options.rollback || (murgbl.losttn_seqno <= murgbl.min_broken_seqno));
	prev_rctl = NULL;
	rctl_start = NULL;
	assert(0 == murgbl.regcnt_remaining);
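	/* First pass over all regions: position each region's journal at its starting record and play records
	 * timestamped before the tp-resolve-time as single-region updates. Regions that still have unprocessed
	 * records after that point are linked (via prev_rctl/next_rctl) for the multi-region forward phase below.
	 */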
	for (rctl = mur_ctl, rctl_top = mur_ctl + murgbl.reg_total; rctl < rctl_top; rctl++)
	{
		if (mur_options.forward)
		{
			assert(NULL == rctl->jctl_turn_around);
			jctl = rctl->jctl = rctl->jctl_head;
			assert(jctl->reg_ctl == rctl);
			jctl->rec_offset = JNL_HDR_LEN;
			jnl_fence_ctl.fence_list = JNL_FENCE_LIST_END; /* initialized to reflect journaling is not enabled */
		} else
		{
			jctl = rctl->jctl = (NULL == rctl->jctl_turn_around) ? rctl->jctl_head : rctl->jctl_turn_around;
			assert(jctl->reg_ctl == rctl);
			jctl->rec_offset = jctl->turn_around_offset;
			jgbl.mur_jrec_seqno = jctl->turn_around_seqno;
			if (mur_options.rollback && murgbl.consist_jnl_seqno < jgbl.mur_jrec_seqno)
				murgbl.consist_jnl_seqno = jgbl.mur_jrec_seqno;
			assert(murgbl.consist_jnl_seqno <= murgbl.losttn_seqno);
			assert((NULL != rctl->jctl_turn_around) || (0 == jctl->rec_offset));
		}
		if (mur_options.update || mur_options.extr[GOOD_TN])
		{
			reg = rctl->gd;
			gv_cur_region = reg;
			tp_change_reg();	/* note : sets cs_addrs to non-NULL value even if gv_cur_region->open is FALSE
						 * (cs_data could still be NULL). */
			rctl->csa = cs_addrs;
			cs_addrs->rctl = rctl;
			rctl->csd = cs_data;
			rctl->sgm_info_ptr = cs_addrs->sgm_info_ptr;
			SET_CSA_DIR_TREE(cs_addrs, MAX_KEY_SZ, reg);
			gv_target = cs_addrs->dir_tree;
		}
		jctl->after_end_of_data = FALSE;
		status = mur_next(jctl, jctl->rec_offset);
		assert(ERR_JNLREADEOF != status);	/* cannot get EOF at start of forward processing */
		if (SS_NORMAL != status)
			return status;
		PRINT_VERBOSE_STAT(jctl, "mur_forward:at the start");
		/* Any multi-region TP transaction will be processed as multiple single-region TP transactions up
		 * until the tp-resolve-time is reached. From then on, they will be treated as one multi-region TP
		 * transaction. This is needed for proper lost-tn determination (any multi-region transaction that
		 * gets played in a region AFTER it has already encountered a broken tn should treat this as a lost tn).
		 */
		do
		{
			assert(jctl == rctl->jctl);
			rec = rctl->mur_desc->jnlrec;
			rec_time = rec->prefix.time;
			if (rec_time > mur_options.before_time)
				break;	/* Records after -BEFORE_TIME do not go to extract or losttrans or brkntrans files */
			if (rec_time < mur_options.after_time)
			{
				status = mur_next_rec(&jctl);
				continue; /* Records before -AFTER_TIME do not go to extract or losttrans or brkntrans files */
			}
			if (rec_time >= jgbl.mur_tp_resolve_time)
				break;	/* Records after tp-resolve-time will be processed below */
			/* TODO: what do we do if we find a BROKEN tn here? */
			status = mur_forward_play_cur_jrec(rctl);
			if (SS_NORMAL != status)
				break;
			status = mur_next_rec(&jctl);
		} while (SS_NORMAL == status);
		CHECK_IF_EOF_REACHED(rctl, status); /* sets rctl->forw_eof_seen if needed; resets "status" to SS_NORMAL */
		if (SS_NORMAL != status)
			return status;
		if (rctl->forw_eof_seen)
		{
			PRINT_VERBOSE_STAT(jctl, "mur_forward:Reached EOF before tp_resolve_time");
			continue;	/* Reached EOF before even getting to tp_resolve_time.
					 * Do not even consider region for next processing loop */
		}
		rctl->last_tn = 0;
		rctl->process_losttn = FALSE;
		murgbl.regcnt_remaining++;	/* # of regions participating in recovery at this point */
		if (NULL == rctl_start)
			rctl_start = rctl;
		if (NULL != prev_rctl)
		{
			prev_rctl->next_rctl = rctl;
			rctl->prev_rctl = prev_rctl;
		}
		prev_rctl = rctl;
		assert(murgbl.ok_to_update_db || !rctl->db_updated);
		PRINT_VERBOSE_STAT(jctl, "mur_forward:at tp_resolve_time");
	}
	/* Note that it is possible for rctl_start to be NULL at this point. That is, there is no journal record in any region
	 * AFTER the calculated tp-resolve-time. This is possible if, for example, -AFTER_TIME was used and has a time later
	 * than any journal record in all journal files. If rctl_start is NULL, prev_rctl should also be NULL and vice versa.
	 */
	if (prev_rctl != rctl_start)
	{
		assert(NULL != prev_rctl);
		assert(NULL != rctl_start);
		prev_rctl->next_rctl = rctl_start;
		rctl_start->prev_rctl = prev_rctl;
	} else
	{	/* prev_rctl & rctl_start are identical. They both should be NULL or should point to a single element linked list */
		assert((NULL == rctl_start) || (NULL == rctl_start->next_rctl) && (NULL == rctl_start->prev_rctl));
	}
	rctl = rctl_start;
	regcnt_stuck = 0; /* # of regions we are stuck in waiting for other regions to resolve a multi-region TP transaction */
	assert((NULL == rctl) || (NULL == rctl->forw_multi));
	gv_cur_region = NULL;	/* clear out any previous value to ensure gv_cur_region/cs_addrs/cs_data
				 * all get set in sync by the MUR_CHANGE_REG macro below.
				 */
	while (NULL != rctl)
	{	/* while there is at least one region remaining with unprocessed journal records */
		assert(NULL != rctl_start);
		assert(0 < murgbl.regcnt_remaining);
		if (NULL != rctl->forw_multi)
		{	/* This region's current journal record is part of a TP transaction waiting for other regions */
			regcnt_stuck++;
			if (regcnt_stuck >= murgbl.regcnt_remaining)
				GTMASSERT;	/* Out-of-design situation. Stuck in ALL regions. */
			rctl = rctl->next_rctl;	/* Move on to the next available region */
			assert(NULL != rctl);
			continue;
		}
		regcnt_stuck = 0;	/* restart the counter now that we found at least one non-stuck region */
		MUR_CHANGE_REG(rctl);
		jctl = rctl->jctl;
		this_reg_stuck = FALSE;
		for ( status = SS_NORMAL; SS_NORMAL == status; )
		{
			assert(jctl == rctl->jctl);
			rec = rctl->mur_desc->jnlrec;
			rec_time = rec->prefix.time;
			assert(rec_time >= jgbl.mur_tp_resolve_time);
			if (rec_time > mur_options.before_time)
				break;	/* Records after -BEFORE_TIME do not go to extract or losttrans or brkntrans files */
			assert((0 == mur_options.after_time) || mur_options.forward && !rctl->db_updated);
			if (rec_time < mur_options.after_time)
			{
				status = mur_next_rec(&jctl);
				continue; /* Records before -AFTER_TIME do not go to extract or losttrans or brkntrans files */
			}
			/* Check if current journal record can be played right away or need to wait for corresponding journal
			 * records from other participating TP regions to be reached. A non-TP or ZTP transaction can be played
			 * without issues (i.e. has no dependencies with any other regions). A single-region TP transaction too
			 * falls in the same category. A multi-region TP transaction needs to wait until all participating regions
			 * have played all journal records BEFORE this TP in order to ensure recovery plays records in the exact
			 * same order that GT.M performed them in.
			 */
			/* If FENCE_NONE is specified, we would not have maintained any multi hashtable in mur_back_process for
			 * broken transaction processing. So we process multi-region TP transactions as multiple single-region
			 * TP transactions in forward phase.
			 */
			if (FENCE_NONE != mur_options.fences)
			{
				rectype = (enum jnl_record_type)rec->prefix.jrec_type;
				if (IS_TP(rectype) && IS_TUPD(rectype))
				{
					assert(IS_SET_KILL_ZKILL_ZTRIG_ZTWORM(rectype));
					assert(&rec->jrec_set_kill.num_participants == &rec->jrec_ztworm.num_participants);
					num_partners = rec->jrec_set_kill.num_participants;
					assert(0 < num_partners);
					if (1 < num_partners)
					{
						this_reg_stuck = TRUE;
						assert(&rec->jrec_set_kill.update_num == &rec->jrec_ztworm.update_num);
					}
				}
			}
			if (this_reg_stuck)
			{
				rec_token_seq = GET_JNL_SEQNO(rec);
				VMS_ONLY(
					/* In VMS, pid is not unique. We need "image_count" as well. But this is not needed
					 * in case of rollback as the token is guaranteed to be unique in that case.
					 */
					if (!mur_options.rollback)
					{
						MUR_GET_IMAGE_COUNT(jctl, rec, rec_image_count, status);
						if (SS_NORMAL != status)
						{
							this_reg_stuck = FALSE;	/* so abnormal "status" gets checked below */
							break;
						}
					}
				)
				/* In Unix, "rec_image_count" is ignored by the MUR_FORW* macros */
				MUR_FORW_TOKEN_LOOKUP(forw_multi, rec_token_seq, rec_time, rec_image_count);
				if (NULL != forw_multi)
				{	/* This token has already been seen in another region in forward processing.
					 * Add current region as well. If all regions have been resolved, then play
					 * the entire transaction maintaining the exact same order of updates within.
					 */
					MUR_FORW_TOKEN_ONE_MORE_REG(forw_multi, rctl);
				} else
				{	/* First time we are seeing this token in forward processing. Check if this
					 * has already been determined to be a broken transaction.
					 */
					recstat = GOOD_TN;
					multi = NULL;
					if (IS_REC_POSSIBLY_BROKEN(rec_time, rec_token_seq))
					{
						multi = MUR_TOKEN_LOOKUP(rec_token_seq, rec_image_count, rec_time, TPFENCE);
						if ((NULL != multi) && (0 < multi->partner))
							recstat = BROKEN_TN;
					}
					MUR_FORW_TOKEN_ADD(forw_multi, rec_token_seq, rec_time, rctl, num_partners,
								recstat, multi, rec_image_count);
				}
				/* Check that "tabent" field has been initialized above (by either the MUR_FORW_TOKEN_LOOKUP
				 * or MUR_FORW_TOKEN_ADD macros). This is relied upon by "mur_forward_play_multireg_tp" below.
				 */
				assert(NULL != forw_multi->u.tabent);
				assert(forw_multi->num_reg_seen_forward <= forw_multi->num_reg_seen_backward);
				if (forw_multi->num_reg_seen_forward == forw_multi->num_reg_seen_backward)
				{	/* All regions have been seen in forward processing. Now play it.
					 * Note that the TP could be BROKEN_TN or GOOD_TN. The callee handles it.
					 */
					assert(forw_multi == rctl->forw_multi);
					status = mur_forward_play_multireg_tp(forw_multi, rctl);
					this_reg_stuck = FALSE;
					/* Note that as part of playing the TP transaction, we could have reached
					 * the EOF of rctl. In this case, we need to break out of the loop.
					 */
					if ((SS_NORMAL != status) || rctl->forw_eof_seen)
						break;
					assert(NULL == rctl->forw_multi);
					assert(!dollar_tlevel);
					jctl = rctl->jctl;	/* In case the first record after the most recently processed
								 * TP transaction is in the next generation journal file */
					continue;
				}
				break;
			} else
			{
				status = mur_forward_play_cur_jrec(rctl);
				if (SS_NORMAL != status)
					break;
			}
			assert(!this_reg_stuck);
			status = mur_next_rec(&jctl);
		}
Example #15
0
void bin_load(uint4 begin, uint4 end)
{
	unsigned char	*ptr, *cp1, *cp2, *btop, *gvkey_char_ptr, *tmp_ptr, *tmp_key_ptr, *c, *ctop, *ptr_base;
	unsigned char	hdr_lvl, src_buff[MAX_KEY_SZ + 1], dest_buff[MAX_ZWR_KEY_SZ],
			cmpc_str[MAX_KEY_SZ + 1], dup_key_str[MAX_KEY_SZ + 1], sn_key_str[MAX_KEY_SZ + 1], *sn_key_str_end;
	unsigned char	*end_buff;
	unsigned short	rec_len, next_cmpc, numsubs;
	int		len;
	int		current, last, length, max_blk_siz, max_key, status;
	int		tmp_cmpc, sn_chunk_number, expected_sn_chunk_number = 0, sn_hold_buff_pos, sn_hold_buff_size;
	uint4		iter, max_data_len, max_subsc_len, key_count, gblsize;
	ssize_t		rec_count, global_key_count, subsc_len,extr_std_null_coll, last_sn_error_offset=0,
				file_offset_base=0, file_offset=0;
	boolean_t	need_xlation, new_gvn, utf8_extract;
	boolean_t	is_hidden_subscript, ok_to_put = TRUE, putting_a_sn = FALSE, sn_incmp_gbl_already_killed = FALSE;
	rec_hdr		*rp, *next_rp;
	mval		v, tmp_mval;
	mstr		mstr_src, mstr_dest;
	collseq		*extr_collseq, *db_collseq, *save_gv_target_collseq;
	coll_hdr	extr_collhdr, db_collhdr;
	gv_key 		*tmp_gvkey = NULL;	/* null-initialize at start, will be malloced later */
	gv_key		*sn_gvkey = NULL; /* null-initialize at start, will be malloced later */
	gv_key		*sn_savekey = NULL; /* null-initialize at start, will be malloced later */
	char		std_null_coll[BIN_HEADER_NUMSZ + 1], *sn_hold_buff = NULL, *sn_hold_buff_temp = NULL;
#	ifdef GTM_CRYPT
	gtmcrypt_key_t			*encr_key_handles;
	char				*inbuf;
	int4				index;
	int				req_dec_blk_size, init_status, crypt_status;
	muext_hash_hdr_ptr_t		hash_array = NULL;
#	endif
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(4 == SIZEOF(coll_hdr));
	gvinit();
	v.mvtype = MV_STR;
	len = file_input_bin_get((char **)&ptr, &file_offset_base, (char **)&ptr_base);
	hdr_lvl = EXTR_HEADER_LEVEL(ptr);
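	/* Validate the header length against the header level: levels 4 and 5 use the V5 header size,
	 * levels 6 and 7 the current header size, and pre-level-4 extracts the V3 size.
	 */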
	if (!(((('4' == hdr_lvl) || ('5' == hdr_lvl)) && (V5_BIN_HEADER_SZ == len)) ||
			(('6' == hdr_lvl) && (BIN_HEADER_SZ == len)) ||
			(('7' == hdr_lvl) && (BIN_HEADER_SZ == len)) ||
			(('4' > hdr_lvl) && (V3_BIN_HEADER_SZ == len))))
	{
		rts_error(VARLSTCNT(1) ERR_LDBINFMT);
		mupip_exit(ERR_LDBINFMT);
	}
	/* expecting the level in a single character */
	assert(' ' == *(ptr + SIZEOF(BIN_HEADER_LABEL) - 3));
	if (0 != memcmp(ptr, BIN_HEADER_LABEL, SIZEOF(BIN_HEADER_LABEL) - 2) || ('2' > hdr_lvl) ||
			*(BIN_HEADER_VERSION_ENCR) < hdr_lvl)
	{	/* ignore the level check */
		rts_error(VARLSTCNT(1) ERR_LDBINFMT);
		mupip_exit(ERR_LDBINFMT);
	}
	/* check if extract was generated in UTF-8 mode */
	utf8_extract = (0 == MEMCMP_LIT(&ptr[len - BIN_HEADER_LABELSZ], UTF8_NAME)) ? TRUE : FALSE;
	if ((utf8_extract && !gtm_utf8_mode) || (!utf8_extract && gtm_utf8_mode))
	{ /* extract CHSET doesn't match $ZCHSET */
		if (utf8_extract)
			rts_error(VARLSTCNT(4) ERR_LOADINVCHSET, 2, LEN_AND_LIT("UTF-8"));
		else
			rts_error(VARLSTCNT(4) ERR_LOADINVCHSET, 2, LEN_AND_LIT("M"));
		mupip_exit(ERR_LDBINFMT);
	}
	if ('4' >= hdr_lvl)
	{	/* Binary extracts in V50000-to-V52000 (label=4) and pre-V50000 (label=3) could have a '\0' byte (NULL byte)
		 * in the middle of the string. Replace it with ' ' (space) like it would be in V52000 binary extracts and above.
		 */
		for (c = ptr, ctop = c + len; c < ctop; c++)
		{
			if ('\0' == *c)
				*c = ' ';
		}
	}
	util_out_print("Label = !AD\n", TRUE, len, ptr);
	new_gvn = FALSE;
	if (hdr_lvl > '3')
	{
		if (hdr_lvl > '5')
		{
			memcpy(std_null_coll, ptr + BIN_HEADER_NULLCOLLOFFSET, BIN_HEADER_NUMSZ);
			std_null_coll[BIN_HEADER_NUMSZ] = '\0';
		}
		else
		{
			memcpy(std_null_coll, ptr + V5_BIN_HEADER_NULLCOLLOFFSET, V5_BIN_HEADER_NUMSZ);
			std_null_coll[V5_BIN_HEADER_NUMSZ] = '\0';
		}
		extr_std_null_coll = STRTOUL(std_null_coll, NULL, 10);
		if (0 != extr_std_null_coll && 1!= extr_std_null_coll)
		{
			rts_error(VARLSTCNT(5) ERR_TEXT, 2, RTS_ERROR_TEXT("Corrupted null collation field  in header"),
				ERR_LDBINFMT);
			mupip_exit(ERR_LDBINFMT);
		}
	} else
		extr_std_null_coll = 0;
#	ifdef GTM_CRYPT
	if ('7' <= hdr_lvl)
	{
		int	i, num_indexes;
		len = file_input_bin_get((char **)&ptr, &file_offset_base, (char **)&ptr_base);
		hash_array = (muext_hash_hdr *)malloc(len);
		/* store hashes of all the files used during extract into muext_hash_hdr structure */
		memcpy((char *)hash_array, ptr, len);
		num_indexes = len / GTMCRYPT_HASH_LEN;
		encr_key_handles = (gtmcrypt_key_t *)malloc(SIZEOF(gtmcrypt_key_t) * num_indexes);
		INIT_PROC_ENCRYPTION(crypt_status);
		GC_BIN_LOAD_ERR(crypt_status);
		for (index = 0; index < num_indexes; index++)
		{
			if (0 == memcmp(hash_array[index].gtmcrypt_hash, EMPTY_GTMCRYPT_HASH, GTMCRYPT_HASH_LEN))
				continue;
			GTMCRYPT_GETKEY(hash_array[index].gtmcrypt_hash, encr_key_handles[index], crypt_status);
			GC_BIN_LOAD_ERR(crypt_status);
		}
	}
#	endif
	if ('2' < hdr_lvl)
	{
		len = file_input_bin_get((char **)&ptr, &file_offset_base, (char **)&ptr_base);
		if (SIZEOF(coll_hdr) != len)
		{
			rts_error(VARLSTCNT(5) ERR_TEXT, 2, RTS_ERROR_TEXT("Corrupt collation header"), ERR_LDBINFMT);
			mupip_exit(ERR_LDBINFMT);
		}
		extr_collhdr = *((coll_hdr *)(ptr));
		new_gvn = TRUE;
	} else
		gtm_putmsg(VARLSTCNT(3) ERR_OLDBINEXTRACT, 1, hdr_lvl - '0');
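	/* Skip ahead to the requested BEGIN record; collation header records encountered on the way are noted
	 * but do not count towards the record number.
	 */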
	if (begin < 2)
		begin = 2;
	for (iter = 2; iter < begin; iter++)
	{
		if (!(len = file_input_bin_get((char **)&ptr, &file_offset_base, (char **)&ptr_base)))
		{
			gtm_putmsg(VARLSTCNT(3) ERR_LOADEOF, 1, begin);
			util_out_print("Error reading record number: !UL\n", TRUE, iter);
			mupip_error_occurred = TRUE;
			return;
		} else if (len == SIZEOF(coll_hdr))
		{
			extr_collhdr = *((coll_hdr *)(ptr));
			assert(hdr_lvl > '2');
			iter--;
		}
	}
	assert(iter == begin);
	util_out_print("Beginning LOAD at record number: !UL\n", TRUE, begin);
	max_data_len = 0;
	max_subsc_len = 0;
	global_key_count = key_count = 0;
	rec_count = begin - 1;
	extr_collseq = db_collseq = NULL;
	need_xlation = FALSE;
	assert(NULL == tmp_gvkey);	/* GVKEY_INIT macro relies on this */
	GVKEY_INIT(tmp_gvkey, DBKEYSIZE(MAX_KEY_SZ));	/* tmp_gvkey will point to malloced memory after this */
	assert(NULL == sn_gvkey);	/* GVKEY_INIT macro relies on this */
	GVKEY_INIT(sn_gvkey, DBKEYSIZE(MAX_KEY_SZ));	/* sn_gvkey will point to malloced memory after this */
	assert(NULL == sn_savekey);	/* GVKEY_INIT macro relies on this */
	GVKEY_INIT(sn_savekey, DBKEYSIZE(MAX_KEY_SZ));	/* sn_savekey will point to malloced memory after this */
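	/* Main load loop: each iteration reads one extract record (a block image) and loads its contents, until
	 * the END record count is exceeded, EOF or an error occurs, Ctrl-Y is pressed, or the database fills up.
	 */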
	for (; !mupip_DB_full ;)
	{
		if (++rec_count > end)
			break;
		next_cmpc = 0;
		mupip_error_occurred = FALSE;
		if (mu_ctrly_occurred)
			break;
		if (mu_ctrlc_occurred)
		{
			util_out_print("!AD:!_  Key cnt: !UL  max subsc len: !UL  max data len: !UL", TRUE,
				LEN_AND_LIT(gt_lit), key_count, max_subsc_len, max_data_len);
			util_out_print("Last LOAD record number: !UL", TRUE, key_count ? (rec_count - 1) : 0);
			mu_gvis();
			util_out_print(0, TRUE);
			mu_ctrlc_occurred = FALSE;
		}
		if (!(len = file_input_bin_get((char **)&ptr, &file_offset_base, (char **)&ptr_base)) || mupip_error_occurred)
			break;
		else if (len == SIZEOF(coll_hdr))
		{
			extr_collhdr = *((coll_hdr *)(ptr));
			assert(hdr_lvl > '2');
			new_gvn = TRUE;			/* next record will contain a new gvn */
			rec_count--;	/* Decrement as this record does not count as a record for loading purposes */
			continue;
		}
		rp = (rec_hdr*)(ptr);
#		ifdef GTM_CRYPT
		if ('7' <= hdr_lvl)
		{	/* Getting index value from the extracted file. It indicates which database file this record belongs to */
			GET_LONG(index, ptr);
			if (-1 != index) /* Indicates that the record is encrypted. */
			{
				req_dec_blk_size = len - SIZEOF(int4);
				inbuf = (char *)(ptr + SIZEOF(int4));
				GTMCRYPT_DECODE_FAST(encr_key_handles[index], inbuf, req_dec_blk_size, NULL, crypt_status);
				GC_BIN_LOAD_ERR(crypt_status);
			}
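			/* Whether or not the record payload was encrypted, the 4-byte index precedes the record data,
			 * so the record header starts just past it in either case.
			 */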
			rp = (rec_hdr*)(ptr + SIZEOF(int4));
		}
#		endif
		btop = ptr + len;
		cp1 = (unsigned char*)(rp + 1);
		v.str.addr = (char*)cp1;
		while (*cp1++)
			;
		v.str.len = INTCAST((char*)cp1 - v.str.addr - 1);
		if (('2' >= hdr_lvl) || new_gvn)
		{
			if ((HASHT_GBLNAME_LEN == v.str.len) &&	(0 == memcmp(v.str.addr, HASHT_GBLNAME, HASHT_GBLNAME_LEN)))
				continue;
			bin_call_db(BIN_BIND, (INTPTR_T)gd_header, (INTPTR_T)&v.str);
			max_key = gv_cur_region->max_key_size;
			db_collhdr.act = gv_target->act;
			db_collhdr.ver = gv_target->ver;
			db_collhdr.nct = gv_target->nct;
		}
		GET_USHORT(rec_len, &rp->rsiz);
		if (EVAL_CMPC(rp) != 0 || v.str.len > rec_len || mupip_error_occurred)
		{
			bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count);
			mu_gvis();
			DISPLAY_FILE_OFFSET_OF_RECORD_AND_REST_OF_BLOCK;
			continue;
		}
		if (new_gvn)
		{
			global_key_count = 1;
			if ((db_collhdr.act != extr_collhdr.act || db_collhdr.ver != extr_collhdr.ver
				|| db_collhdr.nct != extr_collhdr.nct
				|| gv_cur_region->std_null_coll != extr_std_null_coll))
			{
				if (extr_collhdr.act)
				{
					if (extr_collseq = ready_collseq((int)extr_collhdr.act))
					{
						if (!do_verify(extr_collseq, extr_collhdr.act, extr_collhdr.ver))
						{
							gtm_putmsg(VARLSTCNT(8) ERR_COLLTYPVERSION, 2, extr_collhdr.act,
								extr_collhdr.ver, ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base);
							mupip_exit(ERR_COLLTYPVERSION);
						}
					} else
					{
						gtm_putmsg(VARLSTCNT(7) ERR_COLLATIONUNDEF, 1, extr_collhdr.act,
							ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base);
						mupip_exit(ERR_COLLATIONUNDEF);
					}
				}
				if (db_collhdr.act)
				{
					if (db_collseq = ready_collseq((int)db_collhdr.act))
					{
						if (!do_verify(db_collseq, db_collhdr.act, db_collhdr.ver))
						{
							gtm_putmsg(VARLSTCNT(8) ERR_COLLTYPVERSION, 2, db_collhdr.act,
								db_collhdr.ver, ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base);
							mupip_exit(ERR_COLLTYPVERSION);
						}
					} else
					{
						gtm_putmsg(VARLSTCNT(7) ERR_COLLATIONUNDEF, 1, db_collhdr.act,
							ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base);
						mupip_exit(ERR_COLLATIONUNDEF);
					}
				}
				need_xlation = TRUE;
			} else
				need_xlation = FALSE;
		}
		new_gvn = FALSE;
		for (; rp < (rec_hdr*)btop; rp = (rec_hdr*)((unsigned char *)rp + rec_len))
		{
			GET_USHORT(rec_len, &rp->rsiz);
			if (rec_len + (unsigned char *)rp > btop)
			{
				bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count);
				mu_gvis();
				DISPLAY_FILE_OFFSET_OF_RECORD_AND_REST_OF_BLOCK;
				break;
			}
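			/* Rebuild the full key: the record carries only the bytes that differ from the previous key, so keep
			 * the first EVAL_CMPC(rp) bytes already in gv_currkey and copy the remainder from the record until the
			 * double NUL that terminates the key.
			 */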
			cp1 = (unsigned char*)(rp + 1);
			cp2 = gv_currkey->base + EVAL_CMPC(rp);
			current = 1;
			for (;;)
			{
				last = current;
				current = *cp2++ = *cp1++;
				if (0 == last && 0 == current)
					break;
				if (cp1 > (unsigned char *)rp + rec_len ||
				    cp2 > (unsigned char *)gv_currkey + gv_currkey->top)
				{
					gv_currkey->end = cp2 - gv_currkey->base - 1;
					gv_currkey->base[gv_currkey->end] = 0;
					gv_currkey->base[gv_currkey->end - 1] = 0;
					bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count);
					mu_gvis();
					DISPLAY_FILE_OFFSET_OF_RECORD_AND_REST_OF_BLOCK;
					break;
				}
			}
			if (mupip_error_occurred)
				break;
			gv_currkey->end = cp2 - gv_currkey->base - 1;
			if (need_xlation)
			{
				assert(hdr_lvl >= '3');
				assert(extr_collhdr.act || db_collhdr.act || extr_collhdr.nct || db_collhdr.nct ||
				 	extr_std_null_coll != gv_cur_region->std_null_coll);
							/* gv_currkey would have been modified/translated in the earlier put */
				memcpy(gv_currkey->base, cmpc_str, next_cmpc);
				next_rp = (rec_hdr *)((unsigned char*)rp + rec_len);
				if ((unsigned char*)next_rp < btop)
				{
					next_cmpc = EVAL_CMPC(next_rp);
					assert(next_cmpc <= gv_currkey->end);
					memcpy(cmpc_str, gv_currkey->base, next_cmpc);
				} else
					next_cmpc = 0;
							/* length of the key might change (due to nct variation),
							 * so get a copy of the original key from the extract */
				memcpy(dup_key_str, gv_currkey->base, gv_currkey->end + 1);
				gvkey_char_ptr = dup_key_str;
				while (*gvkey_char_ptr++)
					;
				gv_currkey->prev = 0;
				gv_currkey->end = gvkey_char_ptr - dup_key_str;
				assert(gv_keysize <= tmp_gvkey->top);
				while (*gvkey_char_ptr)
				{
						/* get next subscript (in GT.M internal subsc format) */
					subsc_len = 0;
					tmp_ptr = src_buff;
					while (*gvkey_char_ptr)
						*tmp_ptr++ = *gvkey_char_ptr++;
					subsc_len = tmp_ptr - src_buff;
					src_buff[subsc_len] = '\0';
					if (extr_collseq)
					{
						/* undo the extract time collation */
						TREF(transform) = TRUE;
						save_gv_target_collseq = gv_target->collseq;
						gv_target->collseq = extr_collseq;
					} else
						TREF(transform) = FALSE;
						/* convert the subscript to string format */
					end_buff = gvsub2str(src_buff, dest_buff, FALSE);
						/* transform the string to the current subsc format */
					TREF(transform) = TRUE;
					tmp_mval.mvtype = MV_STR;
					tmp_mval.str.addr = (char *)dest_buff;
					tmp_mval.str.len = INTCAST(end_buff - dest_buff);
					tmp_gvkey->prev = 0;
					tmp_gvkey->end = 0;
					if (extr_collseq)
						gv_target->collseq = save_gv_target_collseq;
					mval2subsc(&tmp_mval, tmp_gvkey);
						/* we now have the correctly transformed subscript */
					tmp_key_ptr = gv_currkey->base + gv_currkey->end;
					memcpy(tmp_key_ptr, tmp_gvkey->base, tmp_gvkey->end + 1);
					gv_currkey->prev = gv_currkey->end;
					gv_currkey->end += tmp_gvkey->end;
					gvkey_char_ptr++;
				}
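				/* If the extract and the target region disagree on null-subscript collation, convert any null
				 * subscripts in the rebuilt key to the target region's representation.
				 */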
				if (gv_cur_region->std_null_coll != extr_std_null_coll && gv_currkey->prev)
				{
					if (extr_std_null_coll == 0)
					{
						GTM2STDNULLCOLL(gv_currkey->base, gv_currkey->end);
					} else
					{
						STD2GTMNULLCOLL(gv_currkey->base, gv_currkey->end);
					}
				}
			}
			if (gv_currkey->end >= max_key)
			{
				bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count);
				mu_gvis();
				DISPLAY_FILE_OFFSET_OF_RECORD_AND_REST_OF_BLOCK;
				continue;
			}
			/*
			 * Spanning node-related variables and their usage:
			 *
			 * expected_sn_chunk_number: 	0  - looking for spanning nodes (regular nodes are OK, too)
			 *				!0 - number of the next chunk needed (implies we are building
			 *					a spanning node's value)
			 *
			 * While building a spanning node's value:
			 * numsubs: the number of chunks needed to build the spanning node's value
			 * gblsize: the expected size of the completed value
			 * sn_chunk_number: the chunk number carried by the current record from the extract
			 *
			 * Managing the value
			 * sn_hold_buff: buffer used to accumulate the spanning node's value
			 * sn_hold_buff_size: Allocated size of buffer
			 * sn_hold_buff_pos: amount of the buffer used; where to place the next chunk
			 * sn_hold_buff_temp: used when we have to increase the size of the buffer
			 *
			 * Controlling the placing of the key,value in the database:
			 * ok_to_put: means we are ready to place the key,value in the database, i.e., we have the full value
			 * 		(either of the spanning node or a regular node).
			 * putting_a_sn: we are placing a spanning node in the database, i.e., use the key from sn_gvkey and
			 * 		the value from sn_hold_buff.
			 */
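			/* In outline: the chunk-0 (control) record supplies numsubs and gblsize and starts a new spanning node;
			 * each subsequent chunk is appended to sn_hold_buff (growing the buffer as needed); once the last
			 * expected chunk has arrived and the accumulated length equals gblsize, the assembled node is written;
			 * anything out of sequence is reported and the partial value is dropped.
			 */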
			CHECK_HIDDEN_SUBSCRIPT(gv_currkey,is_hidden_subscript);
			if (!is_hidden_subscript && (max_subsc_len < (gv_currkey->end + 1)))
				max_subsc_len = gv_currkey->end + 1;
			v.str.addr = (char*)cp1;
			v.str.len = INTCAST(rec_len - (cp1 - (unsigned char *)rp));
			if (expected_sn_chunk_number && !is_hidden_subscript)
			{	/* we were expecting a chunk of a spanning node and we did not get one */
				DISPLAY_INCMP_SN_MSG;
				util_out_print("!_!_Expected chunk number : !UL but found a non-spanning node", TRUE,
						expected_sn_chunk_number + 1);
				if (sn_hold_buff_pos)
					DISPLAY_PARTIAL_SN_HOLD_BUFF;
				KILL_INCMP_SN_IF_NEEDED;
				sn_hold_buff_pos = 0;
				expected_sn_chunk_number = 0;
				ok_to_put = TRUE;
				putting_a_sn = FALSE;
				numsubs = 0;
			}
			if (is_hidden_subscript)
			{	/* it's a chunk and we were expecting one */
				sn_chunk_number = SPAN_GVSUBS2INT((span_subs *) &(gv_currkey->base[gv_currkey->end - 4]));
				if (!expected_sn_chunk_number && is_hidden_subscript && sn_chunk_number)
				{	/* we are not expecting a payload chunk (as opposed to a control record) but we got one */
					DISPLAY_INCMP_SN_MSG;
					util_out_print("!_!_Not expecting a spanning node chunk but found chunk : !UL", TRUE,
							sn_chunk_number + 1);
					if (v.str.len)
						DISPLAY_VALUE("!_!_Errant Chunk :");
					continue;
				}
				if (0 == sn_chunk_number)
				{ 	/* first spanning node chunk, get ctrl info */
					if (0 != expected_sn_chunk_number)
					{
						DISPLAY_INCMP_SN_MSG;
						util_out_print("!_!_Expected chunk number : !UL but found chunk number : !UL", TRUE,
								expected_sn_chunk_number + 1, sn_chunk_number + 1);
						if (sn_hold_buff_pos)
							DISPLAY_PARTIAL_SN_HOLD_BUFF;
						KILL_INCMP_SN_IF_NEEDED;
					}
					/* start building a new spanning node */
					sn_gvkey->end = gv_currkey->end - (SPAN_SUBS_LEN + 1);
					memcpy(sn_gvkey->base, gv_currkey->base, sn_gvkey->end);
					sn_gvkey->base[sn_gvkey->end] = 0;
					sn_gvkey->prev = gv_currkey->prev;
					sn_gvkey->top = gv_currkey->top;
					GET_NSBCTRL(v.str.addr, numsubs, gblsize);
					/* look for first payload chunk */
					expected_sn_chunk_number = 1;
					sn_hold_buff_pos = 0;
					ok_to_put = FALSE;
					sn_incmp_gbl_already_killed = FALSE;
				} else
				{	/* we only need to compare the key before the hidden subscripts */
					if ((expected_sn_chunk_number == sn_chunk_number)
							&& (sn_gvkey->end == gv_currkey->end - (SPAN_SUBS_LEN + 1))
							&& !memcmp(sn_gvkey->base,gv_currkey->base, sn_gvkey->end)
							&& ((sn_hold_buff_pos + v.str.len) <= gblsize))
					{
						if (NULL == sn_hold_buff)
						{
							sn_hold_buff_size = DEFAULT_SN_HOLD_BUFF_SIZE;
							sn_hold_buff = (char *)malloc(DEFAULT_SN_HOLD_BUFF_SIZE);
						}
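						/* Grow (double) the hold buffer when the next chunk would overflow it. */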
						if ((sn_hold_buff_pos + v.str.len) > sn_hold_buff_size)
						{
							sn_hold_buff_size = sn_hold_buff_size * 2;
							sn_hold_buff_temp = (char *)malloc(sn_hold_buff_size);
							memcpy(sn_hold_buff_temp, sn_hold_buff, sn_hold_buff_pos);
							free (sn_hold_buff);
							sn_hold_buff = sn_hold_buff_temp;
						}
						memcpy(sn_hold_buff + sn_hold_buff_pos, v.str.addr, v.str.len);
						sn_hold_buff_pos += v.str.len;
						if (expected_sn_chunk_number == numsubs)
						{
							if (sn_hold_buff_pos != gblsize)
							{	/* we have all the expected chunks but not the expected size */
								DISPLAY_INCMP_SN_MSG;
								util_out_print("!_!_Expected size : !UL actual size : !UL", TRUE,
										gblsize, sn_hold_buff_pos);
								if (sn_hold_buff_pos)
									DISPLAY_PARTIAL_SN_HOLD_BUFF;
								KILL_INCMP_SN_IF_NEEDED;
								expected_sn_chunk_number = 0;
								ok_to_put = FALSE;
								sn_hold_buff_pos = 0;
							}
							else
							{
								expected_sn_chunk_number = 0;
								ok_to_put = TRUE;
								putting_a_sn = TRUE;
							}
						} else
							expected_sn_chunk_number++;
					} else
					{
						DISPLAY_INCMP_SN_MSG;
						if ((sn_hold_buff_pos + v.str.len) <= gblsize)
							util_out_print("!_!_Expected chunk number : !UL but found chunk number : !UL", /*BYPASSOK*/
								TRUE, expected_sn_chunk_number + 1, sn_chunk_number + 1);
						else
							util_out_print("!_!_Global value too large:  expected size : !UL actual size : !UL chunk number : !UL", TRUE, /*BYPASSOK*/
								gblsize, sn_hold_buff_pos + v.str.len, sn_chunk_number + 1);
						if (sn_hold_buff_pos)
							DISPLAY_PARTIAL_SN_HOLD_BUFF;
						if (v.str.len)
							DISPLAY_VALUE("!_!_Errant Chunk :");
						KILL_INCMP_SN_IF_NEEDED;
						sn_hold_buff_pos = 0;
						expected_sn_chunk_number = 0;
					}
				}
			} else
				ok_to_put = TRUE;
			if (ok_to_put)
			{
				if (putting_a_sn)
				{
					gv_currkey->base[gv_currkey->end - (SPAN_SUBS_LEN + 1)] = 0;
					gv_currkey->end -= (SPAN_SUBS_LEN + 1);
					v.str.addr = sn_hold_buff;
					v.str.len = sn_hold_buff_pos;
				}
				if (max_data_len < v.str.len)
					max_data_len = v.str.len;
				bin_call_db(BIN_PUT, (INTPTR_T)&v, 0);
				if (mupip_error_occurred)
				{
					if (!mupip_DB_full)
					{
						bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count);
						file_offset = file_offset_base + ((unsigned char *)rp - ptr_base);
						util_out_print("!_!_at File offset : [0x!XL]", TRUE, file_offset);
						DISPLAY_CURRKEY;
						DISPLAY_VALUE("!_!_Value :");
					}
					break;
				}
				if (putting_a_sn)
					putting_a_sn = FALSE;
				else
				{
					key_count++;
					global_key_count++;
				}
			}
		}
	}
	GTMCRYPT_ONLY(
		if (NULL != hash_array)
			free(hash_array);
	)
Example #16
0
void bin_load(uint4 begin, uint4 end)
{
	unsigned char	*ptr, *cp1, *cp2, *btop, *gvkey_char_ptr, *tmp_ptr, *tmp_key_ptr, *c, *ctop;
	unsigned char	hdr_lvl, src_buff[MAX_KEY_SZ + 1], dest_buff[MAX_ZWR_KEY_SZ],
			cmpc_str[MAX_KEY_SZ + 1], dup_key_str[MAX_KEY_SZ + 1];
	unsigned char	*end_buff;
	unsigned short	rec_len, next_cmpc;
	int		len;
	int		current, last, length, max_blk_siz, max_key, status;
	uint4		iter, max_data_len, max_subsc_len, key_count;
	ssize_t		rec_count, global_key_count, subsc_len, extr_std_null_coll;
	boolean_t	need_xlation, new_gvn, utf8_extract;
	rec_hdr		*rp, *next_rp;
	mval		v, tmp_mval;
	mstr		mstr_src, mstr_dest;
	collseq		*extr_collseq, *db_collseq, *save_gv_target_collseq;
	coll_hdr	extr_collhdr, db_collhdr;
	gv_key 		*tmp_gvkey = NULL;	/* null-initialize at start, will be malloced later */
	char		std_null_coll[BIN_HEADER_NUMSZ + 1];
#	ifdef GTM_CRYPT
	gtmcrypt_key_t			*encr_key_handles;
	char				*inbuf;
	int4				index;
	int				req_dec_blk_size, init_status, crypt_status;
	muext_hash_hdr_ptr_t		hash_array = NULL;
#	endif
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(4 == SIZEOF(coll_hdr));
	gvinit();
	v.mvtype = MV_STR;
	len = file_input_bin_get((char **)&ptr);
	hdr_lvl = EXTR_HEADER_LEVEL(ptr);
	if (!(((('4' == hdr_lvl) || ('5' == hdr_lvl)) && (BIN_HEADER_SZ == len)) || (('4' > hdr_lvl) && (V3_BIN_HEADER_SZ == len))))
	{
		rts_error(VARLSTCNT(1) ERR_LDBINFMT);
		mupip_exit(ERR_LDBINFMT);
	}
	/* expecting the level in a single character */
	assert(' ' == *(ptr + SIZEOF(BIN_HEADER_LABEL) - 3));
	if (0 != memcmp(ptr, BIN_HEADER_LABEL, SIZEOF(BIN_HEADER_LABEL) - 2) || ('2' > hdr_lvl) || *(BIN_HEADER_VERSION) < hdr_lvl)
	{	/* ignore the level check */
		rts_error(VARLSTCNT(1) ERR_LDBINFMT);
		mupip_exit(ERR_LDBINFMT);
	}
	/* check if extract was generated in UTF-8 mode */
	utf8_extract = (0 == MEMCMP_LIT(&ptr[len - BIN_HEADER_LABELSZ], UTF8_NAME)) ? TRUE : FALSE;
	if ((utf8_extract && !gtm_utf8_mode) || (!utf8_extract && gtm_utf8_mode))
	{ /* extract CHSET doesn't match $ZCHSET */
		if (utf8_extract)
			rts_error(VARLSTCNT(4) ERR_LOADINVCHSET, 2, LEN_AND_LIT("UTF-8"));
		else
			rts_error(VARLSTCNT(4) ERR_LOADINVCHSET, 2, LEN_AND_LIT("M"));
		mupip_exit(ERR_LDBINFMT);
	}
	if ('4' >= hdr_lvl)
	{	/* Binary extracts in V50000-to-V52000 (label=4) and pre-V50000 (label=3) could have a '\0' byte (NULL byte)
		 * in the middle of the string. Replace it with ' ' (space) like it would be in V52000 binary extracts and above.
		 */
		for (c = ptr, ctop = c + len; c < ctop; c++)
		{
			if ('\0' == *c)
				*c = ' ';
		}
	}
	util_out_print("Label = !AD\n", TRUE, len, ptr);
	new_gvn = FALSE;
	if (hdr_lvl > '3')
	{
		memcpy(std_null_coll, ptr + BIN_HEADER_NULLCOLLOFFSET, BIN_HEADER_NUMSZ);
		std_null_coll[BIN_HEADER_NUMSZ] = '\0';
		extr_std_null_coll = STRTOUL(std_null_coll, NULL, 10);
		if (0 != extr_std_null_coll && 1 != extr_std_null_coll)
		{
			rts_error(VARLSTCNT(5) ERR_TEXT, 2, RTS_ERROR_TEXT("Corrupted null collation field in header"),
				ERR_LDBINFMT);
			mupip_exit(ERR_LDBINFMT);
		}
	} else
		extr_std_null_coll = 0;
#	ifdef GTM_CRYPT
	if ('5' <= hdr_lvl)
	{
		int	i, num_indexes;
		len = file_input_bin_get((char **)&ptr);
		hash_array = (muext_hash_hdr *)malloc(len);
		/* store hashes of all the files used during extract into muext_hash_hdr structure */
		memcpy((char *)hash_array, ptr, len);
		num_indexes = len / GTMCRYPT_HASH_LEN;
		encr_key_handles = (gtmcrypt_key_t *)malloc(SIZEOF(gtmcrypt_key_t) * num_indexes);
		INIT_PROC_ENCRYPTION(crypt_status);
		GC_BIN_LOAD_ERR(crypt_status);
		for (index = 0; index < num_indexes; index++)
		{
			if (0 == memcmp(hash_array[index].gtmcrypt_hash, EMPTY_GTMCRYPT_HASH, GTMCRYPT_HASH_LEN))
				continue;
			GTMCRYPT_GETKEY(hash_array[index].gtmcrypt_hash, encr_key_handles[index], crypt_status);
			GC_BIN_LOAD_ERR(crypt_status);
		}
	}
#	endif
	if ('2' < hdr_lvl)
	{
		len = file_input_bin_get((char **)&ptr);
		if (SIZEOF(coll_hdr) != len)
		{
			rts_error(VARLSTCNT(5) ERR_TEXT, 2, RTS_ERROR_TEXT("Corrupt collation header"), ERR_LDBINFMT);
			mupip_exit(ERR_LDBINFMT);
		}
		extr_collhdr = *((coll_hdr *)(ptr));
		new_gvn = TRUE;
	} else
		gtm_putmsg(VARLSTCNT(3) ERR_OLDBINEXTRACT, 1, hdr_lvl - '0');
	if (begin < 2)
		begin = 2;
	for (iter = 2; iter < begin; iter++)
	{
		if (!(len = file_input_bin_get((char **)&ptr)))
		{
			gtm_putmsg(VARLSTCNT(3) ERR_LOADEOF, 1, begin);
			util_out_print("Error reading record number: !UL\n", TRUE, iter);
			mupip_error_occurred = TRUE;
			return;
		} else if (len == SIZEOF(coll_hdr))
		{
			extr_collhdr = *((coll_hdr *)(ptr));
			assert(hdr_lvl > '2');
			iter--;
		}
	}
	assert(iter == begin);
	util_out_print("Beginning LOAD at record number: !UL\n", TRUE, begin);
	max_data_len = 0;
	max_subsc_len = 0;
	global_key_count = key_count = 0;
	rec_count = begin - 1;
	extr_collseq = db_collseq = NULL;
	need_xlation = FALSE;
	assert(NULL == tmp_gvkey);	/* GVKEY_INIT macro relies on this */
	GVKEY_INIT(tmp_gvkey, DBKEYSIZE(MAX_KEY_SZ));	/* tmp_gvkey will point to malloced memory after this */
	for (; !mupip_DB_full ;)
	{
		if (++rec_count > end)
			break;
		next_cmpc = 0;
		mupip_error_occurred = FALSE;
		if (mu_ctrly_occurred)
			break;
		if (mu_ctrlc_occurred)
		{
			util_out_print("!AD:!_  Key cnt: !UL  max subsc len: !UL  max data len: !UL", TRUE,
				LEN_AND_LIT(gt_lit), key_count, max_subsc_len, max_data_len);
			util_out_print("Last LOAD record number: !UL", TRUE, key_count ? (rec_count - 1) : 0);
			mu_gvis();
			util_out_print(0, TRUE);
			mu_ctrlc_occurred = FALSE;
		}
		/* reset the stringpool for every record in order to avoid garbage collection */
		stringpool.free = stringpool.base;
		if (!(len = file_input_bin_get((char **)&ptr)) || mupip_error_occurred)
			break;
		else if (len == SIZEOF(coll_hdr))
		{
			extr_collhdr = *((coll_hdr *)(ptr));
			assert(hdr_lvl > '2');
			new_gvn = TRUE;			/* next record will contain a new gvn */
			rec_count--;	/* Decrement as this record does not count as a record for loading purposes */
			continue;
		}
		rp = (rec_hdr*)(ptr);
#		ifdef GTM_CRYPT
		if ('5' <= hdr_lvl)
		{	/* Getting index value from the extracted file. It indicates which database file this record belongs to */
			GET_LONG(index, ptr);
			if (-1 != index) /* Indicates that the record is encrypted. */
			{
				req_dec_blk_size = len - SIZEOF(int4);
				inbuf = (char *)(ptr + SIZEOF(int4));
				GTMCRYPT_DECODE_FAST(encr_key_handles[index], inbuf, req_dec_blk_size, NULL, crypt_status);
				GC_BIN_LOAD_ERR(crypt_status);
			}
			rp = (rec_hdr*)(ptr + SIZEOF(int4));
		}
#		endif
		btop = ptr + len;
		cp1 = (unsigned char*)(rp + 1);
		v.str.addr = (char*)cp1;
		while (*cp1++)
			;
		v.str.len = INTCAST((char*)cp1 - v.str.addr - 1);
		if (('2' >= hdr_lvl) || new_gvn)
		{
			if ((HASHT_GBLNAME_LEN == v.str.len) &&	(0 == memcmp(v.str.addr, HASHT_GBLNAME, HASHT_GBLNAME_LEN)))
				continue;
			bin_call_db(BIN_BIND, (INTPTR_T)gd_header, (INTPTR_T)&v.str);
			max_key = gv_cur_region->max_key_size;
			db_collhdr.act = gv_target->act;
			db_collhdr.ver = gv_target->ver;
			db_collhdr.nct = gv_target->nct;
		}
		GET_USHORT(rec_len, &rp->rsiz);
		if (rp->cmpc != 0 || v.str.len > rec_len || mupip_error_occurred)
		{
			bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count);
			mu_gvis();
			util_out_print(0, TRUE);
			continue;
		}
		if (new_gvn)
		{
			global_key_count = 1;
			if ((db_collhdr.act != extr_collhdr.act || db_collhdr.ver != extr_collhdr.ver
				|| db_collhdr.nct != extr_collhdr.nct
				|| gv_cur_region->std_null_coll != extr_std_null_coll))
			{
				if (extr_collhdr.act)
				{
					if (extr_collseq = ready_collseq((int)extr_collhdr.act))
					{
						if (!do_verify(extr_collseq, extr_collhdr.act, extr_collhdr.ver))
						{
							gtm_putmsg(VARLSTCNT(8) ERR_COLLTYPVERSION, 2, extr_collhdr.act,
								extr_collhdr.ver, ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base);
							mupip_exit(ERR_COLLTYPVERSION);
						}
					} else
					{
						gtm_putmsg(VARLSTCNT(7) ERR_COLLATIONUNDEF, 1, extr_collhdr.act,
							ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base);
						mupip_exit(ERR_COLLATIONUNDEF);
					}
				}
				if (db_collhdr.act)
				{
					if (db_collseq = ready_collseq((int)db_collhdr.act))
					{
						if (!do_verify(db_collseq, db_collhdr.act, db_collhdr.ver))
						{
							gtm_putmsg(VARLSTCNT(8) ERR_COLLTYPVERSION, 2, db_collhdr.act,
								db_collhdr.ver, ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base);
							mupip_exit(ERR_COLLTYPVERSION);
						}
					} else
					{
						gtm_putmsg(VARLSTCNT(7) ERR_COLLATIONUNDEF, 1, db_collhdr.act,
							ERR_GVIS, 2, gv_altkey->end - 1, gv_altkey->base);
						mupip_exit(ERR_COLLATIONUNDEF);
					}
				}
				need_xlation = TRUE;
			} else
				need_xlation = FALSE;
		}
		new_gvn = FALSE;
		for (; rp < (rec_hdr*)btop; rp = (rec_hdr*)((unsigned char *)rp + rec_len))
		{
			GET_USHORT(rec_len, &rp->rsiz);
			if (rec_len + (unsigned char *)rp > btop)
			{
				bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count);
				mu_gvis();
				util_out_print(0, TRUE);
				break;
			}
			cp1 = (unsigned char*)(rp + 1);
			cp2 = gv_currkey->base + rp->cmpc;
			current = 1;
			for (;;)
			{
				last = current;
				current = *cp2++ = *cp1++;
				if (0 == last && 0 == current)
					break;
				if (cp1 > (unsigned char *)rp + rec_len ||
				    cp2 > (unsigned char *)gv_currkey + gv_currkey->top)
				{
					bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count);
					mu_gvis();
					util_out_print(0, TRUE);
					break;
				}
			}
			if (mupip_error_occurred)
				break;
			gv_currkey->end = cp2 - gv_currkey->base - 1;
			if (need_xlation)
			{
				assert(hdr_lvl >= '3');
				assert(extr_collhdr.act || db_collhdr.act || extr_collhdr.nct || db_collhdr.nct ||
				 	extr_std_null_coll != gv_cur_region->std_null_coll);
							/* gv_currkey would have been modified/translated in the earlier put */
				memcpy(gv_currkey->base, cmpc_str, next_cmpc);
				next_rp = (rec_hdr *)((unsigned char*)rp + rec_len);
				if ((unsigned char*)next_rp < btop)
				{
					next_cmpc = next_rp->cmpc;
					assert(next_cmpc <= gv_currkey->end);
					memcpy(cmpc_str, gv_currkey->base, next_cmpc);
				} else
					next_cmpc = 0;
							/* length of the key might change (due to nct variation),
							 * so get a copy of the original key from the extract */
				memcpy(dup_key_str, gv_currkey->base, gv_currkey->end + 1);
				gvkey_char_ptr = dup_key_str;
				while (*gvkey_char_ptr++)
					;
				gv_currkey->prev = 0;
				gv_currkey->end = gvkey_char_ptr - dup_key_str;
				assert(gv_keysize <= tmp_gvkey->top);
				while (*gvkey_char_ptr)
				{
						/* get next subscript (in GT.M internal subsc format) */
					subsc_len = 0;
					tmp_ptr = src_buff;
					while (*gvkey_char_ptr)
						*tmp_ptr++ = *gvkey_char_ptr++;
					subsc_len = tmp_ptr - src_buff;
					src_buff[subsc_len] = '\0';
					if (extr_collseq)
					{
						/* undo the extract time collation */
						TREF(transform) = TRUE;
						save_gv_target_collseq = gv_target->collseq;
						gv_target->collseq = extr_collseq;
					} else
						TREF(transform) = FALSE;
						/* convert the subscript to string format */
					end_buff = gvsub2str(src_buff, dest_buff, FALSE);
						/* transform the string to the current subsc format */
					TREF(transform) = TRUE;
					tmp_mval.mvtype = MV_STR;
					tmp_mval.str.addr = (char *)dest_buff;
					tmp_mval.str.len = INTCAST(end_buff - dest_buff);
					tmp_gvkey->prev = 0;
					tmp_gvkey->end = 0;
					if (extr_collseq)
						gv_target->collseq = save_gv_target_collseq;
					mval2subsc(&tmp_mval, tmp_gvkey);
						/* we now have the correctly transformed subscript */
					tmp_key_ptr = gv_currkey->base + gv_currkey->end;
					memcpy(tmp_key_ptr, tmp_gvkey->base, tmp_gvkey->end + 1);
					gv_currkey->prev = gv_currkey->end;
					gv_currkey->end += tmp_gvkey->end;
					gvkey_char_ptr++;
				}
				if (gv_cur_region->std_null_coll != extr_std_null_coll && gv_currkey->prev)
				{
					if (extr_std_null_coll == 0)
					{
						GTM2STDNULLCOLL(gv_currkey->base, gv_currkey->end);
					} else
					{
						STD2GTMNULLCOLL(gv_currkey->base, gv_currkey->end);
					}
				}
			}
			if (gv_currkey->end >= max_key)
			{
				bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count);
				mu_gvis();
				util_out_print(0, TRUE);
				continue;
			}
			if (max_subsc_len < (gv_currkey->end + 1))
				max_subsc_len = gv_currkey->end + 1;
			v.str.addr = (char*)cp1;
			v.str.len = INTCAST(rec_len - (cp1 - (unsigned char *)rp));
			if (max_data_len < v.str.len)
				max_data_len = v.str.len;
			bin_call_db(BIN_PUT, (INTPTR_T)&v, 0);
			if (mupip_error_occurred)
			{
				if (!mupip_DB_full)
				{
					bin_call_db(ERR_COR, (INTPTR_T)rec_count, (INTPTR_T)global_key_count);
					util_out_print(0, TRUE);
				}
				break;
			}
			key_count++;
			global_key_count++;
		}
	}
	GTMCRYPT_ONLY(
		if (NULL != hash_array)
			free(hash_array);
	)
Example #17
0
void op_merge(void)
{
	boolean_t		found, check_for_null_subs, is_base_var;
	lv_val			*dst_lv;
	mval 			*mkey, *value, *subsc;
	int			org_glvn1_keysz, org_glvn2_keysz, delta2, dollardata_src, dollardata_dst, sbs_depth;
	unsigned char		*ptr, *ptr2;
	unsigned char  		buff[MAX_ZWR_KEY_SZ];
	unsigned char		nullcoll_src, nullcoll_dst;
	zshow_out		output;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(MAX_STRLEN >= MAX_ZWR_KEY_SZ);
	assert ((merge_args == (MARG1_LCL | MARG2_LCL)) ||
		(merge_args == (MARG1_LCL | MARG2_GBL)) ||
		(merge_args == (MARG1_GBL | MARG2_LCL)) ||
		(merge_args == (MARG1_GBL | MARG2_GBL)));
	assert(!lvzwrite_block || 0 == lvzwrite_block->curr_subsc);
	/* Need to protect value from stpgcol */
	PUSH_MV_STENT(MVST_MVAL);
	value = &mv_chain->mv_st_cont.mvs_mval;
	value->mvtype = 0; /* initialize mval in the M-stack in case stp_gcol gets called before value gets initialized below */
	if (MARG2_IS_GBL(merge_args))
	{	/* Need to protect mkey returned from gvcst_queryget from stpgcol */
		PUSH_MV_STENT(MVST_MVAL);
		mkey = &mv_chain->mv_st_cont.mvs_mval;
		mkey->mvtype = 0; /* initialize mval in M-stack in case stp_gcol gets called before mkey gets initialized below */
		gvname_env_restore(mglvnp->gblp[IND2]);
		/* now $DATA will be done for gvn2. op_gvdata input parameters are set in the form of some GBLREF */
		op_gvdata(value);
		dollardata_src = MV_FORCE_INT(value);
		if (0 == dollardata_src)
		{	/* nothing in source global */
			UNDO_ACTIVE_LV;
			POP_MV_STENT();	/* value */
			POP_MV_STENT(); /* mkey */
			if (MARG1_IS_GBL(merge_args))
				gvname_env_restore(mglvnp->gblp[IND1]);	 /* store destination as naked indicator in gv_currkey */
			merge_args = 0;	/* Must reset to zero to reuse the Global */
			return;
		}
		if (NULL == TREF(gv_mergekey2))
		{	/* We need to initialize gvn2 (right hand side). */
			GVKEY_INIT(TREF(gv_mergekey2), DBKEYSIZE(MAX_KEY_SZ));
		}
		org_glvn1_keysz = mglvnp->gblp[IND1]->s_gv_currkey->end + 1;
		org_glvn2_keysz = gv_currkey->end + 1;
		(TREF(gv_mergekey2))->end = gv_currkey->end;
		(TREF(gv_mergekey2))->prev = gv_currkey->prev;
		memcpy((TREF(gv_mergekey2))->base, gv_currkey->base, gv_currkey->end + 1);
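		/* Remember the root key of the source global in gv_mergekey2: gv_currkey advances as the source tree is
		 * traversed, and this saved prefix is what tells us when a $QUERY result has moved outside the source subtree.
		 */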
		if (MARG1_IS_GBL(merge_args))
		{	/*==================== MERGE ^gvn1=^gvn2 =====================*/
			if (mglvnp->gblp[IND2]->s_gv_target->nct != mglvnp->gblp[IND1]->s_gv_target->nct)
				rts_error(VARLSTCNT(1) ERR_NCTCOLLDIFF);
			/* if self merge then NOOP */
			if (!merge_desc_check()) /* will not proceed if one is descendant of another */
			{
				gvname_env_restore(mglvnp->gblp[IND1]);	 /* store destination as naked indicator in gv_currkey */
				POP_MV_STENT();	/* value */
				merge_args = 0;	/* Must reset to zero to reuse the Global */
				return;
			}
			nullcoll_src = mglvnp->gblp[IND2]->s_gv_cur_region->std_null_coll;
			nullcoll_dst = mglvnp->gblp[IND1]->s_gv_cur_region->std_null_coll;
			if (1 == dollardata_src || 11 == dollardata_src)
			{
				found = op_gvget(value);  /* value of ^glvn2 */
				if (found)
				{	/* SET ^gvn1=^gvn2 */
					gvname_env_restore(mglvnp->gblp[IND1]);
					op_gvput(value);
					/* Note: If ^gvn1's null_sub=ALLOWEXISTING and say ^gvn1("")=^gvn,
					 * this will give NULL_SUBC error
					 */
				}
			}
			check_for_null_subs = (NEVER != mglvnp->gblp[IND2]->s_gv_cur_region->null_subs) &&
				(ALWAYS != mglvnp->gblp[IND1]->s_gv_cur_region->null_subs);
			/* Traverse descendant of ^gvn2 and copy into ^gvn1 */
			for (; ;)
			{
				if (outofband)
				{
					gvname_env_restore(mglvnp->gblp[IND1]); /* naked indicator is restored into gv_currkey */
					outofband_action(FALSE);
				}
				/* Restore last key under ^gvn2 we worked */
				gvname_env_restore(mglvnp->gblp[IND2]);
				assert(0 == gv_currkey->base[gv_currkey->end - 1] && 0 == gv_currkey->base[gv_currkey->end]);
				/* following is an attempt to find the immediate right sibling */
				gv_currkey->base[gv_currkey->end] = 1;
				gv_currkey->base[gv_currkey->end + 1] = 0;
				gv_currkey->base[gv_currkey->end + 2] = 0;
				gv_currkey->end += 2;
				/* Do atomic $QUERY and $GET of current glvn2:
				 * mkey is a mstr which contains $QUERY result in database format (So no conversion necessary)
				 * value is a mstr which contains $GET result
				 */
				if (!op_gvqueryget(mkey, value))
					break;
				assert(MV_IS_STRING(mkey));
				if (mkey->str.len < org_glvn2_keysz)
					break;
				if (0 != *((unsigned char *)mkey->str.addr + (TREF(gv_mergekey2))->end - 1) ||
					memcmp(mkey->str.addr, (TREF(gv_mergekey2))->base, (TREF(gv_mergekey2))->end - 1))
					break; 					/* mkey is not under the sub-tree */
				delta2 = mkey->str.len - org_glvn2_keysz; 	/* length increase of source key */
				assert (0 < delta2);
				/* Save the new source key for next iteration */
				memcpy(mglvnp->gblp[IND2]->s_gv_currkey->base + org_glvn2_keysz - 2,
					mkey->str.addr + org_glvn2_keysz - 2, delta2 + 2);
				mglvnp->gblp[IND2]->s_gv_currkey->end = mkey->str.len - 1;
				/* Create the destination key for this iteration (under ^glvn1) */
				gvname_env_restore(mglvnp->gblp[IND1]);
				if (gv_cur_region->max_key_size < org_glvn1_keysz + delta2)
					ISSUE_GVSUBOFLOW_ERROR(gv_currkey);
				assert(gv_currkey->end == org_glvn1_keysz - 1);
				memcpy(gv_currkey->base + org_glvn1_keysz - 2,
					mkey->str.addr + org_glvn2_keysz - 2, delta2 + 2);
				gv_currkey->end = org_glvn1_keysz + delta2 - 1;
				if (nullcoll_src != nullcoll_dst)
				{
					if (0 == nullcoll_dst)
					{	/* Standard to GTM null subscript conversion*/
						STD2GTMNULLCOLL((unsigned char *)gv_currkey->base + org_glvn1_keysz - 1,
								delta2 - 1);
					} else
					{	/*  GTM to standard null subscript conversion */
						GTM2STDNULLCOLL((unsigned char *)gv_currkey->base + org_glvn1_keysz - 1,
								delta2 - 1);
					}
				}
				/* Check for null subscripts in the destination key. Note that we have already restored the
				 * destination global and the corresponding region and key information.
				 */
				if (check_for_null_subs)
				{
					ptr2 = gv_currkey->base + gv_currkey->end - 1;
					for (ptr = gv_currkey->base + org_glvn1_keysz - 2; ptr < ptr2; )
					{
						if (KEY_DELIMITER == *ptr++ && KEY_DELIMITER == *(ptr + 1) &&
							(0 == gv_cur_region->std_null_coll ? (STR_SUB_PREFIX == *ptr) :
							(SUBSCRIPT_STDCOL_NULL == *ptr)))
							/* Note: For sgnl_gvnulsubsc/rts_error
							 * 	 we do not restore proper naked indicator.
							 * The standard states that the effect of a MERGE command
							 * on the naked indicator is that the naked indicator will be changed
							 * as if a specific SET command would have been executed.
							 * The standard also states that the effect on the naked indicator
							 * will only be visible after the MERGE command has completed.
							 * So, if there is an error during the execution of a MERGE command,
							 * the standard allows the naked indicator to reflect any intermediate
							 * state. This provision was made intentionally, otherwise it would
							 * have become nearly impossible to create a fully standard
							 * implementation. : From Ed de Moel : 2/1/2
							 */
							sgnl_gvnulsubsc();
					}
				}
				/* Now put value of ^glvn2 descendant into corresponding descendant under ^glvn1 */
				op_gvput(value);
			}
			gvname_env_restore(mglvnp->gblp[IND1]);	 /* store destination as naked indicator in gv_currkey */
		} else
		{	/*==================== MERGE lvn1=^gvn2 =====================*/
			assert(MARG1_IS_LCL(merge_args));
			assert(mglvnp->lclp[IND1]);
			/* Need to protect subsc created from global variable subscripts from stpgcol */
			PUSH_MV_STENT(MVST_MVAL);
			subsc = &mv_chain->mv_st_cont.mvs_mval;
			/* Restore ^gvn2 we will work */
			gvname_env_save(mglvnp->gblp[IND2]);
			if (1 == dollardata_src || 11 == dollardata_src)
			{	/* SET lvn1=^gvn2 */
				found = op_gvget(value);
				if (found)
					mglvnp->lclp[IND1]->v = *value;
			}
			for (; ;)
			{
				if (outofband)
				{
					gvname_env_restore(mglvnp->gblp[IND2]);	 /* naked indicator is restored into gv_currkey */
					outofband_action(FALSE);
				}
				assert(0 == gv_currkey->base[gv_currkey->end - 1] && 0 == gv_currkey->base[gv_currkey->end]);
				/* following is an attempt to find the immediate right sibling */
				gv_currkey->base[gv_currkey->end] = 1;
				gv_currkey->base[gv_currkey->end + 1] = 0;
				gv_currkey->base[gv_currkey->end + 2] = 0;
				gv_currkey->end += 2;
				/* Do $QUERY and $GET of current glvn2. Result will be in mkey and value respectively.
				 * mkey->str contains data as database format. So no conversion necessary
				 */
				if (!op_gvqueryget(mkey, value))
					break;
				if (mkey->str.len < (TREF(gv_mergekey2))->end + 1)
					break;
				ptr = (unsigned char *)mkey->str.addr + (TREF(gv_mergekey2))->end - 1;
				if (0 != *ptr || memcmp(mkey->str.addr, (TREF(gv_mergekey2))->base, (TREF(gv_mergekey2))->end - 1))
					break;
				assert(MV_IS_STRING(mkey));
				delta2 = mkey->str.len - org_glvn2_keysz; /* length increase of key */
				assert (0 < delta2);
				/* Create next key for ^glvn2 */
				memcpy(gv_currkey->base + org_glvn2_keysz - 2, mkey->str.addr + org_glvn2_keysz - 2, delta2 + 2);
				gv_currkey->end = mkey->str.len - 1;
				/* Now add subscripts to create the entire key */
				dst_lv = mglvnp->lclp[IND1];
				is_base_var = LV_IS_BASE_VAR(dst_lv);
				ptr = (unsigned char *)gv_currkey->base + org_glvn2_keysz - 1;
				assert(*ptr);
				do
				{
					LV_SBS_DEPTH(dst_lv, is_base_var, sbs_depth);
					if (MAX_LVSUBSCRIPTS <= sbs_depth)
						rts_error(VARLSTCNT(3) ERR_MERGEINCOMPL, 0, ERR_MAXNRSUBSCRIPTS);
					ptr2 = gvsub2str(ptr, buff, FALSE);
					subsc->mvtype = MV_STR;
					subsc->str.addr = (char *)buff;
					subsc->str.len = INTCAST(ptr2 - buff);
					s2pool(&subsc->str);
					dst_lv = op_putindx(VARLSTCNT(2) dst_lv, subsc);
					while (*ptr++);	/* skip to start of next subscript */
					is_base_var = FALSE;
				} while (*ptr);
				/* We created the key. Pre-process the node in case a container is being replaced,
				 * then assign the value directly. Note there is no need to worry about MV_ALIASCONT
				 * propagation since the source in this case is a global var.
				 */
				DECR_AC_REF(dst_lv, TRUE);
				dst_lv->v = *value;
			}
			gvname_env_restore(mglvnp->gblp[IND2]);	 /* naked indicator is restored into gv_currkey */
			POP_MV_STENT();     /* subsc */
		}
		POP_MV_STENT();     /* mkey */
	} else
	{	/* source is local */
		op_fndata(mglvnp->lclp[IND2], value);
		dollardata_src = MV_FORCE_INT(value);
		if (0 == dollardata_src)
		{
			UNDO_ACTIVE_LV;
			POP_MV_STENT();	/* value */
			if (MARG1_IS_GBL(merge_args))
				gvname_env_restore(mglvnp->gblp[IND1]);	 /* store destination as naked indicator in gv_currkey */
			merge_args = 0;	/* Must reset to zero to reuse the Global */
			return;
		}
		/* not memsetting output to 0 here can cause garbage value of output.out_var.lv.child which in turn can
		 * cause a premature return from lvzwr_var resulting in op_merge() returning without having done the merge.
		 */
		memset(&output, 0, SIZEOF(output));
		if (MARG1_IS_LCL(merge_args))
		{	/*==================== MERGE lvn1=lvn2 =====================*/
			assert(mglvnp->lclp[IND1]);
			/* if self merge then NOOP */
			if (!merge_desc_check()) /* will not proceed if one is descendant of another */
			{
				POP_MV_STENT();	/* value */
				merge_args = 0;	/* Must reset to zero to reuse the Global */
				return;
			}
			output.buff = (char *)buff;
			output.ptr = output.buff;
			output.out_var.lv.lvar = mglvnp->lclp[IND1];
			zwr_output = &output;
			lvzwr_init(zwr_patrn_mident, &mglvnp->lclp[IND2]->v);
			lvzwr_arg(ZWRITE_ASTERISK, 0, 0);
			lvzwr_var(mglvnp->lclp[IND2], 0);
			/* assert that destination got all data of the source and its descendants */
			DEBUG_ONLY(op_fndata(mglvnp->lclp[IND1], value));
			DEBUG_ONLY(dollardata_dst = MV_FORCE_INT(value));
			assert((dollardata_src & dollardata_dst) == dollardata_src);
		} else
		{	/*==================== MERGE ^gvn1=lvn2 =====================*/
			assert(MARG1_IS_GBL(merge_args) && MARG2_IS_LCL(merge_args));
			gvname_env_save(mglvnp->gblp[IND1]);
			output.buff = (char *)buff;
			output.ptr = output.buff;
			output.out_var.gv.end = gv_currkey->end;
			output.out_var.gv.prev = gv_currkey->prev;
			zwr_output = &output;
			lvzwr_init(zwr_patrn_mident, &mglvnp->lclp[IND2]->v);
			lvzwr_arg(ZWRITE_ASTERISK, 0, 0);
			lvzwr_var(mglvnp->lclp[IND2], 0);
			gvname_env_restore(mglvnp->gblp[IND1]);	 /* store destination as naked indicator in gv_currkey */
		}
	}
	POP_MV_STENT();	/* value */
	merge_args = 0;	/* Must reset to zero to reuse the Global */
}