Code example #1
File: gv_init_reg.c Project: CeperaCPP/fis-gtm
void gv_init_reg (gd_region *reg)
{
	gv_namehead	*g;
	sgmnt_addrs	*csa;
#ifdef	NOLICENSE
	licensed = TRUE;
#else
	CRYPT_CHKSYSTEM;
#endif
	switch (reg->dyn.addr->acc_meth)
	{
	case dba_usr:
		gvusr_init (reg, &gv_cur_region, &gv_currkey, &gv_altkey);
		break;
		/* we may be left in dba_cm state for gt_cm, if we have run down the db and accessed it
		   again without quitting out of gtm */
	case dba_cm:
	case dba_mm:
	case dba_bg:
		if (!reg->open)
			gvcst_init(reg);
		break;
	default:
		GTMASSERT;
	}
	assert(reg->open);
	GVKEYSIZE_INCREASE_IF_NEEDED(DBKEYSIZE(reg->max_key_size));
	if (reg->dyn.addr->acc_meth == dba_bg || reg->dyn.addr->acc_meth == dba_mm)
	{
		if (!reg->was_open)
		{
			csa = (sgmnt_addrs*)&FILE_INFO(reg)->s_addrs;
			g = csa->dir_tree;
			if (NULL != g)
			{	/* It is possible that dir_tree has already been targ_alloc'ed. This is because GT.CM or VMS DAL
				 * calls can run down regions without the process halting out. We don't want to double malloc.
				 */
				g->clue.end = 0;
			}
			SET_CSA_DIR_TREE(csa, reg->max_key_size, reg);
		}
	}
	return;
}
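A note on the key-size step above: GVKEYSIZE_INCREASE_IF_NEEDED(DBKEYSIZE(reg->max_key_size)) grows the process-wide key buffers when the newly opened region allows larger keys; example #6 below shows an older, open-coded version of the same growth. The following is a minimal standalone sketch of that growth step, using a simplified stand-in struct rather than the real gv_key layout or the GT.M macros:

#include <stdlib.h>
#include <string.h>

typedef struct
{	/* simplified stand-in for gv_key */
	unsigned short	top;	/* allocated size of base[] */
	unsigned short	end;	/* bytes of base[] currently in use */
	unsigned char	base[1];
} key_sketch;

/* Grow (or initially allocate) a key buffer to new_size bytes, preserving any
 * existing contents; the copy size mirrors the open-coded logic in example #6. */
static key_sketch *grow_key(key_sketch *key, int new_size)
{
	key_sketch	*bigger;

	bigger = (key_sketch *)malloc(sizeof(key_sketch) - 1 + new_size);
	if (NULL != key)
	{
		memcpy(bigger, key, sizeof(key_sketch) + key->end);	/* header plus used bytes */
		free(key);
	} else
	{
		bigger->end = 0;
		bigger->base[0] = '\0';
	}
	bigger->top = new_size;
	return bigger;
}

int main(void)
{
	key_sketch	*currkey = NULL;

	currkey = grow_key(currkey, 64);	/* hypothetical initial key size */
	currkey = grow_key(currkey, 256);	/* a later region needs larger keys */
	free(currkey);
	return 0;
}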
Code example #2
File: mu_int_reg.c Project: shabiel/YottaDB
void mu_int_reg(gd_region *reg, boolean_t *return_value, boolean_t return_after_open)
{
	boolean_t		read_only, was_crit;
	freeze_status		status;
	node_local_ptr_t	cnl;
	sgmnt_addrs     	*csa;
	sgmnt_data_ptr_t	csd;
	sgmnt_data		*csd_copy_ptr;
	gd_segment		*seg;
	int			gtmcrypt_errno;
#	ifdef DEBUG
	boolean_t		need_to_wait = FALSE;
	int			trynum;
	uint4			curr_wbox_seq_num;
#	endif

	*return_value = FALSE;
	jnlpool_init_needed = TRUE;
	ESTABLISH(mu_int_reg_ch);
	if (dba_usr == reg->dyn.addr->acc_meth)
	{
		util_out_print("!/Can't integ region !AD; not GDS format", TRUE,  REG_LEN_STR(reg));
		mu_int_skipreg_cnt++;
		return;
	}
	gv_cur_region = reg;
	if (reg_cmcheck(reg))
	{
		util_out_print("!/Can't integ region across network", TRUE);
		mu_int_skipreg_cnt++;
		return;
	}
	gvcst_init(gv_cur_region);
	if (gv_cur_region->was_open)
	{	/* already open under another name */
		gv_cur_region->open = FALSE;
		return;
	}
	if (return_after_open)
	{
		*return_value = TRUE;
		return;
	}
	change_reg();
	csa = &FILE_INFO(gv_cur_region)->s_addrs;
	cnl = csa->nl;
	csd = csa->hdr;
	read_only = gv_cur_region->read_only;
	assert(NULL != mu_int_master);
	/* Ensure that we don't see an increase in the file header and master map size compared to its maximum values */
	assert(SGMNT_HDR_LEN >= SIZEOF(sgmnt_data) && (MASTER_MAP_SIZE_MAX >= MASTER_MAP_SIZE(csd)));
	/* ONLINE INTEG, if asked for explicitly by specifying -ONLINE, is an error if the db has partial V4 blocks.
	 * However, if -ONLINE is not explicitly specified but rather assumed implicitly (as the default for -REG),
	 * then turn off ONLINE INTEG for this region and continue as if -NOONLINE was specified.
	 */
	if (!csd->fully_upgraded)
	{
		ointeg_this_reg = FALSE; /* Turn off ONLINE INTEG for this region */
		if (online_specified)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_SSV4NOALLOW, 2, DB_LEN_STR(gv_cur_region));
			util_out_print(NO_ONLINE_ERR_MSG, TRUE);
			mu_int_skipreg_cnt++;
			return;
		}
	}
	if (!ointeg_this_reg || read_only)
	{
		status = region_freeze(gv_cur_region, TRUE, FALSE, TRUE, FALSE, !read_only);
		switch (status)
		{
			case REG_ALREADY_FROZEN:
				if (csa->read_only_fs)
					break;
				util_out_print("!/Database for region !AD is already frozen, not integing",
					TRUE, REG_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_FLUSH_ERROR:
				gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT(MUPIP_INTEG),
					DB_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_HAS_KIP:
				/* We have already waited for KIP to reset. This time do not wait for KIP */
				status = region_freeze(gv_cur_region, TRUE, FALSE, FALSE, FALSE, !read_only);
				if (REG_ALREADY_FROZEN == status)
				{
					if (csa->read_only_fs)
						break;
					util_out_print("!/Database for region !AD is already frozen, not integing",
						TRUE, REG_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				} else if (REG_FLUSH_ERROR == status)
				{
					gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT(MUPIP_INTEG),
						DB_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				}
				assert(REG_FREEZE_SUCCESS == status);
				/* no break */
			case REG_FREEZE_SUCCESS:
				break;
			default:
				assert(FALSE);
				/* no break */
		}
		if (read_only && (dba_bg == csa->hdr->acc_meth) && !mu_int_wait_rdonly(csa, MUPIP_INTEG))
		{
			mu_int_skipreg_cnt++;
			return;
		}
	}
	if (!ointeg_this_reg)
	{	/* Take a copy of the file-header. To ensure it is consistent, do it while holding crit. */
		was_crit = csa->now_crit;
		if (!was_crit)
			grab_crit(gv_cur_region);
		memcpy((uchar_ptr_t)&mu_int_data, (uchar_ptr_t)csd, SIZEOF(sgmnt_data));
		if (!was_crit)
			rel_crit(gv_cur_region);
		memcpy(mu_int_master, MM_ADDR(csd), MASTER_MAP_SIZE(csd));
		csd_copy_ptr = &mu_int_data;
	} else
	{
		if (!ss_initiate(gv_cur_region, util_ss_ptr, &csa->ss_ctx, preserve_snapshot, MUPIP_INTEG))
		{
			mu_int_skipreg_cnt++;
			assert(NULL != csa->ss_ctx);
			ss_release(&csa->ss_ctx);
			ointeg_this_reg = FALSE; /* Turn off ONLINE INTEG for this region */
			assert(process_id != cnl->in_crit); /* Ensure ss_initiate released the crit before returning */
			assert(!FROZEN_HARD(csd)); /* Ensure region is unfrozen before returning from ss_initiate */
			assert(INTRPT_IN_SS_INITIATE != intrpt_ok_state); /* Ensure ss_initiate released intrpt_ok_state */
			return;
		}
		assert(process_id != cnl->in_crit); /* Ensure ss_initiate released the crit before returning */
		assert(INTRPT_IN_SS_INITIATE != intrpt_ok_state); /* Ensure ss_initiate released intrpt_ok_state */
		csd_copy_ptr = &csa->ss_ctx->ss_shm_ptr->shadow_file_header;
#		if defined(DEBUG)
		curr_wbox_seq_num = 1;
		cnl->wbox_test_seq_num = curr_wbox_seq_num; /* indicate we took the next step */
		GTM_WHITE_BOX_TEST(WBTEST_OINTEG_WAIT_ON_START, need_to_wait, TRUE);
		if (need_to_wait) /* wait for them to take next step */
		{
			trynum = 30; /* given 30 cycles to tell you to go */
			while ((curr_wbox_seq_num == cnl->wbox_test_seq_num) && trynum--)
				LONG_SLEEP(1);
			cnl->wbox_test_seq_num++; /* let them know we took the next step */
			assert(trynum);
		}
#		endif
	}
	if (USES_ANY_KEY(csd_copy_ptr))
	{ 	/* Initialize mu_int_encrypt_key_handle to be used in mu_int_read */
		seg = gv_cur_region->dyn.addr;
		INIT_DB_OR_JNL_ENCRYPTION(&mu_int_encr_handles, csd_copy_ptr, seg->fname_len, (char *)seg->fname, gtmcrypt_errno);
		if (0 != gtmcrypt_errno)
		{
			GTMCRYPT_REPORT_ERROR(gtmcrypt_errno, gtm_putmsg, seg->fname_len, seg->fname);
			mu_int_skipreg_cnt++;
			return;
		}
	}
	*return_value = mu_int_fhead();
	REVERT;
	return;
}
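One detail worth noting in the !ointeg_this_reg branch above: the file-header copy is taken under crit, but crit is grabbed and released only if the process did not already hold it (the was_crit flag). Below is a minimal sketch of the same pattern using plain pthreads; the mutex is only an analogy for grab_crit/rel_crit, not the GT.M implementation, and the header struct is a hypothetical stand-in:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

typedef struct
{	/* stand-in for the shared file header (sgmnt_data) */
	int	curr_tn;
	int	free_blocks;
} header_sketch;

static pthread_mutex_t	crit = PTHREAD_MUTEX_INITIALIZER;
static header_sketch	shared_hdr = { 42, 7 };

/* Take a consistent private copy of the shared header, acquiring the lock only
 * if the caller does not already hold it. */
static void copy_header(header_sketch *copy, int was_crit)
{
	if (!was_crit)
		pthread_mutex_lock(&crit);	/* grab_crit() analogue */
	memcpy(copy, &shared_hdr, sizeof(header_sketch));
	if (!was_crit)
		pthread_mutex_unlock(&crit);	/* rel_crit() analogue */
}

int main(void)
{
	header_sketch	local;

	copy_header(&local, 0);	/* caller did not hold the lock */
	printf("curr_tn=%d free_blocks=%d\n", local.curr_tn, local.free_blocks);
	return 0;
}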
Code example #3
File: mupip_set_file.c Project: 5HT/mumps
int4 mupip_set_file(int db_fn_len, char *db_fn)
{
	bool			got_standalone;
	boolean_t		bypass_partial_recov, need_standalone = FALSE;
	char			acc_spec[MAX_ACC_METH_LEN], ver_spec[MAX_DB_VER_LEN], exit_stat, *fn;
	unsigned short		acc_spec_len = MAX_ACC_METH_LEN, ver_spec_len = MAX_DB_VER_LEN;
	int			fd, fn_len;
	int4			status;
	int4			status1;
	int			glbl_buff_status, defer_status, rsrvd_bytes_status,
				extn_count_status, lock_space_status, disk_wait_status;
	int4			new_disk_wait, new_cache_size, new_extn_count, new_lock_space, reserved_bytes, defer_time;
	sgmnt_data_ptr_t	csd;
	tp_region		*rptr, single;
	enum db_acc_method	access, access_new;
	enum db_ver		desired_dbver;
	gd_region		*temp_cur_region;
	char			*errptr, *command = "MUPIP SET VERSION";
	int			save_errno;

	error_def(ERR_DBPREMATEOF);
	error_def(ERR_DBRDERR);
	error_def(ERR_DBRDONLY);
	error_def(ERR_INVACCMETHOD);
	error_def(ERR_MUNOACTION);
	error_def(ERR_RBWRNNOTCHG);
	error_def(ERR_WCERRNOTCHG);
	error_def(ERR_WCWRNNOTCHG);
	error_def(ERR_MMNODYNDWNGRD);

	exit_stat = EXIT_NRM;
	defer_status = cli_present("DEFER_TIME");
	if (defer_status)
		need_standalone = TRUE;
	bypass_partial_recov = cli_present("PARTIAL_RECOV_BYPASS") == CLI_PRESENT;
	if (bypass_partial_recov)
		need_standalone = TRUE;
	if (disk_wait_status = cli_present("WAIT_DISK"))
	{
		if (cli_get_int("WAIT_DISK", &new_disk_wait))
		{
			if (new_disk_wait < 0)
			{
				util_out_print("!UL negative, minimum WAIT_DISK allowed is 0.", TRUE, new_disk_wait);
				return (int4)ERR_WCWRNNOTCHG;
			}
			need_standalone = TRUE;
		} else
		{
			util_out_print("Error getting WAIT_DISK qualifier value", TRUE);
			return (int4)ERR_WCWRNNOTCHG;
		}
	}
	if (glbl_buff_status = cli_present("GLOBAL_BUFFERS"))
	{
		if (cli_get_int("GLOBAL_BUFFERS", &new_cache_size))
		{
			if (new_cache_size > WC_MAX_BUFFS)
			{
				util_out_print("!UL too large, maximum write cache buffers allowed is !UL", TRUE, new_cache_size,
						WC_MAX_BUFFS);
				return (int4)ERR_WCWRNNOTCHG;
			}
			if (new_cache_size < WC_MIN_BUFFS)
			{
				util_out_print("!UL too small, minimum cache buffers allowed is !UL", TRUE, new_cache_size,
						WC_MIN_BUFFS);
				return (int4)ERR_WCWRNNOTCHG;
			}
		} else
		{
			util_out_print("Error getting GLOBAL BUFFER qualifier value", TRUE);
			return (int4)ERR_WCWRNNOTCHG;
		}
		need_standalone = TRUE;
	}
	/* EXTENSION_COUNT does not require standalone access and hence need_standalone will not be set to TRUE for this. */
	if (extn_count_status = cli_present("EXTENSION_COUNT"))
	{
		if (cli_get_int("EXTENSION_COUNT", &new_extn_count))
		{
			if (new_extn_count > MAX_EXTN_COUNT)
			{
				util_out_print("!UL too large, maximum extension count allowed is !UL", TRUE, new_extn_count,
						MAX_EXTN_COUNT);
				return (int4)ERR_WCWRNNOTCHG;
			}
			if (new_extn_count < MIN_EXTN_COUNT)
			{
				util_out_print("!UL too small, minimum extension count allowed is !UL", TRUE, new_extn_count,
						MIN_EXTN_COUNT);
				return (int4)ERR_WCWRNNOTCHG;
			}
		} else
		{
			util_out_print("Error getting EXTENSION COUNT qualifier value", TRUE);
			return (int4)ERR_WCWRNNOTCHG;
		}
	}
	if (lock_space_status = cli_present("LOCK_SPACE"))
	{
		if (cli_get_int("LOCK_SPACE", &new_lock_space))
		{
			if (new_lock_space > MAX_LOCK_SPACE)
			{
				util_out_print("!UL too large, maximum lock space allowed is !UL", TRUE,
						new_lock_space, MAX_LOCK_SPACE);
				return (int4)ERR_WCWRNNOTCHG;
			}
			else if (new_lock_space < MIN_LOCK_SPACE)
			{
				util_out_print("!UL too small, minimum lock space allowed is !UL", TRUE,
						new_lock_space, MIN_LOCK_SPACE);
				return (int4)ERR_WCWRNNOTCHG;
			}
		} else
		{
			util_out_print("Error getting LOCK_SPACE qualifier value", TRUE);
			return (int4)ERR_WCWRNNOTCHG;
		}
		need_standalone = TRUE;
	}
	if (rsrvd_bytes_status = cli_present("RESERVED_BYTES"))
	{
		if (!cli_get_int("RESERVED_BYTES", &reserved_bytes))
		{
			util_out_print("Error getting RESERVED BYTES qualifier value", TRUE);
			return (int4)ERR_RBWRNNOTCHG;
		}
		need_standalone = TRUE;
	}
	if (cli_present("ACCESS_METHOD"))
	{
		cli_get_str("ACCESS_METHOD", acc_spec, &acc_spec_len);
		cli_strupper(acc_spec);
		if (0 == memcmp(acc_spec, "MM", acc_spec_len))
			access = dba_mm;
		else  if (0 == memcmp(acc_spec, "BG", acc_spec_len))
			access = dba_bg;
		else
			mupip_exit(ERR_INVACCMETHOD);
		need_standalone = TRUE;
	} else
		access = n_dba;		/* really want to keep current method,
					    which has not yet been read */
	if (cli_present("VERSION"))
	{
		assert(!need_standalone);
		cli_get_str("VERSION", ver_spec, &ver_spec_len);
		cli_strupper(ver_spec);
		if (0 == memcmp(ver_spec, "V4", ver_spec_len))
			desired_dbver = GDSV4;
		else  if (0 == memcmp(ver_spec, "V5", ver_spec_len))
			desired_dbver = GDSV5;
		else
			GTMASSERT;		/* CLI should prevent us ever getting here */
	} else
		desired_dbver = GDSVLAST;	/* really want to keep version, which has not yet been read */
	if (region)
		rptr = grlist;
	else
	{
		rptr = &single;
		memset(&single, 0, sizeof(single));
	}

	csd = (sgmnt_data *)malloc(ROUND_UP(sizeof(sgmnt_data), DISK_BLOCK_SIZE));
	in_backup = FALSE;		/* Only want yes/no from mupfndfil, not an address */
	for (;  rptr != NULL;  rptr = rptr->fPtr)
	{
		if (region)
		{
			if (dba_usr == rptr->reg->dyn.addr->acc_meth)
			{
				util_out_print("!/Region !AD is not a GDS access type", TRUE, REG_LEN_STR(rptr->reg));
				exit_stat |= EXIT_WRN;
				continue;
			}
			if (!mupfndfil(rptr->reg, NULL))
				continue;
			fn = (char *)rptr->reg->dyn.addr->fname;
			fn_len = rptr->reg->dyn.addr->fname_len;
		} else
		{
			fn = db_fn;
			fn_len = db_fn_len;
		}
		mu_gv_cur_reg_init();
		strcpy((char *)gv_cur_region->dyn.addr->fname, fn);
		gv_cur_region->dyn.addr->fname_len = fn_len;
		if (!need_standalone)
		{
			gvcst_init(gv_cur_region);
			change_reg();	/* sets cs_addrs and cs_data */
			if (gv_cur_region->read_only)
			{
				gtm_putmsg(VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region));
				exit_stat |= EXIT_ERR;
				gds_rundown();
				mu_gv_cur_reg_free();
				continue;
			}
			grab_crit(gv_cur_region);
			status = EXIT_NRM;
			access_new = (n_dba == access ? cs_data->acc_meth : access);
							/* recalculate; n_dba is a proxy for no change */
			change_fhead_timer("FLUSH_TIME", cs_data->flush_time,
					   (dba_bg == access_new ? TIM_FLU_MOD_BG : TIM_FLU_MOD_MM),
					   FALSE);
			if (GDSVLAST != desired_dbver)
			{
				if ((dba_mm != access_new) || (GDSV4 != desired_dbver))
					status1 = desired_db_format_set(gv_cur_region, desired_dbver, command);
				else
				{
					status1 = ERR_MMNODYNDWNGRD;
					gtm_putmsg(VARLSTCNT(4) status1, 2, REG_LEN_STR(gv_cur_region));
				}
				if (SS_NORMAL != status1)
				{	/* "desired_db_format_set" would have printed appropriate error messages */
					if (ERR_MUNOACTION != status1)
					{	/* real error occurred while setting the db format. skip to next region */
						status = EXIT_ERR;
					}
				}
			}
			if (EXIT_NRM == status)
			{
				if (extn_count_status)
					cs_data->extension_size = (uint4)new_extn_count;
				wcs_flu(WCSFLU_FLUSH_HDR);
				if (extn_count_status)
					util_out_print("Database file !AD now has extension count !UL",
						TRUE, fn_len, fn, cs_data->extension_size);
				if (GDSVLAST != desired_dbver)
					util_out_print("Database file !AD now has desired DB format !AD", TRUE,
						fn_len, fn, LEN_AND_STR(gtm_dbversion_table[cs_data->desired_db_format]));
			} else
				exit_stat |= status;
			rel_crit(gv_cur_region);
			gds_rundown();
		} else
		{	/* Following part needs standalone access */
			assert(GDSVLAST == desired_dbver);
			got_standalone = mu_rndwn_file(gv_cur_region, TRUE);
			if (FALSE == got_standalone)
				return (int4)ERR_WCERRNOTCHG;
			/* we should open it (for changing) after mu_rndwn_file, since mu_rndwn_file changes the file header too */
			if (-1 == (fd = OPEN(fn, O_RDWR)))
			{
				save_errno = errno;
				errptr = (char *)STRERROR(save_errno);
				util_out_print("open : !AZ", TRUE, errptr);
				exit_stat |= EXIT_ERR;
				db_ipcs_reset(gv_cur_region, FALSE);
				mu_gv_cur_reg_free();
				continue;
			}
			LSEEKREAD(fd, 0, csd, sizeof(sgmnt_data), status);
			if (0 != status)
			{
				save_errno = errno;
				PERROR("Error reading header of file");
				errptr = (char *)STRERROR(save_errno);
				util_out_print("read : !AZ", TRUE, errptr);
				util_out_print("Error reading header of file", TRUE);
				util_out_print("Database file !AD not changed:  ", TRUE, fn_len, fn);
				if (-1 != status)
					rts_error(VARLSTCNT(4) ERR_DBRDERR, 2, fn_len, fn);
				else
					rts_error(VARLSTCNT(4) ERR_DBPREMATEOF, 2, fn_len, fn);
			}
			if (rsrvd_bytes_status)
			{
				if (reserved_bytes > MAX_RESERVE_B(csd))
				{
					util_out_print("!UL too large, maximum reserved bytes allowed is !UL for database file !AD",
							TRUE, reserved_bytes, MAX_RESERVE_B(csd), fn_len, fn);
					close(fd);
					db_ipcs_reset(gv_cur_region, FALSE);
					return (int4)ERR_RBWRNNOTCHG;
				}
				csd->reserved_bytes = reserved_bytes;
			}
			access_new = (n_dba == access ? csd->acc_meth : access);
							/* recalculate; n_dba is a proxy for no change */
			change_fhead_timer("FLUSH_TIME", csd->flush_time,
					   (dba_bg == access_new ? TIM_FLU_MOD_BG : TIM_FLU_MOD_MM),
					   FALSE);
			if ((n_dba != access) && (csd->acc_meth != access))	/* n_dba is a proxy for no change */
			{
				if (dba_mm == access)
					csd->defer_time = 1;			/* defer defaults to 1 */
				csd->acc_meth = access;
				if (0 == csd->n_bts)
				{
					csd->n_bts = WC_DEF_BUFFS;
					csd->bt_buckets = getprime(csd->n_bts);
				}
			}
			if (glbl_buff_status)
			{
				csd->n_bts = BT_FACTOR(new_cache_size);
				csd->bt_buckets = getprime(csd->n_bts);
				csd->n_wrt_per_flu = 7;
				csd->flush_trigger = FLUSH_FACTOR(csd->n_bts);
			}
			if (disk_wait_status)
				csd->wait_disk_space = new_disk_wait;
			if (extn_count_status)
				csd->extension_size = (uint4)new_extn_count;
			if (lock_space_status)
				csd->lock_space_size = (uint4)new_lock_space * OS_PAGELET_SIZE;
			if (bypass_partial_recov)
			{
				csd->file_corrupt = FALSE;
				util_out_print("Database file !AD now has partial recovery flag set to  !UL(FALSE) ",
						TRUE, fn_len, fn, csd->file_corrupt);
			}
			if (dba_mm == access_new)
			{
				if (CLI_NEGATED == defer_status)
					csd->defer_time = 0;
				else  if (CLI_PRESENT == defer_status)
				{
					if (!cli_get_num("DEFER_TIME", &defer_time))
					{
						util_out_print("Error getting DEFER_TIME qualifier value", TRUE);
						db_ipcs_reset(gv_cur_region, FALSE);
						return (int4)ERR_RBWRNNOTCHG;
					}
					if (-1 > defer_time)
					{
						util_out_print("DEFER_TIME cannot take negative values less than -1", TRUE);
						util_out_print("Database file !AD not changed", TRUE, fn_len, fn);
						exit_stat |= EXIT_WRN;
						db_ipcs_reset(gv_cur_region, FALSE);
						mu_gv_cur_reg_free();
						continue;
					}
					csd->defer_time = defer_time;
				}
				if (csd->blks_to_upgrd)
				{
					util_out_print("MM access method cannot be set if there are blocks to upgrade",	TRUE);
					util_out_print("Database file !AD not changed", TRUE, fn_len, fn);
					exit_stat |= EXIT_WRN;
					db_ipcs_reset(gv_cur_region, FALSE);
					mu_gv_cur_reg_free();
					continue;
				}
				if (GDSVCURR != csd->desired_db_format)
				{
					util_out_print("MM access method cannot be set in DB compatibility mode",
						TRUE);
					util_out_print("Database file !AD not changed", TRUE, fn_len, fn);
					exit_stat |= EXIT_WRN;
					db_ipcs_reset(gv_cur_region, FALSE);
					mu_gv_cur_reg_free();
					continue;
				}
				if (JNL_ENABLED(csd) && csd->jnl_before_image)
				{
					util_out_print("MM access method cannot be set with BEFORE image journaling", TRUE);
					util_out_print("Database file !AD not changed", TRUE, fn_len, fn);
					exit_stat |= EXIT_WRN;
					db_ipcs_reset(gv_cur_region, FALSE);
					mu_gv_cur_reg_free();
					continue;
				}
				csd->jnl_before_image = FALSE;
			} else
			{
				if (defer_status)
				{
					util_out_print("DEFER cannot be specified with BG access method.", TRUE);
					util_out_print("Database file !AD not changed", TRUE, fn_len, fn);
					exit_stat |= EXIT_WRN;
					db_ipcs_reset(gv_cur_region, FALSE);
					mu_gv_cur_reg_free();
					continue;
				}
			}
			LSEEKWRITE(fd, 0, csd, sizeof(sgmnt_data), status);
			if (0 != status)
			{
				save_errno = errno;
				errptr = (char *)STRERROR(save_errno);
				util_out_print("write : !AZ", TRUE, errptr);
				util_out_print("Error writing header of file", TRUE);
				util_out_print("Database file !AD not changed: ", TRUE, fn_len, fn);
				rts_error(VARLSTCNT(4) ERR_DBRDERR, 2, fn_len, fn);
			}
			close(fd);
			/* --------------------- report results ------------------------- */
			if (glbl_buff_status)
				util_out_print("Database file !AD now has !UL global buffers",
						TRUE, fn_len, fn, csd->n_bts);
			if (defer_status && (dba_mm == csd->acc_meth))
				util_out_print("Database file !AD now has defer_time set to !SL",
						TRUE, fn_len, fn, csd->defer_time);
			if (rsrvd_bytes_status)
				util_out_print("Database file !AD now has !UL reserved bytes",
						TRUE, fn_len, fn, csd->reserved_bytes);
			if (extn_count_status)
				util_out_print("Database file !AD now has extension count !UL",
						TRUE, fn_len, fn, csd->extension_size);
			if (lock_space_status)
				util_out_print("Database file !AD now has lock space !UL pages",
						TRUE, fn_len, fn, csd->lock_space_size/OS_PAGELET_SIZE);
			if (disk_wait_status)
				util_out_print("Database file !AD now has wait disk set to !UL seconds",
						TRUE, fn_len, fn, csd->wait_disk_space);
			db_ipcs_reset(gv_cur_region, FALSE);
		} /* end of else part if (!need_standalone) */
		mu_gv_cur_reg_free();
	}
	free(csd);
	assert(!(exit_stat & EXIT_INF));
	return (exit_stat & EXIT_ERR ? (int4)ERR_WCERRNOTCHG :
		(exit_stat & EXIT_WRN ? (int4)ERR_WCWRNNOTCHG : SS_NORMAL));
}
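Most per-region problems in the loop above do not abort the command: they OR EXIT_WRN/EXIT_ERR bits into exit_stat and the accumulated mask is converted into a single return code only after all regions have been processed. A minimal sketch of that accumulation; the EXIT_* and ERR_* values below are hypothetical stand-ins, not the GT.M definitions:

#include <stdio.h>

#define EXIT_NRM		0	/* hypothetical values for illustration only */
#define EXIT_WRN		2
#define EXIT_ERR		4
#define SS_NORMAL		0
#define ERR_WCWRNNOTCHG		101
#define ERR_WCERRNOTCHG		102

int main(void)
{
	int	exit_stat = EXIT_NRM, rc;

	exit_stat |= EXIT_NRM;	/* region 1: changed successfully */
	exit_stat |= EXIT_WRN;	/* region 2: skipped with a warning */
	exit_stat |= EXIT_NRM;	/* region 3: changed successfully */
	rc = (exit_stat & EXIT_ERR) ? ERR_WCERRNOTCHG
		: ((exit_stat & EXIT_WRN) ? ERR_WCWRNNOTCHG : SS_NORMAL);
	printf("final return code: %d\n", rc);
	return 0;
}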
Code example #4
File: mu_int_reg.c Project: mihawk/fis-gtm
void mu_int_reg(gd_region *reg, boolean_t *return_value)
{
	boolean_t		read_only, was_crit;
	freeze_status		status;
	node_local_ptr_t	cnl;
	sgmnt_addrs     	*csa;
	sgmnt_data_ptr_t	csd;
#	ifdef DEBUG
	boolean_t		need_to_wait = FALSE;
	int			trynum;
	uint4			curr_wbox_seq_num;
#	endif
	sgmnt_data		*csd_copy_ptr;
	gd_segment		*seg;
	int			gtmcrypt_errno;
	*return_value = FALSE;
	UNIX_ONLY(jnlpool_init_needed = TRUE);
	ESTABLISH(mu_int_reg_ch);
	if (dba_usr == reg->dyn.addr->acc_meth)
	{
		util_out_print("!/Can't integ region !AD; not GDS format", TRUE,  REG_LEN_STR(reg));
		mu_int_skipreg_cnt++;
		return;
	}
	gv_cur_region = reg;
	if (reg_cmcheck(reg))
	{
		util_out_print("!/Can't integ region across network", TRUE);
		mu_int_skipreg_cnt++;
		return;
	}
	gvcst_init(gv_cur_region);
	if (gv_cur_region->was_open)
	{	/* already open under another name */
		gv_cur_region->open = FALSE;
		return;
	}
	change_reg();
	csa = &FILE_INFO(gv_cur_region)->s_addrs;
	cnl = csa->nl;
	csd = csa->hdr;
	read_only = gv_cur_region->read_only;
	assert(NULL != mu_int_master);
	/* Ensure that we don't see an increase in the file header and master map size compared to its maximum values */
	assert(SGMNT_HDR_LEN >= SIZEOF(sgmnt_data) && (MASTER_MAP_SIZE_MAX >= MASTER_MAP_SIZE(csd)));
	/* ONLINE INTEG, if asked for explicitly by specifying -ONLINE, is an error if the db has partial V4 blocks.
	 * However, if -ONLINE is not explicitly specified but rather assumed implicitly (as the default for -REG),
	 * then turn off ONLINE INTEG for this region and continue as if -NOONLINE was specified.
	 */
#	ifdef GTM_SNAPSHOT
	if (!csd->fully_upgraded)
	{
		ointeg_this_reg = FALSE; /* Turn off ONLINE INTEG for this region */
		if (online_specified)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_SSV4NOALLOW, 2, DB_LEN_STR(gv_cur_region));
			util_out_print(NO_ONLINE_ERR_MSG, TRUE);
			mu_int_skipreg_cnt++;
			return;
		}
	}
#	endif
	if (!ointeg_this_reg || read_only)
	{
		status = region_freeze(gv_cur_region, TRUE, FALSE, TRUE);
		switch (status)
		{
			case REG_ALREADY_FROZEN:
				UNIX_ONLY(if (csa->read_only_fs) break);
				util_out_print("!/Database for region !AD is already frozen, not integing",
					TRUE, REG_LEN_STR(gv_cur_region));
				mu_int_skipreg_cnt++;
				return;
			case REG_HAS_KIP:
				/* We have already waited for KIP to reset. This time do not wait for KIP */
				status = region_freeze(gv_cur_region, TRUE, FALSE, FALSE);
				if (REG_ALREADY_FROZEN == status)
				{
					UNIX_ONLY(if (csa->read_only_fs) break);
					util_out_print("!/Database for region !AD is already frozen, not integing",
						TRUE, REG_LEN_STR(gv_cur_region));
					mu_int_skipreg_cnt++;
					return;
				}
				break;
			case REG_FREEZE_SUCCESS:
				break;
			default:
				assert(FALSE);
		}
Code example #5
void	mu_reorg_upgrd_dwngrd(void)
{
	blk_hdr			new_hdr;
	blk_segment		*bs1, *bs_ptr;
	block_id		*blkid_ptr, curblk, curbmp, start_blk, stop_blk, start_bmp, last_bmp;
	block_id		startblk_input, stopblk_input;
	boolean_t		upgrade, downgrade, safejnl, nosafejnl, region, first_reorg_in_this_db_fmt, reorg_entiredb;
	boolean_t		startblk_specified, stopblk_specified, set_fully_upgraded, db_got_to_v5_once, mark_blk_free;
	cache_rec_ptr_t		cr;
	char			*bml_lcl_buff = NULL, *command, *reorg_command;
	sm_uc_ptr_t		bptr = NULL;
	cw_set_element		*cse;
	enum cdb_sc		cdb_status;
	enum db_ver		new_db_format, ondsk_blkver;
	gd_region		*reg;
	int			cycle;
	int4			blk_seg_cnt, blk_size;	/* needed for BLK_INIT,BLK_SEG and BLK_FINI macros */
	int4			blocks_left, expected_blks2upgrd, actual_blks2upgrd, total_blks, free_blks;
	int4			status, status1, mapsize, lcnt, bml_status;
	reorg_stats_t		reorg_stats;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	sm_uc_ptr_t		blkBase, bml_sm_buff;	/* shared memory pointer to the bitmap global buffer */
	srch_hist		alt_hist;
	srch_blk_status		*blkhist, bmlhist;
	tp_region		*rptr;
	trans_num		curr_tn;
	unsigned char    	save_cw_set_depth;
	uint4			lcl_update_trans;

	region    = (CLI_PRESENT == cli_present("REGION"));
	upgrade   = (CLI_PRESENT == cli_present("UPGRADE"));
	downgrade = (CLI_PRESENT == cli_present("DOWNGRADE"));
	assert(upgrade && !downgrade || !upgrade && downgrade);
	command = upgrade ? "UPGRADE" : "DOWNGRADE";
	reorg_command = upgrade ? "MUPIP REORG UPGRADE" : "MUPIP REORG DOWNGRADE";
	reorg_entiredb = TRUE;	/* unless STARTBLK or STOPBLK is specified we are going to {up,down}grade the entire database */
	startblk_specified = FALSE;
	assert(SIZEOF(block_id) == SIZEOF(uint4));
	if ((CLI_PRESENT == cli_present("STARTBLK")) && (cli_get_hex("STARTBLK", (uint4 *)&startblk_input)))
	{
		reorg_entiredb = FALSE;
		startblk_specified = TRUE;
	}
	stopblk_specified = FALSE;
	assert(SIZEOF(block_id) == SIZEOF(uint4));
	if ((CLI_PRESENT == cli_present("STOPBLK")) && (cli_get_hex("STOPBLK", (uint4 *)&stopblk_input)))
	{
		reorg_entiredb = FALSE;
		stopblk_specified = TRUE;
	}
	mu_reorg_upgrd_dwngrd_in_prog = TRUE;
	mu_reorg_nosafejnl = (CLI_NEGATED == cli_present("SAFEJNL")) ? TRUE : FALSE;

	assert(region);
	status = SS_NORMAL;
	error_mupip = FALSE;
	gvinit();	/* initialize gd_header (needed by the later call to mu_getlst) */
	mu_getlst("REG_NAME", SIZEOF(tp_region));	/* get the parameter corresponding to REGION qualifier */
	if (error_mupip)
	{
		util_out_print("!/MUPIP REORG !AD cannot proceed with above errors!/", TRUE, LEN_AND_STR(command));
		mupip_exit(ERR_MUNOACTION);
	}
	assert(DBKEYSIZE(MAX_KEY_SZ) == gv_keysize);	/* no need to invoke GVKEYSIZE_INIT_IF_NEEDED macro */
	gv_target = targ_alloc(gv_keysize, NULL, NULL);	/* t_begin needs this initialized */
	gv_target_list = NULL;
	memset(&alt_hist, 0, SIZEOF(alt_hist));	/* null-initialize history */
	blkhist = &alt_hist.h[0];
	for (rptr = grlist;  NULL != rptr;  rptr = rptr->fPtr)
	{
		if (mu_ctrly_occurred || mu_ctrlc_occurred)
			break;
		reg = rptr->reg;
		util_out_print("!/Region !AD : MUPIP REORG !AD started", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
		if (reg_cmcheck(reg))
		{
			util_out_print("Region !AD : MUPIP REORG !AD cannot run across network",
				TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			status = ERR_MUNOFINISH;
			continue;
		}
		mu_reorg_process = TRUE;	/* gvcst_init will use this value to use gtm_poollimit settings. */
		gvcst_init(reg);
		mu_reorg_process = FALSE;
		assert(update_array != NULL);
		/* The access method stored in the global directory and the database file header might differ, in which case
		 * the database setting prevails. Therefore, the access method check can be done only after opening
		 * the database (i.e. after the gvcst_init).
		 */
		if (dba_bg != REG_ACC_METH(reg))
		{
			util_out_print("Region !AD : MUPIP REORG !AD cannot continue as access method is not BG",
				TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			status = ERR_MUNOFINISH;
			continue;
		}
		/* The mu_getlst call above uses insert_region to create the grlist, which ensures that duplicate regions mapping to
		 * the same db file correspond to only one grlist entry.
		 */
		assert(FALSE == reg->was_open);
		TP_CHANGE_REG(reg);	/* sets gv_cur_region, cs_addrs, cs_data */
		csa = cs_addrs;
		csd = cs_data;
		blk_size = csd->blk_size;	/* "blk_size" is used by the BLK_FINI macro */
		if (reg->read_only)
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(reg));
			status = ERR_MUNOFINISH;
			continue;
		}
		assert(GDSVCURR == GDSV6); /* so we trip this assert in case GDSVCURR changes without a change to this module */
		new_db_format = (upgrade ? GDSV6 : GDSV4);
		grab_crit(reg);
		curr_tn = csd->trans_hist.curr_tn;
		/* set the desired db format in the file header to the appropriate version, increment transaction number */
		status1 = desired_db_format_set(reg, new_db_format, reorg_command);
		assert(csa->now_crit);	/* desired_db_format_set() should not have released crit */
		first_reorg_in_this_db_fmt = TRUE;	/* with the current desired_db_format, this is the first reorg */
		if (SS_NORMAL != status1)
		{	/* "desired_db_format_set" would have printed appropriate error messages */
			if (ERR_MUNOACTION != status1)
			{	/* real error occurred while setting the db format. skip to next region */
				status = ERR_MUNOFINISH;
				rel_crit(reg);
				continue;
			}
			util_out_print("Region !AD : Desired DB Format remains at !AD after !AD", TRUE, REG_LEN_STR(reg),
				LEN_AND_STR(gtm_dbversion_table[new_db_format]), LEN_AND_STR(reorg_command));
			if (csd->reorg_db_fmt_start_tn == csd->desired_db_format_tn)
				first_reorg_in_this_db_fmt = FALSE;
		} else
			util_out_print("Region !AD : Desired DB Format set to !AD by !AD", TRUE, REG_LEN_STR(reg),
				LEN_AND_STR(gtm_dbversion_table[new_db_format]), LEN_AND_STR(reorg_command));
		assert(dba_bg == csd->acc_meth);
		/* Check blks_to_upgrd counter to see if upgrade/downgrade is complete */
		total_blks = csd->trans_hist.total_blks;
		free_blks = csd->trans_hist.free_blocks;
		actual_blks2upgrd = csd->blks_to_upgrd;
		/* If MUPIP REORG UPGRADE and there is no block to upgrade in the database as indicated by BOTH
		 * 	"csd->blks_to_upgrd" and "csd->fully_upgraded", then we can skip processing.
		 * If MUPIP REORG DOWNGRADE and all non-free blocks are already counted in "csd->blks_to_upgrd"
		 * (i.e. every busy block is already in the older format), then again we can skip processing.
		 */
		if ((upgrade && (0 == actual_blks2upgrd) && csd->fully_upgraded)
			|| (!upgrade && ((total_blks - free_blks) == actual_blks2upgrd)))
		{
			util_out_print("Region !AD : Blocks to Upgrade counter indicates no action needed for MUPIP REORG !AD",
				       TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			util_out_print("Region !AD : Total Blocks = [0x!XL] : Free Blocks = [0x!XL] : "
				       "Blocks to upgrade = [0x!XL]",
				       TRUE, REG_LEN_STR(reg), total_blks, free_blks, actual_blks2upgrd);
			util_out_print("Region !AD : MUPIP REORG !AD finished!/", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			rel_crit(reg);
			continue;
		}
		stop_blk = total_blks;
		if (stopblk_specified && stopblk_input <= stop_blk)
			stop_blk = stopblk_input;
		if (first_reorg_in_this_db_fmt)
		{	/* Note down reorg start tn (in case we are interrupted, future reorg will know to resume) */
			csd->reorg_db_fmt_start_tn = csd->desired_db_format_tn;
			csd->reorg_upgrd_dwngrd_restart_block = 0;
			start_blk = (startblk_specified ? startblk_input : 0);
		} else
		{	/* Either a concurrent MUPIP REORG of the same type ({up,down}grade) is currently running
			 * or a previously running REORG of the same type was interrupted (Ctrl-Ced).
			 * In either case, resume processing from whatever restart block number is stored in the fileheader;
			 * the only exception is if "STARTBLK" was specified in the input, in which case we use that unconditionally.
			 */
			start_blk = (startblk_specified ? startblk_input : csd->reorg_upgrd_dwngrd_restart_block);
		}
		if (start_blk > stop_blk)
			start_blk = stop_blk;
		mu_reorg_upgrd_dwngrd_start_tn = csd->reorg_db_fmt_start_tn;
		/* Before releasing crit, flush the file-header and dirty buffers in cache to disk. This is because we are now
		 * going to read each GDS block directly from disk to determine if it needs to be upgraded/downgraded or not.
		 */
		if (!wcs_flu(WCSFLU_FLUSH_HDR))	/* wcs_flu assumes gv_cur_region is set (which it is in this routine) */
		{
			rel_crit(reg);
			gtm_putmsg_csa(CSA_ARG(csa)
				VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT("MUPIP REORG UPGRADE/DOWNGRADE"), DB_LEN_STR(reg));
			status = ERR_MUNOFINISH;
			continue;
		}
		rel_crit(reg);
		/* Loop through entire database one GDS block at a time and upgrade/downgrade each of them */
		status1 = SS_NORMAL;
		start_bmp = ROUND_DOWN2(start_blk, BLKS_PER_LMAP);
		last_bmp  = ROUND_DOWN2(stop_blk - 1, BLKS_PER_LMAP);
		curblk = start_blk;	/* curblk is the block to be upgraded/downgraded */
		util_out_print("Region !AD : Started processing from block number [0x!XL]", TRUE, REG_LEN_STR(reg), curblk);
		if (NULL != bptr)
		{	/* malloc/free "bptr" for each region as GDS block-size can be different */
			free(bptr);
			bptr = NULL;
		}
		memset(&reorg_stats, 0, SIZEOF(reorg_stats));	/* initialize statistics for this region */
		for (curbmp = start_bmp; curbmp <= last_bmp; curbmp += BLKS_PER_LMAP)
		{
			if (mu_ctrly_occurred || mu_ctrlc_occurred)
			{
				status1 = ERR_MUNOFINISH;
				break;
			}
			/* --------------------------------------------------------------
			 *             Read in current bitmap block
			 * --------------------------------------------------------------
			 */
			assert(!csa->now_crit);
			bml_sm_buff = t_qread(curbmp, (sm_int_ptr_t)&cycle, &cr); /* bring block into the cache outside of crit */
			reorg_stats.blks_read_from_disk_bmp++;
			grab_crit_encr_cycle_sync(reg); /* needed so t_qread does not return NULL below */
			if (mu_reorg_upgrd_dwngrd_start_tn != csd->desired_db_format_tn)
			{	/* csd->desired_db_format changed since reorg started. discontinue the reorg */
				/* see later comment on "csd->reorg_upgrd_dwngrd_restart_block" for why the assignment
				 * of this field should be done only if a db format change did not occur.
				 */
				rel_crit(reg);
				status1 = ERR_MUNOFINISH;
				/* This "start_tn" check is redone after the for-loop and an error message is printed there */
				break;
			} else if (reorg_entiredb)
			{	/* Change "csd->reorg_upgrd_dwngrd_restart_block" only if STARTBLK or STOPBLK was NOT specified */
				assert(csd->reorg_upgrd_dwngrd_restart_block <= MAX(start_blk, curbmp));
				csd->reorg_upgrd_dwngrd_restart_block = curbmp;	/* previous blocks have been upgraded/downgraded */
			}
			/* Check blks_to_upgrd counter to see if upgrade/downgrade is complete.
			 * Repeat check done a few steps earlier outside of this for loop.
			 */
			total_blks = csd->trans_hist.total_blks;
			free_blks = csd->trans_hist.free_blocks;
			actual_blks2upgrd = csd->blks_to_upgrd;
			if ((upgrade && (0 == actual_blks2upgrd) && csd->fully_upgraded)
				|| (!upgrade && ((total_blks - free_blks) == actual_blks2upgrd)))
			{
				rel_crit(reg);
				break;
			}
			bml_sm_buff = t_qread(curbmp, (sm_int_ptr_t)&cycle, &cr); /* now that in crit, note down stable buffer */
			if (NULL == bml_sm_buff)
				rts_error_csa(CSA_ARG(csa) VARLSTCNT(1) ERR_DSEBLKRDFAIL);
			ondsk_blkver = cr->ondsk_blkver;	/* note down db fmt on disk for bitmap block */
			/* Take a copy of the shared memory bitmap buffer into process-private memory before releasing crit.
			 * We are interested in those blocks that are currently marked as USED in the bitmap.
			 * It is possible that once we release crit, concurrent updates change the bitmap state of those blocks.
			 * In that case, those updates will take care of doing the upgrade/downgrade of those blocks in the
			 * format currently set in csd->desired_db_format i.e. accomplishing MUPIP REORG UPGRADE/DOWNGRADE's job.
			 * If the desired_db_format changes concurrently, we will stop doing REORG UPGRADE/DOWNGRADE processing.
			 */
			if (NULL == bml_lcl_buff)
				bml_lcl_buff = malloc(BM_SIZE(BLKS_PER_LMAP));
			memcpy(bml_lcl_buff, (blk_hdr_ptr_t)bml_sm_buff, BM_SIZE(BLKS_PER_LMAP));
			if (FALSE == cert_blk(reg, curbmp, (blk_hdr_ptr_t)bml_lcl_buff, 0, FALSE))
			{	/* certify the block while holding crit as cert_blk uses fields from file-header (shared memory) */
				assert(FALSE);	/* in pro, skip upgrading/downgrading all blks in this unreliable local bitmap */
				rel_crit(reg);
				util_out_print("Region !AD : Bitmap Block [0x!XL] has integrity errors. Skipping this bitmap.",
					TRUE, REG_LEN_STR(reg), curbmp);
				status1 = ERR_MUNOFINISH;
				continue;
			}
			rel_crit(reg);
			/* ------------------------------------------------------------------------
			 *         Upgrade/Downgrade all BUSY blocks in the current bitmap
			 * ------------------------------------------------------------------------
			 */
			curblk = (curbmp == start_bmp) ? start_blk : curbmp;
			mapsize = (curbmp == last_bmp) ? (stop_blk - curbmp) : BLKS_PER_LMAP;
			assert(0 != mapsize);
			assert(mapsize <= BLKS_PER_LMAP);
			db_got_to_v5_once = csd->db_got_to_v5_once;
			for (lcnt = curblk - curbmp; lcnt < mapsize; lcnt++, curblk++)
			{
				if (mu_ctrly_occurred || mu_ctrlc_occurred)
				{
					status1 = ERR_MUNOFINISH;
					goto stop_reorg_on_this_reg;	/* goto needed because of nested FOR Loop */
				}
				GET_BM_STATUS(bml_lcl_buff, lcnt, bml_status);
				assert(BLK_MAPINVALID != bml_status); /* cert_blk ran clean so we don't expect invalid entries */
				if (BLK_FREE == bml_status)
				{
					reorg_stats.blks_skipped_free++;
					continue;
				}
				/* MUPIP REORG UPGRADE/DOWNGRADE will convert USED & RECYCLED blocks */
				if (db_got_to_v5_once || (BLK_RECYCLED != bml_status))
				{	/* Do NOT read recycled V4 block from disk unless it is guaranteed NOT to be too full */
					if (lcnt)
					{	/* non-bitmap block */
						/* read in block from disk into private buffer. don't pollute the cache yet */
						if (NULL == bptr)
							bptr = (sm_uc_ptr_t)malloc(blk_size);
						status1 = dsk_read(curblk, bptr, &ondsk_blkver, FALSE);
						/* dsk_read on curblk could return an error (DYNUPGRDFAIL) if curblk needs to be
						 * upgraded and if its block size was too big to allow the extra block-header space
						 * requirements for a dynamic upgrade. a MUPIP REORG DOWNGRADE should not error out
						 * in that case as the block is already in the downgraded format.
						 */
						if (SS_NORMAL != status1)
						{
							if (!upgrade && (ERR_DYNUPGRDFAIL == status1))
							{
								assert(GDSV4 == new_db_format);
								ondsk_blkver = new_db_format;
							} else
							{
								gtm_putmsg_csa(CSA_ARG(csa)
									VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(reg), status1);
								util_out_print("Region !AD : Error occurred while reading block "
									"[0x!XL]", TRUE, REG_LEN_STR(reg), curblk);
								status1 = ERR_MUNOFINISH;
								goto stop_reorg_on_this_reg;/* goto needed due to nested FOR Loop */
							}
						}
						reorg_stats.blks_read_from_disk_nonbmp++;
					} /* else bitmap block has been read in crit earlier and ondsk_blkver appropriately set */
					if (new_db_format == ondsk_blkver)
					{
						assert((SS_NORMAL == status1) || (!upgrade && (ERR_DYNUPGRDFAIL == status1)));
						status1 = SS_NORMAL;	/* treat DYNUPGRDFAIL as no error in case of downgrade */
						reorg_stats.blks_skipped_newfmtindisk++;
						continue;	/* current disk version is identical to what is desired */
					}
					assert(SS_NORMAL == status1);
				}
				/* Begin non-TP transaction to upgrade/downgrade the block.
				 * The way we do that is by updating the block using a null update array.
				 * Any update to a block will trigger an automatic upgrade/downgrade of the block based on
				 * 	the current fileheader desired_db_format setting and we use that here.
				 */
				t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
				for (; ;)
				{
					CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
					curr_tn = csd->trans_hist.curr_tn;
					db_got_to_v5_once = csd->db_got_to_v5_once;
					if (db_got_to_v5_once || (BLK_RECYCLED != bml_status))
					{
						blkhist->cse = NULL;	/* start afresh (do not use value from previous retry) */
						blkBase = t_qread(curblk, (sm_int_ptr_t)&blkhist->cycle, &blkhist->cr);
						if (NULL == blkBase)
						{
							t_retry((enum cdb_sc)rdfail_detail);
							continue;
						}
						blkhist->blk_num = curblk;
						blkhist->buffaddr = blkBase;
						ondsk_blkver = blkhist->cr->ondsk_blkver;
						new_hdr = *(blk_hdr_ptr_t)blkBase;
						mu_reorg_upgrd_dwngrd_blktn = new_hdr.tn;
						mark_blk_free = FALSE;
						inctn_opcode = upgrade ? inctn_blkupgrd : inctn_blkdwngrd;
					} else
					{
						mark_blk_free = TRUE;
						inctn_opcode = inctn_blkmarkfree;
					}
					inctn_detail.blknum_struct.blknum = curblk;
					/* t_end assumes that the history it is passed does not contain a bitmap block.
					 * for bitmap block, the history validation information is passed through cse instead.
					 * therefore we need to handle bitmap and non-bitmap cases separately.
					 */
					if (!lcnt)
					{	/* Means a bitmap block.
						 * At this point we can do a "new_db_format != ondsk_blkver" check to determine
						 * if the block got converted since we did the dsk_read (see the non-bitmap case
						 * for a similar check done there), but in that case we will have a transaction
						 * which has read 1 bitmap block and is updating no block. "t_end" currently cannot
						 * handle this case as it expects any bitmap block that needs validation to also
						 * have a corresponding cse which will hold its history. Hence we avoid doing the
						 * new_db_format check. The only disadvantage of this is that we will end up
						 * modifying the bitmap block as part of this transaction (in an attempt to convert
						 * its ondsk_blkver) even though it is already in the right format. Since this
						 * overhead is going to be one per bitmap block and since the block is in the cache
						 * at this point, we should not lose much.
						 */
						assert(!mark_blk_free);
						BLK_ADDR(blkid_ptr, SIZEOF(block_id), block_id);
						*blkid_ptr = 0;
						t_write_map(blkhist, (unsigned char *)blkid_ptr, curr_tn, 0);
						assert(&alt_hist.h[0] == blkhist);
						alt_hist.h[0].blk_num = 0; /* create empty history for bitmap block */
						assert(update_trans);
					} else
					{	/* non-bitmap block. fill in history for validation in t_end */
						assert(curblk);	/* we should never come here for block 0 (bitmap) */
						if (!mark_blk_free)
						{
							assert(blkhist->blk_num == curblk);
							assert(blkhist->buffaddr == blkBase);
							blkhist->tn      = curr_tn;
							alt_hist.h[1].blk_num = 0;
						}
						/* Also need to pass the bitmap as history to detect if any concurrent M-kill
						 * is freeing up the same USED block that we are trying to convert OR if any
						 * concurrent M-set is reusing the same RECYCLED block that we are trying to
						 * convert. Because of t_end currently not being able to validate a bitmap
						 * without that simultaneously having a cse, we need to create a cse for the
						 * bitmap that is used only for bitmap history validation, but should not be
						 * used to update the contents of the bitmap block in bg_update.
						 */
						bmlhist.buffaddr = t_qread(curbmp, (sm_int_ptr_t)&bmlhist.cycle, &bmlhist.cr);
						if (NULL == bmlhist.buffaddr)
						{
							t_retry((enum cdb_sc)rdfail_detail);
							continue;
						}
						bmlhist.blk_num = curbmp;
						bmlhist.tn = curr_tn;
						GET_BM_STATUS(bmlhist.buffaddr, lcnt, bml_status);
						if (BLK_MAPINVALID == bml_status)
						{
							t_retry(cdb_sc_lostbmlcr);
							continue;
						}
						if (!mark_blk_free)
						{
							if ((new_db_format != ondsk_blkver) && (BLK_FREE != bml_status))
							{	/* block still needs to be converted. create cse */
								BLK_INIT(bs_ptr, bs1);
								BLK_SEG(bs_ptr, blkBase + SIZEOF(new_hdr),
									new_hdr.bsiz - SIZEOF(new_hdr));
								BLK_FINI(bs_ptr, bs1);
								t_write(blkhist, (unsigned char *)bs1, 0, 0,
									((blk_hdr_ptr_t)blkBase)->levl, FALSE,
									FALSE, GDS_WRITE_PLAIN);
								/* The directory tree status for now is only used to determine
								 * whether to write the block to the snapshot file (see t_end_sysops.c).
								 * For the reorg upgrade/downgrade process, the block is updated in a
								 * sequential way without changing the gv_target. In this case, we
								 * assume the block is in directory tree so as to have it written to
								 * the snapshot file.
			 					 */
								BIT_SET_DIR_TREE(cw_set[cw_set_depth-1].blk_prior_state);
								/* reset update_trans in case previous retry had set it to 0 */
								update_trans = UPDTRNS_DB_UPDATED_MASK;
								if (BLK_RECYCLED == bml_status)
								{	/* If the block that we are upgrading is RECYCLED, indicate to
									 * bg_update that blks_to_upgrd counter should NOT be
									 * touched in this case by setting "mode" to a special value
									 */
									assert(cw_set[cw_set_depth-1].mode == gds_t_write);
									cw_set[cw_set_depth-1].mode = gds_t_write_recycled;
									/* we SET block as NOT RECYCLED, otherwise, the mm_update()
									 * or bg_update_phase2 may skip writing it to snapshot file
									 * when its level is 0
									 */
									BIT_CLEAR_RECYCLED(cw_set[cw_set_depth-1].blk_prior_state);
								}
							} else
							{	/* Block got converted by another process since we did the dsk_read.
								 * 	or this block became marked free in the bitmap.
								 * No need to update this block. just call t_end for validation of
								 * 	both the non-bitmap block as well as the bitmap block.
								 * Note down that this transaction is no longer updating any blocks.
								 */
								update_trans = 0;
							}
							/* Need to put bit maps on the end of the cw set for concurrency checking.
							 * We want to simulate t_write_map, except we want to update "cw_map_depth"
							 * instead of "cw_set_depth". Hence the save and restore logic below.
							 * This part of the code is similar to the one in mu_swap_blk.c
							 */
							save_cw_set_depth = cw_set_depth;
							assert(!cw_map_depth);
							t_write_map(&bmlhist, NULL, curr_tn, 0); /* will increment cw_set_depth */
							cw_map_depth = cw_set_depth; /* set cw_map_depth to latest cw_set_depth */
							cw_set_depth = save_cw_set_depth;/* restore cw_set_depth */
							/* t_write_map simulation end */
						} else
						{
							if (BLK_RECYCLED != bml_status)
							{	/* Block was RECYCLED at beginning but no longer so. Retry */
								t_retry(cdb_sc_bmlmod);
								continue;
							}
							/* Mark recycled block as FREE in bitmap */
							assert(lcnt == (curblk - curbmp));
							assert(update_array_ptr == update_array);
							*((block_id *)update_array_ptr) = lcnt;
							update_array_ptr += SIZEOF(block_id);
							/* the following assumes SIZEOF(block_id) == SIZEOF(int) */
							assert(SIZEOF(block_id) == SIZEOF(int));
							*(int *)update_array_ptr = 0;
							t_write_map(&bmlhist, (unsigned char *)update_array, curr_tn, 0);
							update_trans = UPDTRNS_DB_UPDATED_MASK;
						}
					}
					assert(SIZEOF(lcl_update_trans) == SIZEOF(update_trans));
					lcl_update_trans = update_trans;	/* take a copy before t_end modifies it */
					if ((trans_num)0 != t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))
					{	/* In case this is MM and t_end() remapped an extended database, reset csd */
						assert(csd == cs_data);
						if (!lcl_update_trans)
						{
							assert(lcnt);
							assert(!mark_blk_free);
							assert((new_db_format == ondsk_blkver) || (BLK_BUSY != bml_status));
							if (BLK_BUSY != bml_status)
								reorg_stats.blks_skipped_free++;
							else
								reorg_stats.blks_skipped_newfmtincache++;
						} else if (!lcnt)
							reorg_stats.blks_converted_bmp++;
						else
							reorg_stats.blks_converted_nonbmp++;
						break;
					}
					assert(csd == cs_data);
				}
			}
		}
	stop_reorg_on_this_reg:
		/* even though ctrl-c occurred, update file-header fields to store reorg's progress before exiting */
		grab_crit(reg);
		blocks_left = 0;
		assert(csd->trans_hist.total_blks >= csd->blks_to_upgrd);
		actual_blks2upgrd = csd->blks_to_upgrd;
		total_blks = csd->trans_hist.total_blks;
		free_blks = csd->trans_hist.free_blocks;
		/* Care should be taken not to set "csd->reorg_upgrd_dwngrd_restart_block" in case of a concurrent db fmt
		 * change. This is because let us say we are doing REORG UPGRADE. A concurrent REORG DOWNGRADE would
		 * have reset "csd->reorg_upgrd_dwngrd_restart_block" field to 0 and if that reorg is interrupted by a
		 * Ctrl-C (before this reorg came here) it would have updated "csd->reorg_upgrd_dwngrd_restart_block" to
		 * a non-zero value indicating how many blocks from 0 have been downgraded. We should not reset this
		 * field to "curblk" as it will be mis-interpreted as the number of blocks that have been DOWNgraded.
		 */
		set_fully_upgraded = FALSE;
		if (mu_reorg_upgrd_dwngrd_start_tn != csd->desired_db_format_tn)
		{	/* csd->desired_db_format changed since reorg started. discontinue the reorg */
			util_out_print("Region !AD : Desired DB Format changed during REORG. Stopping REORG.",
				TRUE, REG_LEN_STR(reg));
			status1 = ERR_MUNOFINISH;
		} else if (reorg_entiredb)
		{	/* Change "csd->reorg_upgrd_dwngrd_restart_block" only if STARTBLK or STOPBLK was NOT specified */
			assert(csd->reorg_upgrd_dwngrd_restart_block <= curblk);
			csd->reorg_upgrd_dwngrd_restart_block = curblk;	/* blocks less than this have been upgraded/downgraded */
			expected_blks2upgrd = upgrade ? 0 : (total_blks - free_blks);
			blocks_left = upgrade ? actual_blks2upgrd : (expected_blks2upgrd - actual_blks2upgrd);
			/* If this reorg command went through all blocks in the database, then it should have
			 * 	correctly concluded at this point whether the reorg is complete or not.
			 * If this reorg command started from where a previous incomplete reorg left
			 *	(i.e. first_reorg_in_this_db_fmt is FALSE), it cannot determine if the initial
			 *	GDS blocks that it skipped are completely {up,down}graded or not.
			 */
			assert((0 == blocks_left) || (SS_NORMAL != status1) || !first_reorg_in_this_db_fmt);
			/* If this is a MUPIP REORG UPGRADE that did go through every block in the database (indicated by
			 * "reorg_entiredb" && "first_reorg_in_this_db_fmt") and the current count of "blks_to_upgrd" is
			 * 0 in the file-header and the desired_db_format did not change since the start of the REORG,
			 * we can be sure that the entire database has been upgraded. Set "csd->fully_upgraded" to TRUE.
			 */
			if ((SS_NORMAL == status1) && first_reorg_in_this_db_fmt && upgrade && (0 == actual_blks2upgrd))
			{
				csd->fully_upgraded = TRUE;
				csd->db_got_to_v5_once = TRUE;
				set_fully_upgraded = TRUE;
			}
			/* flush all changes noted down in the file-header */
			if (!wcs_flu(WCSFLU_FLUSH_HDR))	/* wcs_flu assumes gv_cur_region is set (which it is in this routine) */
			{
				gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4,
					LEN_AND_LIT("MUPIP REORG UPGRADE/DOWNGRADE"), DB_LEN_STR(reg));
				status = ERR_MUNOFINISH;
				rel_crit(reg);
				continue;
			}
		}
		curr_tn = csd->trans_hist.curr_tn;
		rel_crit(reg);
		util_out_print("Region !AD : Stopped processing at block number [0x!XL]", TRUE, REG_LEN_STR(reg), curblk);
		/* Print statistics */
		util_out_print("Region !AD : Statistics : Blocks Read From Disk (Bitmap)     : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_read_from_disk_bmp);
		util_out_print("Region !AD : Statistics : Blocks Skipped (Free)              : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_free);
		util_out_print("Region !AD : Statistics : Blocks Read From Disk (Non-Bitmap) : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_read_from_disk_nonbmp);
		util_out_print("Region !AD : Statistics : Blocks Skipped (new fmt in disk)   : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_newfmtindisk);
		util_out_print("Region !AD : Statistics : Blocks Skipped (new fmt in cache)  : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_skipped_newfmtincache);
		util_out_print("Region !AD : Statistics : Blocks Converted (Bitmap)          : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_converted_bmp);
		util_out_print("Region !AD : Statistics : Blocks Converted (Non-Bitmap)      : 0x!XL",
			TRUE, REG_LEN_STR(reg), reorg_stats.blks_converted_nonbmp);
		if (reorg_entiredb && (SS_NORMAL == status1) && (0 != blocks_left))
		{	/* file-header counter does not match what reorg on the entire database expected to see */
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBBTUWRNG, 2, expected_blks2upgrd, actual_blks2upgrd);
			util_out_print("Region !AD : Run MUPIP INTEG (without FAST qualifier) to fix the counter",
				TRUE, REG_LEN_STR(reg));
			status1 = ERR_MUNOFINISH;
		} else
			util_out_print("Region !AD : Total Blocks = [0x!XL] : Free Blocks = [0x!XL] : "
				       "Blocks to upgrade = [0x!XL]",
				       TRUE, REG_LEN_STR(reg), total_blks, free_blks, actual_blks2upgrd);
		/* Issue success or failure message for this region */
		if (SS_NORMAL == status1)
		{	/* issue success only if REORG did not encounter any error in its processing */
			if (set_fully_upgraded)
				util_out_print("Region !AD : Database is now FULLY UPGRADED", TRUE, REG_LEN_STR(reg));
			util_out_print("Region !AD : MUPIP REORG !AD finished!/", TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			send_msg_csa(CSA_ARG(csa) VARLSTCNT(7) ERR_MUREUPDWNGRDEND, 5, REG_LEN_STR(reg),
										process_id, process_id, &curr_tn);
		} else
		{
			assert(ERR_MUNOFINISH == status1);
			assert((SS_NORMAL == status) || (ERR_MUNOFINISH == status));
			util_out_print("Region !AD : MUPIP REORG !AD incomplete. See above messages.!/",
					TRUE, REG_LEN_STR(reg), LEN_AND_STR(command));
			status = status1;
		}
	}
	if (NULL != bptr)
		free(bptr);
	if (NULL != bml_lcl_buff)
		free(bml_lcl_buff);
	if (mu_ctrly_occurred || mu_ctrlc_occurred)
	{
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_REORGCTRLY);
		status = ERR_MUNOFINISH;
	}
	mupip_exit(status);
}
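The outer loop above walks the database one local bitmap at a time: start_bmp and last_bmp are the bitmap blocks enclosing the start and stop blocks (ROUND_DOWN2 to a BLKS_PER_LMAP boundary), and lcnt is a block's offset within its bitmap. A minimal sketch of just that arithmetic; BLKS_PER_LMAP is assumed to be 512 here (its usual value), while the real constant comes from the GDS headers:

#include <stdio.h>

#define BLKS_PER_LMAP		512	/* assumed: blocks covered by one local bitmap */
#define ROUND_DOWN2_SKETCH(x, n)	((x) & ~((n) - 1))	/* n must be a power of two */

int main(void)
{
	unsigned int	start_blk = 0x4207, stop_blk = 0x5000;	/* hypothetical block range */
	unsigned int	start_bmp, last_bmp, lcnt;

	start_bmp = ROUND_DOWN2_SKETCH(start_blk, BLKS_PER_LMAP);	/* bitmap covering start_blk */
	last_bmp = ROUND_DOWN2_SKETCH(stop_blk - 1, BLKS_PER_LMAP);	/* bitmap covering the last block */
	lcnt = start_blk - start_bmp;	/* offset of start_blk within its bitmap */
	printf("start_bmp=0x%x last_bmp=0x%x lcnt=%u\n", start_bmp, last_bmp, lcnt);
	return 0;
}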
Code example #6
File: gv_init_reg.c Project: 5HT/mumps
void gv_init_reg (gd_region *reg)
{
	gv_key		*temp_key;
	gv_namehead	*g;
	sgmnt_addrs	*csa;
	int		keysize;
#ifdef	NOLICENSE
	licensed = TRUE;
#else
	CRYPT_CHKSYSTEM;
#endif
	switch (reg->dyn.addr->acc_meth)
	{
	case dba_usr:
		gvusr_init (reg, &gv_cur_region, &gv_currkey, &gv_altkey);
		break;
		/* we may be left in dba_cm state for gt_cm, if we have run down the db and accessed it
		   again without quitting out of gtm */
	case dba_cm:
	case dba_mm:
	case dba_bg:
		if (FALSE == reg->open)
			gvcst_init(reg);
		break;
	default:
		GTMASSERT;
	}
	assert(reg->open);

	keysize = (reg->max_key_size + MAX_NUM_SUBSC_LEN + 4) & (-4);

	if (keysize > gv_keysize)
	{
		gv_keysize = keysize;
		temp_key = (gv_key*)malloc(sizeof(gv_key) - 1 + gv_keysize);
		if (gv_currkey)
		{
			memcpy(temp_key, gv_currkey, sizeof(gv_key) + gv_currkey->end);
			free(gv_currkey);
		} else
			temp_key->base[0] = '\0';
		gv_currkey = temp_key;
		gv_currkey->top = gv_keysize;
		temp_key = (gv_key*)malloc(sizeof(gv_key) - 1 + gv_keysize);
		if (gv_altkey)
		{
			memcpy(temp_key, gv_altkey, sizeof(gv_key) + gv_altkey->end);
			free(gv_altkey);
		} else
			temp_key->base[0] = '\0';
		gv_altkey = temp_key;
		gv_altkey->top = gv_keysize;
	}
	if (reg->dyn.addr->acc_meth == dba_bg || reg->dyn.addr->acc_meth == dba_mm)
	{
		if (!reg->was_open)
		{
			csa = (sgmnt_addrs*)&FILE_INFO(reg)->s_addrs;
			g = csa->dir_tree;
			if (NULL != g)
			{	/* It is possible that dir_tree has already been targ_alloc'ed. This is because GT.CM or VMS DAL
				 * calls can run down regions without the process halting out. We don't want to double malloc.
				 */
				g->clue.end = 0;
			}
			SET_CSA_DIR_TREE(csa, reg->max_key_size, reg);
		}
	}
	return;
}
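The keysize computation above, (reg->max_key_size + MAX_NUM_SUBSC_LEN + 4) & (-4), rounds max_key_size + MAX_NUM_SUBSC_LEN up to a 4-byte boundary (always leaving at least one spare byte) before comparing it with the current gv_keysize. A minimal worked sketch of just that arithmetic; the MAX_NUM_SUBSC_LEN and max_key_size values below are hypothetical, chosen only to make the numbers concrete:

#include <stdio.h>

#define MAX_NUM_SUBSC_LEN	7	/* hypothetical value for illustration only */

int main(void)
{
	int	max_key_size = 64;	/* hypothetical region setting */
	int	keysize;

	keysize = (max_key_size + MAX_NUM_SUBSC_LEN + 4) & (-4);	/* 64 + 7 = 71, rounded up -> 72 */
	printf("max_key_size=%d -> keysize=%d (multiple of 4)\n", max_key_size, keysize);
	return 0;
}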
Code example #7
File: rc_fnd_file.c Project: ChristyV/fis-gtm
short rc_fnd_file(rc_xdsid *xdsid)
{
	gv_namehead	*g;
	short		dsid, node;
	gd_binding	*map;
	char		buff[1024], *cp, *cp1;
	mstr		fpath1, fpath2;
	mval		v;
	int		i, keysize;
	int             len, node2;

	GET_SHORT(dsid, &xdsid->dsid.value);
	GET_SHORT(node, &xdsid->node.value);
	if (!dsid_list)
	{
		/*	open special database, set up entry */
		dsid_list = (rc_dsid_list *)malloc(SIZEOF(rc_dsid_list));
		dsid_list->dsid = RC_NSPACE_DSID;
		dsid_list->next = NULL;
		fpath1.addr = RC_NSPACE_PATH;
		fpath1.len = SIZEOF(RC_NSPACE_PATH);
		if (SS_NORMAL != TRANS_LOG_NAME(&fpath1, &fpath2, buff, SIZEOF(buff), do_sendmsg_on_log2long))
		{
			char msg[256];
			SPRINTF(msg, "Invalid DB filename, \"%s\"", fpath1.addr);
			gtcm_rep_err(msg, errno);
			return RC_BADFILESPEC;
		}
		if (fpath2.len > MAX_FN_LEN)
			return RC_BADFILESPEC;
		dsid_list->fname = (char *)malloc(fpath2.len + 1);
		memcpy(dsid_list->fname, fpath2.addr, fpath2.len);
		*((char*)(dsid_list->fname + fpath2.len)) = 0;
		gv_cur_region = (gd_region *)malloc(SIZEOF(gd_region));
		memset(gv_cur_region, 0, SIZEOF(gd_region));
		gv_cur_region->dyn.addr = (gd_segment *)malloc(SIZEOF(gd_segment));
		memset(gv_cur_region->dyn.addr, 0, SIZEOF(gd_segment));
		memcpy(gv_cur_region->dyn.addr->fname, fpath2.addr, fpath2.len);
		gv_cur_region->dyn.addr->fname_len = fpath2.len;
		gv_cur_region->dyn.addr->acc_meth = dba_bg;
		ESTABLISH_RET(rc_fnd_file_ch1, RC_SUCCESS);
		gvcst_init(gv_cur_region);
		REVERT;
		change_reg();
		/* check to see if this DB has the reserved bytes field set
		 * correctly.  Global pages must always have some extra unused
		 * space left in them (RC_RESERVED bytes) so that the page
		 * will fit into the client buffer when unpacked by the
		 * client.
		 */
		if (cs_data->reserved_bytes < RC_RESERVED)
		{
			OMI_DBG((omi_debug,
			"Unable to access database file:  \"%s\"\nReserved_bytes field in the file header is too small for GT.CM\n",
			fpath2.addr));
			free(dsid_list->fname);
			dsid_list->fname = NULL;
			free(dsid_list);
			dsid_list = NULL;
			free(gv_cur_region->dyn.addr);
			gv_cur_region->dyn.addr = NULL;
			free(gv_cur_region);
			gv_cur_region = NULL;
			return RC_FILEACCESS;
		}
		gv_keysize = DBKEYSIZE(gv_cur_region->max_key_size);
		GVKEY_INIT(gv_currkey, gv_keysize);
		GVKEY_INIT(gv_altkey, gv_keysize);
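		/* Allocate the directory tree header for this region in one block: the gv_namehead
		 * itself plus room for its three keys (clue, first_rec, last_rec) of gv_keysize each.
		 */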
		cs_addrs->dir_tree = (gv_namehead *)malloc(SIZEOF(gv_namehead) + 2 * SIZEOF(gv_key) + 3 * (gv_keysize - 1));
		g = cs_addrs->dir_tree;
		g->first_rec = (gv_key*)(g->clue.base + gv_keysize);
		g->last_rec = (gv_key*)(g->first_rec->base + gv_keysize);
		g->clue.top = g->last_rec->top = g->first_rec->top = gv_keysize;
		g->clue.prev = g->clue.end = 0;
		g->root = DIR_ROOT;
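		/* Hand-build a one-region global directory: of the three name bindings, the first is
		 * skipped here, the second starts at '%' (the lowest global name) and the third is an
		 * all-0xFF sentinel; both point at this region, so every global name resolves to the
		 * namespace database.
		 */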
		dsid_list->gda = (gd_addr*)malloc(SIZEOF(gd_addr) + 3 * SIZEOF(gd_binding));
		dsid_list->gda->n_maps = 3;
		dsid_list->gda->n_regions = 1;
		dsid_list->gda->n_segments = 1;
		dsid_list->gda->maps = (gd_binding*)((char*)dsid_list->gda + SIZEOF(gd_addr));
		dsid_list->gda->max_rec_size = gv_cur_region->max_rec_size;
		map = dsid_list->gda->maps;
		map++;
		memset(map->name, 0, SIZEOF(map->name));
		map->name[0] = '%';
		map->reg.addr = gv_cur_region;
		map++;
		map->reg.addr = gv_cur_region;
		memset(map->name, -1, SIZEOF(map->name));
		dsid_list->gda->tab_ptr = (hash_table_mname *)malloc(SIZEOF(hash_table_mname));
		init_hashtab_mname(dsid_list->gda->tab_ptr, 0, HASHTAB_NO_COMPACT, HASHTAB_NO_SPARE_TABLE);
		change_reg();
		if (rc_overflow->top < cs_addrs->hdr->blk_size)
		{
			if (rc_overflow->buff)
				free(rc_overflow->buff);
			rc_overflow->top = cs_addrs->hdr->blk_size;
			rc_overflow->buff = (char*)malloc(rc_overflow->top);
			if (rc_overflow_size < rc_overflow->top)
				rc_overflow_size = rc_overflow->top;
		}
	}
	for (fdi_ptr = dsid_list; fdi_ptr && (fdi_ptr->dsid != dsid); fdi_ptr = fdi_ptr->next)
		;
	if (!fdi_ptr)
	{	/*	need to open new database, add to list, set fdi_ptr */
		gd_header = dsid_list->gda;
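		/* Look up the file name for this (node, dsid) pair in the namespace global of the
		 * special database, then pull the file-spec piece out of the record that comes back.
		 */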
		gv_currkey->end = 0;
		v.mvtype = MV_STR;
		v.str.len = RC_NSPACE_GLOB_LEN-1;
		v.str.addr = RC_NSPACE_GLOB;
		GV_BIND_NAME_AND_ROOT_SEARCH(gd_header, &v.str);
		if (!gv_target->root)	/* No namespace global */
			return RC_UNDEFNAMSPC;
		v.mvtype = MV_STR;
		v.str.len = SIZEOF(RC_NSPACE_DSI_SUB)-1;
		v.str.addr = RC_NSPACE_DSI_SUB;
		mval2subsc(&v, gv_currkey);
		node2 = node;
		MV_FORCE_MVAL(&v, node2);
		mval2subsc(&v, gv_currkey);
		i = dsid / 256;
		MV_FORCE_MVAL(&v, i);
		mval2subsc(&v, gv_currkey);
		if (gvcst_get(&v))
			return RC_UNDEFNAMSPC;
		for (cp = v.str.addr, i = 1; i < RC_FILESPEC_PIECE; i++)
			for (; *cp++ != RC_FILESPEC_DELIM; )
				;
		for (cp1 = cp; *cp1++ != RC_FILESPEC_DELIM; )
			;
		cp1--;
		len = (int)(cp1 - cp);
		if (len > MAX_FN_LEN)
			return RC_BADFILESPEC;
		fdi_ptr = (rc_dsid_list *)malloc(SIZEOF(rc_dsid_list));
		fdi_ptr->fname = (char *)malloc(len+1);
		fdi_ptr->dsid = dsid;
		memcpy(fdi_ptr->fname, cp, len);
		*(fdi_ptr->fname + (len)) = 0;
		gv_cur_region = (gd_region *)malloc(SIZEOF(gd_region));
		memset(gv_cur_region, 0, SIZEOF(gd_region));
		gv_cur_region->dyn.addr = (gd_segment *)malloc(SIZEOF(gd_segment));
		memset(gv_cur_region->dyn.addr, 0, SIZEOF(gd_segment));
		memcpy(gv_cur_region->dyn.addr->fname, cp, len);
		gv_cur_region->dyn.addr->fname_len = len;
		gv_cur_region->dyn.addr->acc_meth = dba_bg;
		ESTABLISH_RET(rc_fnd_file_ch2, RC_SUCCESS);
		gvcst_init(gv_cur_region);
		REVERT;
		change_reg();
		/* check to see if this DB has the reserved bytes field set
		 * correctly.  Global pages must always have some extra unused
		 * space left in them (RC_RESERVED bytes) so that the page
		 * will fit into the client buffer when unpacked by the
		 * client.
		 */
		if (cs_data->reserved_bytes < RC_RESERVED)
		{
			OMI_DBG((omi_debug,
			"Unable to access database file:  \"%s\"\nReserved_bytes field in the file header is too small for GT.CM\n",
			fdi_ptr->fname));
			free(fdi_ptr->fname);	/* release only the entry being built; the cached namespace entry stays intact */
			fdi_ptr->fname = NULL;
			free(fdi_ptr);
			fdi_ptr = NULL;
			free(gv_cur_region->dyn.addr);
			gv_cur_region->dyn.addr = NULL;
			free(gv_cur_region);
			gv_cur_region = NULL;
			return RC_FILEACCESS;
		}
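		/* Register this server in the file header under crit: record the dataset id and node
		 * on first use, and refuse the file if it is already claimed by a different dsid/node.
		 */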
		assert(!cs_addrs->hold_onto_crit);	/* this ensures we can safely do unconditional grab_crit and rel_crit */
		grab_crit(gv_cur_region);
		cs_data->rc_srv_cnt++;
		if (!cs_data->dsid)
		{
			cs_data->dsid = dsid;
			cs_data->rc_node = node;
		} else if (cs_data->dsid != dsid || cs_data->rc_node != node)
		{
			cs_data->rc_srv_cnt--;
			rel_crit(gv_cur_region);
			OMI_DBG((omi_debug, "Dataset ID/RC node mismatch"));
			OMI_DBG((omi_debug, "DB file:  \"%s\"\n", dsid_list->fname));
			OMI_DBG((omi_debug, "Stored DSID:  %d\tRC Node:  %d\n", cs_data->dsid, cs_data->rc_node));
			OMI_DBG((omi_debug, "RC Rq DSID:  %d\tRC Node:  %d\n", dsid,node));
			free(fdi_ptr->fname);
			fdi_ptr->fname = NULL;
			free(fdi_ptr);
			fdi_ptr = NULL;
			free(gv_cur_region->dyn.addr);
			gv_cur_region->dyn.addr = NULL;
			free(gv_cur_region);
			gv_cur_region = NULL;
			return RC_FILEACCESS;
		}
		rel_crit(gv_cur_region);
		keysize = DBKEYSIZE(gv_cur_region->max_key_size);
		GVKEYSIZE_INCREASE_IF_NEEDED(keysize);
		cs_addrs->dir_tree = (gv_namehead *)malloc(SIZEOF(gv_namehead) + 2 * SIZEOF(gv_key) + 3 * (keysize - 1));
		g = cs_addrs->dir_tree;
		g->first_rec = (gv_key*)(g->clue.base + keysize);
		g->last_rec = (gv_key*)(g->first_rec->base + keysize);
		g->clue.top = g->last_rec->top = g->first_rec->top = keysize;
		g->clue.prev = g->clue.end = 0;
		g->root = DIR_ROOT;
		fdi_ptr->gda = (gd_addr*)malloc(SIZEOF(gd_addr) + 3 * SIZEOF(gd_binding));
		fdi_ptr->gda->n_maps = 3;
		fdi_ptr->gda->n_regions = 1;
		fdi_ptr->gda->n_segments = 1;
		fdi_ptr->gda->maps = (gd_binding*)((char*)fdi_ptr->gda + SIZEOF(gd_addr));
		fdi_ptr->gda->max_rec_size = gv_cur_region->max_rec_size;
		map = fdi_ptr->gda->maps;
		map++;
		memset(map->name, 0, SIZEOF(map->name));
		map->name[0] = '%';
		map->reg.addr = gv_cur_region;
		map++;
		map->reg.addr = gv_cur_region;
		memset(map->name, -1, SIZEOF(map->name));
		fdi_ptr->gda->tab_ptr = (hash_table_mname *)malloc(SIZEOF(hash_table_mname));
		init_hashtab_mname(fdi_ptr->gda->tab_ptr, 0, HASHTAB_NO_COMPACT, HASHTAB_NO_SPARE_TABLE);
		fdi_ptr->next = dsid_list->next;
		dsid_list->next = fdi_ptr;
	}
	gv_cur_region = fdi_ptr->gda->maps[1].reg.addr;
	change_reg();
	if (rc_overflow->top < cs_addrs->hdr->blk_size)
	{
		if (rc_overflow->buff)
			free(rc_overflow->buff);
		rc_overflow->top = cs_addrs->hdr->blk_size;
		rc_overflow->buff = (char*)malloc(rc_overflow->top);
		if (rc_overflow_size < rc_overflow->top)
			rc_overflow_size = rc_overflow->top;
	}
	if (!rc_overflow->top)
	{
		rc_overflow->top = rc_overflow_size;
		rc_overflow->buff = (char *)malloc(rc_overflow->top);
	}
	gd_header = fdi_ptr->gda;
	return RC_SUCCESS;
}
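rc_fnd_file() keeps one cached entry per dataset id in a singly linked list whose head is the reserved namespace-database entry built on first use; lookups scan the list for the requested dsid, and a miss opens the database and splices a new entry in just behind the head. A minimal sketch of that lookup-or-insert pattern, with hypothetical types, is shown below.

#include <stdlib.h>

/* Hypothetical per-dataset cache entry; the real rc_dsid_list also carries the
 * region, global directory and file name of the opened database.
 */
typedef struct dsid_entry
{
	short			dsid;
	struct dsid_entry	*next;
} dsid_entry;

/* Return the cached entry for dsid, creating and splicing in a new one on a miss.
 * head is assumed non-NULL (the reserved namespace entry created at startup).
 */
static dsid_entry *find_or_add(dsid_entry *head, short dsid)
{
	dsid_entry	*p;

	for (p = head; (NULL != p) && (p->dsid != dsid); p = p->next)
		;
	if ((NULL == p) && (NULL != head))
	{	/* not cached yet: the real code opens the database here, then links the entry
		 * in right after the head so the namespace entry always stays first
		 */
		p = (dsid_entry *)malloc(sizeof(dsid_entry));
		if (NULL != p)
		{
			p->dsid = dsid;
			p->next = head->next;
			head->next = p;
		}
	}
	return p;
}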
Code example #8
File: mupip_extend.c Project: CeperaCPP/fis-gtm
void mupip_extend(void)
{
	unsigned short	r_len;
	char		regionname[MAX_RN_LEN];
	uint4		bplmap, bit_maps, blocks, i, old_total, total, status;
	int4		tblocks;
	int		fd;

	r_len = SIZEOF(regionname);
	if (cli_get_str("REG_NAME", regionname, &r_len) == FALSE)
		rts_error(VARLSTCNT(1) ERR_MUNODBNAME);
	if (cli_get_int("BLOCKS",&tblocks))
	{
		if (tblocks < 1)
		{
			util_out_print("!/BLOCKS too small, no extension done",TRUE);
			mupip_exit(ERR_MUNOACTION);
		}
		blocks = tblocks;
	} else
		blocks = (uint4)-1;
	gvinit();
	for (i = 0, gv_cur_region = gd_header->regions; i < gd_header->n_regions; i++, gv_cur_region++)
	{
		if (memcmp(gv_cur_region->rname, regionname, r_len) == 0)
			break;
	}
	if (i >= gd_header->n_regions)
	{
		gtm_putmsg(VARLSTCNT(4) ERR_NOREGION, 2, r_len, regionname);
		mupip_exit(ERR_MUNOACTION);
	}
	if ((dba_bg != gv_cur_region->dyn.addr->acc_meth) && (dba_mm != gv_cur_region->dyn.addr->acc_meth))
	{
		util_out_print("Can only EXTEND BG and MM databases",TRUE);
		mupip_exit(ERR_MUNOACTION);
	}
	if (reg_cmcheck(gv_cur_region))
	{
		util_out_print("!/Can't EXTEND region !AD across network",TRUE, REG_LEN_STR(gv_cur_region));
		mupip_exit(ERR_MUNOACTION);
	}
#	if !defined(MM_FILE_EXT_OK) && defined(UNIX)
	if (dba_mm == gv_cur_region->dyn.addr->acc_meth)
	{
		FILE_CNTL_INIT(gv_cur_region->dyn.addr);
		if (!STANDALONE(gv_cur_region))
		{
			util_out_print("Can't get standalone access to database file !AD with MM access method, no extension done.",
				TRUE, DB_LEN_STR(gv_cur_region));
			mupip_exit(ERR_MUNOACTION);
		}
		assert((FILE_INFO(gv_cur_region))->grabbed_access_sem); /* we should have standalone access */
	}
#	endif
	gvcst_init(gv_cur_region);
	if (gv_cur_region->was_open)
	{	/* This should not happen as extend works on only one region at a time, but handle for safety */
		gtm_putmsg(VARLSTCNT(4) ERR_DBOPNERR, 2, DB_LEN_STR(gv_cur_region));
		DB_IPCS_RESET(gv_cur_region);
		mupip_exit(ERR_MUNOACTION);
	}
	cs_addrs = &FILE_INFO(gv_cur_region)->s_addrs;
	cs_data = cs_addrs->hdr;
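	/* No BLOCKS qualifier was given: fall back to the extension size recorded in the file header. */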
	if ((uint4)-1 == blocks)
	{
		if (cs_addrs->hdr->extension_size == 0)
		{
			util_out_print("The extension size on file !AD is zero, no extension done.",TRUE,
				DB_LEN_STR(gv_cur_region));
			DB_IPCS_RESET(gv_cur_region);
			mupip_exit(ERR_MUNOACTION);
		}
		blocks = cs_addrs->hdr->extension_size;
	}
	/* cannot extend for read_only database. */
	if (gv_cur_region->read_only)
	{
		gtm_putmsg(VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region));
		DB_IPCS_RESET(gv_cur_region);
		mupip_exit(ERR_MUNOACTION);
	}
	switch(gv_cur_region->dyn.addr->acc_meth)
	{
		case dba_bg:
		case dba_mm:
			grab_crit(gv_cur_region);
			GRAB_UNFROZEN_CRIT(gv_cur_region, cs_addrs, cs_data);
			old_total = cs_addrs->ti->total_blks;
			if ((uint4)NO_FREE_SPACE == (status = gdsfilext(blocks, old_total)))
			{
				rel_crit(gv_cur_region);
				util_out_print("The extension failed on file !AD; check disk space and permissions.", TRUE,
					DB_LEN_STR(gv_cur_region));
				DB_IPCS_RESET(gv_cur_region);
				mupip_exit(ERR_MUNOACTION);
			} else
				assert(SS_NORMAL == status);
			total = cs_addrs->ti->total_blks;
			bplmap = cs_addrs->hdr->bplmap;
			bit_maps = DIVIDE_ROUND_UP(total, bplmap) - DIVIDE_ROUND_UP(old_total, bplmap);
			rel_crit(gv_cur_region);
			break;
		default:
			GTMASSERT;
	}
	util_out_print("Extension successful, file !AD extended by !UL blocks.  Total blocks = !UL.",TRUE,
		DB_LEN_STR(gv_cur_region), total - old_total - bit_maps, total - DIVIDE_ROUND_UP(total, bplmap));
	DB_IPCS_RESET(gv_cur_region); /* final cleanup (for successful case) before exit */
	mupip_exit(SS_NORMAL);
}
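The success message reports usable blocks rather than raw file growth: every bplmap-th block in a GDS file is a local bit map, so newly created bit maps are subtracted from the raw extension and from the new total. A small standalone sketch of that accounting follows; the sample numbers and the bplmap value of 512 are illustrative assumptions.

#include <stdio.h>

#define DIVIDE_ROUND_UP(v, d)	(((v) + (d) - 1) / (d))

int main(void)
{
	unsigned int	bplmap = 512;			/* blocks per local bit map (assumed typical value) */
	unsigned int	old_total = 1000, total = 2000;	/* total blocks before and after the extension      */
	unsigned int	bit_maps, data_blocks, usable_total;

	bit_maps = DIVIDE_ROUND_UP(total, bplmap) - DIVIDE_ROUND_UP(old_total, bplmap);
	data_blocks = total - old_total - bit_maps;		/* "extended by" count in the message */
	usable_total = total - DIVIDE_ROUND_UP(total, bplmap);	/* "Total blocks" in the message      */
	printf("extended by %u blocks, total usable blocks = %u\n", data_blocks, usable_total);
	return 0;
}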