/*
 * Description:
 * 	Grab ftok semaphore on replication instance file
 *	Release all replication semaphores for the instance (both jnlpool and recvpool)
 * 	Release ftok semaphore
 * Parameters:
 * Return Value: TRUE, if successful
 *		 FALSE, otherwise
 */
boolean_t mu_replpool_remove_sem(boolean_t immediate)
{
	char            *instname;
	gd_region	*replreg;
	unix_db_info	*udi;
	unsigned int	full_len;
	int		save_errno;

	error_def(ERR_REPLFTOKSEM);
	error_def(ERR_REPLACCSEM);

	/*
	 * JNL POOL SEMAPHORES
	 */
	replreg = jnlpool.jnlpool_dummy_reg;
	assert(replreg);
	instname = (char *)replreg->dyn.addr->fname;
	full_len = replreg->dyn.addr->fname_len;
	if (0 == full_len)
		return TRUE;
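	/* Grab the ftok semaphore on the replication instance file so that no other replication process can start up
	 * while the pool semaphores are being removed.
	 */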
	if (!ftok_sem_get(replreg, TRUE, REPLPOOL_ID, immediate))
		rts_error(VARLSTCNT(4) ERR_REPLFTOKSEM, 2, full_len, instname);
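	/* Remove the journal pool (source server side) semaphore set. On failure, release the ftok semaphore before
	 * reporting REPLACCSEM with the saved errno.
	 */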
	if (0 != remove_sem_set(SOURCE))
	{
		save_errno = REPL_SEM_ERRNO;
		if (!ftok_sem_release(replreg, TRUE, TRUE))
			gtm_putmsg(VARLSTCNT(4) ERR_REPLFTOKSEM, 2, full_len, instname);
		udi = FILE_INFO(replreg);
		rts_error(VARLSTCNT(6) ERR_REPLACCSEM, 3, udi->semid, full_len, instname, save_errno);
	}
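	/* Journal pool semaphores removed; reset the jnlpool information in the instance file header */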
	repl_inst_jnlpool_reset();
	/*
	 * RECV POOL SEMAPHORES
	 */
	replreg = recvpool.recvpool_dummy_reg;
	assert(replreg);
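	/* Remove the receive pool semaphore set. The instance file name and length from above are reused for error
	 * reporting since both pools belong to the same instance file.
	 */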
	if (0 != remove_sem_set(RECV))
	{
		save_errno = REPL_SEM_ERRNO;
		if (!ftok_sem_release(replreg, TRUE, TRUE))
			gtm_putmsg(VARLSTCNT(4) ERR_REPLFTOKSEM, 2, full_len, instname);
		udi = FILE_INFO(replreg);
		rts_error(VARLSTCNT(6) ERR_REPLACCSEM, 3, udi->semid, full_len, instname, save_errno);
	}
	repl_inst_recvpool_reset();
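	/* Both pools are now done with; release the ftok semaphore on the instance file */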
	if (!ftok_sem_release(replreg, TRUE, immediate))
		rts_error(VARLSTCNT(4) ERR_REPLFTOKSEM, 2, full_len, instname);
	return TRUE;
}
/*
 * This will rundown a replication instance's journal (and receiver) pool.
 *	Input Parameter:
 *		replpool_id of the instance. Instance file name must be null terminated in replpool_id.
 * Returns :
 *	TRUE,  if successful.
 *	FALSE, otherwise.
 */
boolean_t mu_rndwn_repl_instance(replpool_identifier *replpool_id, boolean_t immediate, boolean_t rndwn_both_pools,
					boolean_t *jnlpool_sem_created)
{
	boolean_t		jnlpool_stat = SS_NORMAL, recvpool_stat = SS_NORMAL, decr_cnt, sem_created = FALSE, ipc_rmvd;
	char			*instfilename;
	unsigned char		ipcs_buff[MAX_IPCS_ID_BUF], *ipcs_ptr;
	gd_region		*r_save;
	repl_inst_hdr		repl_instance;
	static	gd_region	*reg = NULL;
	struct semid_ds		semstat;
	struct shmid_ds		shmstat;
	unix_db_info		*udi;
	int			save_errno, sem_id, shm_id, status;
	sgmnt_addrs		*repl_csa;
	boolean_t		was_crit;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	if (NULL == reg)
	{
		r_save = gv_cur_region;
		mu_gv_cur_reg_init();
		reg = gv_cur_region;
		gv_cur_region = r_save;
	}
	*jnlpool_sem_created = FALSE;
	/* Assert that the layout of replpool_identifier is identical for all versions going forward as the function
	 * "validate_replpool_shm_entry" (used by the argumentless "mupip rundown") relies on this. This assert is placed
	 * here (instead of there) because the automated tests exercise this logic much more than the argumentless code
	 * path. If any of these asserts fail, "validate_replpool_shm_entry" needs to change to handle both the old and
	 * new layouts.
	 *
	 *	Structure ----> replpool_identifier <----    size 312 [0x0138]
	 *
	 *		offset = 0000 [0x0000]      size = 0012 [0x000c]    ----> replpool_identifier.label
	 *		offset = 0012 [0x000c]      size = 0001 [0x0001]    ----> replpool_identifier.pool_type
	 *		offset = 0013 [0x000d]      size = 0036 [0x0024]    ----> replpool_identifier.now_running
	 *		offset = 0052 [0x0034]      size = 0004 [0x0004]    ----> replpool_identifier.repl_pool_key_filler
	 *		offset = 0056 [0x0038]      size = 0256 [0x0100]    ----> replpool_identifier.instfilename
	 */
	assert(0 == OFFSETOF(replpool_identifier, label[0]));
	assert(12 == SIZEOF(((replpool_identifier *)NULL)->label));
	assert(12 == OFFSETOF(replpool_identifier, pool_type));
	assert(1 == SIZEOF(((replpool_identifier *)NULL)->pool_type));
	assert(13 == OFFSETOF(replpool_identifier, now_running[0]));
	assert(36 == SIZEOF(((replpool_identifier *)NULL)->now_running));
	assert(56 == OFFSETOF(replpool_identifier, instfilename[0]));
	assert(256 == SIZEOF(((replpool_identifier *)NULL)->instfilename));
	/* End asserts */
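	/* Point the jnlpool and recvpool dummy regions at the replication instance file so that the ftok and IPC
	 * routines below operate on that file.
	 */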
	jnlpool.jnlpool_dummy_reg = reg;
	recvpool.recvpool_dummy_reg = reg;
	instfilename = replpool_id->instfilename;
	reg->dyn.addr->fname_len = strlen(instfilename);
	assert(0 == instfilename[reg->dyn.addr->fname_len]);
	memcpy((char *)reg->dyn.addr->fname, instfilename, reg->dyn.addr->fname_len + 1);
	udi = FILE_INFO(reg);
	udi->fn = (char *)reg->dyn.addr->fname;
	/* Lock the replication instance using the ftok semaphore so that no other replication process can start up until
	 * we are done with the rundown.
	 */
	if (!ftok_sem_get(reg, TRUE, REPLPOOL_ID, immediate))
		return FALSE;
	ESTABLISH_RET(mu_rndwn_repl_instance_ch, FALSE);
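	/* Read the replication instance file header; it records the semid/shmid (and their creation times) for both the
	 * journal pool and the receive pool.
	 */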
	repl_inst_read(instfilename, (off_t)0, (sm_uc_ptr_t)&repl_instance, SIZEOF(repl_inst_hdr));
	assert(rndwn_both_pools || JNLPOOL_SEGMENT == replpool_id->pool_type || RECVPOOL_SEGMENT == replpool_id->pool_type);
	if (rndwn_both_pools || (JNLPOOL_SEGMENT == replpool_id->pool_type))
	{	/* --------------------------
		 * First rundown Journal pool
		 * --------------------------
		 */
		shm_id = repl_instance.jnlpool_shmid;
		if (SS_NORMAL == (jnlpool_stat = mu_replpool_grab_sem(&repl_instance, JNLPOOL_SEGMENT, &sem_created, immediate)))
		{
			/* Got JNL_POOL_ACCESS_SEM and incremented SRC_SERV_COUNT_SEM */
			assert(holds_sem[SOURCE][JNL_POOL_ACCESS_SEM]);
			assert(holds_sem[SOURCE][SRC_SERV_COUNT_SEM]);
			sem_id = repl_instance.jnlpool_semid;
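			/* Treat the jnlpool shmid recorded in the instance file header as stale if the id is invalid,
			 * shmctl() fails, or the shared memory creation time does not match the recorded ctime.
			 */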
			if ((INVALID_SHMID == shm_id) || (-1 == shmctl(shm_id, IPC_STAT, &shmstat))
				|| (shmstat.shm_ctime != repl_instance.jnlpool_shmid_ctime))
			{
				repl_instance.jnlpool_shmid = shm_id = INVALID_SHMID;
				repl_instance.jnlpool_shmid_ctime = 0;
			}
			assert((INVALID_SHMID != shm_id) || ((NULL == jnlpool.jnlpool_ctl) && (NULL == jnlpool_ctl)));
			ipc_rmvd = TRUE;
			if (INVALID_SHMID != shm_id)
			{
				replpool_id->pool_type = JNLPOOL_SEGMENT;
				jnlpool_stat = mu_rndwn_replpool(replpool_id, &repl_instance, shm_id, &ipc_rmvd);
				ipcs_ptr = i2asc((uchar_ptr_t)ipcs_buff, shm_id);
				*ipcs_ptr = '\0';
				if (rndwn_both_pools && ((SS_NORMAL != jnlpool_stat) || ipc_rmvd))
					gtm_putmsg(VARLSTCNT(6) (jnlpool_stat ? ERR_MUJPOOLRNDWNFL : ERR_MUJPOOLRNDWNSUC),
						4, LEN_AND_STR(ipcs_buff), LEN_AND_STR(instfilename));
			}
			assert(ipc_rmvd || (NULL != jnlpool_ctl));
			assert((NULL == jnlpool.jnlpool_ctl) || (SS_NORMAL == jnlpool_stat) || jgbl.onlnrlbk);
			assert((INVALID_SHMID != repl_instance.jnlpool_shmid) || (0 == repl_instance.jnlpool_shmid_ctime));
			assert((INVALID_SHMID == repl_instance.jnlpool_shmid) || (0 != repl_instance.jnlpool_shmid_ctime));
			assert(INVALID_SEMID != sem_id);
			if (!mur_options.rollback)
			{	/* Invoked by MUPIP RUNDOWN, in which case the semaphores need to be removed. But remove the
				 * semaphore ONLY if we created it here OR the journal pool was successfully removed.
				 */
				if (NULL == jnlpool_ctl)
				{
					if (((sem_created || (SS_NORMAL == jnlpool_stat))
						&& (SS_NORMAL == mu_replpool_release_sem(&repl_instance, JNLPOOL_SEGMENT, TRUE))))
					{	/* Now that semaphores are removed, reset fields in file header */
						if (!sem_created)
						{	/* If sem_id was "created" by mu_replpool_grab_sem then do NOT report the
							 * MUJPOOLRNDWNSUC message as it indicates that the semaphore was orphaned
							 * and we removed it when in fact there was no orphaned semaphore and we
							 * "created" it as part of mu_replpool_grab_sem to get standalone access to
							 * rundown the journal pool (which may or may not exist)
							 */
							ipcs_ptr = i2asc((uchar_ptr_t)ipcs_buff, sem_id);
							*ipcs_ptr = '\0';
							gtm_putmsg(VARLSTCNT(9) ERR_MUJPOOLRNDWNSUC, 4, LEN_AND_STR(ipcs_buff),
								LEN_AND_STR(instfilename), ERR_SEMREMOVED, 1, sem_id);
						}
						repl_inst_jnlpool_reset();
					}
				} else
				{	/* Anticipatory Freeze scheme is turned ON. So, release just the JNL_POOL_ACCESS_SEM. The
					 * semaphore will be released/removed in the caller (mupip_rundown)
					 */
					assert(ANTICIPATORY_FREEZE_AVAILABLE);
					assertpro(SS_NORMAL == (status = rel_sem(SOURCE, JNL_POOL_ACCESS_SEM)));
					assert(!holds_sem[SOURCE][JNL_POOL_ACCESS_SEM]);
					/* Since we are not resetting the semaphore IDs in the file header, we need to write out
					 * the semaphore IDs in the instance file (if we created them).
					 */
					if (sem_created)
						repl_inst_write(instfilename, (off_t)0, (sm_uc_ptr_t)&repl_instance,
									SIZEOF(repl_inst_hdr));
				}
				/* If the semaphore was not created here and the journal pool rundown failed (due to attached
				 * processes), the rundown process continues to hold the journal pool access control semaphore.
				 * This way, we hold the semaphore on behalf of the source server (now no longer alive) to
				 * prevent mu_rndwn_sem_all (invoked later) from cleaning up this orphaned semaphore (which
				 * causes REPLREQROLLBACK if the source server is restarted). But, since the semaphore is not
				 * released (until the rundown process dies), holds_sem[SOURCE][JNL_POOL_ACCESS_SEM] continues
				 * to remain TRUE. This causes asserts in ftok_sem_get if mu_rndwn_repl_instance is invoked for
				 * a different journal/receive pool. To work around it, set holds_sem[SOURCE][JNL_POOL_ACCESS_SEM]
				 * to FALSE. This is an interim solution until we record such semaphores in an ignore-list (or
				 * some such) and change mu_rndwn_sem_all to skip the ones that are present in the ignore list.
				 */
				holds_sem[SOURCE][JNL_POOL_ACCESS_SEM] = FALSE;
			}
		} else if (rndwn_both_pools && (INVALID_SHMID != shm_id))
		{
			ipcs_ptr = i2asc((uchar_ptr_t)ipcs_buff, shm_id);
			*ipcs_ptr = '\0';
			if (rndwn_both_pools)
				gtm_putmsg(VARLSTCNT(6) ERR_MUJPOOLRNDWNFL, 4, LEN_AND_STR(ipcs_buff),
					LEN_AND_STR(instfilename));
		}
		*jnlpool_sem_created = sem_created;
	}
	if (((SS_NORMAL == jnlpool_stat) || !jgbl.mur_rollback) &&
		(rndwn_both_pools || (RECVPOOL_SEGMENT == replpool_id->pool_type)))
	{	/* --------------------------
		 * Now rundown Receive pool
		 * --------------------------
		 * Note: The RECVPOOL is rundown ONLY if the JNLPOOL rundown was successful. This way, we don't end up
		 * creating new semaphores for the RECVPOOL if ROLLBACK is not going to start anyway because of the failed
		 * JNLPOOL rundown. The only exception is the MUPIP RUNDOWN command, in which case we try running down the
		 * RECVPOOL even if the JNLPOOL rundown failed.
		 */
		shm_id = repl_instance.recvpool_shmid;
		if (SS_NORMAL == (recvpool_stat = mu_replpool_grab_sem(&repl_instance, RECVPOOL_SEGMENT, &sem_created, immediate)))
		{
			sem_id = repl_instance.recvpool_semid;
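			/* As with the journal pool, treat the recvpool shmid recorded in the instance file header as stale if
			 * the id is invalid, shmctl() fails, or the creation time does not match the recorded ctime.
			 */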
			if ((INVALID_SHMID == shm_id) || (-1 == shmctl(shm_id, IPC_STAT, &shmstat))
				|| (shmstat.shm_ctime != repl_instance.recvpool_shmid_ctime))
			{
				repl_instance.recvpool_shmid = shm_id = INVALID_SHMID;
				repl_instance.recvpool_shmid_ctime = 0;
			}
			ipc_rmvd = TRUE;
			if (INVALID_SHMID != shm_id)
			{
				replpool_id->pool_type = RECVPOOL_SEGMENT;
				recvpool_stat = mu_rndwn_replpool(replpool_id, &repl_instance, shm_id, &ipc_rmvd);
				ipcs_ptr = i2asc((uchar_ptr_t)ipcs_buff, shm_id);
				*ipcs_ptr = '\0';
				if (rndwn_both_pools && ((SS_NORMAL != recvpool_stat) || ipc_rmvd))
					gtm_putmsg(VARLSTCNT(6) (recvpool_stat ? ERR_MURPOOLRNDWNFL : ERR_MURPOOLRNDWNSUC),
						4, LEN_AND_STR(ipcs_buff), LEN_AND_STR(instfilename));
			}
			assert((TRUE == ipc_rmvd) || (SS_NORMAL != recvpool_stat) || jgbl.onlnrlbk);
			assert((INVALID_SHMID != repl_instance.recvpool_shmid) || (0 == repl_instance.recvpool_shmid_ctime));
			assert((INVALID_SHMID == repl_instance.recvpool_shmid) || (0 != repl_instance.recvpool_shmid_ctime));
			assert(INVALID_SEMID != sem_id);
			if (!mur_options.rollback)
			{	/* Invoked by MUPIP RUNDOWN, in which case the semaphores need to be removed. But remove the
				 * semaphore ONLY if we created it here OR the receive pool was successfully removed.
				 */
				if ((sem_created || (SS_NORMAL == recvpool_stat))
					&& (SS_NORMAL == mu_replpool_release_sem(&repl_instance, RECVPOOL_SEGMENT, TRUE)))
				{	/* Now that semaphores are removed, reset fields in file header */
					if (!sem_created)
					{	/* if sem_id was "created" by mu_replpool_grab_sem then do NOT report the
						 * MURPOOLRNDWNSUC message as it indicates that the semaphore was orphaned and we
						 * removed it when in fact there was no orphaned semaphore and we "created" it as
						 * part of mu_replpool_grab_sem to get standalone access to rundown the receiver
						 * pool (which may or may not exist)
						 */
						ipcs_ptr = i2asc((uchar_ptr_t)ipcs_buff, sem_id);
						*ipcs_ptr = '\0';
						gtm_putmsg(VARLSTCNT(9) ERR_MURPOOLRNDWNSUC, 4, LEN_AND_STR(ipcs_buff),
							LEN_AND_STR(instfilename), ERR_SEMREMOVED, 1, sem_id);
					}
					if (NULL != jnlpool_ctl)
					{	/* Journal pool is not yet removed. So, grab the lock before resetting the
						 * semid/shmid fields in the file header, as repl_inst_recvpool_reset expects
						 * the caller to hold crit if the journal pool is available.
						 */
						repl_csa = &FILE_INFO(jnlpool.jnlpool_dummy_reg)->s_addrs;
						assert(!repl_csa->now_crit);
						assert(!repl_csa->hold_onto_crit);
						was_crit = repl_csa->now_crit;
						/* Since we do grab_lock, below, we need to do a per-process initialization. Also,
						 * start heartbeat so that grab_lock can issue MUTEXLCKALERT and get C-stacks if
						 * waiting for crit
						 */
						START_HEARTBEAT_IF_NEEDED;
						mutex_per_process_init();
						if (!was_crit)
							grab_lock(jnlpool.jnlpool_dummy_reg, TRUE, GRAB_LOCK_ONLY);
					}
					repl_inst_recvpool_reset();
					if ((NULL != jnlpool_ctl) && !was_crit)
						rel_lock(jnlpool.jnlpool_dummy_reg);
				}
				/* If the semaphore was not created here and the receive pool rundown failed (due to attached
				 * processes), the rundown process continues to hold the receive pool access control semaphore.
				 * This way, we hold the semaphore on behalf of the receiver server (now no longer alive) to
				 * prevent mu_rndwn_sem_all (invoked later) from cleaning up this orphaned semaphore (which
				 * causes REPLREQROLLBACK if the receiver is restarted). But, since the semaphore is not released
				 * (until the rundown process dies), holds_sem[RECV][RECV_POOL_ACCESS_SEM] continues to remain
				 * TRUE. This causes asserts in ftok_sem_get if mu_rndwn_repl_instance is invoked for a different
				 * journal/receive pool. To work around it, set holds_sem[RECV][RECV_POOL_ACCESS_SEM] to FALSE.
				 * This is an interim solution until we record such semaphores in an ignore-list (or some such)
				 * and change mu_rndwn_sem_all to skip the ones that are present in the ignore list.
				 */
				assert((sem_created || (SS_NORMAL == recvpool_stat)) || holds_sem[RECV][RECV_POOL_ACCESS_SEM]);
				DEBUG_ONLY(set_sem_set_recvr(sem_id));
			}
		} else if (rndwn_both_pools && (INVALID_SHMID != shm_id))
		{
			ipcs_ptr = i2asc((uchar_ptr_t)ipcs_buff, shm_id);
			*ipcs_ptr = '\0';
			if (rndwn_both_pools)
				gtm_putmsg(VARLSTCNT(6) ERR_MURPOOLRNDWNFL, 4, LEN_AND_STR(ipcs_buff),
					LEN_AND_STR(instfilename));
		}
	}
	assert(jgbl.onlnrlbk || ANTICIPATORY_FREEZE_AVAILABLE || (NULL == jnlpool.repl_inst_filehdr));
	if (mur_options.rollback && (SS_NORMAL == jnlpool_stat) && (SS_NORMAL == recvpool_stat))
	{
		assert(jgbl.onlnrlbk || ANTICIPATORY_FREEZE_AVAILABLE || ((INVALID_SHMID == repl_instance.jnlpool_shmid)
			&& (INVALID_SHMID == repl_instance.recvpool_shmid)));
		/* Initialize jnlpool.repl_inst_filehdr as it is used later by gtmrecv_fetchresync() */
		decr_cnt = FALSE;
		if (NULL == jnlpool.repl_inst_filehdr)
		{	/* Possible if there is NO journal pool in the first place. In this case, malloc the structure here and
			 * copy the file header from repl_instance structure.
			 */
			jnlpool.repl_inst_filehdr = (repl_inst_hdr_ptr_t)malloc(SIZEOF(repl_inst_hdr));
			memcpy(jnlpool.repl_inst_filehdr, &repl_instance, SIZEOF(repl_inst_hdr));
		} else
		{
			assert(repl_instance.jnlpool_semid == jnlpool.repl_inst_filehdr->jnlpool_semid);
			assert(repl_instance.jnlpool_semid_ctime == jnlpool.repl_inst_filehdr->jnlpool_semid_ctime);
			assert(repl_instance.jnlpool_shmid == jnlpool.repl_inst_filehdr->jnlpool_shmid);
			assert(repl_instance.jnlpool_shmid_ctime == jnlpool.repl_inst_filehdr->jnlpool_shmid_ctime);
			/* If the ONLINE ROLLBACK command is run on the primary when the source server is up and running,
			 * jnlpool.repl_inst_filehdr->recvpool_semid will be INVALID because there is NO receiver server
			 * running. However, ROLLBACK creates semaphores for both the journal pool and the receive pool and
			 * writes them to the instance file header. Copy this information to the copy of the file header in
			 * the jnlpool as well.
			 */
			jnlpool.repl_inst_filehdr->recvpool_semid = repl_instance.recvpool_semid;
			jnlpool.repl_inst_filehdr->recvpool_semid_ctime = repl_instance.recvpool_semid_ctime;
		}
		/* Flush changes to the replication instance file header to disk */
		repl_inst_write(instfilename, (off_t)0, (sm_uc_ptr_t)&repl_instance, SIZEOF(repl_inst_hdr));
	} else /* for MUPIP RUNDOWN, semid fields in the file header are reset and written in mu_replpool_release_sem() above */
		decr_cnt = (NULL == jnlpool_ctl); /* for anticipatory freeze, mupip_rundown releases the semaphore */
	REVERT;
	/* Release replication instance ftok semaphore lock */
	if (!ftok_sem_release(reg, decr_cnt, immediate)) /* Do not decrement the counter if ROLLBACK */
		return FALSE;
	return ((SS_NORMAL == jnlpool_stat) && (SS_NORMAL == recvpool_stat));
}
/*
 * This will rundown a replication instance's journal (and receiver) pool.
 *	Input Parameter:
 *		replpool_id of the instance. Instance file name must be null terminated in replpool_id.
 * Returns :
 *	TRUE,  if successful.
 *	FALSE, otherwise.
 */
boolean_t mu_rndwn_repl_instance(replpool_identifier *replpool_id, boolean_t immediate, boolean_t rndwn_both_pools)
{
	boolean_t		jnlpool_stat = TRUE, recvpool_stat = TRUE;
	char			*instfilename, shmid_buff[TMP_BUF_LEN];
	gd_region		*r_save;
	repl_inst_hdr		repl_instance;
	static	gd_region	*reg = NULL;
	struct semid_ds		semstat;
	struct shmid_ds		shmstat;
	union semun		semarg;
	uchar_ptr_t		ret_ptr;
	unix_db_info		*udi;
	int			save_errno;

	error_def(ERR_MUJPOOLRNDWNSUC);
	error_def(ERR_MURPOOLRNDWNSUC);
	error_def(ERR_MUJPOOLRNDWNFL);
	error_def(ERR_MURPOOLRNDWNFL);
	error_def(ERR_SEMREMOVED);
	error_def(ERR_REPLACCSEM);
	error_def(ERR_SYSCALL);

	if (NULL == reg)
	{
		r_save = gv_cur_region;
		mu_gv_cur_reg_init();
		reg = gv_cur_region;
		gv_cur_region = r_save;
	}
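	/* Point the jnlpool and recvpool dummy regions at the replication instance file so that the ftok and IPC
	 * routines below operate on that file.
	 */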
	jnlpool.jnlpool_dummy_reg = reg;
	recvpool.recvpool_dummy_reg = reg;
	instfilename = replpool_id->instfilename;
	reg->dyn.addr->fname_len = strlen(instfilename);
	assert(0 == instfilename[reg->dyn.addr->fname_len]);
	memcpy((char *)reg->dyn.addr->fname, instfilename, reg->dyn.addr->fname_len + 1);
	udi = FILE_INFO(reg);
	udi->fn = (char *)reg->dyn.addr->fname;
	/* Lock replication instance using ftok semaphore */
	if (!ftok_sem_get(reg, TRUE, REPLPOOL_ID, immediate))
		return FALSE;
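	/* Read the replication instance file header; it records the semid/shmid (and their creation times) for both the
	 * journal pool and the receive pool.
	 */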
	repl_inst_read(instfilename, (off_t)0, (sm_uc_ptr_t)&repl_instance, SIZEOF(repl_inst_hdr));
	semarg.buf = &semstat;
	assert(rndwn_both_pools || JNLPOOL_SEGMENT == replpool_id->pool_type || RECVPOOL_SEGMENT == replpool_id->pool_type);
	if (rndwn_both_pools || (JNLPOOL_SEGMENT == replpool_id->pool_type))
	{	/* --------------------------
		 * First rundown Journal pool
		 * --------------------------
		 */
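		/* Invalidate the jnlpool semid/shmid recorded in the instance file header if IPC_STAT fails or the
		 * creation time does not match the recorded value; such ids are considered stale.
		 */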
		if (INVALID_SEMID != repl_instance.jnlpool_semid)
			if ((-1 == semctl(repl_instance.jnlpool_semid, 0, IPC_STAT, semarg)) ||
					(semarg.buf->sem_ctime != repl_instance.jnlpool_semid_ctime))
				repl_instance.jnlpool_semid = INVALID_SEMID;
		if (INVALID_SHMID != repl_instance.jnlpool_shmid)
			if ((-1 == shmctl(repl_instance.jnlpool_shmid, IPC_STAT, &shmstat)) ||
					(shmstat.shm_ctime != repl_instance.jnlpool_shmid_ctime))
				repl_instance.jnlpool_shmid = INVALID_SHMID;
		if (INVALID_SHMID != repl_instance.jnlpool_shmid)
		{
			replpool_id->pool_type = JNLPOOL_SEGMENT;
			jnlpool_stat = mu_rndwn_replpool(replpool_id, repl_instance.jnlpool_semid, repl_instance.jnlpool_shmid);
			ret_ptr = i2asc((uchar_ptr_t)shmid_buff, repl_instance.jnlpool_shmid);
			*ret_ptr = '\0';
			if (rndwn_both_pools)
				gtm_putmsg(VARLSTCNT(6) (jnlpool_stat ? ERR_MUJPOOLRNDWNSUC : ERR_MUJPOOLRNDWNFL),
					4, LEN_AND_STR(shmid_buff), LEN_AND_STR(replpool_id->instfilename));
		} else if (INVALID_SEMID != repl_instance.jnlpool_semid)
		{
			if (0 == sem_rmid(repl_instance.jnlpool_semid))
			{	/* note that shmid_buff used here is actually a buffer to hold semid (not shmid) */
				ret_ptr = i2asc((uchar_ptr_t)shmid_buff, repl_instance.jnlpool_semid);
				*ret_ptr = '\0';
				gtm_putmsg(VARLSTCNT(9) ERR_MUJPOOLRNDWNSUC, 4, LEN_AND_STR(shmid_buff),
					LEN_AND_STR(replpool_id->instfilename), ERR_SEMREMOVED, 1, repl_instance.jnlpool_semid);
			} else
			{
				save_errno = errno;
				gtm_putmsg(VARLSTCNT(13) ERR_REPLACCSEM, 3, repl_instance.jnlpool_semid,
					RTS_ERROR_STRING(instfilename), ERR_SYSCALL, 5, RTS_ERROR_LITERAL("jnlpool sem_rmid()"),
					CALLFROM, save_errno);
			}
			/* Note that jnlpool_stat is not set to FALSE in case sem_rmid() fails above. This is because the
			 * journal pool is anyway not present and it is safer to reset the sem/shmids in the instance file.
			 * The only thing this might cause is a stranded semaphore but that is considered better than getting
			 * errors due to not resetting instance file.
			 */
		}
		if (jnlpool_stat)	/* Reset instance file for jnlpool info */
			repl_inst_jnlpool_reset();
	}
	if (rndwn_both_pools || (RECVPOOL_SEGMENT == replpool_id->pool_type))
	{	/* --------------------------
		 * Now rundown Receive pool
		 * --------------------------
		 */
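		/* As above, invalidate the recvpool semid/shmid recorded in the instance file header if IPC_STAT fails or
		 * the creation time does not match the recorded value.
		 */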
		if (INVALID_SEMID != repl_instance.recvpool_semid)
			if ((-1 == semctl(repl_instance.recvpool_semid, 0, IPC_STAT, semarg)) ||
					(semarg.buf->sem_ctime != repl_instance.recvpool_semid_ctime))
				repl_instance.recvpool_semid = INVALID_SEMID;
		if (INVALID_SHMID != repl_instance.recvpool_shmid)
			if ((-1 == shmctl(repl_instance.recvpool_shmid, IPC_STAT, &shmstat)) ||
					(shmstat.shm_ctime != repl_instance.recvpool_shmid_ctime))
				repl_instance.recvpool_shmid = INVALID_SHMID;
		if (INVALID_SHMID != repl_instance.recvpool_shmid)
		{
			replpool_id->pool_type = RECVPOOL_SEGMENT;
			recvpool_stat = mu_rndwn_replpool(replpool_id, repl_instance.recvpool_semid, repl_instance.recvpool_shmid);
			ret_ptr = i2asc((uchar_ptr_t)shmid_buff, repl_instance.recvpool_shmid);
			*ret_ptr = '\0';
			if (rndwn_both_pools)
				gtm_putmsg(VARLSTCNT(6) (recvpool_stat ? ERR_MURPOOLRNDWNSUC : ERR_MURPOOLRNDWNFL),
					4, LEN_AND_STR(shmid_buff), LEN_AND_STR(replpool_id->instfilename));
		} else if (INVALID_SEMID != repl_instance.recvpool_semid)
		{
			if (0 == sem_rmid(repl_instance.recvpool_semid))
			{	/* note that shmid_buff used here is actually a buffer to hold semid (not shmid) */
				ret_ptr = i2asc((uchar_ptr_t)shmid_buff, repl_instance.recvpool_semid);
				*ret_ptr = '\0';
				gtm_putmsg(VARLSTCNT(9) ERR_MURPOOLRNDWNSUC, 4, LEN_AND_STR(shmid_buff),
					LEN_AND_STR(replpool_id->instfilename), ERR_SEMREMOVED, 1, repl_instance.recvpool_semid);
			} else
			{
				save_errno = errno;
				gtm_putmsg(VARLSTCNT(13) ERR_REPLACCSEM, 3, repl_instance.recvpool_semid,
					RTS_ERROR_STRING(instfilename), ERR_SYSCALL, 5, RTS_ERROR_LITERAL("recvpool sem_rmid()"),
					CALLFROM, save_errno);
			}
			/* Note that recvpool_stat is not set to FALSE in case sem_rmid() fails above. This is because the
			 * receive pool is anyway not present and it is safer to reset the sem/shmids in the instance file.
			 * The only thing this might cause is a stranded semaphore but that is considered better than getting
			 * errors due to not resetting instance file.
			 */
		}
		if (recvpool_stat)	/* Reset instance file for recvpool info */
			repl_inst_recvpool_reset();
	}
	/* Release replication instance ftok semaphore lock */
	if (!ftok_sem_release(reg, TRUE, immediate))
		return FALSE;
	return (jnlpool_stat && recvpool_stat);
}