Example #1
/*
 * ------------------------------------------
 * Hang the process for a specified time.
 *
 *	Goes to sleep for a positive value.
 *	Any caught signal will terminate the sleep
 *	following the execution of that signal's catching routine.
 *
 * Arguments:
 *	num - time to sleep
 *
 * Return:
 *	none
 * ------------------------------------------
 */
void op_hang(mval* num)
{
	int 	ms;
#ifdef VMS
	uint4 	time[2];
	int4	efn_mask, status;
	error_def(ERR_SYSCALL);
#endif
	ms = 0;
	MV_FORCE_NUM(num);
	if (num->mvtype & MV_INT)
	{
		if (0 < num->m[1])
		{
			assert(MV_BIAS >= 1000);	/* if formats change overflow may need attention */
			ms = num->m[1] * (1000 / MV_BIAS);
		}
	} else if (0 == num->sgn) /* if sign is not 0 it means num is negative */
		ms = mval2i(num) * 1000;	/* too big to care about fractional amounts */
	if (ms)
	{
		UNIX_ONLY(hiber_start(ms);)
		VMS_ONLY(
			time[0] = -time_low_ms(ms);
			time[1] = -time_high_ms(ms) - 1;
			efn_mask = (1 << efn_outofband | 1 << efn_timer);
			if (SS$_NORMAL != (status = sys$setimr(efn_timer, &time, NULL, &time, 0)))
				rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("$setimr"), CALLFROM, status);
			if (SS$_NORMAL != (status = sys$wflor(efn_outofband, efn_mask)))
				rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("$wflor"), CALLFROM, status);
		)
		if (outofband)
Example #2
int mu_rndwn_sem_all(void)
{
	int 			save_errno, exit_status = SS_NORMAL, semid;
	char			entry[MAX_ENTRY_LEN];
	FILE			*pf;
	char			fname[MAX_FN_LEN + 1], *fgets_res;
	boolean_t 		rem_sem;
	shm_parms		*parm_buff;

	if (NULL == (pf = POPEN(IPCS_SEM_CMD_STR, "r")))
	{
		save_errno = errno;
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("POPEN()"), CALLFROM, save_errno);
		return ERR_MUNOTALLSEC;
	}
	while (NULL != (FGETS(entry, SIZEOF(entry), pf, fgets_res)) && entry[0] != '\n')
	{
		if (-1 != (semid = parse_sem_id(entry)))
		{
			if (is_orphaned_gtm_semaphore(semid))
			{	/* semval == 0 and corresponding shared memory has been removed */
				if (-1 != semctl(semid, 0, IPC_RMID))
				{
					gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(3) ERR_SEMREMOVED, 1, semid);
					send_msg_csa(CSA_ARG(NULL) VARLSTCNT(3) ERR_SEMREMOVED, 1, semid);
				}
			}
		}
	}
	pclose(pf);
	return exit_status;
}
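
The function above leans on a simple pattern: run an external ipcs command through a pipe and walk its output one line at a time until a blank line or EOF. Below is a minimal standalone sketch of that pattern, using plain popen()/fgets() in place of the POPEN/FGETS wrapper macros; the command string, buffer size, and function name are illustrative only.

/* Sketch: read the output of an external command line by line. */
#include <stdio.h>

static int scan_sem_ids(void)
{
	char	entry[512];
	FILE	*pf;

	if (NULL == (pf = popen("ipcs -s", "r")))
		return -1;				/* report popen() failure to the caller */
	while ((NULL != fgets(entry, sizeof(entry), pf)) && ('\n' != entry[0]))
	{
		/* each iteration sees one line of command output; parse the semid here */
	}
	pclose(pf);
	return 0;
}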
int continue_proc(pid_t pid)
{
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	DEBUG_ONLY(if (!TREF(gtm_usesecshr)))	/* Cause debug builds to talk to gtmsecshr more often */
	{
		if (WBTEST_ENABLED(WBTEST_HOLD_GTMSOURCE_SRV_LATCH))
		{
			/* Simulate the kill below, but ignore its return status so that we end up invoking gtmsecshr */
			kill(pid, SIGCONT);
			/* Wait until the target quits so that kill() call by gtmsecshr fails with ESRCH */
			while (is_proc_alive(pid, 0))
				LONG_SLEEP(1);
		}
		else if (0 == kill(pid, SIGCONT))
			return 0;
		else if (ESRCH == errno)
		{
			send_msg_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_NOSUCHPROC, 3, pid, RTS_ERROR_LITERAL("continue"));
			return ESRCH;
		} else
			assert(EINVAL != errno);
	}
	return send_mesg2gtmsecshr(CONTINUE_PROCESS, pid, NULL, 0);
}
Example #4
void iott_close(io_desc *v, mval *pp)
{
	d_tt_struct	*ttptr;
	params		ch;
	int		status;
	int		p_offset;
	boolean_t	ch_set;

	assert(v->type == tt);
	if (v->state != dev_open)
		return;
	ESTABLISH_GTMIO_CH(&v->pair, ch_set);
	iott_flush(v);
	if (v->pair.in != v)
		assert(v->pair.out == v);
	ttptr = (d_tt_struct *)v->dev_sp;
	if (v->pair.out != v)
		assert(v->pair.in == v);
	v->state = dev_closed;
	resetterm(v);

	p_offset = 0;
	while (*(pp->str.addr + p_offset) != iop_eol)
	{
		if ((ch = *(pp->str.addr + p_offset++)) == iop_exception)
		{
			v->error_handler.len = *(pp->str.addr + p_offset);
			v->error_handler.addr = (char *)(pp->str.addr + p_offset + 1);
			s2pool(&v->error_handler);
		}
		p_offset += ((IOP_VAR_SIZE == io_params_size[ch]) ?
			(unsigned char)*(pp->str.addr + p_offset) + 1 : io_params_size[ch]);
	}
	if (v == io_std_device.in || (v == io_std_device.out))
	{
		REVERT_GTMIO_CH(&v->pair, ch_set);
		return;
	}

	CLOSEFILE_RESET(ttptr->fildes, status);	/* resets "ttptr->fildes" to FD_INVALID */
	if (0 != status)
	{
		assert(status == errno);
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5,
				RTS_ERROR_LITERAL("iott_close(CLOSEFILE)"), CALLFROM, status);
	}
	if (ttptr->recall_buff.addr)
	{
		free(ttptr->recall_buff.addr);
		ttptr->recall_buff.addr = NULL;
	}
	REVERT_GTMIO_CH(&v->pair, ch_set);
	return;
}
Example #5
int continue_proc(pid_t pid)
{
	error_def(ERR_NOSUCHPROC);

	if (0 == kill(pid, SIGCONT))
		return(0);
	else if (ESRCH == errno)
	{
		send_msg(VARLSTCNT(5) ERR_NOSUCHPROC, 3, pid, RTS_ERROR_LITERAL("continue"));
		return(ESRCH);
	} else
		assert(EINVAL != errno);
	return(send_mesg2gtmsecshr(CONTINUE_PROCESS, pid, (char *)NULL, 0));
}
Example #6
int crit_wake (sm_uint_ptr_t pid)
{
	error_def(ERR_NOSUCHPROC);

	if (0 == kill(*pid, SIGALRM))
		return 0;
	else if (ESRCH == errno)
	{
		send_msg(VARLSTCNT(5) ERR_NOSUCHPROC, 3, *pid, RTS_ERROR_LITERAL("wake"));
		return(ESRCH);
	} else
		assert(EINVAL != errno);
	return send_mesg2gtmsecshr(WAKE_MESSAGE, *pid, (char *)NULL, 0);
}
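
The continue_proc() and crit_wake() variants above share one errno convention around kill(): 0 means the signal was delivered, ESRCH means the target no longer exists, and anything else (typically EPERM) is escalated to the privileged gtmsecshr helper. A minimal sketch of that decision follows, with a hypothetical escalate_to_helper() standing in for send_mesg2gtmsecshr().

/* Sketch of the signal-then-escalate pattern; escalate_to_helper() is a
 * hypothetical stand-in for send_mesg2gtmsecshr(). */
#include <errno.h>
#include <signal.h>
#include <sys/types.h>

extern int escalate_to_helper(pid_t pid);		/* hypothetical privileged fallback */

int signal_or_escalate(pid_t pid, int sig)
{
	if (0 == kill(pid, sig))
		return 0;				/* signal delivered directly */
	if (ESRCH == errno)
		return ESRCH;				/* target process no longer exists */
	/* typically EPERM: insufficient privilege, so ask the helper to send it */
	return escalate_to_helper(pid);
}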
Example #7
int continue_proc(pid_t pid)
{
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	DEBUG_ONLY(if (!TREF(gtm_usesecshr)))	/* Cause debug builds to talk to gtmsecshr more often */
	{
		if (0 == kill(pid, SIGCONT))
			return 0;
		else if (ESRCH == errno)
		{
			send_msg(VARLSTCNT(5) ERR_NOSUCHPROC, 3, pid, RTS_ERROR_LITERAL("continue"));
			return ESRCH;
		} else
			assert(EINVAL != errno);
	}
	return send_mesg2gtmsecshr(CONTINUE_PROCESS, pid, NULL, 0);
}
Example #8
int mu_rndwn_all(void)
{
	int 			save_errno, fname_len, exit_status = SS_NORMAL, shmid, tmp_exit_status;
	char			entry[MAX_ENTRY_LEN];
	FILE			*pf;
	char			*fname, *fgets_res;
	shm_parms		*parm_buff;

	if (NULL == (pf = POPEN(IPCS_CMD_STR, "r")))
	{
		save_errno = errno;
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("POPEN()"), CALLFROM, save_errno);
		return ERR_MUNOTALLSEC;
	}
	fname = (char *)malloc(MAX_FN_LEN + 1);
	while (NULL != (FGETS(entry, SIZEOF(entry), pf, fgets_res)) && entry[0] != '\n')
	{
		tmp_exit_status = SS_NORMAL;
		parm_buff = get_shm_parm(entry);
		if (NULL == parm_buff)
		{
			exit_status = ERR_MUNOTALLSEC;
			continue;
		}
		mu_rndwn_all_helper(parm_buff, fname, &exit_status, &tmp_exit_status);
		if ((SS_NORMAL == exit_status) && (SS_NORMAL != tmp_exit_status))
			exit_status = tmp_exit_status;
		if (mu_rndwn_all_helper_error)
		{	/* Encountered a runtime error while processing this ipc. Make sure we return with
			 * MUNOTALLSEC and reset this static variable before starting processing on next ipc.
			 */
			mu_rndwn_all_helper_error = FALSE;
			if (SS_NORMAL == exit_status)
				exit_status = ERR_MUNOTALLSEC;
		}
		if (NULL != parm_buff)
			free(parm_buff);
	}
	pclose(pf);
	free(fname);
	return exit_status;
}
Example #9
int gtmsource_losttncomplete(void)
{
	int			idx;
	gtmsource_local_ptr_t	gtmsourcelocal_ptr;

	error_def(ERR_MUPCLIERR);
	error_def(ERR_TEXT);

	assert(holds_sem[SOURCE][JNL_POOL_ACCESS_SEM]);
	/* We don't need the access control semaphore here. So release it first and avoid any potential deadlocks. */
	if (0 != rel_sem(SOURCE, JNL_POOL_ACCESS_SEM))
		rts_error(VARLSTCNT(5) ERR_TEXT, 2, RTS_ERROR_LITERAL("Error in source server losttncomplete rel_sem"),
				REPL_SEM_ERRNO);
	assert(NULL == jnlpool.gtmsource_local);
	repl_log(stderr, TRUE, TRUE, "Initiating LOSTTNCOMPLETE operation on instance [%s]\n",
		jnlpool.repl_inst_filehdr->this_instname);
	/* If this is a root primary instance, propagate this information to secondaries as well so they reset zqgblmod_seqno to 0.
	 * If propagating primary, no need to send this to tertiaries as the receiver on the tertiary cannot have started with
	 * non-zero "zqgblmod_seqno" to begin with (PRIMARYNOTROOT error would have been issued).
	 */
	if (!jnlpool.jnlpool_ctl->upd_disabled)
	{
		grab_lock(jnlpool.jnlpool_dummy_reg);
		jnlpool.jnlpool_ctl->send_losttn_complete = TRUE;
		gtmsourcelocal_ptr = jnlpool.gtmsource_local_array;
		for (idx = 0; idx < NUM_GTMSRC_LCL; idx++, gtmsourcelocal_ptr++)
		{
			if (('\0' == gtmsourcelocal_ptr->secondary_instname[0])
					&& (0 == gtmsourcelocal_ptr->read_jnl_seqno)
					&& (0 == gtmsourcelocal_ptr->connect_jnl_seqno))
				continue;
			gtmsourcelocal_ptr->send_losttn_complete = TRUE;
		}
		rel_lock(jnlpool.jnlpool_dummy_reg);
	}
	/* Reset zqgblmod_seqno and zqgblmod_tn to 0 in this instance as well */
	repl_inst_reset_zqgblmod_seqno_and_tn();
	return (NORMAL_SHUTDOWN);
}
Example #10
static bool mu_open_try(io_log_name *naml, io_log_name *tl, mval *pp, mval *mspace)
{
	int4		status;
	int4		size;
	mstr		tn;		/* translated name */
	mstr		chset_mstr;
	int		oflag;
	unsigned char	ch;
	int		file_des;
	struct stat	outbuf;
	char		*buf, namebuf[LOGNAME_LEN + 1];
	d_mt_struct	*mt_ptr;
	int		umask_orig, umask_creat;
	int		char_or_block_special;
	int		fstat_res;
	int		save_errno;
	int		p_offset;
	boolean_t	ichset_specified, ochset_specified;

	error_def(ERR_SYSCALL);

	mt_ptr = NULL;
	char_or_block_special = FALSE;
	file_des = -2;
	oflag = 0;
	tn.len = tl->len;
	if (tn.len > LOGNAME_LEN)
		tn.len = LOGNAME_LEN;
	tn.addr = tl->dollar_io;
	memcpy(namebuf, tn.addr, tn.len);
	namebuf[tn.len] = '\0';
	buf = namebuf;
	if (0 == naml->iod)
	{
		if (0 == tl->iod)
		{
			tl->iod =  (io_desc *)malloc(sizeof(io_desc));
			memset((char*)tl->iod, 0, sizeof(io_desc));
			tl->iod->pair.in  = tl->iod;
			tl->iod->pair.out = tl->iod;
			tl->iod->trans_name = tl;
			tl->iod->type = n_io_dev_types;
			p_offset = 0;
			while(iop_eol != *(pp->str.addr + p_offset))
			{
				ch = *(pp->str.addr + p_offset++);
				if (iop_sequential == ch)
					tl->iod->type = rm;

				if (IOP_VAR_SIZE == io_params_size[ch])
					p_offset += *(pp->str.addr + p_offset) + 1;
				else
					p_offset += io_params_size[ch];
			}
		}
		if ((n_io_dev_types == tl->iod->type) && mspace && mspace->str.len)
			tl->iod->type = us;
		if (n_io_dev_types == tl->iod->type)
		{
			if (0 == memvcmp(tn.addr, tn.len, sys_input.addr, sys_input.len))
			{
				file_des = 0;
				FSTAT_FILE(file_des, &outbuf, fstat_res);
				if (-1 == fstat_res)
				{
					save_errno = errno;
					rts_error(VARLSTCNT(8) ERR_SYSCALL, 5,
						  RTS_ERROR_LITERAL("fstat()"),
						  CALLFROM, save_errno);
				}
			} else
			{
				if (0 == memvcmp(tn.addr, tn.len, sys_output.addr, sys_output.len))
				{
					file_des = 1;
					FSTAT_FILE(file_des, &outbuf, fstat_res);
					if (-1 == fstat_res)
					{
						save_errno = errno;
						rts_error(VARLSTCNT(8) ERR_SYSCALL, 5,
							  RTS_ERROR_LITERAL("fstat()"),
							  CALLFROM, save_errno);
					}
				} else  if (0 == memvcmp(tn.addr, tn.len, "/dev/null", 9))
					tl->iod->type = nl;
				else  if ((-1 == Stat(buf, &outbuf)) && (n_io_dev_types == tl->iod->type))
				{

					if (ENOENT == errno)
						tl->iod->type = rm;
					else
					{
						save_errno = errno;
						rts_error(VARLSTCNT(8) ERR_SYSCALL, 5,
							  RTS_ERROR_LITERAL("fstat()"),
							  CALLFROM, save_errno);
					}
				}
			}
		}
		if (n_io_dev_types == tl->iod->type)
		{
			switch(outbuf.st_mode & S_IFMT)
			{
				case S_IFCHR:
				case S_IFBLK:
					char_or_block_special = TRUE;
					break;
				case S_IFIFO:
					tl->iod->type = ff;
					break;
				case S_IFREG:
				case S_IFDIR:
					tl->iod->type = rm;
					break;
				case S_IFSOCK:
				case 0:
					tl->iod->type = ff;
					break;
				default:
					break;
			}
		}
		naml->iod = tl->iod;
	}
	active_device = naml->iod;

	if ((-2 == file_des) && (dev_open != naml->iod->state) && (us != naml->iod->type) && (tcp != naml->iod->type))
	{
		oflag |= (O_RDWR | O_CREAT | O_NOCTTY);
		size = 0;
		p_offset = 0;
		ichset_specified = ochset_specified = FALSE;
		while(iop_eol != *(pp->str.addr + p_offset))
		{
			assert((params) *(pp->str.addr + p_offset) < (params)n_iops);
			switch ((ch = *(pp->str.addr + p_offset++)))
			{
				case iop_allocation:
					if (rm == naml->iod->type)
					{
						GET_LONG(size, pp->str.addr + p_offset);
						size *= 512;
					}
					break;
				case iop_append:
					if (rm == naml->iod->type)
						oflag |= O_APPEND;
					break;
				case iop_contiguous:
					break;
				case iop_newversion:
					if ((dev_open != naml->iod->state) && (rm == naml->iod->type))
						oflag |= O_TRUNC;
					break;
				case iop_readonly:
					oflag  &=  ~(O_RDWR | O_CREAT | O_WRONLY);
					oflag  |=  O_RDONLY;
					break;
				case iop_writeonly:
					oflag  &= ~(O_RDWR | O_RDONLY);
					oflag  |= O_WRONLY | O_CREAT;
					break;
				case iop_ipchset:
#ifdef KEEP_zOS_EBCDIC
					if ( (iconv_t)0 != naml->iod->input_conv_cd )
					{
						ICONV_CLOSE_CD(naml->iod->input_conv_cd);
					}
					SET_CODE_SET(naml->iod->in_code_set,
						     (char *)(pp->str.addr + p_offset + 1));
					if (DEFAULT_CODE_SET != naml->iod->in_code_set)
						ICONV_OPEN_CD(naml->iod->input_conv_cd,
							      (char *)(pp->str.addr + p_offset + 1), INSIDE_CH_SET);
					break;
#endif
					if (gtm_utf8_mode)
					{
						chset_mstr.addr = (char *)(pp->str.addr + p_offset + 1);
						chset_mstr.len = *(pp->str.addr + p_offset);
						SET_ENCODING(naml->iod->ichset, &chset_mstr);
						ichset_specified = TRUE;
					}
					break;

				case iop_opchset:
#ifdef KEEP_zOS_EBCDIC
					if ( (iconv_t)0 != naml->iod->output_conv_cd)
					{
						ICONV_CLOSE_CD(naml->iod->output_conv_cd);
					}
					SET_CODE_SET(naml->iod->out_code_set,
						     (char *)(pp->str.addr + p_offset + 1));
					if (DEFAULT_CODE_SET != naml->iod->out_code_set)
						ICONV_OPEN_CD(naml->iod->output_conv_cd, INSIDE_CH_SET,
							      (char *)(pp->str.addr + p_offset + 1));
					break;
#endif
					if (gtm_utf8_mode)
					{
						chset_mstr.addr = (char *)(pp->str.addr + p_offset + 1);
						chset_mstr.len = *(pp->str.addr + p_offset);
						SET_ENCODING(naml->iod->ochset, &chset_mstr);
						ochset_specified = TRUE;
					}
					break;
				case iop_m:
				case iop_utf8:
				case iop_utf16:
				case iop_utf16be:
				case iop_utf16le:
					if (gtm_utf8_mode)
					{
						naml->iod->ichset = naml->iod->ochset =
							(iop_m       == ch) ? CHSET_M :
							(iop_utf8    == ch) ? CHSET_UTF8 :
							(iop_utf16   == ch) ? CHSET_UTF16 :
							(iop_utf16be == ch) ? CHSET_UTF16BE : CHSET_UTF16LE;
						ichset_specified = ochset_specified = TRUE;
					}
					break;
				default:
					break;
			}
			if (IOP_VAR_SIZE == io_params_size[ch])
				p_offset += *(pp->str.addr + p_offset) + 1;
			else
				p_offset += io_params_size[ch];
		}
		if (!ichset_specified)
			naml->iod->ichset = (gtm_utf8_mode) ? CHSET_UTF8 : CHSET_M;
		if (!ochset_specified)
			naml->iod->ochset = (gtm_utf8_mode) ? CHSET_UTF8 : CHSET_M;
		if (CHSET_M != naml->iod->ichset && CHSET_UTF16 != naml->iod->ichset)
			get_chset_desc(&chset_names[naml->iod->ichset]);
		if (CHSET_M != naml->iod->ochset && CHSET_UTF16 != naml->iod->ochset)
			get_chset_desc(&chset_names[naml->iod->ochset]);
		/* RW permissions for owner and others as determined by umask. */
		umask_orig = umask(000);	/* determine umask (destructive) */
		(void)umask(umask_orig);	/* reset umask */
		umask_creat = 0666 & ~umask_orig;
		/*
		 * the check for EINTR below is valid and should not be converted to an EINTR
		 * wrapper macro, due to the other errno values being checked.
		 */
		while ((-1 == (file_des = OPEN4(buf, oflag, umask_creat, size))))
		{
			if (   EINTR == errno
			       || ETXTBSY == errno
			       || ENFILE == errno
			       || EBUSY == errno
			       || ((mb == naml->iod->type) && (ENXIO == errno)))
				continue;
			else
				break;
		}

		if (-1 == file_des)
			return FALSE;
	}

	assert (tcp != naml->iod->type);
#ifdef KEEP_zOS_EBCDIC
	SET_CODE_SET(naml->iod->in_code_set, OUTSIDE_CH_SET);
	if (DEFAULT_CODE_SET != naml->iod->in_code_set)
		ICONV_OPEN_CD(naml->iod->input_conv_cd, OUTSIDE_CH_SET, INSIDE_CH_SET);
	SET_CODE_SET(naml->iod->out_code_set, OUTSIDE_CH_SET);
	if (DEFAULT_CODE_SET != naml->iod->out_code_set)
		ICONV_OPEN_CD(naml->iod->output_conv_cd, INSIDE_CH_SET, OUTSIDE_CH_SET);
#endif

	/* smw 99/12/18 not possible to be -1 here */
	if (-1 == file_des)
	{
		rts_error(VARLSTCNT(8) ERR_SYSCALL, 5,
			  RTS_ERROR_LITERAL("open()"),
			  CALLFROM, save_errno);
	}

	if (n_io_dev_types == naml->iod->type)
	{
		if (isatty(file_des))
			naml->iod->type = tt;
		else if (char_or_block_special && file_des > 2)
			/* assume mag tape */
			naml->iod->type = mt;
		else
			naml->iod->type = rm;
	}
	assert(naml->iod->type < n_io_dev_types);
	naml->iod->disp_ptr = &io_dev_dispatch_mupip[naml->iod->type];
	active_device = naml->iod;
	if (dev_never_opened == naml->iod->state)
	{
		naml->iod->wrap = DEFAULT_IOD_WRAP;
		naml->iod->width = DEFAULT_IOD_WIDTH;
		naml->iod->length = DEFAULT_IOD_LENGTH;
		naml->iod->write_filter = 0; /* MUPIP should not use FILTER */
	}
	if (dev_open != naml->iod->state)
	{
		naml->iod->dollar.x = 0;
		naml->iod->dollar.y = 0;
		naml->iod->dollar.za = 0;
		naml->iod->dollar.zb[0] = 0;
	}
	status = (naml->iod->disp_ptr->open)(naml, pp, file_des, mspace, NO_M_TIMEOUT);
	if (TRUE == status)
		naml->iod->state = dev_open;
	else  if (dev_open == naml->iod->state)
		naml->iod->state = dev_closed;
	if (1 == file_des)
		naml->iod->dollar.zeof = TRUE;
	active_device = 0;

	if (run_time)
		return (status);
	return TRUE;
}
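
mu_open_try() above reads the process umask by setting a throwaway mask and immediately restoring it, because umask() has no read-only query. A minimal sketch of just that probe (the function name is illustrative):

/* Sketch of the destructive umask probe used to derive creation permissions. */
#include <sys/stat.h>
#include <sys/types.h>

static mode_t default_create_mode(void)
{
	mode_t	umask_orig;

	umask_orig = umask(000);		/* destructive read of the current mask */
	(void)umask(umask_orig);		/* restore the original mask */
	return 0666 & ~umask_orig;		/* RW for owner/group/other, minus the mask */
}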
int gtmrecv_upd_proc_init(boolean_t fresh_start)
{
	/* Update Process initialization */

	mstr	upd_proc_log_cmd, upd_proc_trans_cmd;
	char	upd_proc_cmd[UPDPROC_CMD_MAXLEN];
	int	status;
	int	upd_status, save_upd_status;
#ifdef UNIX
	pid_t	upd_pid, waitpid_res;
#elif defined(VMS)
	uint4	upd_pid;
	uint4	cmd_channel;
	$DESCRIPTOR(cmd_desc, UPDPROC_CMD_STR);
#endif

	/* Check if the update process is alive */

	if ((upd_status = is_updproc_alive()) == SRV_ERR)
	{
		gtm_putmsg(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
			   RTS_ERROR_LITERAL("Receive pool semctl failure"), REPL_SEM_ERRNO);
		repl_errno = EREPL_UPDSTART_SEMCTL;
		return(UPDPROC_START_ERR);
	} else if (upd_status == SRV_ALIVE && !fresh_start)
	{
		gtm_putmsg(VARLSTCNT(4) ERR_TEXT, 2, RTS_ERROR_LITERAL("Update process already exists. Not starting it"));
		return(UPDPROC_EXISTS);
	} else if (upd_status == SRV_ALIVE)
	{
		gtm_putmsg(VARLSTCNT(6) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
			   RTS_ERROR_LITERAL("Update process already exists. Please kill it before a fresh start"));
		return(UPDPROC_EXISTS);
	}

	save_upd_status = recvpool.upd_proc_local->upd_proc_shutdown;
	recvpool.upd_proc_local->upd_proc_shutdown = NO_SHUTDOWN;

#ifdef UNIX
	if (0 > (upd_pid = fork()))	/* BYPASSOK: we exec immediately, no FORK_CLEAN needed */
	{
		recvpool.upd_proc_local->upd_proc_shutdown = save_upd_status;
		gtm_putmsg(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
			   RTS_ERROR_LITERAL("Could not fork update process"), errno);
		repl_errno = EREPL_UPDSTART_FORK;
		return(UPDPROC_START_ERR);
	}
	if (0 == upd_pid)
	{
		/* Update Process */
		upd_proc_log_cmd.len = SIZEOF(UPDPROC_CMD) - 1;
		upd_proc_log_cmd.addr = UPDPROC_CMD;
		status = TRANS_LOG_NAME(&upd_proc_log_cmd, &upd_proc_trans_cmd, upd_proc_cmd, SIZEOF(upd_proc_cmd),
						dont_sendmsg_on_log2long);
		if (status != SS_NORMAL)
		{
			gtm_putmsg(VARLSTCNT(6) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
				   RTS_ERROR_LITERAL("Could not find path of Update Process. Check value of $gtm_dist"));
			if (SS_LOG2LONG == status)
				gtm_putmsg(VARLSTCNT(5) ERR_LOGTOOLONG, 3, LEN_AND_LIT(UPDPROC_CMD), SIZEOF(upd_proc_cmd) - 1);
			repl_errno = EREPL_UPDSTART_BADPATH;
			return(UPDPROC_START_ERR);
		}
		upd_proc_cmd[upd_proc_trans_cmd.len] = '\0';
		if (EXECL(upd_proc_cmd, upd_proc_cmd, UPDPROC_CMD_ARG1, UPDPROC_CMD_ARG2, NULL) < 0)
		{
			gtm_putmsg(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
				   RTS_ERROR_LITERAL("Could not exec Update Process"), errno);
			repl_errno = EREPL_UPDSTART_EXEC;
			return(UPDPROC_START_ERR);
		}
	}
#elif defined(VMS)
	/* Create detached server and write startup commands to it */
	status = repl_create_server(&cmd_desc, "GTMU", "", &cmd_channel, &upd_pid, ERR_RECVPOOLSETUP);
	if (SS_NORMAL != status)
	{
		gtm_putmsg(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
				RTS_ERROR_LITERAL("Unable to spawn Update process"), status);
		recvpool.upd_proc_local->upd_proc_shutdown = save_upd_status;
		repl_errno = EREPL_UPDSTART_FORK;
		return(UPDPROC_START_ERR);
	}
#endif
	if (recvpool.upd_proc_local->upd_proc_pid)
		recvpool.upd_proc_local->upd_proc_pid_prev = recvpool.upd_proc_local->upd_proc_pid;
	else
		recvpool.upd_proc_local->upd_proc_pid_prev = upd_pid;
	recvpool.upd_proc_local->upd_proc_pid = upd_pid;
	/* Receiver Server; wait for the update process to startup */
	REPL_DPRINT2("Waiting for update process %d to startup\n", upd_pid);
	while (get_sem_info(RECV, UPD_PROC_COUNT_SEM, SEM_INFO_VAL) == 0 && is_proc_alive(upd_pid, 0))
	{
		/* To take care of reassignment of PIDs, the while condition should be && with the
		 * condition (PPID of pid == process_id)
		 */
		REPL_DPRINT2("Waiting for update process %d to startup\n", upd_pid);
		UNIX_ONLY(WAITPID(upd_pid, &status, WNOHANG, waitpid_res);) /* Release defunct update process if dead */
		SHORT_SLEEP(GTMRECV_WAIT_FOR_SRV_START);
	}
Example #12
int gtm_trigger_complink(gv_trigger_t *trigdsc, boolean_t dolink)
{
	char		rtnname[GTM_PATH_MAX + 1], rtnname_template[GTM_PATH_MAX + 1];
	char		objname[GTM_PATH_MAX + 1];
	char		zcomp_parms[(GTM_PATH_MAX * 2) + SIZEOF(mident_fixed) + SIZEOF(OBJECT_PARM) + SIZEOF(NAMEOFRTN_PARM)];
	mstr		save_zsource;
	int		rtnfd, rc, lenrtnname, lenobjname, len, alphnum_len, retry, save_errno;
	char		*mident_suffix_p1, *mident_suffix_p2, *mident_suffix_top, *namesub1, *namesub2, *zcomp_parms_ptr;
	mval		zlfile, zcompprm;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	DBGTRIGR_ONLY(memcpy(rtnname, trigdsc->rtn_desc.rt_name.addr, trigdsc->rtn_desc.rt_name.len));
	DBGTRIGR_ONLY(rtnname[trigdsc->rtn_desc.rt_name.len] = 0);
	DBGTRIGR((stderr, "gtm_trigger_complink: (Re)compiling trigger %s\n", rtnname));
	ESTABLISH_RET(gtm_trigger_complink_ch, ((0 == error_condition) ? TREF(dollar_zcstatus) : error_condition ));
	 /* Verify there are 2 available chars for uniqueness */
	assert((MAX_MIDENT_LEN - TRIGGER_NAME_RESERVED_SPACE) >= (trigdsc->rtn_desc.rt_name.len));
	assert(NULL == trigdsc->rtn_desc.rt_adr);
	gtm_trigger_comp_prev_run_time = run_time;
	run_time = TRUE;	/* Required by compiler */
	/* Verify the routine name set by MUPIP TRIGGER and read by gvtr_db_read_hasht() is not in use */
	if (NULL != find_rtn_hdr(&trigdsc->rtn_desc.rt_name))
	{	/* Ooops .. need name to be more unique.. */
		/* Though variable definitions are conventionally done at the function entry, the reason alphanumeric_table
		 * definition is done here is to minimize the time taken to initialize the below table in the most common case
		 * (i.e. no trigger name collisions).
		 */
		char 		alphanumeric_table[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
							'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd',
							'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
							't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7',
							'8', '9', '\0'};
		alphnum_len = STR_LIT_LEN(alphanumeric_table);
		namesub1 = trigdsc->rtn_desc.rt_name.addr + trigdsc->rtn_desc.rt_name.len++;
		/* If WBTEST_HELPOUT_TRIGNAMEUNIQ is defined, set alphnum_len to 1. This way, we make the maximum
		 * possible combinations for the unique trigger names to be 3, which is significantly less than
		 * the actual number of combinations (62x62 = 3844). For example, if ^a is a global having triggers defined
		 * in 4 global directories, then the possible unique trigger names are a#1# ; a#1#A ; a#1#AA.
		 */
		GTM_WHITE_BOX_TEST(WBTEST_HELPOUT_TRIGNAMEUNIQ, alphnum_len, 1);
		mident_suffix_top = (char *)alphanumeric_table + alphnum_len;
		/* Phase 1. See if any single character can add uniqueness */
		for (mident_suffix_p1 = (char *)alphanumeric_table; mident_suffix_p1 < mident_suffix_top; mident_suffix_p1++)
		{
			*namesub1 = *mident_suffix_p1;
			if (NULL == find_rtn_hdr(&trigdsc->rtn_desc.rt_name))
				break;
		}
		if (mident_suffix_p1 == mident_suffix_top)
		{	/* Phase 2. Phase 1 could not find uniqueness .. Find it with 2 char variations */
			namesub2 = trigdsc->rtn_desc.rt_name.addr + trigdsc->rtn_desc.rt_name.len++;
			for (mident_suffix_p1 = (char *)alphanumeric_table; mident_suffix_p1 < mident_suffix_top;
			     mident_suffix_p1++)
			{	/* First char loop */
				for (mident_suffix_p2 = (char *)alphanumeric_table; mident_suffix_p2 < mident_suffix_top;
				     mident_suffix_p2++)
				{	/* 2nd char loop */
					*namesub1 = *mident_suffix_p1;
					*namesub2 = *mident_suffix_p2;
					if (NULL == find_rtn_hdr(&trigdsc->rtn_desc.rt_name))
					{
						mident_suffix_p1 = mident_suffix_top + 1;	/* Break out of both loops */
						break;
					}
				}
			}
			if (mident_suffix_p1 == mident_suffix_top)
			{	/* Phase 3: Punt */
				assert(WBTEST_HELPOUT_TRIGNAMEUNIQ == gtm_white_box_test_case_number);
				rts_error(VARLSTCNT(5) ERR_TRIGNAMEUNIQ, 3, trigdsc->rtn_desc.rt_name.len - 2,
					  trigdsc->rtn_desc.rt_name.addr, alphnum_len * alphnum_len);
			}
		}
	}
	/* Write trigger execute string out to temporary file and compile it */
	assert(MAX_XECUTE_LEN >= trigdsc->xecute_str.str.len);
	rc = SNPRINTF(rtnname_template, GTM_PATH_MAX, "%s/trgtmpXXXXXX", DEFAULT_GTM_TMP);
	assert(0 < rc);					/* Note rc is return code aka length - we expect a non-zero length */
	assert(GTM_PATH_MAX >= rc);
	/* The mkstemp() routine is known to bogus-fail for no apparent reason at all especially on AIX 6.1. In the event
	 * this shortcoming plagues other platforms as well, we add a low-cost retry wrapper.
	 */
	retry = MAX_MKSTEMP_RETRIES;
	do
	{
		strcpy(rtnname, rtnname_template);
		rtnfd = mkstemp(rtnname);
	} while ((-1 == rtnfd) && (EEXIST == errno) && (0 < --retry));
	if (-1 == rtnfd)
	{
		save_errno = errno;
		assert(FALSE);
		rts_error(VARLSTCNT(12) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("mkstemp()"), CALLFROM,
			  ERR_TEXT, 2, RTS_ERROR_TEXT(rtnname), save_errno);
	}
	assert(0 < rtnfd);	/* Verify file descriptor */
	rc = 0;
#	ifdef GEN_TRIGCOMPFAIL_ERROR
	{	/* Used ONLY to generate an error in a trigger compile by adding some junk in a previous line */
		DOWRITERC(rtnfd, ERROR_CAUSING_JUNK, strlen(ERROR_CAUSING_JUNK), rc); /* BYPASSOK */
		if (0 != rc)
		{
			UNLINK(rtnname);
			rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("write()"), CALLFROM, rc);
		}
	}
#	endif
	DOWRITERC(rtnfd, trigdsc->xecute_str.str.addr, trigdsc->xecute_str.str.len, rc);
	if (0 != rc)
	{
		UNLINK(rtnname);
		rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("write()"), CALLFROM, rc);
	}
	if (NULL == memchr(trigdsc->xecute_str.str.addr, '\n', trigdsc->xecute_str.str.len))
	{
		DOWRITERC(rtnfd, NEWLINE, strlen(NEWLINE), rc);			/* BYPASSOK */
		if (0 != rc)
		{
			UNLINK(rtnname);
			rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("write()"), CALLFROM, rc);
		}
	}
	CLOSEFILE(rtnfd, rc);
	if (0 != rc)
	{
		UNLINK(rtnname);
		rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("close()"), CALLFROM, rc);
	}
	assert(MAX_MIDENT_LEN > trigdsc->rtn_desc.rt_name.len);
	zcomp_parms_ptr = zcomp_parms;
	lenrtnname = STRLEN(rtnname);
	MEMCPY_LIT(zcomp_parms_ptr, NAMEOFRTN_PARM);
	zcomp_parms_ptr += STRLEN(NAMEOFRTN_PARM);
	memcpy(zcomp_parms_ptr, trigdsc->rtn_desc.rt_name.addr, trigdsc->rtn_desc.rt_name.len);
	zcomp_parms_ptr += trigdsc->rtn_desc.rt_name.len;
	MEMCPY_LIT(zcomp_parms_ptr, OBJECT_PARM);
	zcomp_parms_ptr += STRLEN(OBJECT_PARM);
	strcpy(objname, rtnname);		/* Make copy of rtnname to become object name */
	strcat(objname, OBJECT_FTYPE);		/* Turn into object file reference */
	lenobjname = lenrtnname + STRLEN(OBJECT_FTYPE);
	memcpy(zcomp_parms_ptr, objname, lenobjname);
	zcomp_parms_ptr += lenobjname;
	*zcomp_parms_ptr++ = ' ';
	memcpy(zcomp_parms_ptr, rtnname, lenrtnname);
	zcomp_parms_ptr += lenrtnname;
	*zcomp_parms_ptr = '\0';		/* Null tail */
	len = INTCAST(zcomp_parms_ptr - zcomp_parms);
	assert((SIZEOF(zcomp_parms) - 1) > len);	/* Verify no overflow */
	zcompprm.mvtype = MV_STR;
	zcompprm.str.addr = zcomp_parms;
	zcompprm.str.len = len;
	/* Backup dollar_zsource so trigger doesn't show */
	PUSH_MV_STENT(MVST_MSAV);
	mv_chain->mv_st_cont.mvs_msav.v = dollar_zsource;
	mv_chain->mv_st_cont.mvs_msav.addr = &dollar_zsource;
	TREF(trigger_compile) = TRUE;		/* Set flag so compiler knows this is a special trigger compile */
	op_zcompile(&zcompprm, FALSE);	/* Compile but don't require a .m file extension */
	TREF(trigger_compile) = FALSE;	/* compile_source_file() establishes handler so always returns */
	if (0 != TREF(dollar_zcstatus))
	{	/* Someone err'd.. */
		run_time = gtm_trigger_comp_prev_run_time;
		REVERT;
		UNLINK(objname);	/* Remove files before return error */
		UNLINK(rtnname);
		return ERR_TRIGCOMPFAIL;
	}
	if (dolink)
	{	/* Link is optional as MUPIP TRIGGER doesn't need link */
		zlfile.mvtype = MV_STR;
		zlfile.str.addr = objname;
		zlfile.str.len = lenobjname;
		/* Specifying literal_null for a second arg (as opposed to NULL or 0) allows us to specify
		 * linking the object file (no compilation or looking for source). The 2nd arg is parms for
		 * recompilation and is non-null in an explicit zlink which we need to emulate.
		 */
#		ifdef GEN_TRIGLINKFAIL_ERROR
		UNLINK(objname);				/* delete object before it can be used */
#		endif
		op_zlink(&zlfile, (mval *)&literal_null);	/* need cast due to "extern const" attributes */
		/* No return here if link fails for some reason */
		trigdsc->rtn_desc.rt_adr = find_rtn_hdr(&trigdsc->rtn_desc.rt_name);
		if (NULL == trigdsc->rtn_desc.rt_adr)
			GTMASSERT;	/* Can't find routine we just put there? Catastrophic if happens */
		/* Replace the randomly generated source name with the constant "GTM Trigger" */
		trigdsc->rtn_desc.rt_adr->src_full_name.addr = GTM_TRIGGER_SOURCE_NAME;
		trigdsc->rtn_desc.rt_adr->src_full_name.len = STRLEN(GTM_TRIGGER_SOURCE_NAME);
		trigdsc->rtn_desc.rt_adr->trigr_handle = trigdsc;       /* Back pointer to trig def */
	}
	if (MVST_MSAV == mv_chain->mv_st_type && &dollar_zsource == mv_chain->mv_st_cont.mvs_msav.addr)
	{       /* Top mv_stent is one we pushed on there - restore dollar_zsource and get rid of it */
		dollar_zsource = mv_chain->mv_st_cont.mvs_msav.v;
		POP_MV_STENT();
	} else
		assert(FALSE); 	/* This mv_stent should be the one we just pushed */
	/* Remove temporary files created */
	UNLINK(objname);	/* Delete the object file first since rtnname is the unique key */
	UNLINK(rtnname);	/* Delete the source file */
	run_time = gtm_trigger_comp_prev_run_time;
	REVERT;
	return 0;
}
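
The mkstemp() retry wrapper in gtm_trigger_complink() above guards against spurious EEXIST failures by re-copying the template and retrying a bounded number of times. A standalone sketch of the same idea; the retry bound and names below are illustrative, not the GT.M values.

/* Sketch: retry mkstemp() only on EEXIST, up to a small bound. */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define MKSTEMP_RETRIES	100			/* illustrative bound */

static int mkstemp_retry(const char *template_str, char *name, size_t namelen)
{
	int	fd, retry;

	retry = MKSTEMP_RETRIES;
	do
	{
		strncpy(name, template_str, namelen - 1);
		name[namelen - 1] = '\0';
		fd = mkstemp(name);		/* replaces the trailing XXXXXX in name */
	} while ((-1 == fd) && (EEXIST == errno) && (0 < --retry));
	return fd;				/* -1 on persistent failure, with errno set */
}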
Example #13
/*
 * ------------------------------------------
 * Hang the process for a specified time.
 *
 *	Goes to sleep for a positive value.
 *	Any caught signal will terminate the sleep
 *	following the execution of that signal's catching routine.
 *
 * 	The actual hang duration should be NO LESS than the specified
 * 	duration for specified durations greater than .001 seconds.
 * 	Certain applications depend on this assumption.
 *
 * Arguments:
 *	num - time to sleep
 *
 * Return:
 *	none
 * ------------------------------------------
 */
void op_hang(mval* num)
{
	int		ms;
	double		tmp;
	mv_stent	*mv_zintcmd;
	ABS_TIME	cur_time, end_time;
#	ifdef VMS
	uint4 		time[2];
	int4		efn_mask, status;
#	endif
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	ms = 0;
	MV_FORCE_NUM(num);
	if (num->mvtype & MV_INT)
	{
		if (0 < num->m[1])
		{
			assert(MV_BIAS >= 1000);	/* if formats change overflow may need attention */
			ms = num->m[1] * (1000 / MV_BIAS);
		}
	} else if (0 == num->sgn) 		/* if sign is not 0 it means num is negative */
	{
		tmp = mval2double(num) * (double)1000;
		ms = ((double)MAXPOSINT4 >= tmp) ? (int)tmp : (int)MAXPOSINT4;
	}
	if (ms)
	{
		if (TREF(tpnotacidtime) * 1000 < ms)
			TPNOTACID_CHECK(HANGSTR);
#		if defined(DEBUG) && defined(UNIX)
		if (WBTEST_ENABLED(WBTEST_DEFERRED_TIMERS) && (3 > gtm_white_box_test_case_count) && (123000 == ms))
		{
			DEFER_INTERRUPTS(INTRPT_NO_TIMER_EVENTS);
			DBGFPF((stderr, "OP_HANG: will sleep for 20 seconds\n"));
			LONG_SLEEP(20);
			DBGFPF((stderr, "OP_HANG: done sleeping\n"));
			ENABLE_INTERRUPTS(INTRPT_NO_TIMER_EVENTS);
			return;
		}
		if (WBTEST_ENABLED(WBTEST_BREAKMPC) && (0 == gtm_white_box_test_case_count) && (999 == ms))
		{
			frame_pointer->old_frame_pointer->mpc = (unsigned char *)GTM64_ONLY(0xdeadbeef12345678)
				NON_GTM64_ONLY(0xdead1234);
			return;
		}
		if (WBTEST_ENABLED(WBTEST_UTIL_OUT_BUFFER_PROTECTION) && (0 == gtm_white_box_test_case_count) && (999 == ms))
		{	/* Upon seeing a .999s hang this white-box test launches a timer that pops with a period of
		 	 * UTIL_OUT_SYSLOG_INTERVAL and prints a long message via util_out_ptr.
			 */
			start_timer((TID)&util_out_syslog_dump, UTIL_OUT_SYSLOG_INTERVAL, util_out_syslog_dump, 0, NULL);
			return;
		}
#		endif
		sys_get_curr_time(&cur_time);
		mv_zintcmd = find_mvstent_cmd(ZINTCMD_HANG, restart_pc, restart_ctxt, FALSE);
		if (!mv_zintcmd)
			add_int_to_abs_time(&cur_time, ms, &end_time);
		else
		{
			end_time = mv_zintcmd->mv_st_cont.mvs_zintcmd.end_or_remain;
			cur_time = sub_abs_time(&end_time, &cur_time);	/* get remaining time to sleep */
			if (0 <= cur_time.at_sec)
				ms = (int4)(cur_time.at_sec * 1000 + cur_time.at_usec / 1000);
			else
				ms = 0;		/* all done */
			/* restore/pop previous zintcmd_active[ZINTCMD_HANG] hints */
			TAREF1(zintcmd_active, ZINTCMD_HANG).restart_pc_last = mv_zintcmd->mv_st_cont.mvs_zintcmd.restart_pc_prior;
			TAREF1(zintcmd_active, ZINTCMD_HANG).restart_ctxt_last
				= mv_zintcmd->mv_st_cont.mvs_zintcmd.restart_ctxt_prior;
			TAREF1(zintcmd_active, ZINTCMD_HANG).count--;
			assert(0 <= TAREF1(zintcmd_active, ZINTCMD_HANG).count);
			if (mv_chain == mv_zintcmd)
				POP_MV_STENT();	/* just pop if top of stack */
			else
			{	/* flag as not active */
				mv_zintcmd->mv_st_cont.mvs_zintcmd.command = ZINTCMD_NOOP;
				mv_zintcmd->mv_st_cont.mvs_zintcmd.restart_pc_check = NULL;
			}
			if (0 == ms)
				return;		/* done HANGing */
		}
#		ifdef UNIX
		if (ms < 10)
			SLEEP_USEC(ms * 1000, TRUE);	/* Finish the sleep if it is less than 10ms. */
		else
			hiber_start(ms);
#		elif defined(VMS)
		time[0] = -time_low_ms(ms);
		time[1] = -time_high_ms(ms) - 1;
		efn_mask = (1 << efn_outofband | 1 << efn_timer);
		if (SS$_NORMAL != (status = sys$setimr(efn_timer, &time, NULL, &time, 0)))
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("$setimr"), CALLFROM, status);
		if (SS$_NORMAL != (status = sys$wflor(efn_outofband, efn_mask)))
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("$wflor"), CALLFROM, status);
		if (outofband)
		{
			if (SS$_WASCLR == (status = sys$readef(efn_timer, &efn_mask)))
			{
				if (SS$_NORMAL != (status = sys$cantim(&time, 0)))
					rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("$cantim"),
						CALLFROM, status);
			} else
				assertpro(SS$_WASSET == status);
		}
#		endif
	} else
		rel_quant();
	if (outofband)
	{
		PUSH_MV_STENT(MVST_ZINTCMD);
		mv_chain->mv_st_cont.mvs_zintcmd.end_or_remain = end_time;
		mv_chain->mv_st_cont.mvs_zintcmd.restart_ctxt_check = restart_ctxt;
		mv_chain->mv_st_cont.mvs_zintcmd.restart_pc_check = restart_pc;
		/* save current information from zintcmd_active */
		mv_chain->mv_st_cont.mvs_zintcmd.restart_ctxt_prior = TAREF1(zintcmd_active, ZINTCMD_HANG).restart_ctxt_last;
		mv_chain->mv_st_cont.mvs_zintcmd.restart_pc_prior = TAREF1(zintcmd_active, ZINTCMD_HANG).restart_pc_last;
		TAREF1(zintcmd_active, ZINTCMD_HANG).restart_pc_last = restart_pc;
		TAREF1(zintcmd_active, ZINTCMD_HANG).restart_ctxt_last = restart_ctxt;
		TAREF1(zintcmd_active, ZINTCMD_HANG).count++;
		mv_chain->mv_st_cont.mvs_zintcmd.command = ZINTCMD_HANG;
		outofband_action(FALSE);
	}
	return;
}
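
The interrupt-resume branch of op_hang() above boils down to one computation: given the absolute end time saved when the HANG was interrupted, work out how many milliseconds are still left to sleep, clamping at zero. Here is a sketch of that arithmetic using clock_gettime() instead of GT.M's ABS_TIME helpers; the function name is illustrative.

/* Sketch: remaining milliseconds until a saved absolute end time. */
#include <time.h>

static int remaining_ms(const struct timespec *end_time)
{
	struct timespec	now;
	long		ms;

	clock_gettime(CLOCK_REALTIME, &now);
	ms = (end_time->tv_sec - now.tv_sec) * 1000 + (end_time->tv_nsec - now.tv_nsec) / 1000000;
	return (0 < ms) ? (int)ms : 0;		/* 0 means the original HANG has already elapsed */
}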
Example #14
boolean_t iosocket_bind(socket_struct *socketptr, int4 timepar, boolean_t update_bufsiz)
{
	int			temp_1 = 1;
	char			*errptr;
	int4			errlen, msec_timeout, real_errno;
	short			len;
	in_port_t		actual_port;
	boolean_t		no_time_left = FALSE;
	d_socket_struct		*dsocketptr;
	struct addrinfo		*ai_ptr;
	char			port_buffer[NI_MAXSERV];
	int			errcode;
	ABS_TIME        	cur_time, end_time;
	GTM_SOCKLEN_TYPE	addrlen;
	GTM_SOCKLEN_TYPE	sockbuflen;

	dsocketptr = socketptr->dev;
	ai_ptr = (struct addrinfo*)(&socketptr->local.ai);
	assert(NULL != dsocketptr);
	dsocketptr->iod->dollar.key[0] = '\0';
	if (FD_INVALID != socketptr->temp_sd)
	{
		socketptr->sd = socketptr->temp_sd;
		socketptr->temp_sd = FD_INVALID;
	}
	if (timepar != NO_M_TIMEOUT)
	{
		msec_timeout = timeout2msec(timepar);
		sys_get_curr_time(&cur_time);
		add_int_to_abs_time(&cur_time, msec_timeout, &end_time);
	}

	do
	{
		temp_1 = 1;
		if (-1 == tcp_routines.aa_setsockopt(socketptr->sd,
				SOL_SOCKET, SO_REUSEADDR, &temp_1, SIZEOF(temp_1)))
		{
		        real_errno = errno;
			errptr = (char *)STRERROR(real_errno);
			errlen = STRLEN(errptr);
			SOCKET_FREE(socketptr);
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_SETSOCKOPTERR, 5,
					RTS_ERROR_LITERAL("SO_REUSEADDR"), real_errno, errlen, errptr);
			return FALSE;
		}
#ifdef TCP_NODELAY
		temp_1 = socketptr->nodelay ? 1 : 0;
		if (-1 == tcp_routines.aa_setsockopt(socketptr->sd,
				IPPROTO_TCP, TCP_NODELAY, &temp_1, SIZEOF(temp_1)))
		{
		        real_errno = errno;
			errptr = (char *)STRERROR(real_errno);
			errlen = STRLEN(errptr);
			SOCKET_FREE(socketptr);
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_SETSOCKOPTERR, 5,
					RTS_ERROR_LITERAL("TCP_NODELAY"), real_errno, errlen, errptr);
			return FALSE;
		}
#endif
		if (update_bufsiz)
		{
			if (-1 == tcp_routines.aa_setsockopt(socketptr->sd,
				SOL_SOCKET, SO_RCVBUF, &socketptr->bufsiz, SIZEOF(socketptr->bufsiz)))
			{
			        real_errno = errno;
				errptr = (char *)STRERROR(real_errno);
				errlen = STRLEN(errptr);
				SOCKET_FREE(socketptr);
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_SETSOCKOPTERR, 5,
						RTS_ERROR_LITERAL("SO_RCVBUF"), real_errno, errlen, errptr);
				return FALSE;
			}
		} else
		{
			sockbuflen = SIZEOF(socketptr->bufsiz);
			if (-1 == tcp_routines.aa_getsockopt(socketptr->sd,
				SOL_SOCKET, SO_RCVBUF, &socketptr->bufsiz, &sockbuflen))
			{
			        real_errno = errno;
				errptr = (char *)STRERROR(real_errno);
				errlen = STRLEN(errptr);
				SOCKET_FREE(socketptr);
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_GETSOCKOPTERR, 5,
					RTS_ERROR_LITERAL("SO_RCVBUF"), real_errno, errlen, errptr);
				return FALSE;
			}
		}
		temp_1 = tcp_routines.aa_bind(socketptr->sd, SOCKET_LOCAL_ADDR(socketptr), ai_ptr->ai_addrlen);
		if (temp_1 < 0)
		{
			real_errno = errno;
			no_time_left = TRUE;
			switch (real_errno)
			{
				case EADDRINUSE:
					if (NO_M_TIMEOUT != timepar)
					{
						sys_get_curr_time(&cur_time);
						cur_time = sub_abs_time(&end_time, &cur_time);
						if (cur_time.at_sec > 0)
							no_time_left = FALSE;
					}
					break;
				case EINTR:
					break;
				default:
					SOCKET_FREE(socketptr);
					rts_error_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_SOCKBIND, 0, real_errno, 0);
					break;
			}
			if (no_time_left)
				return FALSE;
			hiber_start(100);
			tcp_routines.aa_close(socketptr->sd);
			if (-1 == (socketptr->sd = tcp_routines.aa_socket(ai_ptr->ai_family,ai_ptr->ai_socktype,
									  ai_ptr->ai_protocol)))
			{
				real_errno = errno;
				errptr = (char *)STRERROR(real_errno);
				errlen = STRLEN(errptr);
				SOCKET_FREE(socketptr);
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_SOCKINIT, 3, real_errno, errlen, errptr);
				return FALSE;
			}
		}
	} while (temp_1 < 0);

	/* obtain actual port from the bound address if port 0 was specified */
	addrlen = SOCKET_ADDRLEN(socketptr, ai_ptr, local);
	if (-1 == tcp_routines.aa_getsockname(socketptr->sd, SOCKET_LOCAL_ADDR(socketptr), &addrlen))
	{
		real_errno = errno;
		errptr = (char *)STRERROR(real_errno);
		errlen = STRLEN(errptr);
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_GETSOCKNAMERR, 3, real_errno, errlen, errptr);
	        return FALSE;
	}
	assert(ai_ptr->ai_addrlen == addrlen);
	GETNAMEINFO(SOCKET_LOCAL_ADDR(socketptr), addrlen, NULL, 0, port_buffer, NI_MAXSERV, NI_NUMERICSERV, errcode);
	if (0 != errcode)
	{
		SOCKET_FREE(socketptr);
		RTS_ERROR_ADDRINFO(NULL, ERR_GETNAMEINFO, errcode);
		return FALSE;
	}
	actual_port = ATOI(port_buffer);
	if (0 == socketptr->local.port)
		socketptr->local.port = actual_port;
	assert(socketptr->local.port == actual_port);
	socketptr->state = socket_bound;
	len = SIZEOF(BOUND) - 1;
	memcpy(&dsocketptr->iod->dollar.key[0], BOUND, len);
	dsocketptr->iod->dollar.key[len++] = '|';
	memcpy(&dsocketptr->iod->dollar.key[len], socketptr->handle, socketptr->handle_len);
	len += socketptr->handle_len;
	dsocketptr->iod->dollar.key[len++] = '|';
	SPRINTF(&dsocketptr->iod->dollar.key[len], "%d", socketptr->local.port);
	return TRUE;
}
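
iosocket_bind() above retries bind() only while the error is EADDRINUSE and time remains on the device timeout, sleeping briefly and re-creating the socket between attempts; any other error is fatal. A simplified sketch of that retry policy with plain POSIX calls follows (it reuses the same descriptor rather than re-creating it, and the sleep interval and names are illustrative).

/* Sketch: retry bind() on EADDRINUSE until a deadline, fail fast otherwise. */
#include <errno.h>
#include <sys/socket.h>
#include <time.h>
#include <unistd.h>

static int bind_with_retry(int sd, const struct sockaddr *addr, socklen_t addrlen, time_t deadline)
{
	while (0 != bind(sd, addr, addrlen))
	{
		if (EINTR == errno)
			continue;			/* interrupted: just retry */
		if ((EADDRINUSE != errno) || (time(NULL) >= deadline))
			return -1;			/* hard error or out of time */
		usleep(100000);				/* back off ~100ms, as hiber_start(100) does above */
	}
	return 0;
}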
/*
 * This will rundown a replication instance journal (and receiver) pool.
 *	Input Parameter:
 *		replpool_id of the instance. Instance file name must be null terminated in replpool_id.
 * Returns :
 *	TRUE,  if successful.
 *	FALSE, otherwise.
 */
boolean_t mu_rndwn_repl_instance(replpool_identifier *replpool_id, boolean_t immediate, boolean_t rndwn_both_pools)
{
	boolean_t		jnlpool_stat = TRUE, recvpool_stat = TRUE;
	char			*instfilename, shmid_buff[TMP_BUF_LEN];
	gd_region		*r_save;
	repl_inst_hdr		repl_instance;
	static	gd_region	*reg = NULL;
	struct semid_ds		semstat;
	struct shmid_ds		shmstat;
	union semun		semarg;
	uchar_ptr_t		ret_ptr;
	unix_db_info		*udi;
	int			save_errno;

	error_def(ERR_MUJPOOLRNDWNSUC);
	error_def(ERR_MURPOOLRNDWNSUC);
	error_def(ERR_MUJPOOLRNDWNFL);
	error_def(ERR_MURPOOLRNDWNFL);
	error_def(ERR_SEMREMOVED);
	error_def(ERR_REPLACCSEM);
	error_def(ERR_SYSCALL);

	if (NULL == reg)
	{
		r_save = gv_cur_region;
		mu_gv_cur_reg_init();
		reg = gv_cur_region;
		gv_cur_region = r_save;
	}
	jnlpool.jnlpool_dummy_reg = reg;
	recvpool.recvpool_dummy_reg = reg;
	instfilename = replpool_id->instfilename;
	reg->dyn.addr->fname_len = strlen(instfilename);
	assert(0 == instfilename[reg->dyn.addr->fname_len]);
	memcpy((char *)reg->dyn.addr->fname, instfilename, reg->dyn.addr->fname_len + 1);
	udi = FILE_INFO(reg);
	udi->fn = (char *)reg->dyn.addr->fname;
	/* Lock replication instance using ftok semaphore */
	if (!ftok_sem_get(reg, TRUE, REPLPOOL_ID, immediate))
		return FALSE;
	repl_inst_read(instfilename, (off_t)0, (sm_uc_ptr_t)&repl_instance, SIZEOF(repl_inst_hdr));
	semarg.buf = &semstat;
	assert(rndwn_both_pools || JNLPOOL_SEGMENT == replpool_id->pool_type || RECVPOOL_SEGMENT == replpool_id->pool_type);
	if (rndwn_both_pools || (JNLPOOL_SEGMENT == replpool_id->pool_type))
	{	/* --------------------------
		 * First rundown Journal pool
		 * --------------------------
		 */
		if (INVALID_SEMID != repl_instance.jnlpool_semid)
			if ((-1 == semctl(repl_instance.jnlpool_semid, 0, IPC_STAT, semarg)) ||
					(semarg.buf->sem_ctime != repl_instance.jnlpool_semid_ctime))
				repl_instance.jnlpool_semid = INVALID_SEMID;
		if (INVALID_SHMID != repl_instance.jnlpool_shmid)
			if ((-1 == shmctl(repl_instance.jnlpool_shmid, IPC_STAT, &shmstat)) ||
					(shmstat.shm_ctime != repl_instance.jnlpool_shmid_ctime))
				repl_instance.jnlpool_shmid = INVALID_SHMID;
		if (INVALID_SHMID != repl_instance.jnlpool_shmid)
		{
			replpool_id->pool_type = JNLPOOL_SEGMENT;
			jnlpool_stat = mu_rndwn_replpool(replpool_id, repl_instance.jnlpool_semid, repl_instance.jnlpool_shmid);
			ret_ptr = i2asc((uchar_ptr_t)shmid_buff, repl_instance.jnlpool_shmid);
			*ret_ptr = '\0';
			if (rndwn_both_pools)
				gtm_putmsg(VARLSTCNT(6) (jnlpool_stat ? ERR_MUJPOOLRNDWNSUC : ERR_MUJPOOLRNDWNFL),
					4, LEN_AND_STR(shmid_buff), LEN_AND_STR(replpool_id->instfilename));
		} else if (INVALID_SEMID != repl_instance.jnlpool_semid)
		{
			if (0 == sem_rmid(repl_instance.jnlpool_semid))
			{	/* note that shmid_buff used here is actually a buffer to hold semid (not shmid) */
				ret_ptr = i2asc((uchar_ptr_t)shmid_buff, repl_instance.jnlpool_semid);
				*ret_ptr = '\0';
				gtm_putmsg(VARLSTCNT(9) ERR_MUJPOOLRNDWNSUC, 4, LEN_AND_STR(shmid_buff),
					LEN_AND_STR(replpool_id->instfilename), ERR_SEMREMOVED, 1, repl_instance.jnlpool_semid);
			} else
			{
				save_errno = errno;
				gtm_putmsg(VARLSTCNT(13) ERR_REPLACCSEM, 3, repl_instance.jnlpool_semid,
					RTS_ERROR_STRING(instfilename), ERR_SYSCALL, 5, RTS_ERROR_LITERAL("jnlpool sem_rmid()"),
					CALLFROM, save_errno);
			}
			/* Note that jnlpool_stat is not set to FALSE in case sem_rmid() fails above. This is because the
			 * journal pool is anyway not present and it is safer to reset the sem/shmids in the instance file.
			 * The only thing this might cause is a stranded semaphore but that is considered better than getting
			 * errors due to not resetting instance file.
			 */
		}
		if (jnlpool_stat)	/* Reset instance file for jnlpool info */
			repl_inst_jnlpool_reset();
	}
	if (rndwn_both_pools || (RECVPOOL_SEGMENT == replpool_id->pool_type))
	{	/* --------------------------
		 * Now rundown Receive pool
		 * --------------------------
		 */
		if (INVALID_SEMID != repl_instance.recvpool_semid)
			if ((-1 == semctl(repl_instance.recvpool_semid, 0, IPC_STAT, semarg)) ||
					(semarg.buf->sem_ctime != repl_instance.recvpool_semid_ctime))
				repl_instance.recvpool_semid = INVALID_SEMID;
		if (INVALID_SHMID != repl_instance.recvpool_shmid)
			if ((-1 == shmctl(repl_instance.recvpool_shmid, IPC_STAT, &shmstat)) ||
					(shmstat.shm_ctime != repl_instance.recvpool_shmid_ctime))
				repl_instance.recvpool_shmid = INVALID_SHMID;
		if (INVALID_SHMID != repl_instance.recvpool_shmid)
		{
			replpool_id->pool_type = RECVPOOL_SEGMENT;
			recvpool_stat = mu_rndwn_replpool(replpool_id, repl_instance.recvpool_semid, repl_instance.recvpool_shmid);
			ret_ptr = i2asc((uchar_ptr_t)shmid_buff, repl_instance.recvpool_shmid);
			*ret_ptr = '\0';
			if (rndwn_both_pools)
				gtm_putmsg(VARLSTCNT(6) (recvpool_stat ? ERR_MURPOOLRNDWNSUC : ERR_MURPOOLRNDWNFL),
					4, LEN_AND_STR(shmid_buff), LEN_AND_STR(replpool_id->instfilename));
		} else if (INVALID_SEMID != repl_instance.recvpool_semid)
		{
			if (0 == sem_rmid(repl_instance.recvpool_semid))
			{	/* note that shmid_buff used here is actually a buffer to hold semid (not shmid) */
				ret_ptr = i2asc((uchar_ptr_t)shmid_buff, repl_instance.recvpool_semid);
				*ret_ptr = '\0';
				gtm_putmsg(VARLSTCNT(9) ERR_MURPOOLRNDWNSUC, 4, LEN_AND_STR(shmid_buff),
					LEN_AND_STR(replpool_id->instfilename), ERR_SEMREMOVED, 1, repl_instance.recvpool_semid);
			} else
			{
				save_errno = errno;
				gtm_putmsg(VARLSTCNT(13) ERR_REPLACCSEM, 3, repl_instance.recvpool_semid,
					RTS_ERROR_STRING(instfilename), ERR_SYSCALL, 5, RTS_ERROR_LITERAL("recvpool sem_rmid()"),
					CALLFROM, save_errno);
			}
			/* Note that recvpool_stat is not set to FALSE in case sem_rmid() fails above. This is because the
			 * receive pool is anyway not present and it is safer to reset the sem/shmids in the instance file.
			 * The only thing this might cause is a stranded semaphore but that is considered better than getting
			 * errors due to not resetting instance file.
			 */
		}
		if (recvpool_stat)	/* Reset instance file for recvpool info */
			repl_inst_recvpool_reset();
	}
	/* Release replication instance ftok semaphore lock */
	if (!ftok_sem_release(reg, TRUE, immediate))
		return FALSE;
	return (jnlpool_stat && recvpool_stat);
}
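
mu_rndwn_repl_instance() above only trusts a semid/shmid recorded in the instance file if IPC_STAT still succeeds and the IPC's ctime matches the recorded creation time; otherwise the id is treated as invalid (possibly reused by another process). A minimal sketch of that staleness check for the shared memory case, with illustrative names:

/* Sketch: decide whether a recorded shmid still refers to the same segment. */
#include <sys/ipc.h>
#include <sys/shm.h>
#include <time.h>

static int shmid_still_valid(int shmid, time_t recorded_ctime)
{
	struct shmid_ds	shmstat;

	if (-1 == shmctl(shmid, IPC_STAT, &shmstat))
		return 0;					/* id no longer exists (or is inaccessible) */
	return shmstat.shm_ctime == recorded_ctime;		/* a reused id would carry a different ctime */
}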
Example #16
/*
 * ------------------------------------------
 * Hang the process for a specified time.
 *
 *	Goes to sleep for a positive value.
 *	Any caught signal will terminate the sleep
 *	following the execution of that signal's catching routine.
 *
 * Arguments:
 *	num - time to sleep
 *
 * Return:
 *	none
 * ------------------------------------------
 */
void op_hang(mval* num)
{
	int		ms;
	mv_stent	*mv_zintcmd;
	ABS_TIME	cur_time, end_time;
#	ifdef VMS
	uint4 		time[2];
	int4		efn_mask, status;
#	endif
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	ms = 0;
	MV_FORCE_NUM(num);
	if (num->mvtype & MV_INT)
	{
		if (0 < num->m[1])
		{
			assert(MV_BIAS >= 1000);	/* if formats change overflow may need attention */
			ms = num->m[1] * (1000 / MV_BIAS);
		}
	} else if (0 == num->sgn) 		/* if sign is not 0 it means num is negative */
		ms = mval2i(num) * 1000;	/* too big to care about fractional amounts */
	if (ms)
	{
		if (TREF(tpnotacidtime) * 1000 < ms)
			TPNOTACID_CHECK(HANGSTR);
#		if defined(DEBUG) && defined(UNIX)
		if (gtm_white_box_test_case_enabled
			&& (WBTEST_DEFERRED_TIMERS == gtm_white_box_test_case_number)
			&& (3 > gtm_white_box_test_case_count)
			&& (123000 == ms))
		{
			DEFER_INTERRUPTS(INTRPT_NO_TIMER_EVENTS);
			DBGFPF((stderr, "OP_HANG: will sleep for 20 seconds\n"));
			LONG_SLEEP(20);
			DBGFPF((stderr, "OP_HANG: done sleeping\n"));
			ENABLE_INTERRUPTS(INTRPT_NO_TIMER_EVENTS);
			return;
		}
		if (gtm_white_box_test_case_enabled
			&& (WBTEST_BREAKMPC == gtm_white_box_test_case_number)
			&& (0 == gtm_white_box_test_case_count)
			&& (999 == ms))
		{
			frame_pointer->old_frame_pointer->mpc = (unsigned char *)GTM64_ONLY(0xdeadbeef12345678)
				NON_GTM64_ONLY(0xdead1234);
			return;
		}
		/* Upon seeing a .999s hang this white-box test launches a timer that pops with a period of UTIL_OUT_SYSLOG_INTERVAL
		 * and prints a long message via util_out_ptr.
		 */
		if (gtm_white_box_test_case_enabled
			&& (WBTEST_UTIL_OUT_BUFFER_PROTECTION == gtm_white_box_test_case_number)
			&& (0 == gtm_white_box_test_case_count)
			&& (999 == ms))
		{
			start_timer((TID)&util_out_syslog_dump, UTIL_OUT_SYSLOG_INTERVAL, util_out_syslog_dump, 0, NULL);
			return;
		}
#		endif
		sys_get_curr_time(&cur_time);
		mv_zintcmd = find_mvstent_cmd(ZINTCMD_HANG, restart_pc, restart_ctxt, FALSE);
		if (!mv_zintcmd)
			add_int_to_abs_time(&cur_time, ms, &end_time);
		else
		{
			end_time = mv_zintcmd->mv_st_cont.mvs_zintcmd.end_or_remain;
			cur_time = sub_abs_time(&end_time, &cur_time);	/* get remaining time to sleep */
			if (0 <= cur_time.at_sec)
				ms = (int4)(cur_time.at_sec * 1000 + cur_time.at_usec / 1000);
			else
				ms = 0;		/* all done */
			/* restore/pop previous zintcmd_active[ZINTCMD_HANG] hints */
			TAREF1(zintcmd_active, ZINTCMD_HANG).restart_pc_last = mv_zintcmd->mv_st_cont.mvs_zintcmd.restart_pc_prior;
			TAREF1(zintcmd_active, ZINTCMD_HANG).restart_ctxt_last
				= mv_zintcmd->mv_st_cont.mvs_zintcmd.restart_ctxt_prior;
			TAREF1(zintcmd_active, ZINTCMD_HANG).count--;
			assert(0 <= TAREF1(zintcmd_active, ZINTCMD_HANG).count);
			if (mv_chain == mv_zintcmd)
				POP_MV_STENT();	/* just pop if top of stack */
			else
			{	/* flag as not active */
				mv_zintcmd->mv_st_cont.mvs_zintcmd.command = ZINTCMD_NOOP;
				mv_zintcmd->mv_st_cont.mvs_zintcmd.restart_pc_check = NULL;
			}
			if (0 == ms)
				return;		/* done HANGing */
		}
		UNIX_ONLY(hiber_start(ms);)
		VMS_ONLY(
			time[0] = -time_low_ms(ms);
			time[1] = -time_high_ms(ms) - 1;
			efn_mask = (1 << efn_outofband | 1 << efn_timer);
			if (SS$_NORMAL != (status = sys$setimr(efn_timer, &time, NULL, &time, 0)))
				rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("$setimr"), CALLFROM, status);
			if (SS$_NORMAL != (status = sys$wflor(efn_outofband, efn_mask)))
				rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("$wflor"), CALLFROM, status);
		)
		if (outofband)
Example #17
void	obj_code (uint4 src_lines, void *checksum_ctx)
{
	int		status;
	rhdtyp		rhead;
	mline		*mlx, *mly;
	var_tabent	*vptr;
	int4		lnr_pad_len;
	intrpt_state_t	prev_intrpt_state;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(!run_time);
	obj_init();
	/* Define the routine name global symbol. */
	define_symbol(GTM_MODULE_DEF_PSECT, (mstr *)&int_module_name, 0);
	memset(&rhead, 0, SIZEOF(rhead));
	alloc_reg();
	jmp_opto();
	curr_addr = SIZEOF(rhdtyp);
	cg_phase = CGP_APPROX_ADDR;
	cg_phase_last = CGP_NOSTATE;
	code_gen();
	code_size = curr_addr;
	cg_phase = CGP_ADDR_OPT;
	shrink_jmps();
	comp_lits(&rhead);
	if ((cmd_qlf.qlf & CQ_MACHINE_CODE))
	{
		cg_phase = CGP_ASSEMBLY;
		code_gen();
	}
	if (!(cmd_qlf.qlf & CQ_OBJECT))
		return;
	rhead.ptext_ptr = SIZEOF(rhead);
	set_rtnhdr_checksum(&rhead, (gtm_rtn_src_chksum_ctx *)checksum_ctx);
	rhead.vartab_ptr = code_size;
	rhead.vartab_len = mvmax;
	code_size += mvmax * SIZEOF(var_tabent);
	rhead.labtab_ptr = code_size;
	rhead.labtab_len = mlmax;
	code_size += mlmax * SIZEOF(lab_tabent);
	rhead.lnrtab_ptr = code_size;
	rhead.lnrtab_len = src_lines;
	rhead.compiler_qlf = cmd_qlf.qlf;
	if (cmd_qlf.qlf & CQ_EMBED_SOURCE)
	{
		rhead.routine_source_offset = TREF(routine_source_offset);
		rhead.routine_source_length = (uint4)(stringpool.free - stringpool.base) - TREF(routine_source_offset);
	}
	rhead.temp_mvals = sa_temps[TVAL_REF];
	rhead.temp_size = sa_temps_offset[TCAD_REF];
	code_size += src_lines * SIZEOF(int4);
	lnr_pad_len = PADLEN(code_size, SECTION_ALIGN_BOUNDARY);
	code_size += lnr_pad_len;
	DEFER_INTERRUPTS(INTRPT_IN_OBJECT_FILE_COMPILE, prev_intrpt_state);
	create_object_file(&rhead);
	ENABLE_INTERRUPTS(INTRPT_IN_OBJECT_FILE_COMPILE, prev_intrpt_state);
	cg_phase = CGP_MACHINE;
	code_gen();
	/* Variable table: */
	vptr = (var_tabent *)mcalloc(mvmax * SIZEOF(var_tabent));
	if (mvartab)
		walktree(mvartab, cg_var, (char *)&vptr);
	else
		assert(0 == mvmax);
	emit_immed((char *)vptr, mvmax * SIZEOF(var_tabent));
	/* Label table: */
	if (mlabtab)
		walktree((mvar *)mlabtab, cg_lab, (char *)rhead.lnrtab_ptr);
	else
		assert(0 == mlmax);
	/* External entry definitions: */
	emit_immed((char *)&(mline_root.externalentry->rtaddr), SIZEOF(mline_root.externalentry->rtaddr));	/* line 0 */
	for (mlx = mline_root.child; mlx; mlx = mly)
	{
		if (mlx->table)
			emit_immed((char *)&(mlx->externalentry->rtaddr), SIZEOF(mlx->externalentry->rtaddr));
		if (0 == (mly = mlx->child))				/* note assignment */
			if (0 == (mly = mlx->sibling))			/* note assignment */
				for (mly = mlx;  ;  )
				{
					if (0 == (mly = mly->parent))	/* note assignment */
						break;
					if (mly->sibling)
					{
						mly = mly->sibling;
						break;
					}
				}
	}
	if (0 != lnr_pad_len) /* emit padding so literal text pool starts on proper boundary */
		emit_immed(PADCHARS, lnr_pad_len);
#	if !defined(__MVS__) && !defined(__s390__)	/* assert not valid for instructions on OS390 */
	assert(code_size == psect_use_tab[GTM_CODE]);
#	endif
	emit_literals();
	DEFER_INTERRUPTS(INTRPT_IN_OBJECT_FILE_COMPILE, prev_intrpt_state);
	finish_object_file();
	ENABLE_INTERRUPTS(INTRPT_IN_OBJECT_FILE_COMPILE, prev_intrpt_state);
	CLOSE_OBJECT_FILE(object_file_des, status);
	if (-1 == status)
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("close()"), CALLFROM, errno);
	/* Ready to make object visible. Rename from tmp name to real routine name */
	RENAME_TMP_OBJECT_FILE(object_file_name);
}
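
obj_code() above pads code_size with PADLEN() so the literal text pool starts on SECTION_ALIGN_BOUNDARY. Below is a sketch of the generic form of that padding arithmetic; this is an assumption about what PADLEN computes, not its actual definition.

/* Sketch: bytes of padding needed to reach the next alignment boundary. */
static int pad_to_boundary(int size, int boundary)
{
	return (boundary - (size % boundary)) % boundary;	/* 0 when already aligned */
}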
Example #18
		VMS_ONLY(
			time[0] = -time_low_ms(ms);
			time[1] = -time_high_ms(ms) - 1;
			efn_mask = (1 << efn_outofband | 1 << efn_timer);
			if (SS$_NORMAL != (status = sys$setimr(efn_timer, &time, NULL, &time, 0)))
				rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("$setimr"), CALLFROM, status);
			if (SS$_NORMAL != (status = sys$wflor(efn_outofband, efn_mask)))
				rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("$wflor"), CALLFROM, status);
		)
		if (outofband)
		{
			VMS_ONLY(
				if (SS$_WASCLR == (status = sys$readef(efn_timer, &efn_mask)))
				{
					if (SS$_NORMAL != (status = sys$cantim(&time, 0)))
						rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("$cantim"), CALLFROM,
							status);
				} else
					assertpro(SS$_WASSET == status);
			)
		}
	} else
		rel_quant();
	if (outofband)
	{
		PUSH_MV_STENT(MVST_ZINTCMD);
		mv_chain->mv_st_cont.mvs_zintcmd.end_or_remain = end_time;
		mv_chain->mv_st_cont.mvs_zintcmd.restart_ctxt_check = restart_ctxt;
		mv_chain->mv_st_cont.mvs_zintcmd.restart_pc_check = restart_pc;
		/* save current information from zintcmd_active */
		mv_chain->mv_st_cont.mvs_zintcmd.restart_ctxt_prior = TAREF1(zintcmd_active, ZINTCMD_HANG).restart_ctxt_last;
Example #19
int gtmsource()
{
	int			status, log_init_status, waitpid_res, save_errno;
	char			print_msg[1024], tmpmsg[1024];
	gd_region		*reg, *region_top;
	sgmnt_addrs		*csa, *repl_csa;
	boolean_t		all_files_open, isalive;
	pid_t			pid, ppid, procgp;
	seq_num			read_jnl_seqno, jnl_seqno;
	unix_db_info		*udi;
	gtmsource_local_ptr_t	gtmsource_local;
	boolean_t		this_side_std_null_coll;
	int			null_fd, rc;

	memset((uchar_ptr_t)&jnlpool, 0, SIZEOF(jnlpool_addrs));
	call_on_signal = gtmsource_sigstop;
	ESTABLISH_RET(gtmsource_ch, SS_NORMAL);
	if (-1 == gtmsource_get_opt())
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_MUPCLIERR);
	if (gtmsource_options.shut_down)
	{	/* Wait till shutdown time nears even before going to "jnlpool_init". This is because the latter will return
		 * with the ftok semaphore and access semaphore held and we do not want to be holding those locks (while
		 * waiting for the user specified timeout to expire) as that will affect new GTM processes and/or other
		 * MUPIP REPLIC commands that need these locks for their function.
		 */
		if (0 < gtmsource_options.shutdown_time)
		{
			repl_log(stdout, TRUE, TRUE, "Waiting for %d seconds before signalling shutdown\n",
												gtmsource_options.shutdown_time);
			LONG_SLEEP(gtmsource_options.shutdown_time);
		} else
			repl_log(stdout, TRUE, TRUE, "Signalling shutdown immediate\n");
	} else if (gtmsource_options.start)
	{
		repl_log(stdout, TRUE, TRUE, "Initiating START of source server for secondary instance [%s]\n",
			gtmsource_options.secondary_instname);
	}
	if (gtmsource_options.activate && (ROOTPRIMARY_SPECIFIED == gtmsource_options.rootprimary))
	{	/* MUPIP REPLIC -SOURCE -ACTIVATE -UPDOK has been specified. We need to open the gld and db regions now
		 * in case this is a secondary -> primary transition. This is so we can later switch journal files in all
		 * journaled regions when the transition actually happens inside "gtmsource_rootprimary_init". But since
		 * we have not yet done a "jnlpool_init", we don't know if updates are disabled in it or not. Although we
		 * need to do the gld/db open only if updates are currently disabled in the jnlpool, we do this always
		 * because once we do a jnlpool_init, we will come back with the ftok on the jnlpool held and that has
		 * issues with later db open since we will try to hold the db ftok as part of db open and the ftok logic
		 * currently has assumptions that a process holds only one ftok at any point in time.
		 */
		assert(NULL == gd_header);
		gvinit();
		all_files_open = region_init(FALSE);
		if (!all_files_open)
		{
			gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_NOTALLDBOPN);
			gtmsource_exit(ABNORMAL_SHUTDOWN);
		}
	}
	jnlpool_init(GTMSOURCE, gtmsource_options.start, &is_jnlpool_creator);
	/* is_jnlpool_creator == TRUE ==> this process created the journal pool
	 * is_jnlpool_creator == FALSE ==> journal pool already existed and this process simply attached to it.
	 */
	if (gtmsource_options.shut_down)
		gtmsource_exit(gtmsource_shutdown(FALSE, NORMAL_SHUTDOWN) - NORMAL_SHUTDOWN);
	else if (gtmsource_options.activate)
		gtmsource_exit(gtmsource_mode_change(GTMSOURCE_MODE_ACTIVE_REQUESTED) - NORMAL_SHUTDOWN);
	else if (gtmsource_options.deactivate)
		gtmsource_exit(gtmsource_mode_change(GTMSOURCE_MODE_PASSIVE_REQUESTED) - NORMAL_SHUTDOWN);
	else if (gtmsource_options.checkhealth)
		gtmsource_exit(gtmsource_checkhealth() - NORMAL_SHUTDOWN);
	else if (gtmsource_options.changelog)
		 gtmsource_exit(gtmsource_changelog() - NORMAL_SHUTDOWN);
	else if (gtmsource_options.showbacklog)
		gtmsource_exit(gtmsource_showbacklog() - NORMAL_SHUTDOWN);
	else if (gtmsource_options.stopsourcefilter)
		gtmsource_exit(gtmsource_stopfilter() - NORMAL_SHUTDOWN);
	else if (gtmsource_options.jnlpool)
		gtmsource_exit(gtmsource_jnlpool() - NORMAL_SHUTDOWN);
	else if (gtmsource_options.losttncomplete)
		gtmsource_exit(gtmsource_losttncomplete() - NORMAL_SHUTDOWN);
	else if (gtmsource_options.needrestart)
		gtmsource_exit(gtmsource_needrestart() - NORMAL_SHUTDOWN);
	else if (gtmsource_options.showfreeze)
		gtmsource_exit(gtmsource_showfreeze() - NORMAL_SHUTDOWN);
	else if (gtmsource_options.setfreeze)
		gtmsource_exit(gtmsource_setfreeze() - NORMAL_SHUTDOWN);
	else if (!gtmsource_options.start)
	{
		assert(CLI_PRESENT == cli_present("STATSLOG"));
		gtmsource_exit(gtmsource_statslog() - NORMAL_SHUTDOWN);
	}
	assert(gtmsource_options.start);
#	ifndef REPL_DEBUG_NOBACKGROUND
	/* Set "child_server_running" to FALSE before forking off child. Wait for it to be set to TRUE by the child. */
	gtmsource_local = jnlpool.gtmsource_local;
	gtmsource_local->child_server_running = FALSE;
	FORK(pid);
	if (0 > pid)
	{
		save_errno = errno;
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_JNLPOOLSETUP, 0,
			ERR_TEXT, 2, RTS_ERROR_LITERAL("Could not fork source server"), save_errno);
	} else if (0 < pid)
	{	/* Parent. Wait until child sets "child_server_running" to TRUE. That is an indication that the child
		 * source server has completed its initialization phase and is all set so the parent command can return.
		 */
		while (isalive = is_proc_alive(pid, 0))	/* note : intended assignment */
		{
			if (gtmsource_local->child_server_running)
				break;
			/* To take care of reassignment of PIDs, the while condition should be && with the condition
			 * (PPID of pid == process_id)
			 */
			SHORT_SLEEP(GTMSOURCE_WAIT_FOR_SRV_START);
			WAITPID(pid, &status, WNOHANG, waitpid_res); /* Release defunct child if dead */
		}
		if (isalive)
		{	/* Child process is alive and started with no issues */
			if (0 != (save_errno = rel_sem(SOURCE, JNL_POOL_ACCESS_SEM)))
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_JNLPOOLSETUP, 0,
					ERR_TEXT, 2, RTS_ERROR_LITERAL("Error in rel_sem"), save_errno);
			ftok_sem_release(jnlpool.jnlpool_dummy_reg, TRUE, TRUE);
		} else
		{	/* Child source server process errored out at startup and is no longer alive.
			 * If we were the one who created the journal pool, let us clean it up.
			 */
			repl_log(stdout, TRUE, TRUE, "Source server startup failed. See source server log file\n");
			if (is_jnlpool_creator)
				status = gtmsource_shutdown(TRUE, NORMAL_SHUTDOWN);
		}
		/* If the parent is killed (or crashes) between the fork and exit, checkhealth may not detect that startup
		 * is in progress - parent forks and dies, the system will release sem 0 and 1, checkhealth might test the
		 * value of sem 1 before the child grabs sem 1.
		 */
		gtmsource_exit(isalive ? SRV_ALIVE : SRV_ERR);
	}
	/* Point stdin to /dev/null */
	OPENFILE("/dev/null", O_RDONLY, null_fd);
	if (0 > null_fd)
		rts_error_csa(CSA_ARG(NULL) ERR_REPLERR, RTS_ERROR_LITERAL("Failed to open /dev/null for read"), errno, 0);
	FCNTL3(null_fd, F_DUPFD, 0, rc);
	if (0 > rc)
		rts_error_csa(CSA_ARG(NULL) ERR_REPLERR, RTS_ERROR_LITERAL("Failed to set stdin to /dev/null"), errno, 0);
	CLOSEFILE(null_fd, rc);
	if (0 > rc)
		rts_error_csa(CSA_ARG(NULL) ERR_REPLERR, RTS_ERROR_LITERAL("Failed to close /dev/null"), errno, 0);
	/* The parent process (source server startup command) will be holding the ftok semaphore and jnlpool access semaphore
	 * at this point. The variables that indicate this would have been copied over to the child during the fork. This will
	 * make the child think it is actually holding them as well when actually it is not. Reset those variables in the child
	 * to ensure they do not misrepresent the holder of those semaphores.
	 */
	ftok_sem_reg = NULL;
	udi = FILE_INFO(jnlpool.jnlpool_dummy_reg);
	assert(udi->grabbed_ftok_sem);
	udi->grabbed_ftok_sem = FALSE;
	assert(holds_sem[SOURCE][JNL_POOL_ACCESS_SEM]);
	holds_sem[SOURCE][JNL_POOL_ACCESS_SEM] = FALSE;
	assert(!holds_sem[SOURCE][SRC_SERV_COUNT_SEM]);
	/* Start child source server initialization */
	is_src_server = TRUE;
	OPERATOR_LOG_MSG;
	process_id = getpid();
	/* Reinvoke secshr related initialization with the child's pid */
	INVOKE_INIT_SECSHR_ADDRS;
	/* Initialize mutex socket, memory semaphore etc. before any "grab_lock" is done by this process on the journal pool.
	 * Note that the initialization would already have been done by the parent source server startup command but we need to
	 * redo the initialization with the child process id.
	 */
	assert(mutex_per_process_init_pid && (mutex_per_process_init_pid != process_id));
	mutex_per_process_init();
	START_HEARTBEAT_IF_NEEDED;
	ppid = getppid();
	log_init_status = repl_log_init(REPL_GENERAL_LOG, &gtmsource_log_fd, gtmsource_options.log_file);
	assert(SS_NORMAL == log_init_status);
	repl_log_fd2fp(&gtmsource_log_fp, gtmsource_log_fd);
	if (-1 == (procgp = setsid()))
		send_msg_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_JNLPOOLSETUP, 0, ERR_TEXT, 2,
				RTS_ERROR_LITERAL("Source server error in setsid"), errno);
#	endif /* REPL_DEBUG_NOBACKGROUND */
	if (ZLIB_CMPLVL_NONE != gtm_zlib_cmp_level)
		gtm_zlib_init();	/* Open zlib shared library for compression/decompression */
	REPL_DPRINT1("Setting up regions\n");
	gvinit();

	/* We use the same code dse uses to open all regions but we must make sure they are all open before proceeding. */
	all_files_open = region_init(FALSE);
	if (!all_files_open)
	{
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_NOTALLDBOPN);
		gtmsource_exit(ABNORMAL_SHUTDOWN);
	}
	/* Determine primary side null subscripts collation order */
	/* Also check whether all regions have same null collation order */
	this_side_std_null_coll = -1;
	for (reg = gd_header->regions, region_top = gd_header->regions + gd_header->n_regions; reg < region_top; reg++)
	{
		csa = &FILE_INFO(reg)->s_addrs;
		if (this_side_std_null_coll != csa->hdr->std_null_coll)
		{
			if (-1 == this_side_std_null_coll)
				this_side_std_null_coll = csa->hdr->std_null_coll;
			else
			{
				gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_NULLCOLLDIFF);
				gtmsource_exit(ABNORMAL_SHUTDOWN);
			}
		}
		if (!REPL_ALLOWED(csa) && JNL_ALLOWED(csa))
		{
			gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_REPLOFFJNLON, 2, DB_LEN_STR(reg));
			gtmsource_exit(ABNORMAL_SHUTDOWN);
		}
		if (reg->read_only && REPL_ALLOWED(csa))
		{
			gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(6) ERR_JNLPOOLSETUP, 0, ERR_TEXT, 2,
				   RTS_ERROR_LITERAL("Source Server does not have write permissions to one or "
					             "more database files that are replicated"));
			gtmsource_exit(ABNORMAL_SHUTDOWN);
		}
	}
	/* Initialize source server alive/dead state related fields in "gtmsource_local" before the ftok semaphore is released */
	gtmsource_local->gtmsource_pid = process_id;
	gtmsource_local->gtmsource_state = GTMSOURCE_START;
	if (is_jnlpool_creator)
	{
		DEBUG_ONLY(jnlpool.jnlpool_ctl->jnlpool_creator_pid = process_id);
		gtmsource_seqno_init(this_side_std_null_coll);
		if (ROOTPRIMARY_SPECIFIED == gtmsource_options.rootprimary)
		{	/* Created the journal pool as a root primary. Append a history record to the replication instance file.
			 * Invoke the function "gtmsource_rootprimary_init" to do that.
			 */
			gtmsource_rootprimary_init(jnlpool.jnlpool_ctl->jnl_seqno);
		}
	}
	/* after this point we can no longer have the case where all the regions are unreplicated/non-journaled. */
#	ifndef REPL_DEBUG_NOBACKGROUND
	/* It is necessary for every process that is using the ftok semaphore to increment the counter by 1. This is used
	 * by the last process that shuts down to delete the ftok semaphore when it notices the counter to be 0.
	 * Note that the parent source server startup command would have done an increment of the ftok counter semaphore
	 * for the replication instance file. But the source server process (the child) that comes here would not have done
	 * that. Do that while the parent is still holding on to the ftok semaphore waiting for our okay.
	 */
	if (!ftok_sem_incrcnt(jnlpool.jnlpool_dummy_reg))
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_JNLPOOLSETUP);
	/* Increment the source server count semaphore */
	status = incr_sem(SOURCE, SRC_SERV_COUNT_SEM);
	if (0 != status)
	{
		save_errno = errno;
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_JNLPOOLSETUP, 0, ERR_TEXT, 2,
			RTS_ERROR_LITERAL("Counter semaphore increment failure in child source server"), save_errno);
	}
#	else
	if (0 != (save_errno = rel_sem_immediate(SOURCE, JNL_POOL_ACCESS_SEM)))
	{
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_JNLPOOLSETUP, 0, ERR_TEXT, 2,
			RTS_ERROR_LITERAL("Error in rel_sem_immediate"), save_errno);
	}
#	endif /* REPL_DEBUG_NOBACKGROUND */

	gtmsource_srv_count++;
	gtmsource_local->child_server_running = TRUE;	/* At this point, the parent startup command will stop waiting for child */
	gtm_event_log_init();
	/* Log source server startup command line first */
	SPRINTF(tmpmsg, "%s %s\n", cli_lex_in_ptr->argv[0], cli_lex_in_ptr->in_str);
	repl_log(gtmsource_log_fp, TRUE, TRUE, tmpmsg);

	SPRINTF(tmpmsg, "GTM Replication Source Server with Pid [%d] started for Secondary Instance [%s]",
		process_id, gtmsource_local->secondary_instname);
	sgtm_putmsg(print_msg, VARLSTCNT(4) ERR_REPLINFO, 2, LEN_AND_STR(tmpmsg));
	repl_log(gtmsource_log_fp, TRUE, TRUE, print_msg);
	if (is_jnlpool_creator)
	{
		repl_log(gtmsource_log_fp, TRUE, TRUE, "Created jnlpool with shmid = [%d] and semid = [%d]\n",
			jnlpool.repl_inst_filehdr->jnlpool_shmid, jnlpool.repl_inst_filehdr->jnlpool_semid);
	} else
		repl_log(gtmsource_log_fp, TRUE, TRUE, "Attached to existing jnlpool with shmid = [%d] and semid = [%d]\n",
			jnlpool.repl_inst_filehdr->jnlpool_shmid, jnlpool.repl_inst_filehdr->jnlpool_semid);
	gtm_event_log(GTM_EVENT_LOG_ARGC, "MUPIP", "REPLINFO", print_msg);
#	ifdef GTM_TLS
	if (REPL_TLS_REQUESTED)
	{
		repl_do_tls_init(gtmsource_log_fp);
		assert(REPL_TLS_REQUESTED || PLAINTEXT_FALLBACK);
	}
#	endif
	if (jnlpool.jnlpool_ctl->freeze)
	{
		last_seen_freeze_flag = jnlpool.jnlpool_ctl->freeze;
		sgtm_putmsg(print_msg, VARLSTCNT(3) ERR_REPLINSTFROZEN, 1, jnlpool.repl_inst_filehdr->inst_info.this_instname);
		repl_log(gtmsource_log_fp, TRUE, FALSE, print_msg);
		sgtm_putmsg(print_msg, VARLSTCNT(3) ERR_REPLINSTFREEZECOMMENT, 1, jnlpool.jnlpool_ctl->freeze_comment);
		repl_log(gtmsource_log_fp, TRUE, TRUE, print_msg);
	}
	gtmsource_local->jnlfileonly = gtmsource_options.jnlfileonly;
	do
	{	/* If mode is passive, go to sleep. Wake up every now and then and check to see if I have to become active. */
		gtmsource_state = gtmsource_local->gtmsource_state = GTMSOURCE_START;
		if ((gtmsource_local->mode == GTMSOURCE_MODE_PASSIVE) && (gtmsource_local->shutdown == NO_SHUTDOWN))
		{
			gtmsource_poll_actions(FALSE);
			SHORT_SLEEP(GTMSOURCE_WAIT_FOR_MODE_CHANGE);
			continue;
		}
		if (GTMSOURCE_MODE_PASSIVE == gtmsource_local->mode)
		{	/* Shutdown initiated */
			assert(gtmsource_local->shutdown == SHUTDOWN);
			sgtm_putmsg(print_msg, VARLSTCNT(4) ERR_REPLINFO, 2,
				    RTS_ERROR_LITERAL("GTM Replication Source Server Shutdown signalled"));
			repl_log(gtmsource_log_fp, TRUE, TRUE, print_msg);
			gtm_event_log(GTM_EVENT_LOG_ARGC, "MUPIP", "REPLINFO", print_msg);
			break;
		}
		gtmsource_poll_actions(FALSE);
		if (GTMSOURCE_CHANGING_MODE == gtmsource_state)
			continue;
		if (GTMSOURCE_MODE_ACTIVE_REQUESTED == gtmsource_local->mode)
			gtmsource_local->mode = GTMSOURCE_MODE_ACTIVE;
		SPRINTF(tmpmsg, "GTM Replication Source Server now in ACTIVE mode using port %d", gtmsource_local->secondary_port);
		sgtm_putmsg(print_msg, VARLSTCNT(4) ERR_REPLINFO, 2, LEN_AND_STR(tmpmsg));
		repl_log(gtmsource_log_fp, TRUE, TRUE, print_msg);
		gtm_event_log(GTM_EVENT_LOG_ARGC, "MUPIP", "REPLINFO", print_msg);
		DEBUG_ONLY(repl_csa = &FILE_INFO(jnlpool.jnlpool_dummy_reg)->s_addrs;)
		assert(!repl_csa->hold_onto_crit);	/* so it is ok to invoke "grab_lock" and "rel_lock" unconditionally */
		grab_lock(jnlpool.jnlpool_dummy_reg, TRUE, HANDLE_CONCUR_ONLINE_ROLLBACK);
		if (GTMSOURCE_HANDLE_ONLN_RLBK == gtmsource_state)
		{
			repl_log(gtmsource_log_fp, TRUE, TRUE, "Starting afresh due to ONLINE ROLLBACK\n");
			repl_log(gtmsource_log_fp, TRUE, TRUE, "REPL INFO - Current Jnlpool Seqno : %llu\n",
					jnlpool.jnlpool_ctl->jnl_seqno);
			continue;
		}
		QWASSIGN(gtmsource_local->read_addr, jnlpool.jnlpool_ctl->write_addr);
		gtmsource_local->read = jnlpool.jnlpool_ctl->write;
		gtmsource_local->read_state = gtmsource_local->jnlfileonly ? READ_FILE : READ_POOL;
		read_jnl_seqno = gtmsource_local->read_jnl_seqno;
		assert(read_jnl_seqno <= jnlpool.jnlpool_ctl->jnl_seqno);
		if (read_jnl_seqno < jnlpool.jnlpool_ctl->jnl_seqno)
		{
			gtmsource_local->read_state = READ_FILE;
			QWASSIGN(gtmsource_save_read_jnl_seqno, jnlpool.jnlpool_ctl->jnl_seqno);
			gtmsource_pool2file_transition = TRUE; /* so that we read the latest gener jnl files */
		}
		rel_lock(jnlpool.jnlpool_dummy_reg);
		if (SS_NORMAL != (status = gtmsource_alloc_tcombuff()))
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
				  RTS_ERROR_LITERAL("Error allocating initial tcom buffer space. Malloc error"), status);
		gtmsource_filter = NO_FILTER;
		if ('\0' != gtmsource_local->filter_cmd[0])
		{
			if (SS_NORMAL == (status = repl_filter_init(gtmsource_local->filter_cmd)))
				gtmsource_filter |= EXTERNAL_FILTER;
			else
				gtmsource_exit(ABNORMAL_SHUTDOWN);
		}
		gtmsource_process();
		/* gtmsource_process returns only when mode needs to be changed to PASSIVE */
		assert(gtmsource_state == GTMSOURCE_CHANGING_MODE);
		gtmsource_ctl_close();
		gtmsource_free_msgbuff();
		gtmsource_free_tcombuff();
		gtmsource_free_filter_buff();
		gtmsource_stop_heartbeat();
		if (FD_INVALID != gtmsource_sock_fd)
			repl_close(&gtmsource_sock_fd);
		if (gtmsource_filter & EXTERNAL_FILTER)
			repl_stop_filter();
	} while (TRUE);
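
The startup sequence above follows a common daemon pattern: the parent forks, the child calls setsid() and points stdin at /dev/null, and the parent's command returns only once the child flips a shared "server running" flag (or dies first). A self-contained sketch of that handshake follows; it keeps the flag in anonymous shared memory rather than in the journal pool, assumes MAP_ANONYMOUS is available, and the function name is illustrative only.

#include <signal.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Fork a background server; return 0 once the child reports readiness, -1 otherwise. */
int start_server(void)
{
	volatile int	*ready;
	void		*p;
	pid_t		pid;
	int		status;

	p = mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (MAP_FAILED == p)
		return -1;
	ready = (volatile int *)p;
	*ready = 0;
	if (0 > (pid = fork()))
		return -1;
	if (0 == pid)
	{	/* Child: detach from the controlling terminal, initialize, then signal readiness. */
		setsid();
		/* ... open logs, regions, etc. ... */
		*ready = 1;
		/* ... run the server loop, then ... */
		_exit(0);
	}
	while (!*ready && (0 == kill(pid, 0)))	/* Parent: wait for readiness or child death. */
	{
		usleep(100000);
		waitpid(pid, &status, WNOHANG);	/* reap the child if it already exited */
	}
	return *ready ? 0 : -1;
}
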
Example #20
0
void iorm_write_utf(mstr *v)
{
	int4		inchars, char_count;		/* in characters */
	int4		inlen, outbytes, mblen;		/* in bytes */
	int4		availwidth, usedwidth, mbwidth;	/* in display columns */
	int		status, padsize, fstat_res, save_errno;
	wint_t		utf_code;
	io_desc		*iod;
	d_rm_struct	*rm_ptr;
	unsigned char	*inptr, *top, *nextmb, *outptr, *nextoutptr, *outstart, temppad, temppadarray[2];
	char		*out_ptr;
	boolean_t	utf8_active = TRUE;		/* needed by GTM_IO_WCWIDTH macro */
	boolean_t	stream, wrap;
	struct stat	statbuf;
	boolean_t	ch_set;

	iod = io_curr_device.out;
	ESTABLISH_GTMIO_CH(&io_curr_device, ch_set);
	rm_ptr = (d_rm_struct *)iod->dev_sp;
	assert(NULL != rm_ptr);
	inptr = (unsigned char *)v->addr;
	inlen = v->len;
	top = inptr + inlen;
	if (!rm_ptr->fixed && 0 == iod->dollar.x)
		rm_ptr->out_bytes = 0;			/* user reset $X */
	inchars = UTF8_LEN_STRICT(v->addr, v->len);	/* validate and get good char count */
	if (0 >= inchars)
	{
		REVERT_GTMIO_CH(&io_curr_device, ch_set);
		return;
	}
	usedwidth = 0;
	stream = rm_ptr->stream;
	wrap = iod->wrap;
	if (stream && !wrap)
	{	/* For STREAM and NOWRAP, allow the entire record to be written without any record truncations/terminations */
		availwidth = inlen;	/* calculate worst case requirement of width (in chars) to write out input bytes */
		rm_ptr->out_bytes = 0;
	} else
		availwidth = iod->width - iod->dollar.x;
	outbytes = 0;
	if (CHSET_UTF8 != iod->ochset)
	{
		outstart = nextoutptr = outptr = &rm_ptr->outbuf[rm_ptr->out_bytes];
		if (!rm_ptr->done_1st_write)
		{	/* get the file size  */
			FSTAT_FILE(rm_ptr->fildes, &statbuf, fstat_res);
			if (-1 == fstat_res)
			{
				save_errno = errno;
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("fstat"),
					      CALLFROM, save_errno);
			}
			if (CHSET_UTF16 == iod->ochset)
			{	/* Write BOM but do not count it towards the bytes in the current record */
				/* write BOM if file is empty */
				if (0 == statbuf.st_size)
				{
					memcpy(outptr, UTF16BE_BOM, UTF16BE_BOM_LEN);
					outbytes = UTF16BE_BOM_LEN;
					if (rm_ptr->output_encrypted)
					{
						REALLOC_CRYPTBUF_IF_NEEDED(outbytes);
						WRITE_ENCRYPTED_DATA(rm_ptr, iod->trans_name, outstart, outbytes,
							pvt_crypt_buf.addr);
						out_ptr = pvt_crypt_buf.addr;
					} else
						out_ptr = (char *)outstart;
					DOWRITERC_RM(rm_ptr, out_ptr, outbytes, status);
					ISSUE_NOPRINCIO_IF_NEEDED_RM(status, ==, iod);
					rm_ptr->write_occurred = TRUE;
					outptr = outstart;
					rm_ptr->out_bytes = outbytes = 0;
					/* Save UTF16BE_BOM_LEN in bom_num_bytes until the BOM is checked, but do not
					 * indicate that the BOM has been checked (that still needs to be done for reading);
					 * the exception is if the file was opened WRITEONLY. */
					rm_ptr->bom_num_bytes = UTF16BE_BOM_LEN;
					if (rm_ptr->write_only)
						rm_ptr->bom_checked = TRUE;
				}
				iod->ochset = CHSET_UTF16BE;
				get_chset_desc(&chset_names[iod->ochset]);
			}
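
On the first write, the code above stamps a UTF-16BE byte-order mark only when fstat() shows the file is empty, so an existing file never receives a second BOM. A stripped-down sketch of just that check follows (encryption, $X accounting and error messaging omitted; the function name is illustrative):

#include <sys/stat.h>
#include <unistd.h>

static const unsigned char	utf16be_bom[] = {0xFE, 0xFF};

/* Write a UTF-16BE BOM only if the file is currently empty. Returns 0 on success, -1 on error. */
int write_bom_if_empty(int fd)
{
	struct stat	sb;

	if (-1 == fstat(fd, &sb))
		return -1;
	if ((0 == sb.st_size) && (sizeof(utf16be_bom) != write(fd, utf16be_bom, sizeof(utf16be_bom))))
		return -1;
	return 0;
}
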
Example #21
0
bool	same_device_check (mstr tname, char *buf)
{
	int			fstat_res, gsn_stat;
	struct stat		outbuf1, outbuf2;
	GTM_SOCKLEN_TYPE	socknamelen1;
	GTM_SOCKLEN_TYPE	socknamelen2;
	GTM_SOCKLEN_TYPE	psocknamelen1;
	GTM_SOCKLEN_TYPE	psocknamelen2;
	struct sockaddr_storage	sockname1;
	struct sockaddr_storage	sockname2;
	struct sockaddr_storage	psockname1;
	struct sockaddr_storage	psockname2;
	char			port_buffer1[NI_MAXSERV];
	char			port_buffer2[NI_MAXSERV];
	char			pport_buffer1[NI_MAXSERV];
	char			pport_buffer2[NI_MAXSERV];
	char			host_buffer1[NI_MAXHOST];
	char			host_buffer2[NI_MAXHOST];
	char			phost_buffer1[NI_MAXHOST];
	char			phost_buffer2[NI_MAXHOST];
	int			errcode, tmplen, save_errno;
	const char		*errptr;

	FSTAT_FILE(0, &outbuf1, fstat_res);
	if (-1 == fstat_res)
	{
		save_errno = errno;
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("fstat"), CALLFROM, save_errno);
	}
	FSTAT_FILE(1, &outbuf2, fstat_res);
	if (-1 == fstat_res)
	{
		save_errno = errno;
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("fstat"), CALLFROM, save_errno);
	}

	if ((S_IFMT & outbuf1.st_mode) != (S_IFMT & outbuf2.st_mode))
		return FALSE;

	if (S_ISSOCK(outbuf1.st_mode))
	{
		/* if here then both 0,1 are sockets */
		socknamelen1 = SIZEOF(sockname1);
		if (-1 == (gsn_stat = getsockname(0, (struct sockaddr *)&sockname1, (GTM_SOCKLEN_TYPE *)&socknamelen1)))
		{
			save_errno = errno;
			if (IS_SOCKNAME_UNIXERROR(save_errno))
			{
				/* problem with getsockname for AF_UNIX socket so just assign family for the switch below */
				(((sockaddr_ptr)&sockname1)->sa_family)	= AF_UNIX;
			} else
			{
				/* process error */
				errptr = (char *)STRERROR(save_errno);
				tmplen = STRLEN(errptr);
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_GETSOCKNAMERR, 3, save_errno, tmplen, errptr);
			}
		}

		socknamelen2 = SIZEOF(sockname2);
		if (-1 == (gsn_stat = getsockname(1, (struct sockaddr *)&sockname2, (GTM_SOCKLEN_TYPE *)&socknamelen2)))
		{
			save_errno = errno;
			if (IS_SOCKNAME_UNIXERROR(save_errno))
			{
				/* problem with getsockname for AF_UNIX socket so just assign family for the switch below */
				(((sockaddr_ptr)&sockname2)->sa_family)	= AF_UNIX;
			} else
			{
				/* process error */
				errptr = (char *)STRERROR(save_errno);
				tmplen = STRLEN(errptr);
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_GETSOCKNAMERR, 3, save_errno, tmplen, errptr);
			}
		}
		/* if both sockets not the same family then not the same device */
		if ((((sockaddr_ptr)&sockname1)->sa_family) != (((sockaddr_ptr)&sockname2)->sa_family))
			return FALSE;

		switch(((sockaddr_ptr)&sockname1)->sa_family)
		{
		case AF_INET:
		case AF_INET6:
			GETNAMEINFO((struct sockaddr *)&sockname1, socknamelen1, host_buffer1, NI_MAXHOST,
				    port_buffer1, NI_MAXSERV, NI_NUMERICHOST|NI_NUMERICSERV, errcode);
			if (0 != errcode)
			{
				RTS_ERROR_ADDRINFO(NULL, ERR_GETNAMEINFO, errcode);
				return FALSE;
			}

			GETNAMEINFO((struct sockaddr *)&sockname2, socknamelen2, host_buffer2, NI_MAXHOST,
				    port_buffer2, NI_MAXSERV, NI_NUMERICHOST|NI_NUMERICSERV, errcode);
			if (0 != errcode)
			{
				RTS_ERROR_ADDRINFO(NULL, ERR_GETNAMEINFO, errcode);
				return FALSE;
			}

			/* hosts and ports must be the same */
			if (STRCMP(host_buffer1, host_buffer2) || STRCMP(port_buffer1, port_buffer2))
				return FALSE;

			psocknamelen1 = SIZEOF(psockname1);
			if (-1 == (gsn_stat = getpeername(0, (struct sockaddr *)&psockname1,
							   (GTM_SOCKLEN_TYPE *)&psocknamelen1)))
			{
				save_errno = errno;
				errptr = (char *)STRERROR(save_errno);
				tmplen = STRLEN(errptr);
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_GETSOCKNAMERR, 3, save_errno, tmplen, errptr);
			}

			psocknamelen2 = SIZEOF(psockname2);
			if (-1 == (gsn_stat = getpeername(1, (struct sockaddr *)&psockname2,
							   (GTM_SOCKLEN_TYPE *)&psocknamelen2)))
			{
				save_errno = errno;
				errptr = (char *)STRERROR(save_errno);
				tmplen = STRLEN(errptr);
				rts_error_csa(CSA_ARG(NULL) VARLSTCNT(5) ERR_GETSOCKNAMERR, 3, save_errno, tmplen, errptr);
			}

			GETNAMEINFO((struct sockaddr *)&psockname1, psocknamelen1, phost_buffer1, NI_MAXHOST,
				    pport_buffer1, NI_MAXSERV, NI_NUMERICHOST|NI_NUMERICSERV, errcode);
			if (0 != errcode)
			{
				RTS_ERROR_ADDRINFO(NULL, ERR_GETNAMEINFO, errcode);
				return FALSE;
			}

			GETNAMEINFO((struct sockaddr *)&psockname2, psocknamelen2, phost_buffer2, NI_MAXHOST,
				pport_buffer2, NI_MAXSERV, NI_NUMERICHOST|NI_NUMERICSERV, errcode);
			if (0 != errcode)
			{
				RTS_ERROR_ADDRINFO(NULL, ERR_GETNAMEINFO, errcode);
				return FALSE;
			}

			/* hosts and ports for the peer sockets must also be the same */
			if (STRCMP(phost_buffer1, phost_buffer2) || STRCMP(pport_buffer1, pport_buffer2))
				return FALSE;
			break;
		case AF_UNIX:
		default:
			/* if inodes are different or st_dev different then not the same device */
			if ((outbuf1.st_ino != outbuf2.st_ino) || (outbuf1.st_dev != outbuf2.st_dev))
				return FALSE;
			break;
		}
		return TRUE;
	} else if (S_ISCHR(outbuf1.st_mode))
	{
		/* if here then both 0,1 are character devices */
		/* if inodes are different or st_dev different then not the same device */
		if ((outbuf1.st_ino != outbuf2.st_ino) || (outbuf1.st_dev != outbuf2.st_dev))
			return FALSE;
		else
			return TRUE;
	} else
	{
		/* unexpected type so assert */
		assert(FALSE);
		return FALSE;
	}
}
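
For non-socket descriptors, same_device_check() reduces to comparing the st_dev/st_ino pair that fstat() reports for the two file descriptors. A stripped-down sketch of that core test (function name illustrative):

#include <sys/stat.h>

/* Return 1 if fd1 and fd2 refer to the same underlying file/device, 0 if not, -1 on error. */
int fds_same_file(int fd1, int fd2)
{
	struct stat	sb1, sb2;

	if ((-1 == fstat(fd1, &sb1)) || (-1 == fstat(fd2, &sb2)))
		return -1;
	return ((sb1.st_dev == sb2.st_dev) && (sb1.st_ino == sb2.st_ino)) ? 1 : 0;
}
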
int gtmrecv_poll_actions1(int *pending_data_len, int *buff_unprocessed, unsigned char *buffp)
{
	static int		report_cnt = 1;
	static int		next_report_at = 1;
	static boolean_t	send_xoff = FALSE;
	static boolean_t	xoff_sent = FALSE;
	static seq_num		send_seqno;
	static boolean_t	log_draining_msg = FALSE;
	static boolean_t	send_badtrans = FALSE;
	static boolean_t	send_cmp2uncmp = FALSE;
	static boolean_t	upd_shut_too_early_logged = FALSE;
	static time_t		last_reap_time = 0;
	repl_msg_t		xoff_msg;
	repl_badtrans_msg_t	bad_trans_msg;
	boolean_t		alert = FALSE, info = FALSE;
	int			return_status;
	gd_region		*region_top;
	unsigned char		*msg_ptr;				/* needed for REPL_{SEND,RECV}_LOOP */
	int			tosend_len, sent_len, sent_this_iter;	/* needed for REPL_SEND_LOOP */
	int			torecv_len, recvd_len, recvd_this_iter;	/* needed for REPL_RECV_LOOP */
	int			status;					/* needed for REPL_{SEND,RECV}_LOOP */
	int			temp_len, pending_msg_size;
	int			upd_start_status, upd_start_attempts;
	int			buffered_data_len;
	int			upd_exit_status;
	seq_num			temp_send_seqno;
	boolean_t		bad_trans_detected = FALSE, onln_rlbk_flg_set = FALSE;
	uint4			jnl_status;
	recvpool_ctl_ptr_t	recvpool_ctl;
	upd_proc_local_ptr_t	upd_proc_local;
	gtmrecv_local_ptr_t	gtmrecv_local;
	upd_helper_ctl_ptr_t	upd_helper_ctl;
	pid_t			waitpid_res;
	int4			msg_type, msg_len;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	recvpool_ctl = recvpool.recvpool_ctl;
	upd_proc_local = recvpool.upd_proc_local;
	gtmrecv_local = recvpool.gtmrecv_local;
	upd_helper_ctl = recvpool.upd_helper_ctl;
	jnl_status = 0;
	if (SHUTDOWN == gtmrecv_local->shutdown)
	{
		repl_log(gtmrecv_log_fp, TRUE, TRUE, "Shutdown signalled\n");
		gtmrecv_end(); /* Won't return */
	}
	/* Reset report_cnt and next_report_at to 1 when a new upd proc is forked */
	if ((1 == report_cnt) || (report_cnt == next_report_at))
	{
		/* A comment on the usage of NO_SHUTDOWN below for the alert variable. Since upd_proc_local->upd_proc_shutdown is
		 * a shared memory field (and could be concurrently changed by either the receiver server or the update process),
		 * we want to make sure it is the same value BEFORE and AFTER checking whether the update process is alive or not.
		 * If it is not NO_SHUTDOWN (i.e. is SHUTDOWN or NORMAL_SHUTDOWN or ABNORMAL_SHUTDOWN) it has shut down due to
		 * an external request, so we do not want to send out a false update-process-is-not-alive alert.
		 */
		if ((alert = ((NO_SHUTDOWN == upd_proc_local->upd_proc_shutdown) && (SRV_DEAD == is_updproc_alive())
				&& (NO_SHUTDOWN == upd_proc_local->upd_proc_shutdown)))
			|| (info = (((NORMAL_SHUTDOWN == upd_proc_local->upd_proc_shutdown)
				|| (ABNORMAL_SHUTDOWN == upd_proc_local->upd_proc_shutdown)) && (SRV_DEAD == is_updproc_alive()))))
		{
			if (alert)
				repl_log(gtmrecv_log_fp, TRUE, TRUE,
					"ALERT : Receiver Server detected that Update Process is not ALIVE\n");
			else
				repl_log(gtmrecv_log_fp, TRUE, TRUE,
					"INFO : Update process not running due to user initiated shutdown\n");
			if (1 == report_cnt)
			{
				send_xoff = TRUE;
				QWASSIGN(recvpool_ctl->old_jnl_seqno, recvpool_ctl->jnl_seqno);
				QWASSIGNDW(recvpool_ctl->jnl_seqno, 0);
				/* Even though we have identified that the update process is NOT alive, a waitpid on the update
				 * process PID is necessary so that the system doesn't leave any zombie process lying around.
				 * This is possible since any child process that dies without the parent doing a waitpid on it
				 * will be defunct unless the parent dies at which point the "init" process takes the role of
				 * the parent and invokes waitpid to remove the zombies.
				 * NOTE: It is possible that the update process was killed before the receiver server got a
				 * chance to record its PID in the recvpool.upd_proc_local structure. In such a case, don't
				 * invoke waitpid as that will block us (receiver server) if this instance of the receiver
				 * server was started with helper processes.
				 */
				if (0 < upd_proc_local->upd_proc_pid)
				{
					WAITPID(upd_proc_local->upd_proc_pid, &upd_exit_status, 0, waitpid_res);
					/* Since the update process as part of its shutdown does NOT reset the upd_proc_pid, reset
					 * it here ONLY if the update process was NOT kill -9ed. This is needed because receiver
					 * server as part of its shutdown relies on this field (upd_proc_pid) to determine if the
					 * update process was cleanly shutdown or was kill -9ed.
					 */
					if (!alert)
						upd_proc_local->upd_proc_pid = 0;
				}
				upd_proc_local->bad_trans = FALSE; /* No point in doing bad transaction processing */
				upd_proc_local->onln_rlbk_flg = FALSE; /* No point handling online rollback */
			}
			gtmrecv_wait_for_jnl_seqno = TRUE;
			REPL_DPRINT1(
			       "gtmrecv_poll_actions : Setting gtmrecv_wait_for_jnl_seqno to TRUE because of upd crash/shutdown\n");
			next_report_at *= GTMRECV_NEXT_REPORT_FACTOR;
			report_cnt++;
		}
	} else
		report_cnt++;
	/* Check if REPL_CMP2UNCMP or REPL_BADTRANS message needs to be sent */
	if (upd_proc_local->onln_rlbk_flg)
	{	/* Update process detected an online rollback and is requesting us to restart the connection. But before that, send
		 * REPL_XOFF source side and drain the replication pipe
		 */
		onln_rlbk_flg_set = TRUE;
		send_xoff = TRUE;
	}
	else if (!send_cmp2uncmp && gtmrecv_send_cmp2uncmp)
	{
		send_xoff = TRUE;
		send_seqno = recvpool_ctl->jnl_seqno;
		send_cmp2uncmp = TRUE;
	} else if (!send_badtrans && upd_proc_local->bad_trans)
	{
		send_xoff = TRUE;
		send_seqno = upd_proc_local->read_jnl_seqno;
		send_badtrans = TRUE;
		bad_trans_detected = TRUE;
	} else if (!upd_proc_local->bad_trans && send_badtrans && 1 != report_cnt)
	{
		send_badtrans = FALSE;
		bad_trans_detected = FALSE;
	}
	if (send_xoff && !xoff_sent)
	{	/* Send XOFF_ACK_ME if the receiver has a connection to the source. Do not attempt to send it if we don't even
		 * know the endianness of the remote side. In that case, we are guaranteed no initial handshake occurred and
		 * so no point sending the XOFF too. This saves us lots of trouble in case of cross-endian replication connections.
		 */
		assert((FD_INVALID  != gtmrecv_sock_fd) || repl_connection_reset);
		if ((FD_INVALID != gtmrecv_sock_fd) && remote_side->endianness_known)
		{
			send_seqno = upd_proc_local->read_jnl_seqno;
			if (!remote_side->cross_endian)
			{
				xoff_msg.type = REPL_XOFF_ACK_ME;
				xoff_msg.len = MIN_REPL_MSGLEN;
				memcpy((uchar_ptr_t)&xoff_msg.msg[0], (uchar_ptr_t)&send_seqno, SIZEOF(seq_num));
			} else
			{
				xoff_msg.type = GTM_BYTESWAP_32(REPL_XOFF_ACK_ME);
				xoff_msg.len = GTM_BYTESWAP_32(MIN_REPL_MSGLEN);
				temp_send_seqno = GTM_BYTESWAP_64(send_seqno);
				memcpy((uchar_ptr_t)&xoff_msg.msg[0], (uchar_ptr_t)&temp_send_seqno, SIZEOF(seq_num));
			}
			REPL_SEND_LOOP(gtmrecv_sock_fd, &xoff_msg, MIN_REPL_MSGLEN, FALSE, &gtmrecv_poll_immediate)
				; /* Empty Body */
			if (SS_NORMAL != status)
			{
				if (REPL_CONN_RESET(status) && EREPL_SEND == repl_errno)
				{
					repl_log(gtmrecv_log_fp, TRUE, TRUE, "Connection reset while sending XOFF_ACK_ME. "
							"Status = %d ; %s\n", status, STRERROR(status));
					repl_close(&gtmrecv_sock_fd);
					repl_connection_reset = TRUE;
					xoff_sent = FALSE;
					send_badtrans = FALSE;
				} else if (EREPL_SEND == repl_errno)
					rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
						RTS_ERROR_LITERAL("Error sending XOFF msg due to BAD_TRANS or UPD crash/shutdown. "
								"Error in send"), status);
				else
				{
					assert(EREPL_SELECT == repl_errno);
					rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
						RTS_ERROR_LITERAL("Error sending XOFF msg due to BAD_TRANS or UPD crash/shutdown. "
								"Error in select"), status);
				}
			} else
			{
				xoff_sent = TRUE;
				log_draining_msg = TRUE;
			}
			repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL_XOFF_ACK_ME sent due to upd shutdown/crash or bad trans "
					"or ONLINE_ROLLBACK\n");
			send_xoff = FALSE;
		} else
		{	/* Connection has been lost OR initial handshake needs to happen again, so no point sending XOFF/BADTRANS */
			send_xoff = FALSE;
			send_badtrans = FALSE;
		}
	}
	/* Drain pipe */
	if (xoff_sent)
	{
		if (log_draining_msg)
		{	/* avoid multiple logs per instance */
			repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL INFO - Draining replication pipe due to %s\n",
					send_cmp2uncmp ? "CMP2UNCMP" : (send_badtrans ? "BAD_TRANS" :
							(onln_rlbk_flg_set ? "ONLINE_ROLLBACK" : "UPD shutdown/crash")));
			log_draining_msg = FALSE;
		}
		if (0 != *buff_unprocessed)
		{	/* Throw away the current contents of the buffer */
			buffered_data_len = ((*pending_data_len <= *buff_unprocessed) ? *pending_data_len : *buff_unprocessed);
			*buff_unprocessed -= buffered_data_len;
			buffp += buffered_data_len;
			*pending_data_len -= buffered_data_len;
			REPL_DPRINT2("gtmrecv_poll_actions : (1) Throwing away %d bytes from old buffer while draining\n",
				buffered_data_len);
			assert(remote_side->endianness_known);	/* only then is remote_side->cross_endian reliable */
			while (REPL_MSG_HDRLEN <= *buff_unprocessed)
			{
				assert(0 == (((unsigned long)buffp) % REPL_MSG_ALIGN));
				msg_len = ((repl_msg_ptr_t)buffp)->len;
				msg_type = ((repl_msg_ptr_t)buffp)->type;
				if (remote_side->cross_endian)
				{
					msg_len = GTM_BYTESWAP_32(msg_len);
					msg_type = GTM_BYTESWAP_32(msg_type);
				}
				msg_type = (msg_type & REPL_TR_CMP_MSG_TYPE_MASK);
				assert((REPL_TR_CMP_JNL_RECS == msg_type) || (0 == (msg_len % REPL_MSG_ALIGN)));
				*pending_data_len = ROUND_UP2(msg_len, REPL_MSG_ALIGN);
				buffered_data_len = ((*pending_data_len <= *buff_unprocessed) ?
								*pending_data_len : *buff_unprocessed);
				*buff_unprocessed -= buffered_data_len;
				buffp += buffered_data_len;
				*pending_data_len -= buffered_data_len;
				REPL_DPRINT3("gtmrecv_poll_actions : (1) Throwing away message of "
					"type %d and length %d from old buffer while draining\n", msg_type, buffered_data_len);
			}
			if (0 < *buff_unprocessed)
			{
				memmove((unsigned char *)gtmrecv_msgp, buffp, *buff_unprocessed);
				REPL_DPRINT2("gtmrecv_poll_actions : Incomplete header of length %d while draining\n",
					*buff_unprocessed);
			}
		}
		status = SS_NORMAL;
		if (0 != *buff_unprocessed || 0 == *pending_data_len)
		{	/* Receive the header of a message */
			assert(REPL_MSG_HDRLEN > *buff_unprocessed);	/* so we don't pass negative length in REPL_RECV_LOOP */
			REPL_RECV_LOOP(gtmrecv_sock_fd, ((unsigned char *)gtmrecv_msgp) + *buff_unprocessed,
				       (REPL_MSG_HDRLEN - *buff_unprocessed), FALSE, &gtmrecv_poll_interval)
				; /* Empty Body */
			if (SS_NORMAL == status)
			{
				assert(remote_side->endianness_known);	/* only then is remote_side->cross_endian reliable */
				if (!remote_side->cross_endian)
				{
					msg_len = gtmrecv_msgp->len;
					msg_type = gtmrecv_msgp->type;
				} else
				{
					msg_len = GTM_BYTESWAP_32(gtmrecv_msgp->len);
					msg_type = GTM_BYTESWAP_32(gtmrecv_msgp->type);
				}
				msg_type = (msg_type & REPL_TR_CMP_MSG_TYPE_MASK);
				assert((REPL_TR_CMP_JNL_RECS == msg_type) || (0 == (msg_len % REPL_MSG_ALIGN)));
				msg_len = ROUND_UP2(msg_len, REPL_MSG_ALIGN);
				REPL_DPRINT3("gtmrecv_poll_actions : Received message of type %d and length %d while draining\n",
					msg_type, msg_len);
			}
		}
		if ((SS_NORMAL == status) && (0 != *buff_unprocessed || 0 == *pending_data_len) && (REPL_XOFF_ACK == msg_type))
		{	/* Receive the rest of the XOFF_ACK msg and signal the drain as complete */
			REPL_RECV_LOOP(gtmrecv_sock_fd, gtmrecv_msgp, (MIN_REPL_MSGLEN - REPL_MSG_HDRLEN), FALSE,
					&gtmrecv_poll_interval)
				; /* Empty Body */
			if (SS_NORMAL == status)
			{
				repl_log(gtmrecv_log_fp, TRUE, TRUE,
						"REPL INFO - XOFF_ACK received. Drained replication pipe completely\n");
				upd_shut_too_early_logged = FALSE;
				xoff_sent = FALSE;
				return_status = STOP_POLL;
			}
		} else if (SS_NORMAL == status)
		{	/* Drain the rest of the message */
			if (0 < *pending_data_len)
			{
				pending_msg_size = *pending_data_len;
				REPL_DPRINT2("gtmrecv_poll_actions : (2) Throwing away %d bytes from pipe\n", pending_msg_size);
			} else
			{
				pending_msg_size = msg_len - REPL_MSG_HDRLEN;
				REPL_DPRINT3("gtmrecv_poll_actions : (2) Throwing away message of "
					"type %d and length %d from pipe\n", msg_type, msg_len);
			}
			for ( ; SS_NORMAL == status && 0 < pending_msg_size; pending_msg_size -= gtmrecv_max_repl_msglen)
			{
				temp_len = (pending_msg_size < gtmrecv_max_repl_msglen)? pending_msg_size : gtmrecv_max_repl_msglen;
				REPL_RECV_LOOP(gtmrecv_sock_fd, gtmrecv_msgp, temp_len, FALSE, &gtmrecv_poll_interval)
					; /* Empty Body */
			}
			*buff_unprocessed = 0; *pending_data_len = 0;
			if (SS_NORMAL == status && info && !upd_shut_too_early_logged)
			{
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "ALERT : User initiated shutdown of Update Process done "
						"when there was data in the replication pipe\n");
				upd_shut_too_early_logged = TRUE;
			}
			return_status = CONTINUE_POLL;
		}
		if (SS_NORMAL != status)
		{
			if (EREPL_RECV == repl_errno)
			{
				if (REPL_CONN_RESET(status))
				{
					repl_log(gtmrecv_log_fp, TRUE, TRUE, "Connection reset while receiving XOFF_ACK. "
							"Status = %d ; %s\n", status, STRERROR(status));
					repl_close(&gtmrecv_sock_fd);
					repl_connection_reset = TRUE;
					xoff_sent = FALSE;
					send_badtrans = FALSE;
					return_status = STOP_POLL;
				} else
					rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
						RTS_ERROR_LITERAL("Error while draining replication pipe. Error in recv"), status);
			} else
			{
				assert(EREPL_SELECT == repl_errno);
				rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
					RTS_ERROR_LITERAL("Error while draining replication pipe. Error in select"), status);
			}
		}
	} else
		return_status = STOP_POLL;
	/* Like was done before for the XOFF_ACK_ME message, send a BADTRANS/CMP2UNCMP message only if we know
	 * the endianness of the other side. If not, no point in sending one anyways and saves us trouble in
	 * case of cross-endian replication connections.
	 */
	if ((STOP_POLL == return_status) && (send_badtrans || send_cmp2uncmp)
		&& (FD_INVALID != gtmrecv_sock_fd) && remote_side->endianness_known)
	{	/* Send REPL_BADTRANS or REPL_CMP2UNCMP message */
		if (!remote_side->cross_endian)
		{
			bad_trans_msg.type = send_cmp2uncmp ? REPL_CMP2UNCMP : REPL_BADTRANS;
			bad_trans_msg.len  = MIN_REPL_MSGLEN;
			bad_trans_msg.start_seqno = send_seqno;
		} else
		{
			bad_trans_msg.type = send_cmp2uncmp ? GTM_BYTESWAP_32(REPL_CMP2UNCMP) : GTM_BYTESWAP_32(REPL_BADTRANS);
			bad_trans_msg.len  = GTM_BYTESWAP_32(MIN_REPL_MSGLEN);
			bad_trans_msg.start_seqno = GTM_BYTESWAP_64(send_seqno);
		}
		REPL_SEND_LOOP(gtmrecv_sock_fd, &bad_trans_msg, bad_trans_msg.len, FALSE, &gtmrecv_poll_immediate)
			; /* Empty Body */
		if (SS_NORMAL == status)
		{
			if (send_cmp2uncmp)
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL_CMP2UNCMP message sent with seqno %llu\n", send_seqno);
			else
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL_BADTRANS message sent with seqno %llu\n", send_seqno);
		} else
		{
			if (REPL_CONN_RESET(status) && EREPL_SEND == repl_errno)
			{
				if (send_cmp2uncmp)
				{
					repl_log(gtmrecv_log_fp, TRUE, TRUE, "Connection reset while sending REPL_CMP2UNCMP. "
							"Status = %d ; %s\n", status, STRERROR(status));
				} else
				{
					repl_log(gtmrecv_log_fp, TRUE, TRUE, "Connection reset while sending REPL_BADTRANS. "
							"Status = %d ; %s\n", status, STRERROR(status));
				}
				repl_close(&gtmrecv_sock_fd);
				repl_connection_reset = TRUE;
				return_status = STOP_POLL;
			} else if (EREPL_SEND == repl_errno)
				rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
					RTS_ERROR_LITERAL("Error sending REPL_BADTRANS/REPL_CMP2UNCMP. Error in send"), status);
			else
			{
				assert(EREPL_SELECT == repl_errno);
				rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
					RTS_ERROR_LITERAL("Error sending REPL_BADTRANS/REPL_CMP2UNCMP. Error in select"), status);
			}
		}
		send_badtrans = FALSE;
		if (send_cmp2uncmp)
		{
			REPL_DPRINT1("gtmrecv_poll_actions : Setting gtmrecv_wait_for_jnl_seqno to TRUE because this receiver"
				"server requested a fall-back from compressed to uncompressed operation\n");
			gtmrecv_wait_for_jnl_seqno = TRUE;/* set this to TRUE to break out and go back to a fresh "do_main_loop" */
			gtmrecv_bad_trans_sent = TRUE;
			gtmrecv_send_cmp2uncmp = FALSE;
			send_cmp2uncmp = FALSE;
		}
	}
	if ((upd_proc_local->bad_trans && bad_trans_detected) || onln_rlbk_flg_set
		|| ((UPDPROC_START == upd_proc_local->start_upd) && (1 != report_cnt)))
	{
		if (UPDPROC_START == upd_proc_local->start_upd)
		{
			assert(is_updproc_alive() != SRV_ALIVE);
			upd_proc_local->upd_proc_shutdown = NO_SHUTDOWN;
		}
		recvpool_ctl->wrapped = FALSE;
		recvpool_ctl->write_wrap = recvpool_ctl->recvpool_size;
		recvpool_ctl->write = 0;
		/* Reset last_rcvd_histinfo, last_valid_histinfo etc. as they reflect context from unprocessed data
		 * in the receive pool and those are no longer valid because we have drained the receive pool.
		 */
		GTMRECV_CLEAR_CACHED_HISTINFO(recvpool.recvpool_ctl, jnlpool, jnlpool_ctl, INSERT_STRM_HISTINFO_FALSE);
		if (UPDPROC_START == upd_proc_local->start_upd)
		{
			/* Attempt starting the update process */
			for (upd_start_attempts = 0;
			     UPDPROC_START_ERR == (upd_start_status = gtmrecv_upd_proc_init(FALSE)) &&
			     GTMRECV_MAX_UPDSTART_ATTEMPTS > upd_start_attempts;
			     upd_start_attempts++)
			{
				if (EREPL_UPDSTART_SEMCTL == repl_errno || EREPL_UPDSTART_BADPATH == repl_errno)
				{
					gtmrecv_autoshutdown();
				} else if (EREPL_UPDSTART_FORK == repl_errno)
				{
					/* Couldn't start up update now, can try later */
					LONG_SLEEP(GTMRECV_WAIT_FOR_PROC_SLOTS);
					continue;
				} else if (EREPL_UPDSTART_EXEC == repl_errno)
				{
					/* In forked child, could not exec, should exit */
					gtmrecv_exit(ABNORMAL_SHUTDOWN);
				}
			}
			if (UPDPROC_STARTED == (upd_proc_local->start_upd = upd_start_status))
			{
				REPL_DPRINT1("gtmrecv_poll_actions : Setting gtmrecv_wait_for_jnl_seqno to TRUE because of "
					     "upd restart\n");
				gtmrecv_wait_for_jnl_seqno = TRUE;
				report_cnt = next_report_at = 1;
				if (send_xoff && (FD_INVALID == gtmrecv_sock_fd))
				{
					/* Update start command was issued before connection was established,
					 * no point in sending XOFF.  */
					send_xoff = FALSE;
				}
			} else
			{
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "%d failed attempts to fork update process. Try later\n",
					 upd_start_attempts);
			}
		} else
		{
			gtmrecv_wait_for_jnl_seqno = TRUE;/* set this to TRUE to break out and go back to a fresh "do_main_loop" */
			if (onln_rlbk_flg_set)
			{
				assert(NULL != jnlpool_ctl);
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "Closing connection due to ONLINE ROLLBACK\n");
 				repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL INFO - Current Jnlpool Seqno : %llu\n",
 						jnlpool_ctl->jnl_seqno);
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL INFO - Current Receive Pool Seqno : %llu\n",
						recvpool_ctl->jnl_seqno);
				repl_close(&gtmrecv_sock_fd);
				repl_connection_reset = TRUE;
				xoff_sent = FALSE;
				send_badtrans = FALSE;
				upd_proc_local->onln_rlbk_flg = FALSE;
				/* Before restarting afresh, sync the online rollback cycles. This way any future grab_lock that
				 * we do after restarting should not realize an unhandled online rollback.  For receiver, it is
				 * just syncing the journal pool cycles as the databases are not opened. But, to be safe, grab
				 * the lock and sync the cycles.
				 */
				GRAB_LOCK(jnlpool.jnlpool_dummy_reg, GRAB_LOCK_ONLY);
				SYNC_ONLN_RLBK_CYCLES;
				rel_lock(jnlpool.jnlpool_dummy_reg);
				return_status = STOP_POLL;
				recvpool_ctl->jnl_seqno = 0;
			} else
			{
				REPL_DPRINT1("gtmrecv_poll_actions : Setting gtmrecv_wait_for_jnl_seqno to TRUE because bad trans"
						"sent\n");
				gtmrecv_bad_trans_sent = TRUE;
				upd_proc_local->bad_trans = FALSE;
				recvpool_ctl->jnl_seqno = upd_proc_local->read_jnl_seqno;
			}
		}
	}
	if ((0 == *pending_data_len) && (0 != gtmrecv_local->changelog))
	{
		if (gtmrecv_local->changelog & REPLIC_CHANGE_LOGINTERVAL)
		{
			repl_log(gtmrecv_log_fp, TRUE, TRUE, "Changing log interval from %u to %u\n",
					log_interval, gtmrecv_local->log_interval);
			log_interval = gtmrecv_local->log_interval;
			gtmrecv_reinit_logseqno(); /* will force a LOG on the first recv following the interval change */
		}
		if (gtmrecv_local->changelog & REPLIC_CHANGE_LOGFILE)
		{
			repl_log(gtmrecv_log_fp, TRUE, TRUE, "Changing log file to %s\n", gtmrecv_local->log_file);
			repl_log_init(REPL_GENERAL_LOG, &gtmrecv_log_fd, NULL, gtmrecv_local->log_file, NULL);
			repl_log_fd2fp(&gtmrecv_log_fp, gtmrecv_log_fd);
			repl_log(gtmrecv_log_fp, TRUE, TRUE, "Change log to %s successful\n",gtmrecv_local->log_file);
		}
		upd_proc_local->changelog = gtmrecv_local->changelog; /* Pass changelog request to the update process */
		/* NOTE: update process and receiver each ignore any setting specific to the other (REPLIC_CHANGE_UPD_LOGINTERVAL,
		 * REPLIC_CHANGE_LOGINTERVAL) */
		gtmrecv_local->changelog = 0;
	}
	if (0 == *pending_data_len && !gtmrecv_logstats && gtmrecv_local->statslog)
	{
		gtmrecv_logstats = TRUE;
		repl_log_init(REPL_STATISTICS_LOG, &gtmrecv_log_fd, &gtmrecv_statslog_fd, gtmrecv_local->log_file,
				gtmrecv_local->statslog_file);
		repl_log_fd2fp(&gtmrecv_statslog_fp, gtmrecv_statslog_fd);
		repl_log(gtmrecv_log_fp, TRUE, TRUE, "Starting stats log to %s\n", gtmrecv_local->statslog_file);
		repl_log(gtmrecv_statslog_fp, TRUE, TRUE, "Begin statistics logging\n");
	} else if (0 == *pending_data_len && gtmrecv_logstats && !gtmrecv_local->statslog)
	{
		gtmrecv_logstats = FALSE;
		repl_log(gtmrecv_log_fp, TRUE, TRUE, "Stopping stats log\n");
		/* Force all data out to the file before closing the file */
		repl_log(gtmrecv_statslog_fp, TRUE, TRUE, "End statistics logging\n");
		CLOSEFILE_RESET(gtmrecv_statslog_fd, status);	/* resets "gtmrecv_statslog_fd" to FD_INVALID */
		/* We need to FCLOSE because a later open() in repl_log_init() might return the same file descriptor as the one
		 * that we just closed. In that case, FCLOSE done in repl_log_fd2fp() affects the newly opened file and
		 * FDOPEN will fail returning NULL for the file pointer. So, we close both the file descriptor and file pointer.
		 * Note the same problem does not occur with GENERAL LOG because the current log is kept open while opening
		 * the new log and hence the new file descriptor will be different (we keep the old log file open in case there
		 * are errors during DUPing. In such a case, we do not switch the log file, but keep the current one).
		 * We can FCLOSE the old file pointer later in repl_log_fd2fp() */
		FCLOSE(gtmrecv_statslog_fp, status);
		gtmrecv_statslog_fp = NULL;
	}
	if (0 == *pending_data_len)
	{
		if (upd_helper_ctl->start_helpers)
		{
			gtmrecv_helpers_init(upd_helper_ctl->start_n_readers, upd_helper_ctl->start_n_writers);
			upd_helper_ctl->start_helpers = FALSE;
		}
		if (HELPER_REAP_NONE != (status = upd_helper_ctl->reap_helpers) ||
			(double)GTMRECV_REAP_HELPERS_INTERVAL <= difftime(gtmrecv_now, last_reap_time))
		{
			gtmrecv_reap_helpers(HELPER_REAP_WAIT == status);
			last_reap_time = gtmrecv_now;
		}
	}
	return (return_status);
}
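
The drain loop in gtmrecv_poll_actions1() above keeps reading message headers and discarding message bodies until the REPL_XOFF_ACK arrives. The sketch below shows the same pattern over a plain socket; the msg_hdr layout is hypothetical, and a real implementation must also validate lengths, handle byte order differences and apply poll timeouts as the code above does.

#include <stdint.h>
#include <sys/socket.h>
#include <sys/types.h>

struct msg_hdr
{
	uint32_t	type;
	uint32_t	len;	/* total message length, including this header */
};

/* Discard messages until one of type "want_type" has been read; 0 on success, -1 on error. */
int drain_until(int sock, uint32_t want_type)
{
	struct msg_hdr	hdr;
	char		scratch[4096];
	ssize_t		n;
	uint32_t	left;

	for (;;)
	{
		if (sizeof(hdr) != recv(sock, &hdr, sizeof(hdr), MSG_WAITALL))
			return -1;
		for (left = hdr.len - sizeof(hdr); 0 < left; left -= (uint32_t)n)
		{	/* throw away the message body */
			n = recv(sock, scratch, (left < sizeof(scratch)) ? left : sizeof(scratch), 0);
			if (0 >= n)
				return -1;
		}
		if (want_type == hdr.type)
			return 0;	/* acknowledgement seen; the pipe is drained */
	}
}
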
static int helper_init(upd_helper_entry_ptr_t helper, recvpool_user helper_type)
{
	int			save_errno, save_shutdown;
	char			helper_cmd[UPDHELPER_CMD_MAXLEN];
	int			status;
	int4			i4status;
	mstr			helper_log_cmd, helper_trans_cmd;
	upd_helper_ctl_ptr_t	upd_helper_ctl;
#ifdef UNIX
	pid_t			helper_pid, waitpid_res;
#elif defined(VMS)
	uint4			helper_pid, cmd_channel;
	char			mbx_suffix[2 + 1]; /* hex representation of numbers 0 through MAX_UPD_HELPERS-1, +1 for '\0' */
	$DESCRIPTOR(cmd_desc_reader, UPDHELPER_READER_CMD_STR);
	$DESCRIPTOR(cmd_desc_writer, UPDHELPER_WRITER_CMD_STR);
#endif

	upd_helper_ctl = recvpool.upd_helper_ctl;
	save_shutdown = helper->helper_shutdown;
	helper->helper_shutdown = NO_SHUTDOWN;
#ifdef UNIX
	if (0 > (helper_pid = fork()))	/* BYPASSOK: we exec immediately, no FORK_CLEAN needed */
	{
		save_errno = errno;
		helper->helper_shutdown = save_shutdown;
		gtm_putmsg(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
				LEN_AND_LIT("Could not fork update process"), save_errno);
		repl_errno = EREPL_UPDSTART_FORK;
		return UPDPROC_START_ERR;
	}
	if (0 == helper_pid)
	{	/* helper */
		getjobnum();
		helper->helper_pid_prev = process_id; /* identify owner of slot */
		helper_log_cmd.len  = STR_LIT_LEN(UPDHELPER_CMD);
		helper_log_cmd.addr = UPDHELPER_CMD;
		if (SS_NORMAL != (i4status = TRANS_LOG_NAME(&helper_log_cmd, &helper_trans_cmd, helper_cmd, SIZEOF(helper_cmd),
								dont_sendmsg_on_log2long)))
		{
			helper->helper_shutdown = save_shutdown;
			gtm_putmsg(VARLSTCNT(6) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
				   LEN_AND_LIT("Could not find path of Helper Process. Check value of $gtm_dist"));
			if (SS_LOG2LONG == i4status)
				gtm_putmsg(VARLSTCNT(5) ERR_LOGTOOLONG, 3, LEN_AND_LIT(UPDHELPER_CMD), SIZEOF(helper_cmd) - 1);
			repl_errno = EREPL_UPDSTART_BADPATH;
			return UPDPROC_START_ERR;
		}
		helper_cmd[helper_trans_cmd.len] = '\0';
		if (-1 == EXECL(helper_cmd, helper_cmd, UPDHELPER_CMD_ARG1, UPDHELPER_CMD_ARG2,
				(UPD_HELPER_READER == helper_type) ? UPDHELPER_READER_CMD_ARG3 : UPDHELPER_WRITER_CMD_ARG3, NULL))
		{
			save_errno = errno;
			helper->helper_shutdown = save_shutdown;
			gtm_putmsg(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
				   LEN_AND_LIT("Could not exec Helper Process"), save_errno);
			repl_errno = EREPL_UPDSTART_EXEC;
			return UPDPROC_START_ERR;
		}
	}
#elif defined(VMS)
	/* Create detached server and write startup commands to it */
	i2hex(helper - upd_helper_ctl->helper_list, LIT_AND_LEN(mbx_suffix));
	mbx_suffix[SIZEOF(mbx_suffix) - 1] = '\0';
	/* A mailbox is created per helper, and the mailbox name is assigned to a logical. This logical will persist until the
	 * helper terminates. So, we need to assign a unique logical per helper. Hence the suffix. */
	if (SS_NORMAL != (status = repl_create_server((UPD_HELPER_READER == helper_type) ?  &cmd_desc_reader : &cmd_desc_writer,
					    		UPDHELPER_MBX_PREFIX, mbx_suffix, &cmd_channel, &helper->helper_pid_prev,
							ERR_RECVPOOLSETUP)))
	{
		gtm_putmsg(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2, LEN_AND_LIT("Unable to spawn Helper process"), status);
		helper->helper_shutdown = save_shutdown;
		repl_errno = EREPL_UPDSTART_FORK;
		return UPDPROC_START_ERR;
	}
	helper_pid = helper->helper_pid_prev;
#endif
	/* Wait for helper to startup */
	while (helper_pid != helper->helper_pid && is_proc_alive(helper_pid, 0))
	{
		SHORT_SLEEP(GTMRECV_WAIT_FOR_SRV_START);
		UNIX_ONLY(WAITPID(helper_pid, &status, WNOHANG, waitpid_res);) /* Release defunct helper process if dead */
	}
	/* The helper has now gone far enough in the initialization, or died before initialization. Consider startup completed. */
#if defined(VMS)
	/* Deassign the send-cmd mailbox channel */
	if (SS_NORMAL != (status = sys$dassgn(cmd_channel)))
	{
		gtm_putmsg(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
				RTS_ERROR_LITERAL("Unable to close upd-send-cmd mbox channel"), status);
		helper->helper_shutdown = save_shutdown;
		repl_errno = EREPL_UPDSTART_BADPATH; /* Just to make an auto-shutdown */
		return UPDPROC_START_ERR;
	}
#endif
	repl_log(gtmrecv_log_fp, TRUE, TRUE, "Helper %s started. PID %d [0x%X]\n",
			(UPD_HELPER_READER == helper_type) ? "reader" : "writer", helper_pid, helper_pid);
	return UPDPROC_STARTED;
}
Example #24
0
int gtmrecv_shutdown(boolean_t auto_shutdown, int exit_status)
{
	uint4           savepid;
	boolean_t       shut_upd_too = FALSE;
	int             status;
	unix_db_info	*udi;

	error_def(ERR_RECVPOOLSETUP);
	error_def(ERR_TEXT);

	repl_log(stdout, TRUE, TRUE, "Initiating shut down\n");
	call_on_signal = NULL;		/* So we don't reenter on error */
	/* assert that auto shutdown should be invoked only if the current process is a receiver server */
	assert(!auto_shutdown || gtmrecv_srv_count);
	if (auto_shutdown)
	{	/* grab the ftok semaphore and recvpool access control lock IN THAT ORDER (to avoid deadlocks) */
		repl_inst_ftok_sem_lock();
		status = grab_sem(RECV, RECV_POOL_ACCESS_SEM);
		if (0 > status)
		{
			repl_log(stderr, TRUE, TRUE,
				"Error grabbing receive pool control semaphore : %s. Shutdown not complete\n", REPL_SEM_ERROR);
			return (ABNORMAL_SHUTDOWN);
		}
	} else
	{	/* ftok semaphore and recvpool access semaphore should already be held from the previous call to "recvpool_init" */
		DEBUG_ONLY(udi = (unix_db_info *)FILE_INFO(recvpool.recvpool_dummy_reg);)
		assert(udi->grabbed_ftok_sem);
		assert(holds_sem[RECV][RECV_POOL_ACCESS_SEM]);
		/* We do not want to hold the options semaphore to avoid deadlocks with receiver server startup (C9F12-002766) */
		assert(!holds_sem[RECV][RECV_SERV_OPTIONS_SEM]);
		recvpool.gtmrecv_local->shutdown = SHUTDOWN;
		/* Wait for receiver server to die. But release ftok semaphore and recvpool access control semaphore before
		 * waiting as the concurrently running receiver server might need these (e.g. if it is about to call the
		 * function "repl_inst_was_rootprimary").
		 */
		if (0 != rel_sem(RECV, RECV_POOL_ACCESS_SEM))
			gtm_putmsg(VARLSTCNT(7) ERR_TEXT, 2, RTS_ERROR_LITERAL("Error in receiver server shutdown rel_sem"),
				REPL_SEM_ERRNO);
		repl_inst_ftok_sem_release();
		/* Wait for receiver server to shut down */
		while ((SHUTDOWN == recvpool.gtmrecv_local->shutdown)
				&& (0 < (savepid = recvpool.gtmrecv_local->recv_serv_pid))
				&& is_proc_alive(savepid, 0))
			SHORT_SLEEP(GTMRECV_WAIT_FOR_SHUTDOWN);
		/* (Re)Grab the ftok semaphore and recvpool access control semaphore IN THAT ORDER (to avoid deadlocks) */
		repl_inst_ftok_sem_lock();
		status = grab_sem(RECV, RECV_POOL_ACCESS_SEM);
		if (0 > status)
		{
			repl_log(stderr, TRUE, TRUE,
				"Error regrabbing receive pool control semaphore : %s. Shutdown not complete\n", REPL_SEM_ERROR);
			return (ABNORMAL_SHUTDOWN);
		}
		exit_status = recvpool.gtmrecv_local->shutdown;
		if (SHUTDOWN == exit_status)
		{
			if (0 == savepid) /* No Receiver Process */
				exit_status = NORMAL_SHUTDOWN;
			else /* Receiver Server Crashed */
			{
				repl_log(stderr, FALSE, TRUE, "Receiver Server exited abnormally\n");
				exit_status = ABNORMAL_SHUTDOWN;
				shut_upd_too = TRUE;
			}
		}
	}
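
Both here and in the startup path, the ftok semaphore is always acquired before the receive pool access control semaphore ("IN THAT ORDER") precisely so that two processes can never each hold one lock while waiting for the other. A two-mutex sketch of that fixed lock-ordering rule, with pthread mutexes standing in for the System V semaphores:

#include <pthread.h>

static pthread_mutex_t	ftok_lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for the ftok semaphore */
static pthread_mutex_t	pool_lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for the pool access semaphore */

/* Every caller takes the locks in the same global order, so no deadlock cycle can form. */
void with_both_locks(void (*work)(void))
{
	pthread_mutex_lock(&ftok_lock);		/* always first */
	pthread_mutex_lock(&pool_lock);		/* always second */
	work();
	pthread_mutex_unlock(&pool_lock);
	pthread_mutex_unlock(&ftok_lock);
}
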
Example #25
0
void db_init(gd_region *reg, sgmnt_data_ptr_t tsd)
{
	static boolean_t	mutex_init_done = FALSE;
	boolean_t       	is_bg, read_only;
	char            	machine_name[MAX_MCNAMELEN];
	file_control    	*fc;
	int			gethostname_res, stat_res, mm_prot;
	int4            	status, semval, dblksize, fbwsize;
	sm_long_t       	status_l;
	sgmnt_addrs     	*csa;
	sgmnt_data_ptr_t        csd;
	struct sembuf   	sop[3];
	struct stat     	stat_buf;
	union semun		semarg;
	struct semid_ds		semstat;
	struct shmid_ds         shmstat;
	struct statvfs		dbvfs;
	uint4           	sopcnt;
	unix_db_info    	*udi;
#ifdef periodic_timer_removed
	void            	periodic_flush_check();
#endif

	error_def(ERR_CLSTCONFLICT);
	error_def(ERR_CRITSEMFAIL);
	error_def(ERR_DBNAMEMISMATCH);
	error_def(ERR_DBIDMISMATCH);
	error_def(ERR_NLMISMATCHCALC);
	error_def(ERR_REQRUNDOWN);
	error_def(ERR_SYSCALL);

	assert(tsd->acc_meth == dba_bg  ||  tsd->acc_meth == dba_mm);
	is_bg = (dba_bg == tsd->acc_meth);
	read_only = reg->read_only;
	new_dbinit_ipc = FALSE;	/* we did not create a new ipc resource */
	udi = FILE_INFO(reg);
	memset(machine_name, 0, sizeof(machine_name));
	if (GETHOSTNAME(machine_name, MAX_MCNAMELEN, gethostname_res))
		rts_error(VARLSTCNT(5) ERR_TEXT, 2, LEN_AND_LIT("Unable to get the hostname"), errno);
	assert(strlen(machine_name) < MAX_MCNAMELEN);
	csa = &udi->s_addrs;
	csa->db_addrs[0] = csa->db_addrs[1] = csa->lock_addrs[0] = NULL;   /* to help in dbinit_ch  and gds_rundown */
	reg->opening = TRUE;
	/*
	 * Create the ftok semaphore for this region.
	 * We do not want the ftok counter semaphore to become 2 for the mupip journal recover process.
	 */
	if (!ftok_sem_get(reg, !mupip_jnl_recover, GTM_ID, FALSE))
		rts_error(VARLSTCNT(4) ERR_DBFILERR, 2, DB_LEN_STR(reg));
	/*
	 * At this point we hold the ftok_semid semaphore based on the ftok key.
	 * Any region with a conflicting ftok will block here.
	 * Say a.dat and b.dat have the same ftok, process A accesses a.dat and
	 * process B accesses b.dat. In that case only one of them can proceed with db_init().
	 */
	fc = reg->dyn.addr->file_cntl;
	fc->file_type = reg->dyn.addr->acc_meth;
	fc->op = FC_READ;
	fc->op_buff = (sm_uc_ptr_t)tsd;
	fc->op_len = sizeof(*tsd);
	fc->op_pos = 1;
	dbfilop(fc);		/* Read file header */
	udi->shmid = tsd->shmid;
	udi->semid = tsd->semid;
	udi->sem_ctime = tsd->sem_ctime.ctime;
	udi->shm_ctime = tsd->shm_ctime.ctime;
	dbsecspc(reg, tsd); 	/* Find db segment size */
	if (!mupip_jnl_recover)
	{
		if (INVALID_SEMID == udi->semid)
		{
			if (0 != udi->sem_ctime || INVALID_SHMID != udi->shmid || 0 != udi->shm_ctime)
			/* We must have something wrong in the protocol or the code if this happens */
				GTMASSERT;
			/*
			 * Create new semaphore using IPC_PRIVATE. System guarantees a unique id.
			 */
			if (-1 == (udi->semid = semget(IPC_PRIVATE, FTOK_SEM_PER_ID, RWDALL | IPC_CREAT)))
			{
				udi->semid = INVALID_SEMID;
				rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, LEN_AND_LIT("Error with database control semget"), errno);
			}
			udi->shmid = INVALID_SHMID;	/* reset shmid so dbinit_ch does not get confused in case we go there */
			new_dbinit_ipc = TRUE;
			tsd->semid = udi->semid;
			semarg.val = GTM_ID;
			/*
			 * The following sets the value of semaphore number 2 (= FTOK_SEM_PER_ID - 1) to GTM_ID.
			 * If the semaphore is ever orphaned for some reason, mupip rundown can
			 * identify GTM semaphores from this value and remove them.
			 */
			if (-1 == semctl(udi->semid, FTOK_SEM_PER_ID - 1, SETVAL, semarg))
				rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, LEN_AND_LIT("Error with database control semctl SETVAL"), errno);
			/*
			 * Warning: We must read the sem_ctime using IPC_STAT after SETVAL, which changes it.
			 *	    We must NOT do any more SETVAL after this. Our design is to use
			 *	    sem_ctime as creation time of semaphore.
			 */
			semarg.buf = &semstat;
			if (-1 == semctl(udi->semid, FTOK_SEM_PER_ID - 1, IPC_STAT, semarg))
				rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, LEN_AND_LIT("Error with database control semctl IPC_STAT"), errno);
			tsd->sem_ctime.ctime = udi->sem_ctime = semarg.buf->sem_ctime;
		} else
		{
			if (INVALID_SHMID == udi->shmid)
				/* if mu_rndwn_file gets standalone access of this region and
				 * somehow mupip process crashes, we can have semid != -1 but shmid == -1
				 */
				rts_error(VARLSTCNT(10) ERR_REQRUNDOWN, 4, DB_LEN_STR(reg), LEN_AND_STR(tsd->machine_name),
						ERR_TEXT, 2, LEN_AND_LIT("semid is valid but shmid is invalid"));
			semarg.buf = &semstat;
			if (-1 == semctl(udi->semid, 0, IPC_STAT, semarg))
				/* file header has a valid semid but the semaphore does not exist */
				rts_error(VARLSTCNT(6) ERR_REQRUNDOWN, 4, DB_LEN_STR(reg), LEN_AND_STR(tsd->machine_name));
			else if (semarg.buf->sem_ctime != tsd->sem_ctime.ctime)
				rts_error(VARLSTCNT(10) ERR_REQRUNDOWN, 4, DB_LEN_STR(reg), LEN_AND_STR(tsd->machine_name),
						ERR_TEXT, 2, LEN_AND_LIT("sem_ctime does not match"));
			if (-1 == shmctl(udi->shmid, IPC_STAT, &shmstat))
				rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
					ERR_TEXT, 2, LEN_AND_LIT("Error with database control shmctl"), errno);
			else if (shmstat.shm_ctime != tsd->shm_ctime.ctime)
				rts_error(VARLSTCNT(10) ERR_REQRUNDOWN, 4, DB_LEN_STR(reg), LEN_AND_STR(tsd->machine_name),
					ERR_TEXT, 2, LEN_AND_LIT("shm_ctime does not match"));
		}
		/* We already have ftok semaphore of this region, so just plainly do semaphore operation */
		/* This is the database access control semaphore for any region */
		sop[0].sem_num = 0; sop[0].sem_op = 0;	/* Wait for 0 */
		sop[1].sem_num = 0; sop[1].sem_op = 1;	/* Lock */
		sopcnt = 2;
		if (!read_only)
		{
			sop[2].sem_num = 1; sop[2].sem_op  = 1;	 /* increment r/w access counter */
			sopcnt = 3;
		}
		sop[0].sem_flg = sop[1].sem_flg = sop[2].sem_flg = SEM_UNDO | IPC_NOWAIT;
		SEMOP(udi->semid, sop, sopcnt, status);
		if (-1 == status)
		{
			errno_save = errno;
			gtm_putmsg(VARLSTCNT(4) ERR_CRITSEMFAIL, 2, DB_LEN_STR(reg));
			rts_error(VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("semop()"), CALLFROM, errno_save);
		}
	} else /* for mupip_jnl_recover we were already in mu_rndwn_file and got "semid" semaphore  */
	{
		if (INVALID_SEMID == udi->semid || 0 == udi->sem_ctime)
			/* make sure mu_rndwn_file() has reset created semaphore for standalone access */
			GTMASSERT;
		if (INVALID_SHMID != udi->shmid || 0 != udi->shm_ctime)
			/* make sure mu_rndwn_file() has reset shared memory */
			GTMASSERT;
		udi->shmid = INVALID_SHMID;	/* reset shmid so dbinit_ch does not get confused in case we go there */
		new_dbinit_ipc = TRUE;
	}
	sem_incremented = TRUE;
	if (new_dbinit_ipc)
	{
		/* Create new shared memory using IPC_PRIVATE. System guarantees a unique id */
#ifdef __MVS__
		if (-1 == (status_l = udi->shmid = shmget(IPC_PRIVATE, ROUND_UP(reg->sec_size, MEGA_BOUND),
			__IPC_MEGA | IPC_CREAT | RWDALL)))
#else
		if (-1 == (status_l = udi->shmid = shmget(IPC_PRIVATE, reg->sec_size, RWDALL | IPC_CREAT)))
#endif
		{
			udi->shmid = status_l = INVALID_SHMID;
			rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
				  ERR_TEXT, 2, LEN_AND_LIT("Error with database shmget"), errno);
		}
		tsd->shmid = udi->shmid;
		if (-1 == shmctl(udi->shmid, IPC_STAT, &shmstat))
			rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
				ERR_TEXT, 2, LEN_AND_LIT("Error with database control shmctl"), errno);
		tsd->shm_ctime.ctime = udi->shm_ctime = shmstat.shm_ctime;
	}
#ifdef DEBUG_DB64
	status_l = (sm_long_t)(csa->db_addrs[0] = (sm_uc_ptr_t)do_shmat(udi->shmid, next_smseg, SHM_RND));
	next_smseg = (sm_uc_ptr_t)ROUND_UP((sm_long_t)(next_smseg + reg->sec_size), SHMAT_ADDR_INCS);
#else
	status_l = (sm_long_t)(csa->db_addrs[0] = (sm_uc_ptr_t)do_shmat(udi->shmid, 0, SHM_RND));
#endif
	if (-1 == status_l)
	{
		rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
			  ERR_TEXT, 2, LEN_AND_LIT("Error attaching to database shared memory"), errno);
	}
	csa->nl = (node_local_ptr_t)csa->db_addrs[0];
	csa->critical = (mutex_struct_ptr_t)(csa->db_addrs[0] + NODE_LOCAL_SIZE);
	assert(((int)csa->critical & 0xf) == 0); 			/* critical should be 16-byte aligned */
#ifdef CACHELINE_SIZE
	assert(0 == ((int)csa->critical & (CACHELINE_SIZE - 1)));
#endif
	/* Note: Here we check jnl_state from the database file; its value cannot change without standalone access.
	 * The jnl_buff buffer should be initialized irrespective of whether this is a read-only or read/write process */
	JNL_INIT(csa, reg, tsd);
	csa->backup_buffer = (backup_buff_ptr_t)(csa->db_addrs[0] + NODE_LOCAL_SPACE + JNL_SHARE_SIZE(tsd));
	csa->lock_addrs[0] = (sm_uc_ptr_t)csa->backup_buffer + BACKUP_BUFFER_SIZE + 1;
	csa->lock_addrs[1] = csa->lock_addrs[0] + LOCK_SPACE_SIZE(tsd) - 1;
	csa->total_blks = tsd->trans_hist.total_blks;   		/* For test to see if file has extended */
	if (new_dbinit_ipc)
	{
		memset(csa->nl, 0, sizeof(*csa->nl));			/* We allocated shared storage -- we have to init it */
		if (JNL_ALLOWED(csa))
		{	/* initialize jb->cycle to a value different from initial value of jpc->cycle (0). although this is not
			 * necessary right now, in the future, the plan is to change jnl_ensure_open() to only do a cycle mismatch
			 * check in order to determine whether to call jnl_file_open() or not. this is in preparation for that.
			 */
			csa->jnl->jnl_buff->cycle = 1;
		}
	}
	if (is_bg)
		csd = csa->hdr = (sgmnt_data_ptr_t)(csa->lock_addrs[1] + 1 + CACHE_CONTROL_SIZE(tsd));
	else
	{
		csa->acc_meth.mm.mmblk_state = (mmblk_que_heads_ptr_t)(csa->lock_addrs[1] + 1);
		FSTAT_FILE(udi->fd, &stat_buf, stat_res);
		if (-1 == stat_res)
			rts_error(VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(reg), errno);
		mm_prot = read_only ? PROT_READ : (PROT_READ | PROT_WRITE);
#ifdef DEBUG_DB64
		if (-1 == (sm_long_t)(csa->db_addrs[0] = (sm_uc_ptr_t)mmap((caddr_t)get_mmseg((size_t)stat_buf.st_size),
									   (size_t)stat_buf.st_size,
									   mm_prot,
									   GTM_MM_FLAGS, udi->fd, (off_t)0)))
			rts_error(VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(reg), errno);
		put_mmseg((caddr_t)(csa->db_addrs[0]), (size_t)stat_buf.st_size);
#else
		if (-1 == (sm_long_t)(csa->db_addrs[0] = (sm_uc_ptr_t)mmap((caddr_t)NULL,
									   (size_t)stat_buf.st_size,
									   mm_prot,
									   GTM_MM_FLAGS, udi->fd, (off_t)0)))
			rts_error(VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(reg), errno);
#endif
		csa->db_addrs[1] = csa->db_addrs[0] + stat_buf.st_size - 1;
		csd = csa->hdr = (sgmnt_data_ptr_t)csa->db_addrs[0];
	}
	if (!csa->nl->glob_sec_init)
	{
		assert(new_dbinit_ipc);
		if (is_bg)
			*csd = *tsd;
		if (csd->machine_name[0])                  /* crash occurred */
		{
			if (0 != memcmp(csd->machine_name, machine_name, MAX_MCNAMELEN))  /* crashed on some other node */
				rts_error(VARLSTCNT(6) ERR_CLSTCONFLICT, 4, DB_LEN_STR(reg), LEN_AND_STR(csd->machine_name));
			else
				rts_error(VARLSTCNT(6) ERR_REQRUNDOWN, 4, DB_LEN_STR(reg), LEN_AND_STR(csd->machine_name));
		}
		if (is_bg)
		{
			bt_malloc(csa);
			csa->nl->cache_off = -CACHE_CONTROL_SIZE(tsd);
			db_csh_ini(csa);
		}
		db_csh_ref(csa);
		strcpy(csa->nl->machine_name, machine_name);					/* machine name */
		assert(MAX_REL_NAME > gtm_release_name_len);
		memcpy(csa->nl->now_running, gtm_release_name, gtm_release_name_len + 1);	/* GT.M release name */
		memcpy(csa->nl->label, GDS_LABEL, GDS_LABEL_SZ - 1);				/* GDS label */
		memcpy(csa->nl->fname, reg->dyn.addr->fname, reg->dyn.addr->fname_len);		/* database filename */
		csa->nl->creation_date_time = csd->creation.date_time;
		csa->nl->highest_lbm_blk_changed = -1;
		csa->nl->wcs_timers = -1;
		csa->nl->nbb = BACKUP_NOT_IN_PROGRESS;
		csa->nl->unique_id.uid = FILE_INFO(reg)->fileid;            /* save what file we initialized this storage for */
		/* save pointers in csa to access shared memory */
		csa->nl->critical = (sm_off_t)((sm_uc_ptr_t)csa->critical - (sm_uc_ptr_t)csa->nl);
		if (JNL_ALLOWED(csa))
			csa->nl->jnl_buff = (sm_off_t)((sm_uc_ptr_t)csa->jnl->jnl_buff - (sm_uc_ptr_t)csa->nl);
		csa->nl->backup_buffer = (sm_off_t)((sm_uc_ptr_t)csa->backup_buffer - (sm_uc_ptr_t)csa->nl);
		csa->nl->hdr = (sm_off_t)((sm_uc_ptr_t)csd - (sm_uc_ptr_t)csa->nl);
		csa->nl->lock_addrs = (sm_off_t)((sm_uc_ptr_t)csa->lock_addrs[0] - (sm_uc_ptr_t)csa->nl);
		if (!read_only || is_bg)
		{
			csd->trans_hist.early_tn = csd->trans_hist.curr_tn;
			csd->max_update_array_size = csd->max_non_bm_update_array_size
				= ROUND_UP2(MAX_NON_BITMAP_UPDATE_ARRAY_SIZE(csd), UPDATE_ARRAY_ALIGN_SIZE);
			csd->max_update_array_size += ROUND_UP2(MAX_BITMAP_UPDATE_ARRAY_SIZE, UPDATE_ARRAY_ALIGN_SIZE);
			/* add current db_csh counters into the cumulative counters and reset the current counters */
#define TAB_DB_CSH_ACCT_REC(COUNTER, DUMMY1, DUMMY2)					\
				csd->COUNTER.cumul_count += csd->COUNTER.curr_count;	\
				csd->COUNTER.curr_count = 0;
#include "tab_db_csh_acct_rec.h"
#undef TAB_DB_CSH_ACCT_REC
		}
		if (!read_only)
		{
			if (is_bg)
			{
				assert(memcmp(csd, GDS_LABEL, GDS_LABEL_SZ - 1) == 0);
				LSEEKWRITE(udi->fd, (off_t)0, (sm_uc_ptr_t)csd, sizeof(sgmnt_data), errno_save);
				if (0 != errno_save)
				{
					rts_error(VARLSTCNT(9) ERR_DBFILERR, 2, DB_LEN_STR(reg),
						  ERR_TEXT, 2, LEN_AND_LIT("Error with database write"), errno_save);
				}
			}
		}
		reg->dyn.addr->ext_blk_count = csd->extension_size;
		mlk_shr_init(csa->lock_addrs[0], csd->lock_space_size, csa, (FALSE == read_only));
		DEBUG_ONLY(locknl = csa->nl;)	/* for DEBUG_ONLY LOCK_HIST macro */
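The db_init example above creates a private SysV semaphore set, stamps one member with GTM_ID so orphaned sets can later be recognized by mupip rundown, and records sem_ctime as the creation time. Below is a minimal standalone sketch of that create-stamp-timestamp pattern (illustrative only; names, permissions and error handling are simplified and not taken from the GT.M sources).

/* Illustrative sketch only: create a private SysV semaphore set, stamp the last
 * member with a marker value, then read back sem_ctime as the creation time. */
#include <sys/ipc.h>
#include <sys/sem.h>
#include <time.h>

union semun {				/* callers must define this themselves on Linux */
	int			val;
	struct semid_ds		*buf;
	unsigned short		*array;
};

int create_marked_semset(int nsems, int marker, time_t *ctime_out)
{
	int		semid;
	union semun	arg;
	struct semid_ds	stat_buf;

	if (-1 == (semid = semget(IPC_PRIVATE, nsems, 0660 | IPC_CREAT)))
		return -1;
	arg.val = marker;					/* analogous to semarg.val = GTM_ID above */
	if (-1 == semctl(semid, nsems - 1, SETVAL, arg))
		return -1;
	arg.buf = &stat_buf;					/* IPC_STAT after SETVAL, since SETVAL updates sem_ctime */
	if (-1 == semctl(semid, nsems - 1, IPC_STAT, arg))
		return -1;
	*ctime_out = stat_buf.sem_ctime;
	return semid;
}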
Beispiel #26
0
int repl_log_init(repl_log_file_t log_type,
		  int *log_fd,
		  int *stats_fd,
		  char *log,
		  char *stats_log)
{
	/* Open the log file */

	char	log_file_name[MAX_FN_LEN + 1], *err_code;
	int	tmp_fd;
	int	save_errno;
	int	stdout_status, stderr_status;

	error_def(ERR_REPLLOGOPN);
	error_def(ERR_TEXT);

	if (*log == '\0')
		return(EREPL_LOGFILEOPEN);

	strcpy(log_file_name, log);
	if (log_type == REPL_STATISTICS_LOG)
	{
		if (strcmp(log_file_name, stats_log) != 0)
			strcpy(log_file_name, stats_log);
		else
		{
			*stats_fd = *log_fd;
			return(SS_NORMAL);
		}
	}

	OPENFILE3(log_file_name, O_RDWR | O_CREAT | O_APPEND, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, tmp_fd);
	if (tmp_fd < 0)
	{
		if ((log_type == REPL_GENERAL_LOG && *log_fd == -1) || *stats_fd == -1)
		{
			save_errno = ERRNO;
			err_code = STRERROR(save_errno);
			send_msg(VARLSTCNT(8) ERR_REPLLOGOPN, 6,
			 	 LEN_AND_STR(log_file_name),
				 LEN_AND_STR(err_code),
				 LEN_AND_STR(NULL_DEVICE));
			strcpy(log_file_name, NULL_DEVICE);
			if (log_type == REPL_GENERAL_LOG)
				strcpy(log, log_file_name);
			else
				strcpy(stats_log, log_file_name);
			OPENFILE(log_file_name, O_RDWR, tmp_fd); /* Should not fail */
		} else
		{
			save_errno = ERRNO;
			err_code = STRERROR(save_errno);
			gtm_putmsg(VARLSTCNT(8) ERR_REPLLOGOPN, 6,
			 	   LEN_AND_STR(log_file_name),
				   LEN_AND_STR(err_code),
				   (log_type == REPL_GENERAL_LOG) ?
				   strlen(log) : strlen(stats_log),
				   (log_type == REPL_GENERAL_LOG) ?
				   log : stats_log);

			return(EREPL_LOGFILEOPEN);
		}
	}

	if (log_type == REPL_GENERAL_LOG)
	{
		int dup2_res;
		/* Duplicate stdout and stderr onto log file */
		DUP2(tmp_fd, 1, stdout_status);
		if (stdout_status >= 0)
		{
			DUP2(tmp_fd, 2, stderr_status);
			if (stderr_status < 0)
			{
				save_errno = ERRNO;
				if (*log_fd != -1)
				{
					DUP2(*log_fd, 1, dup2_res); /* Restore old log file */
					DUP2(*log_fd, 2, dup2_res);
				}
			}
		} else
		{
			save_errno = ERRNO;
			if (*log_fd != -1)
				DUP2(*log_fd, 1, dup2_res); /* Restore old log file */
		}

		if (stdout_status >= 0 && stderr_status >= 0)
		{
			if (*log_fd != -1)
				close(*log_fd);
			*log_fd = tmp_fd;
		} else
		{
			err_code = STRERROR(save_errno);
			gtm_putmsg(VARLSTCNT(10) ERR_REPLLOGOPN, 6,
			 	   LEN_AND_STR(log_file_name),
				   LEN_AND_STR(err_code),
				   (log_type == REPL_GENERAL_LOG) ?
				   strlen(log) : strlen(stats_log),
				   (log_type == REPL_GENERAL_LOG) ?
				   log : stats_log,
				   ERR_TEXT, 2, RTS_ERROR_LITERAL("Error in dup2"));
		}
	} else
	{
		if (*stats_fd != -1)
			close(*stats_fd);
		*stats_fd = tmp_fd;
	}

	return(SS_NORMAL);
}
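repl_log_init above duplicates stdout and stderr onto the newly opened log file with DUP2 and, if either duplication fails, restores the previous log descriptors. A minimal sketch of that redirect-with-rollback pattern using the plain POSIX calls (illustrative only, not the GT.M wrapper macros):

/* Illustrative sketch only: point stdout/stderr at new_fd, falling back to old_fd
 * (the previous log file descriptor, or -1 if there was none) on failure. */
#include <unistd.h>

int redirect_to_log(int new_fd, int old_fd)
{
	if (0 > dup2(new_fd, STDOUT_FILENO))
	{
		if (-1 != old_fd)
			dup2(old_fd, STDOUT_FILENO);	/* restore previous log */
		return -1;
	}
	if (0 > dup2(new_fd, STDERR_FILENO))
	{
		if (-1 != old_fd)
		{
			dup2(old_fd, STDOUT_FILENO);	/* restore both streams */
			dup2(old_fd, STDERR_FILENO);
		}
		return -1;
	}
	if (-1 != old_fd)
		close(old_fd);	/* both dups succeeded; the old log fd is no longer needed */
	return 0;
}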
Beispiel #27
0
uint4	 gdsfilext(uint4 blocks, uint4 filesize, boolean_t trans_in_prog)
{
	sm_uc_ptr_t		old_base[2], mmap_retaddr;
	boolean_t		was_crit, is_mm;
	int			result, save_errno, status;
	DEBUG_ONLY(int		first_save_errno);
	uint4			new_bit_maps, bplmap, map, new_blocks, new_total, max_tot_blks, old_total;
	uint4			jnl_status;
	gtm_uint64_t		avail_blocks, mmap_sz;
	off_t			new_eof, new_size;
	trans_num		curr_tn;
	unix_db_info		*udi;
	inctn_opcode_t		save_inctn_opcode;
	int4			prev_extend_blks_to_upgrd;
	jnl_private_control	*jpc;
	jnl_buffer_ptr_t	jbp;
	cache_rec_ptr_t         cr;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(!IS_DSE_IMAGE);
	assert((cs_addrs->nl == NULL) || (process_id != cs_addrs->nl->trunc_pid)); /* mu_truncate shouldn't extend file... */
	assert(!process_exiting);
	DEBUG_ONLY(old_base[0] = old_base[1] = NULL);
	assert(!gv_cur_region->read_only);
	udi = FILE_INFO(gv_cur_region);
	is_mm = (dba_mm == cs_addrs->hdr->acc_meth);
#	if !defined(MM_FILE_EXT_OK)
	if (!udi->grabbed_access_sem && is_mm)
		return (uint4)(NO_FREE_SPACE); /* should this be changed to show extension not allowed ? */
#	endif
	/* Both blocks and total blocks are unsigned ints so make sure we aren't asking for huge numbers that will
	   overflow and end up doing silly things.
	*/
	assert((blocks <= (MAXTOTALBLKS(cs_data) - cs_data->trans_hist.total_blks)) || WBTEST_ENABLED(WBTEST_FILE_EXTEND_ERROR));
#	if defined(__sun) || defined(__hpux)
	cs_data->defer_allocate = TRUE;
#	endif
	if (!blocks && (cs_data->defer_allocate || (TRANS_IN_PROG_TRUE == trans_in_prog)))
		return (uint4)(NO_FREE_SPACE); /* should this be changed to show extension not enabled ? */
	bplmap = cs_data->bplmap;
	/* New total of non-bitmap blocks will be number of current, non-bitmap blocks, plus new blocks desired
	 * There are (bplmap - 1) non-bitmap blocks per bitmap, so add (bplmap - 2) to number of non-bitmap blocks
	 *      and divide by (bplmap - 1) to get total number of bitmaps for expanded database. (must round up in this
	 *      manner as every non-bitmap block must have an associated bitmap)
	 * Current number of bitmaps is (total number of current blocks + bplmap - 1) / bplmap.
	 * Subtract current number of bitmaps from number needed for expanded database to get number of new bitmaps needed.
	 */
	new_bit_maps = DIVIDE_ROUND_UP(cs_data->trans_hist.total_blks
			- DIVIDE_ROUND_UP(cs_data->trans_hist.total_blks, bplmap) + blocks, bplmap - 1)
			- DIVIDE_ROUND_UP(cs_data->trans_hist.total_blks, bplmap);
	new_blocks = blocks + new_bit_maps;
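	/* Illustrative arithmetic (not from the original source; assumes bplmap = 512, the usual GDS value):
	 * with cs_data->trans_hist.total_blks = 1024 and blocks = 1000,
	 *	current bitmaps           = DIVIDE_ROUND_UP(1024, 512)        = 2
	 *	current non-bitmap blocks = 1024 - 2                          = 1022
	 *	bitmaps after extension   = DIVIDE_ROUND_UP(1022 + 1000, 511) = 4
	 *	new_bit_maps              = 4 - 2                             = 2
	 *	new_blocks                = 1000 + 2                          = 1002
	 * giving a new total of 2026 blocks, in which the 4 bitmaps again cover all 2022 non-bitmap blocks.
	 */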
	assert((0 < (int)new_blocks) || (!cs_data->defer_allocate && (0 == new_blocks)));
	if (new_blocks + cs_data->trans_hist.total_blks > MAXTOTALBLKS(cs_data))
	{
		assert(WBTEST_ENABLED(WBTEST_FILE_EXTEND_ERROR));
		send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(1) ERR_TOTALBLKMAX);
		return (uint4)(NO_FREE_SPACE);
	}
	if (0 != (save_errno = disk_block_available(udi->fd, &avail_blocks, FALSE)))
	{
		send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), save_errno);
		rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), save_errno);
	} else
	{
		if (!(gtmDebugLevel & GDL_IgnoreAvailSpace))
		{	/* Bypass this space check if the debug flag above is on. Allows us to create a large sparse DB
			 * in space it could never fit into if it weren't sparse. Needed for some tests.
			 */
			avail_blocks = avail_blocks / (cs_data->blk_size / DISK_BLOCK_SIZE);
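			/* Illustrative numbers (not from the original source): assuming blk_size = 4096 and
			 * DISK_BLOCK_SIZE = 512, each database block spans 8 disk blocks, so 80,000 free disk
			 * blocks reported by disk_block_available() become 10,000 free database blocks here.
			 */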
			if ((blocks * EXTEND_WARNING_FACTOR) > avail_blocks)
			{
				if (blocks > (uint4)avail_blocks)
				{
					if (!INST_FREEZE_ON_NOSPC_ENABLED(cs_addrs))
						return (uint4)(NO_FREE_SPACE);
					else
						send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(6) MAKE_MSG_WARNING(ERR_NOSPACEEXT), 4,
							DB_LEN_STR(gv_cur_region), new_blocks, (uint4)avail_blocks);
				} else
					send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DSKSPACEFLOW, 3, DB_LEN_STR(gv_cur_region),
						 (uint4)(avail_blocks - ((new_blocks <= avail_blocks) ? new_blocks : 0)));
			}
		}
	}
#	ifdef DEBUG
	if (WBTEST_ENABLED(WBTEST_MM_CONCURRENT_FILE_EXTEND) && dollar_tlevel && !MEMCMP_LIT(gv_cur_region->rname, "DEFAULT"))
	{
		SYSTEM("$gtm_dist/mumps -run $gtm_wbox_mrtn");
		assert(1 == cs_addrs->nl->wbox_test_seq_num);	/* should have been set by mubfilcpy */
		cs_addrs->nl->wbox_test_seq_num = 2;	/* signal mupip backup to stop sleeping in mubfilcpy */
	}
#	endif
	/* From here on, we need to use GDSFILEXT_CLNUP before returning to the caller */
	was_crit = cs_addrs->now_crit;
	assert(!cs_addrs->hold_onto_crit || was_crit);
	/* If we are coming from mupip_extend (which gets crit itself) we better have waited for any unfreezes to occur.
	 * If we are coming from online rollback (when that feature is available), we will come in holding crit and in
	 * 	the final retry. In that case too, we expect to have waited for unfreezes to occur in the caller itself.
	 * Therefore if we are coming in holding crit from MUPIP, we expect the db to be unfrozen so no need to wait for
	 * freeze.
	 * If we are coming from GT.M and final retry (in which case we come in holding crit) we expect to have waited
	 * 	for any unfreezes (by invoking tp_crit_all_regions) to occur (TP or non-TP) before coming into this
	 *	function. However, there is one exception. In the final retry, if tp_crit_all_regions notices that
	 *	at least one of the participating regions did ONLY READs, it will not wait for any freeze on THAT region
	 *	to complete before grabbing crit. Later, in the final retry, if THAT region did an update which caused
	 *	op_tcommit to invoke bm_getfree->gdsfilext, then we would have come here with a frozen region on which
	 *	we hold crit.
	 */
	assert(!was_crit || !FROZEN_HARD(cs_data) || (dollar_tlevel && (CDB_STAGNATE <= t_tries)));
	/*
	 * If we are in the final retry and already hold crit, it is possible that csa->nl->wc_blocked is also set to TRUE
	 * (by a concurrent process in phase2 which encountered an error in the midst of commit and secshr_db_clnup
	 * finished the job for it). In this case we do NOT want to invoke wcs_recover as that will update the "bt"
	 * transaction numbers without correspondingly updating the history transaction numbers (effectively causing
	 * a cdb_sc_blkmod type of restart). Therefore do NOT call grab_crit (which unconditionally invokes wcs_recover)
	 * if we already hold crit.
	 */
	if (!was_crit)
	{
		for ( ; ; )
		{
			grab_crit(gv_cur_region);
			if (FROZEN_CHILLED(cs_data))
				DO_CHILLED_AUTORELEASE(cs_addrs, cs_data);
			if (!FROZEN(cs_data) && !IS_REPL_INST_FROZEN)
				break;
			rel_crit(gv_cur_region);
			while (FROZEN(cs_data) || IS_REPL_INST_FROZEN)
			{
				hiber_start(1000);
				if (FROZEN_CHILLED(cs_data) && CHILLED_AUTORELEASE(cs_data))
					break;
			}
		}
	} else if (FROZEN_HARD(cs_data) && dollar_tlevel)
	{	/* We don't want to continue with file extension as explained above. Hence return with an error code which
		 * op_tcommit will recognize (as a cdb_sc_needcrit/cdb_sc_instancefreeze type of restart) and restart accordingly.
		 */
		assert(CDB_STAGNATE <= t_tries);
		GDSFILEXT_CLNUP;
		return (uint4)FINAL_RETRY_FREEZE_PROG;
	} else
		WAIT_FOR_REGION_TO_UNCHILL(cs_addrs, cs_data);
	if (IS_REPL_INST_FROZEN && trans_in_prog)
	{
		assert(CDB_STAGNATE <= t_tries);
		GDSFILEXT_CLNUP;
		return (uint4)FINAL_RETRY_INST_FREEZE;
	}
	assert(cs_addrs->ti->total_blks == cs_data->trans_hist.total_blks);
	old_total = cs_data->trans_hist.total_blks;
	if (old_total != filesize)
	{	/* Somebody else has already extended it; since we are in crit, this is trustworthy. However, in the case of MM,
		 * we still need to remap the database.
		 */
		assert((old_total > filesize) || !is_mm);
		/* For BG, someone else could have truncated or extended - we have no idea */
		GDSFILEXT_CLNUP;
		return (SS_NORMAL);
	}
	if (trans_in_prog && SUSPICIOUS_EXTEND)
	{
		if (!was_crit)
		{
			GDSFILEXT_CLNUP;
			return (uint4)(EXTEND_SUSPECT);
		}
		/* If the free_blocks counter is not ok, then correct it and do the check again. If it still fails, it means we held
		 * crit through bm_getfree into gdsfilext and still didn't get it right.
		 */
		assertpro(!is_free_blks_ctr_ok() && !SUSPICIOUS_EXTEND);
	}
	if (JNL_ENABLED(cs_data))
	{
		if (!jgbl.dont_reset_gbl_jrec_time)
			SET_GBL_JREC_TIME;	/* needed before jnl_ensure_open as that can write jnl records */
		jpc = cs_addrs->jnl;
		jbp = jpc->jnl_buff;
		/* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order
		 * of jnl records. This needs to be done BEFORE the jnl_ensure_open as that could write
		 * journal records (if it decides to switch to a new journal file).
		 */
		ADJUST_GBL_JREC_TIME(jgbl, jbp);
		jnl_status = jnl_ensure_open(gv_cur_region, cs_addrs);
		if (jnl_status)
		{
			GDSFILEXT_CLNUP;
			send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(cs_data), DB_LEN_STR(gv_cur_region));
			return (uint4)(NO_FREE_SPACE);	/* should have better return status */
		}
	}
	if (is_mm)
	{
		cs_addrs->nl->mm_extender_pid = process_id;
		status = wcs_wtstart(gv_cur_region, 0, NULL, NULL);
		cs_addrs->nl->mm_extender_pid = 0;
		assertpro(SS_NORMAL == status);
		old_base[0] = cs_addrs->db_addrs[0];
		old_base[1] = cs_addrs->db_addrs[1];
		cs_addrs->db_addrs[0] = NULL; /* don't rely on it until the mmap below */
#		ifdef _AIX
		status = shmdt(old_base[0] - BLK_ZERO_OFF(cs_data->start_vbn));
#		else
		status = munmap((caddr_t)old_base[0], (size_t)(old_base[1] - old_base[0]));
#		endif
		if (0 != status)
		{
			save_errno = errno;
			GDSFILEXT_CLNUP;
			send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(12) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region),
					ERR_SYSCALL, 5, LEN_AND_STR(MEM_UNMAP_SYSCALL), CALLFROM, save_errno);
			return (uint4)(NO_FREE_SPACE);
		}
	} else
	{	/* Due to concurrency issues, it is possible some process had issued a disk read of the GDS block# corresponding
		 * to "old_total" right after a truncate wrote a GDS-block of zeros on disk (to signal end of the db file).
		 * If so, the global buffer containing this block needs to be invalidated now as part of the extend. If not, it is
		 * possible the EOF block on disk is now going to be overwritten by a properly initialized bitmap block (as part
		 * of the gdsfilext below) while the global buffer continues to have an incorrect copy of that bitmap block and
		 * this in turn would cause XXXX failures due to a bad bitmap block in shared memory. (GTM-7519)
		 */
		cr = db_csh_get((block_id)old_total);
		if ((NULL != cr) && ((cache_rec_ptr_t)CR_NOTVALID != cr))
		{
			assert((0 == cr->dirty) && (0 == cr->bt_index) && !cr->stopped);
			cr->cycle++;
			cr->blk = CR_BLKEMPTY;
		}
	}
	CHECK_TN(cs_addrs, cs_data, cs_data->trans_hist.curr_tn);	/* can issue rts_error TNTOOLARGE */
	new_total = old_total + new_blocks;
	new_eof = BLK_ZERO_OFF(cs_data->start_vbn) + ((off_t)new_total * cs_data->blk_size);
#	if !defined(__sun) && !defined(__hpux)
	if (!cs_data->defer_allocate)
	{
		new_size = new_eof + cs_data->blk_size;
		save_errno = posix_fallocate(udi->fd, 0, new_size);
		DEBUG_ONLY(first_save_errno = save_errno);
		if ((ENOSPC == save_errno) && IS_GTM_IMAGE)
			save_errno = extend_wait_for_fallocate(udi, new_size);
		if (0 != save_errno)
		{
			GDSFILEXT_CLNUP;
			assert(ENOSPC == save_errno);
			if (ENOSPC != save_errno)
				send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_PREALLOCATEFAIL, 2, DB_LEN_STR(gv_cur_region),
					     save_errno);
			return (uint4)(NO_FREE_SPACE);
		}
	}
#	endif
	save_errno = db_write_eof_block(udi, udi->fd, cs_data->blk_size, new_eof, &(TREF(dio_buff)));
	if ((ENOSPC == save_errno) && IS_GTM_IMAGE)
		save_errno = extend_wait_for_write(udi, cs_data->blk_size, new_eof);
	if (0 != save_errno)
	{
		GDSFILEXT_CLNUP;
		if (ENOSPC != save_errno)
			send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), save_errno);
		return (uint4)(NO_FREE_SPACE);
	}
	if (WBTEST_ENABLED(WBTEST_FILE_EXTEND_INTERRUPT_1))
	{
		LONG_SLEEP(600);
		assert(FALSE);
	}
	/* Ensure the EOF and metadata get to disk BEFORE any bitmap writes. Otherwise, the file size could no longer reflect
	 * a proper extent and subsequent invocations of gdsfilext could corrupt the database.
	 */
	if (!IS_STATSDB_CSA(cs_addrs))
	{
		GTM_DB_FSYNC(cs_addrs, udi->fd, status);
		assert(0 == status);
		if (0 != status)
		{
			GDSFILEXT_CLNUP;
			send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(8) ERR_DBFILERR, 5,
						RTS_ERROR_LITERAL("fsync1()"), CALLFROM, status);
			return (uint4)(NO_FREE_SPACE);
		}
	}
	if (WBTEST_ENABLED(WBTEST_FILE_EXTEND_INTERRUPT_2))
	{
		LONG_SLEEP(600);
		assert(FALSE); /* Should be killed before that */
	}
	DEBUG_ONLY(prev_extend_blks_to_upgrd = cs_data->blks_to_upgrd;)
Beispiel #28
0
/* 1. Allocate a basic initial condition handler stack that can be expanded later if necessary.
 * 2. On Linux, make sure bits 0, 1, 4, 5, and 6 are set in /proc/PID/coredump_filter so that core dumps include the sections that GT.M
 *    cores need to have in them.
 */
void err_init(void (*x)())
{
	chnd = (condition_handler *)malloc((CONDSTK_INITIAL_INCR + CONDSTK_RESERVE) * SIZEOF(condition_handler));
	chnd[0].ch_active = FALSE;
	chnd[0].save_active_ch = NULL;
	active_ch = ctxt = &chnd[0];
	ctxt->ch = x;
	chnd_end = &chnd[CONDSTK_INITIAL_INCR]; /* chnd_end is the end of the condition handler stack */
	chnd_incr = CONDSTK_INITIAL_INCR * 2;
#	if defined(__linux__) || defined(__NetBSD__)
	/* Read the coredump_filter value from /proc for this process, update the value if necessary so we have the proper
	 * flags set to get the info we (and gtmpcat) need to properly process a core file. Note any errors we encounter just
	 * send a message to the operator log and return as nothing here should prevent GT.M from running.
	 *
	 * Note "man 5 core" on x86-64 Linux (Ubuntu 12.04) notes that the /proc/PID/coredump_filter file is only provided when
	 * the Linux kernel is built with the CONFIG_ELF_CORE configuration option. This *seems* to control whether or not the
	 * kernel supports the ELF loader or not. To date, all Linux flavors GT.M supports use ELF so we regard this as largely
	 * mandatory though in the future it may happen that GT.M works yet runs with something other than ELF. In that case,
	 * we'd need to change the below to avoid the operator log messages every time GT.M initializes.
	 */
	{
		int 		rc;
		unsigned int	filterbits;
		char		procfn[SIZEOF(COREDUMPFILTERFN) + MAX_DIGITS_IN_INT];	/* File name of file to update */
		char		filter[FILTERPARMSIZE], *filterend;			/* Value read in & written out */
		char		*rcc;
		FILE		*filterstrm;						/* filter stream file block */

		/* Note use simple basic methods since this early in initialization not everything is necessarily setup to
		 * be able to properly use the *print*() wrapper functions.
		 */
		rc = snprintf(procfn, SIZEOF(procfn), COREDUMPFILTERFN, getpid());
		if (0 > rc)
		{
			send_msg_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("snprintf()"), CALLFROM, rc);
			return;
		}
		filterstrm = fopen(procfn, "r");
		if (NULL == filterstrm)
		{
			rc = errno;
			send_msg_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("fopen()"), CALLFROM, rc);
			return;
		}
		rcc = fgets(filter, SIZEOF(filter), filterstrm);
		if (NULL == rcc)
		{
			rc = errno;	/* errno from the failed fgets() */
			send_msg_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("fgets()"), CALLFROM, rc);
			return;
		}
		rc = fclose(filterstrm);
		if (0 > rc)
		{
			send_msg_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("fclose()"), CALLFROM, rc);
			return;
		}
		filterend = filter + SIZEOF(filter);
		filterbits = (unsigned int)strtol(filter, &filterend, 16);
		if (FILTERENABLEBITS != (filterbits & FILTERENABLEBITS))
		{	/* At least one flag was missing - reset them */
			filterbits = filterbits | FILTERENABLEBITS;
			filterstrm = fopen(procfn, "w");
			if (NULL == filterstrm)
			{
				rc = errno;
				send_msg_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("fopen()"),
					     CALLFROM, rc);
				return;
			}
			rc = fprintf(filterstrm, "0x%08x", filterbits);
			if (0 > rc)
			{
				send_msg_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("fprintf"),
					     CALLFROM, rc);
				return;
			}
			rc = fclose(filterstrm);
			if (0 > rc)
			{
				send_msg_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5, RTS_ERROR_LITERAL("fclose()"),
					     CALLFROM, rc);
				return;
			}
		}
	}
#	endif
}
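err_init above reads /proc/PID/coredump_filter and rewrites it only when one of the required bits is missing; bits 0, 1, 4, 5 and 6 together form the mask 0x73. A minimal standalone sketch of that read-check-update cycle (illustrative only; the mask value is derived from the header comment, and the real code uses the FILTERENABLEBITS macro and GT.M's error-reporting wrappers):

/* Illustrative sketch only: ensure bits 0, 1, 4, 5 and 6 (mask 0x73) are set in
 * this process's /proc/self/coredump_filter, rewriting the file only if needed. */
#include <stdio.h>

int ensure_coredump_filter(void)
{
	unsigned int		bits = 0;
	const unsigned int	needed = 0x73;		/* bits 0, 1, 4, 5, 6 */
	FILE			*f;

	if (NULL == (f = fopen("/proc/self/coredump_filter", "r")))
		return -1;				/* file absent (e.g. kernel without CONFIG_ELF_CORE) */
	if (1 != fscanf(f, "%x", &bits))
		bits = 0;
	fclose(f);
	if (needed == (bits & needed))
		return 0;				/* all required bits already set */
	if (NULL == (f = fopen("/proc/self/coredump_filter", "w")))
		return -1;
	fprintf(f, "0x%08x", bits | needed);
	fclose(f);
	return 0;
}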
/* Initialize the receiver server's listening socket for communication with the source server */
int gtmrecv_comm_init(in_port_t port)
{
	struct addrinfo		*ai_ptr = NULL, hints;
	const	int		enable_reuseaddr = 1;
	struct  linger  	disable_linger = {0, 0};
	int			rc;
	int			errcode;
	char			port_buffer[NI_MAXSERV];
	int			port_buffer_len;
	int			temp_sock_fd;
	int			af;

	if (FD_INVALID != gtmrecv_listen_sock_fd) /* Initialization done already */
		return (0);

	/* Create the socket used for communicating with primary */
	af = ((GTM_IPV6_SUPPORTED && !ipv4_only) ? AF_INET6 : AF_INET);
	if (FD_INVALID == (temp_sock_fd = socket(af, SOCK_STREAM, IPPROTO_TCP)))
	{
		af = AF_INET;
		if (FD_INVALID == (temp_sock_fd = socket(af, SOCK_STREAM, IPPROTO_TCP)))
		{
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
					RTS_ERROR_LITERAL("Error with receiver server socket create"), ERRNO);
			return (-1);
		}
	}

	/* Make it known to the world that you are ready for a Source Server */
	SERVER_HINTS(hints, af);
	SPRINTF(port_buffer, "%hu", port);
	if (0 != (errcode = getaddrinfo(NULL, port_buffer, &hints, &ai_ptr)))
	{
		CLOSEFILE(temp_sock_fd, rc);
		RTS_ERROR_ADDRINFO_CTX(NULL, ERR_GETADDRINFO, errcode, "FAILED in obtaining IP address on receiver server.");
		return -1;
	}


	gtmrecv_listen_sock_fd = temp_sock_fd;
	if (0 > setsockopt(gtmrecv_listen_sock_fd, SOL_SOCKET, SO_LINGER, (const void *)&disable_linger, SIZEOF(disable_linger)))
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
				RTS_ERROR_LITERAL("Error with receiver server listen socket disable linger"), ERRNO);
	if (0 > setsockopt(gtmrecv_listen_sock_fd, SOL_SOCKET, SO_REUSEADDR, (const void *)&enable_reuseaddr,
			SIZEOF(enable_reuseaddr)))
	{
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
				RTS_ERROR_LITERAL("Error with receiver server listen socket enable reuseaddr"), ERRNO);
	}
	if (0 > BIND(gtmrecv_listen_sock_fd, ai_ptr->ai_addr, ai_ptr->ai_addrlen))
	{
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
				 RTS_ERROR_LITERAL("Could not bind local address"), ERRNO);
		CLOSEFILE_RESET(gtmrecv_listen_sock_fd, rc);	/* resets "gtmrecv_listen_sock_fd" to FD_INVALID */
		return (-1);
	}

	if (0 > listen(gtmrecv_listen_sock_fd, 5))
	{
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
				 RTS_ERROR_LITERAL("Could not listen"), ERRNO);
		CLOSEFILE_RESET(gtmrecv_listen_sock_fd, rc);	/* resets "gtmrecv_listen_sock_fd" to FD_INVALID */
		return (-1);
	}

	return (0);
}
/*
 * Description:
 * 	Grab ftok semaphore on replication instance file
 *	Grab all replication semaphores for the instance (both jnlpool and recvpool)
 * 	Release ftok semaphore
 * Parameters:
 * Return Value: TRUE, if successful
 *	         FALSE, if fails.
 */
boolean_t mu_replpool_grab_sem(boolean_t immediate)
{
	char			instfilename[MAX_FN_LEN + 1];
	gd_region		*r_save;
	static gd_region 	*replreg;
	int			status, save_errno;
	union semun		semarg;
	struct semid_ds		semstat;
	repl_inst_hdr		repl_instance;
	unix_db_info		*udi;
	unsigned int		full_len;

	error_def(ERR_RECVPOOLSETUP);
	error_def(ERR_JNLPOOLSETUP);
	error_def(ERR_REPLFTOKSEM);
	error_def(ERR_TEXT);

	if (NULL == replreg)
	{
		r_save = gv_cur_region;
		mu_gv_cur_reg_init();
		replreg = gv_cur_region;
		gv_cur_region = r_save;
	}
	jnlpool.jnlpool_dummy_reg = replreg;
	recvpool.recvpool_dummy_reg = replreg;
	if (!repl_inst_get_name(instfilename, &full_len, MAX_FN_LEN + 1, issue_rts_error))
		GTMASSERT;	/* rts_error should have been issued by repl_inst_get_name */
	assert(full_len);
	memcpy((char *)replreg->dyn.addr->fname, instfilename, full_len);
	replreg->dyn.addr->fname_len = full_len;
	udi = FILE_INFO(replreg);
	udi->fn = (char *)replreg->dyn.addr->fname;
	if (!ftok_sem_get(replreg, TRUE, REPLPOOL_ID, immediate))
		rts_error(VARLSTCNT(4) ERR_REPLFTOKSEM, 2, full_len, instfilename);
	repl_inst_read(instfilename, (off_t)0, (sm_uc_ptr_t)&repl_instance, SIZEOF(repl_inst_hdr));
	/*
	 * --------------------------
	 * First semaphores of jnlpool
	 * --------------------------
	 */
	if (-1 == (udi->semid = init_sem_set_source(IPC_PRIVATE, NUM_SRC_SEMS, RWDALL | IPC_CREAT)))
	{
		ftok_sem_release(replreg, TRUE, TRUE);
		rts_error(VARLSTCNT(7) ERR_JNLPOOLSETUP, 0, ERR_TEXT, 2,
			  RTS_ERROR_LITERAL("Error creating journal pool"), REPL_SEM_ERRNO);
	}
	semarg.val = GTM_ID;
	if (-1 == semctl(udi->semid, SOURCE_ID_SEM, SETVAL, semarg))
	{
		save_errno = errno;
		remove_sem_set(SOURCE);		/* Remove what we created */
		ftok_sem_release(replreg, TRUE, TRUE);
		rts_error(VARLSTCNT(7) ERR_JNLPOOLSETUP, 0, ERR_TEXT, 2,
			 RTS_ERROR_LITERAL("Error with jnlpool semctl"), save_errno);
	}
	semarg.buf = &semstat;
	if (-1 == semctl(udi->semid, 0, IPC_STAT, semarg))
	{
		save_errno = errno;
		remove_sem_set(SOURCE);		/* Remove what we created */
		ftok_sem_release(replreg, TRUE, TRUE);
		rts_error(VARLSTCNT(7) ERR_JNLPOOLSETUP, 0, ERR_TEXT, 2,
			 RTS_ERROR_LITERAL("Error with jnlpool semctl"), save_errno);
	}
	udi->gt_sem_ctime = semarg.buf->sem_ctime;
	status = grab_sem_all_source();
	if (0 != status)
	{
		remove_sem_set(SOURCE);		/* Remove what we created */
		ftok_sem_release(replreg, TRUE, TRUE);
		rts_error(VARLSTCNT(1) ERR_JNLPOOLSETUP);
	}
	repl_instance.jnlpool_semid = udi->semid;
	repl_instance.jnlpool_semid_ctime = udi->gt_sem_ctime;
	/*
	 * --------------------------
	 * Now semaphores of recvpool
	 * --------------------------
	 */
	assert(NUM_SRC_SEMS == NUM_RECV_SEMS);
	if (-1 == (udi->semid = init_sem_set_recvr(IPC_PRIVATE, NUM_RECV_SEMS, RWDALL | IPC_CREAT)))
	{
		remove_sem_set(SOURCE);
		ftok_sem_release(replreg, TRUE, TRUE);
		rts_error(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0,
			  ERR_TEXT, 2,
			  RTS_ERROR_LITERAL("Error creating recv pool"), REPL_SEM_ERRNO);
	}
	semarg.val = GTM_ID;
	if (-1 == semctl(udi->semid, RECV_ID_SEM, SETVAL, semarg))
	{
		save_errno = errno;
		remove_sem_set(SOURCE);
		remove_sem_set(RECV);
		ftok_sem_release(replreg, TRUE, TRUE);
		rts_error(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
			 RTS_ERROR_LITERAL("Error with recvpool semctl"), save_errno);
	}
	semarg.buf = &semstat;
	if (-1 == semctl(udi->semid, 0, IPC_STAT, semarg)) /* For creation time */
	{
		save_errno = errno;
		remove_sem_set(SOURCE);
		remove_sem_set(RECV);
		ftok_sem_release(replreg, TRUE, TRUE);
		rts_error(VARLSTCNT(7) ERR_RECVPOOLSETUP, 0, ERR_TEXT, 2,
			 RTS_ERROR_LITERAL("Error with recvpool semctl"), save_errno);
	}
	udi->gt_sem_ctime = semarg.buf->sem_ctime;
	status = grab_sem_all_receive();
	if (0 != status)
	{
		remove_sem_set(SOURCE);
		remove_sem_set(RECV);
		ftok_sem_release(replreg, TRUE, TRUE);
		rts_error(VARLSTCNT(1) ERR_RECVPOOLSETUP);
	}
	repl_instance.recvpool_semid = udi->semid;
	repl_instance.recvpool_semid_ctime = udi->gt_sem_ctime;
	/* Initialize jnlpool.repl_inst_filehdr as it is used later by gtmrecv_fetchresync() */
	assert(NULL == jnlpool.repl_inst_filehdr);
	jnlpool.repl_inst_filehdr = (repl_inst_hdr_ptr_t)malloc(SIZEOF(repl_inst_hdr));
	memcpy(jnlpool.repl_inst_filehdr, &repl_instance, SIZEOF(repl_inst_hdr));
	/* Flush changes to the replication instance file header to disk */
	repl_inst_write(instfilename, (off_t)0, (sm_uc_ptr_t)&repl_instance, SIZEOF(repl_inst_hdr));
	/* Now release jnlpool/recvpool ftok semaphore */
	if (!ftok_sem_release(replreg, FALSE, immediate))
	{
		remove_sem_set(SOURCE);
		remove_sem_set(RECV);
		rts_error(VARLSTCNT(4) ERR_REPLFTOKSEM, 2, full_len, instfilename);
	}
	return TRUE;
}