Example #1
0
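/* Extend the current journal file so it can hold at least total_jnl_rec_size more
 * bytes of journal records, autoswitching to a new journal file when the extension
 * would cross csd->autoswitchlimit. Returns the number of blocks added on success,
 * or -1 on error (in which case the journal file is declared lost).
 */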
int jnl_file_extend(jnl_private_control *jpc, uint4 total_jnl_rec_size)
{
	file_control		*fc;
	boolean_t		need_extend;
	jnl_buffer_ptr_t     	jb;
	jnl_create_info 	jnl_info;
	jnl_file_header		*header;
	unsigned char		hdr_buff[JNL_HDR_LEN + ALIGNMENT_SIZE];
	uint4			new_alq;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	char			prev_jnl_fn[JNL_NAME_SIZE];
	uint4			jnl_status = 0, status;
	int			new_blocks, result;
	GTM_BAVAIL_TYPE		avail_blocks;
	uint4			aligned_tot_jrec_size, count;

	switch(jpc->region->dyn.addr->acc_meth)
	{
	case dba_mm:
	case dba_bg:
		csa = &FILE_INFO(jpc->region)->s_addrs;
		break;
	default:
		GTMASSERT;
	}
	csd = csa->hdr;
	assert(csa == cs_addrs && csd == cs_data);
	assert(csa->now_crit || (csd->clustered && (CCST_CLOSED == csa->nl->ccp_state)));
	assert(&FILE_INFO(jpc->region)->s_addrs == csa);
	assert(csa->jnl_state == csd->jnl_state);
	if (!JNL_ENABLED(csa) || (NOJNL == jpc->channel) || (JNL_FILE_SWITCHED(jpc)))
		GTMASSERT;	/* crit and messing with the journal file - how could it have vanished? */
	if (!csd->jnl_deq)
	{
		assert(DIVIDE_ROUND_UP(total_jnl_rec_size, DISK_BLOCK_SIZE) <= csd->jnl_alq);
		assert(csd->jnl_alq == csd->autoswitchlimit);
		new_blocks = csd->jnl_alq;
	} else
		/* May cause extension of csd->jnl_deq * n blocks where n > 0 */
		new_blocks = ROUND_UP(DIVIDE_ROUND_UP(total_jnl_rec_size, DISK_BLOCK_SIZE), csd->jnl_deq);
	jpc->status = SS_NORMAL;
	jb = jpc->jnl_buff;
	assert(0 <= new_blocks);
	DEBUG_ONLY(count = 0);
	for (need_extend = (0 != new_blocks); need_extend; )
	{
		DEBUG_ONLY(count++);
		/* Usually we will do the loop just once, doing the file extension.
		 * Rarely we might need to do an autoswitch instead, after which (again rarely)
		 * we might need to extend the new journal file to fit in the transaction's journal requirements.
		 * Therefore this loop executes at most twice, hence the assert below.
		 */
		assert(count <= 2);
		need_extend = FALSE;
		if (SS_NORMAL == (status = disk_block_available(jpc->channel, &avail_blocks, TRUE)))
		{
			if ((new_blocks * EXTEND_WARNING_FACTOR) > avail_blocks)
			{
				if (new_blocks > avail_blocks)
				{	/* if we cannot satisfy the request, it is an error */
					send_msg(VARLSTCNT(6) ERR_NOSPACEEXT, 4, JNL_LEN_STR(csd),
						new_blocks, avail_blocks);
					new_blocks = 0;
					jpc->status = SS_NORMAL;
					break;
				} else
					send_msg(VARLSTCNT(5) ERR_DSKSPACEFLOW, 3, JNL_LEN_STR(csd),
						(avail_blocks - new_blocks));
			}
		} else
			send_msg(VARLSTCNT(5) ERR_JNLFILEXTERR, 2, JNL_LEN_STR(csd), status);
		new_alq = jb->filesize + new_blocks;
		/* ensure current journal file size is well within autoswitchlimit --> design constraint */
		assert(csd->autoswitchlimit >= jb->filesize);
		if (csd->autoswitchlimit < (jb->filesize + (EXTEND_WARNING_FACTOR * new_blocks)))	/* close to max */
			send_msg(VARLSTCNT(5) ERR_JNLSPACELOW, 3, JNL_LEN_STR(csd), csd->autoswitchlimit - jb->filesize);
		if (csd->autoswitchlimit < new_alq)
		{	/* Reached max, need to autoswitch */
			/* Ensure new journal file can hold the entire current transaction's journal record requirements */
			assert(csd->autoswitchlimit >= MAX_REQD_JNL_FILE_SIZE(total_jnl_rec_size));
			memset(&jnl_info, 0, sizeof(jnl_info));
			jnl_info.prev_jnl = &prev_jnl_fn[0];
			set_jnl_info(gv_cur_region, &jnl_info);
			assert(JNL_ENABLED(csa) && (NOJNL != jpc->channel) && !(JNL_FILE_SWITCHED(jpc)));
			jnl_status = jnl_ensure_open();
			if (0 == jnl_status)
			{	/* flush the cache and jnl-buffer-contents to current journal file before
				 * switching to a new journal. */
				wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH);
				jnl_file_close(gv_cur_region, TRUE, TRUE);
				assert((dba_mm == cs_data->acc_meth) || (csd == cs_data));
				csd = cs_data;	/* In MM, wcs_flu() can remap an extended DB, so reset csd to be sure */
			} else
				rts_error(VARLSTCNT(7) jnl_status, 4, JNL_LEN_STR(csd), DB_LEN_STR(gv_cur_region), jpc->status);
			assert(!jgbl.forw_phase_recovery || (NULL != jgbl.mur_pini_addr_reset_fnptr));
			if (jgbl.forw_phase_recovery && (NULL != jgbl.mur_pini_addr_reset_fnptr))
				(*jgbl.mur_pini_addr_reset_fnptr)();
			assert(!jnl_info.no_rename);
			assert(!jnl_info.no_prev_link);
			if (EXIT_NRM == cre_jnl_file(&jnl_info))
			{
				assert(0 == memcmp(csd->jnl_file_name, jnl_info.jnl, jnl_info.jnl_len));
				assert(csd->jnl_file_name[jnl_info.jnl_len] == '\0');
				assert(csd->jnl_file_len == jnl_info.jnl_len);
				assert(csd->jnl_buffer_size == jnl_info.buffer);
				assert(csd->jnl_alq == jnl_info.alloc);
				assert(csd->jnl_deq == jnl_info.extend);
				assert(csd->jnl_before_image == jnl_info.before_images);
				csd->trans_hist.header_open_tn = jnl_info.tn;	/* needed for successful jnl_file_open() */
				csd->jnl_checksum = jnl_info.checksum;
				send_msg(VARLSTCNT(4) ERR_NEWJNLFILECREAT, 2, JNL_LEN_STR(csd));
				fc = gv_cur_region->dyn.addr->file_cntl;
				fc->op = FC_WRITE;
				fc->op_buff = (sm_uc_ptr_t)csd;
				fc->op_len = SGMNT_HDR_LEN;
				fc->op_pos = 1;
				status = dbfilop(fc);
				if (SS_NORMAL != status)
					send_msg(VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), status);
				assert(JNL_ENABLED(csa));
				/* call jnl_ensure_open instead of jnl_file_open to make sure jpc->pini_addr is set to 0 */
				jnl_status = jnl_ensure_open();	/* sets jpc->status */
				if (0 != jnl_status)
					rts_error(VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(csd), DB_LEN_STR(gv_cur_region));
				assert(jb->filesize == csd->jnl_alq);
				aligned_tot_jrec_size = ALIGNED_ROUND_UP(MAX_REQD_JNL_FILE_SIZE(total_jnl_rec_size),
										csd->jnl_alq, csd->jnl_deq);
				if (aligned_tot_jrec_size > csd->jnl_alq)
				{	/* need to extend more than initial allocation in the new journal file
					 * to accommodate the current transaction.
					 */
					new_blocks = aligned_tot_jrec_size - csd->jnl_alq;
					assert(new_blocks);
					assert(0 == new_blocks % csd->jnl_deq);
					need_extend = TRUE;
				}
			} else
			{
				send_msg(VARLSTCNT(4) ERR_JNLCREATERR, 2, JNL_LEN_STR(csd));
				jpc->status = ERR_JNLNOCREATE;
				new_blocks = -1;
			}
		} else
		{
			assert(!need_extend);	/* ensure we won't go through the for loop again */
			/* Virtually extend currently used journal file */
			header = (jnl_file_header *)(ROUND_UP2((uintszofptr_t)hdr_buff, DISK_BLOCK_SIZE));
			jb->filesize = new_alq;	/* Actually this is the virtual file size in blocks */
			DO_FILE_READ(jpc->channel, 0, header, JNL_HDR_LEN, jpc->status, jpc->status2);
			if (SS_NORMAL != jpc->status)
			{
				assert(FALSE);
				rts_error(VARLSTCNT(5) ERR_JNLRDERR, 2, JNL_LEN_STR(csd), jpc->status);
			}
			assert((header->virtual_size + new_blocks) == new_alq);
			header->virtual_size = new_alq;
			DO_FILE_WRITE(jpc->channel, 0, header, JNL_HDR_LEN, jpc->status, jpc->status2);
			if (SS_NORMAL != jpc->status)
			{
				assert(FALSE);
				rts_error(VARLSTCNT(5) ERR_JNLWRERR, 2, JNL_LEN_STR(csd), jpc->status);
			}
		}
		if (0 >= new_blocks)
			break;
	}
	if (0 >= new_blocks)
	{
		jpc->status = ERR_JNLREADEOF;
		jnl_file_lost(jpc, ERR_JNLEXTEND);
		new_blocks = -1;
	}
	return new_blocks;
}
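The extension size above is computed in two rounding steps: the transaction's journal requirement is first converted to disk blocks, then rounded up to a whole number of extension quanta (csd->jnl_deq). The standalone sketch below illustrates that arithmetic; the macro bodies and example values are assumed equivalents of GT.M's rounding helpers, written here for illustration only, not copied from the GT.M headers.

/* Sketch of the new_blocks computation in jnl_file_extend(). Macro
 * definitions and values below are illustrative assumptions.
 */
#include <stdio.h>

#define DISK_BLOCK_SIZE		512			/* assumed disk block size */
#define DIVIDE_ROUND_UP(x, y)	(((x) + (y) - 1) / (y))
#define ROUND_UP(x, y)		(DIVIDE_ROUND_UP(x, y) * (y))

int main(void)
{
	unsigned int total_jnl_rec_size = 70000;	/* bytes the transaction needs (example) */
	unsigned int jnl_deq = 100;			/* extension quantum in blocks (example) */
	unsigned int blocks = DIVIDE_ROUND_UP(total_jnl_rec_size, DISK_BLOCK_SIZE);
	unsigned int new_blocks = ROUND_UP(blocks, jnl_deq);

	/* 70000 bytes -> 137 blocks -> 200 blocks (two 100-block extension units) */
	printf("blocks=%u new_blocks=%u\n", blocks, new_blocks);
	return 0;
}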
Example #2
0
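/* Periodic poll work for the receiver server: detect update-process death or
 * shutdown, send REPL_XOFF_ACK_ME and drain the replication pipe when required,
 * send a REPL_BADTRANS or REPL_CMP2UNCMP message, restart the update process if
 * requested, and act on changelog, statslog and helper-process requests.
 * Returns CONTINUE_POLL or STOP_POLL.
 */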
int gtmrecv_poll_actions1(int *pending_data_len, int *buff_unprocessed, unsigned char *buffp)
{
	static int		report_cnt = 1;
	static int		next_report_at = 1;
	static boolean_t	send_xoff = FALSE;
	static boolean_t	xoff_sent = FALSE;
	static seq_num		send_seqno;
	static boolean_t	log_draining_msg = FALSE;
	static boolean_t	send_badtrans = FALSE;
	static boolean_t	send_cmp2uncmp = FALSE;
	static boolean_t	upd_shut_too_early_logged = FALSE;
	static time_t		last_reap_time = 0;
	repl_msg_t		xoff_msg;
	repl_badtrans_msg_t	bad_trans_msg;
	boolean_t		alert = FALSE, info = FALSE;
	int			return_status;
	gd_region		*region_top;
	unsigned char		*msg_ptr;				/* needed for REPL_{SEND,RECV}_LOOP */
	int			tosend_len, sent_len, sent_this_iter;	/* needed for REPL_SEND_LOOP */
	int			torecv_len, recvd_len, recvd_this_iter;	/* needed for REPL_RECV_LOOP */
	int			status;					/* needed for REPL_{SEND,RECV}_LOOP */
	int			temp_len, pending_msg_size;
	int			upd_start_status, upd_start_attempts;
	int			buffered_data_len;
	int			upd_exit_status;
	seq_num			temp_send_seqno;
	boolean_t		bad_trans_detected = FALSE, onln_rlbk_flg_set = FALSE;
	uint4			jnl_status;
	recvpool_ctl_ptr_t	recvpool_ctl;
	upd_proc_local_ptr_t	upd_proc_local;
	gtmrecv_local_ptr_t	gtmrecv_local;
	upd_helper_ctl_ptr_t	upd_helper_ctl;
	pid_t			waitpid_res;
	int4			msg_type, msg_len;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	recvpool_ctl = recvpool.recvpool_ctl;
	upd_proc_local = recvpool.upd_proc_local;
	gtmrecv_local = recvpool.gtmrecv_local;
	upd_helper_ctl = recvpool.upd_helper_ctl;
	jnl_status = 0;
	if (SHUTDOWN == gtmrecv_local->shutdown)
	{
		repl_log(gtmrecv_log_fp, TRUE, TRUE, "Shutdown signalled\n");
		gtmrecv_end(); /* Won't return */
	}
	/* Reset report_cnt and next_report_at to 1 when a new upd proc is forked */
	if ((1 == report_cnt) || (report_cnt == next_report_at))
	{
		/* A comment on the usage of NO_SHUTDOWN below for the alert variable. Since upd_proc_local->upd_proc_shutdown is
		 * a shared memory field (and could be concurrently changed by either the receiver server or the update process),
		 * we want to make sure it is the same value BEFORE and AFTER checking whether the update process is alive or not.
		 * If it is not NO_SHUTDOWN (i.e. is SHUTDOWN or NORMAL_SHUTDOWN or ABNORMAL_SHUTDOWN), it has shut down due to
		 * an external request, so we do not want to send out a false update-process-is-not-alive alert.
		 */
		if ((alert = ((NO_SHUTDOWN == upd_proc_local->upd_proc_shutdown) && (SRV_DEAD == is_updproc_alive())
				&& (NO_SHUTDOWN == upd_proc_local->upd_proc_shutdown)))
			|| (info = (((NORMAL_SHUTDOWN == upd_proc_local->upd_proc_shutdown)
				|| (ABNORMAL_SHUTDOWN == upd_proc_local->upd_proc_shutdown)) && (SRV_DEAD == is_updproc_alive()))))
		{
			if (alert)
				repl_log(gtmrecv_log_fp, TRUE, TRUE,
					"ALERT : Receiver Server detected that Update Process is not ALIVE\n");
			else
				repl_log(gtmrecv_log_fp, TRUE, TRUE,
					"INFO : Update process not running due to user initiated shutdown\n");
			if (1 == report_cnt)
			{
				send_xoff = TRUE;
				QWASSIGN(recvpool_ctl->old_jnl_seqno, recvpool_ctl->jnl_seqno);
				QWASSIGNDW(recvpool_ctl->jnl_seqno, 0);
				/* Even though we have identified that the update process is NOT alive, a waitpid on the update
				 * process PID is necessary so that the system doesn't leave any zombie process lying around.
				 * This is possible since any child process that dies without the parent doing a waitpid on it
				 * will be defunct unless the parent dies at which point the "init" process takes the role of
				 * the parent and invokes waitpid to remove the zombies.
				 * NOTE: It is possible that the update process was killed before the receiver server got a
				 * chance to record its PID in the recvpool.upd_proc_local structure. In such a case, don't
				 * invoke waitpid as that will block us (receiver server) if this instance of the receiver
				 * server was started with helper processes.
				 */
				if (0 < upd_proc_local->upd_proc_pid)
				{
					WAITPID(upd_proc_local->upd_proc_pid, &upd_exit_status, 0, waitpid_res);
					/* Since the update process as part of its shutdown does NOT reset the upd_proc_pid, reset
					 * it here ONLY if the update process was NOT kill -9ed. This is needed because receiver
					 * server as part of its shutdown relies on this field (upd_proc_pid) to determine if the
					 * update process was cleanly shutdown or was kill -9ed.
					 */
					if (!alert)
						upd_proc_local->upd_proc_pid = 0;
				}
				upd_proc_local->bad_trans = FALSE; /* No point in doing bad transaction processing */
				upd_proc_local->onln_rlbk_flg = FALSE; /* No point handling online rollback */
			}
			gtmrecv_wait_for_jnl_seqno = TRUE;
			REPL_DPRINT1(
			       "gtmrecv_poll_actions : Setting gtmrecv_wait_for_jnl_seqno to TRUE because of upd crash/shutdown\n");
			next_report_at *= GTMRECV_NEXT_REPORT_FACTOR;
			report_cnt++;
		}
	} else
		report_cnt++;
	/* Check if REPL_CMP2UNCMP or REPL_BADTRANS message needs to be sent */
	if (upd_proc_local->onln_rlbk_flg)
	{	/* Update process detected an online rollback and is requesting us to restart the connection. But before that, send
		 * REPL_XOFF source side and drain the replication pipe
		 */
		onln_rlbk_flg_set = TRUE;
		send_xoff = TRUE;
	}
	else if (!send_cmp2uncmp && gtmrecv_send_cmp2uncmp)
	{
		send_xoff = TRUE;
		send_seqno = recvpool_ctl->jnl_seqno;
		send_cmp2uncmp = TRUE;
	} else if (!send_badtrans && upd_proc_local->bad_trans)
	{
		send_xoff = TRUE;
		send_seqno = upd_proc_local->read_jnl_seqno;
		send_badtrans = TRUE;
		bad_trans_detected = TRUE;
	} else if (!upd_proc_local->bad_trans && send_badtrans && 1 != report_cnt)
	{
		send_badtrans = FALSE;
		bad_trans_detected = FALSE;
	}
	if (send_xoff && !xoff_sent)
	{	/* Send XOFF_ACK_ME if the receiver has a connection to the source. Do not attempt to send it if we don't even
		 * know the endianness of the remote side. In that case, we are guaranteed no initial handshake occurred and
		 * so no point sending the XOFF too. This saves us lots of trouble in case of cross-endian replication connections.
		 */
		assert((FD_INVALID  != gtmrecv_sock_fd) || repl_connection_reset);
		if ((FD_INVALID != gtmrecv_sock_fd) && remote_side->endianness_known)
		{
			send_seqno = upd_proc_local->read_jnl_seqno;
			if (!remote_side->cross_endian)
			{
				xoff_msg.type = REPL_XOFF_ACK_ME;
				xoff_msg.len = MIN_REPL_MSGLEN;
				memcpy((uchar_ptr_t)&xoff_msg.msg[0], (uchar_ptr_t)&send_seqno, SIZEOF(seq_num));
			} else
			{
				xoff_msg.type = GTM_BYTESWAP_32(REPL_XOFF_ACK_ME);
				xoff_msg.len = GTM_BYTESWAP_32(MIN_REPL_MSGLEN);
				temp_send_seqno = GTM_BYTESWAP_64(send_seqno);
				memcpy((uchar_ptr_t)&xoff_msg.msg[0], (uchar_ptr_t)&temp_send_seqno, SIZEOF(seq_num));
			}
			REPL_SEND_LOOP(gtmrecv_sock_fd, &xoff_msg, MIN_REPL_MSGLEN, FALSE, &gtmrecv_poll_immediate)
				; /* Empty Body */
			if (SS_NORMAL != status)
			{
				if (REPL_CONN_RESET(status) && EREPL_SEND == repl_errno)
				{
					repl_log(gtmrecv_log_fp, TRUE, TRUE, "Connection reset while sending XOFF_ACK_ME. "
							"Status = %d ; %s\n", status, STRERROR(status));
					repl_close(&gtmrecv_sock_fd);
					repl_connection_reset = TRUE;
					xoff_sent = FALSE;
					send_badtrans = FALSE;
				} else if (EREPL_SEND == repl_errno)
					rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
						RTS_ERROR_LITERAL("Error sending XOFF msg due to BAD_TRANS or UPD crash/shutdown. "
								"Error in send"), status);
				else
				{
					assert(EREPL_SELECT == repl_errno);
					rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
						RTS_ERROR_LITERAL("Error sending XOFF msg due to BAD_TRANS or UPD crash/shutdown. "
								"Error in select"), status);
				}
			} else
			{
				xoff_sent = TRUE;
				log_draining_msg = TRUE;
			}
			repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL_XOFF_ACK_ME sent due to upd shutdown/crash or bad trans "
					"or ONLINE_ROLLBACK\n");
			send_xoff = FALSE;
		} else
		{	/* Connection has been lost OR initial handshake needs to happen again, so no point sending XOFF/BADTRANS */
			send_xoff = FALSE;
			send_badtrans = FALSE;
		}
	}
	/* Drain pipe */
	if (xoff_sent)
	{
		if (log_draining_msg)
		{	/* avoid multiple logs per instance */
			repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL INFO - Draining replication pipe due to %s\n",
					send_cmp2uncmp ? "CMP2UNCMP" : (send_badtrans ? "BAD_TRANS" :
							(onln_rlbk_flg_set ? "ONLINE_ROLLBACK" : "UPD shutdown/crash")));
			log_draining_msg = FALSE;
		}
		if (0 != *buff_unprocessed)
		{	/* Throw away the current contents of the buffer */
			buffered_data_len = ((*pending_data_len <= *buff_unprocessed) ? *pending_data_len : *buff_unprocessed);
			*buff_unprocessed -= buffered_data_len;
			buffp += buffered_data_len;
			*pending_data_len -= buffered_data_len;
			REPL_DPRINT2("gtmrecv_poll_actions : (1) Throwing away %d bytes from old buffer while draining\n",
				buffered_data_len);
			assert(remote_side->endianness_known);	/* only then is remote_side->cross_endian reliable */
			while (REPL_MSG_HDRLEN <= *buff_unprocessed)
			{
				assert(0 == (((unsigned long)buffp) % REPL_MSG_ALIGN));
				msg_len = ((repl_msg_ptr_t)buffp)->len;
				msg_type = ((repl_msg_ptr_t)buffp)->type;
				if (remote_side->cross_endian)
				{
					msg_len = GTM_BYTESWAP_32(msg_len);
					msg_type = GTM_BYTESWAP_32(msg_type);
				}
				msg_type = (msg_type & REPL_TR_CMP_MSG_TYPE_MASK);
				assert((REPL_TR_CMP_JNL_RECS == msg_type) || (0 == (msg_len % REPL_MSG_ALIGN)));
				*pending_data_len = ROUND_UP2(msg_len, REPL_MSG_ALIGN);
				buffered_data_len = ((*pending_data_len <= *buff_unprocessed) ?
								*pending_data_len : *buff_unprocessed);
				*buff_unprocessed -= buffered_data_len;
				buffp += buffered_data_len;
				*pending_data_len -= buffered_data_len;
				REPL_DPRINT3("gtmrecv_poll_actions : (1) Throwing away message of "
					"type %d and length %d from old buffer while draining\n", msg_type, buffered_data_len);
			}
			if (0 < *buff_unprocessed)
			{
				memmove((unsigned char *)gtmrecv_msgp, buffp, *buff_unprocessed);
				REPL_DPRINT2("gtmrecv_poll_actions : Incomplete header of length %d while draining\n",
					*buff_unprocessed);
			}
		}
		status = SS_NORMAL;
		if (0 != *buff_unprocessed || 0 == *pending_data_len)
		{	/* Receive the header of a message */
			assert(REPL_MSG_HDRLEN > *buff_unprocessed);	/* so we don't pass negative length in REPL_RECV_LOOP */
			REPL_RECV_LOOP(gtmrecv_sock_fd, ((unsigned char *)gtmrecv_msgp) + *buff_unprocessed,
				       (REPL_MSG_HDRLEN - *buff_unprocessed), FALSE, &gtmrecv_poll_interval)
				; /* Empty Body */
			if (SS_NORMAL == status)
			{
				assert(remote_side->endianness_known);	/* only then is remote_side->cross_endian reliable */
				if (!remote_side->cross_endian)
				{
					msg_len = gtmrecv_msgp->len;
					msg_type = gtmrecv_msgp->type;
				} else
				{
					msg_len = GTM_BYTESWAP_32(gtmrecv_msgp->len);
					msg_type = GTM_BYTESWAP_32(gtmrecv_msgp->type);
				}
				msg_type = (msg_type & REPL_TR_CMP_MSG_TYPE_MASK);
				assert((REPL_TR_CMP_JNL_RECS == msg_type) || (0 == (msg_len % REPL_MSG_ALIGN)));
				msg_len = ROUND_UP2(msg_len, REPL_MSG_ALIGN);
				REPL_DPRINT3("gtmrecv_poll_actions : Received message of type %d and length %d while draining\n",
					msg_type, msg_len);
			}
		}
		if ((SS_NORMAL == status) && (0 != *buff_unprocessed || 0 == *pending_data_len) && (REPL_XOFF_ACK == msg_type))
		{	/* Receive the rest of the XOFF_ACK msg and signal the drain as complete */
			REPL_RECV_LOOP(gtmrecv_sock_fd, gtmrecv_msgp, (MIN_REPL_MSGLEN - REPL_MSG_HDRLEN), FALSE,
					&gtmrecv_poll_interval)
				; /* Empty Body */
			if (SS_NORMAL == status)
			{
				repl_log(gtmrecv_log_fp, TRUE, TRUE,
						"REPL INFO - XOFF_ACK received. Drained replication pipe completely\n");
				upd_shut_too_early_logged = FALSE;
				xoff_sent = FALSE;
				return_status = STOP_POLL;
			}
		} else if (SS_NORMAL == status)
		{	/* Drain the rest of the message */
			if (0 < *pending_data_len)
			{
				pending_msg_size = *pending_data_len;
				REPL_DPRINT2("gtmrecv_poll_actions : (2) Throwing away %d bytes from pipe\n", pending_msg_size);
			} else
			{
				pending_msg_size = msg_len - REPL_MSG_HDRLEN;
				REPL_DPRINT3("gtmrecv_poll_actions : (2) Throwing away message of "
					"type %d and length %d from pipe\n", msg_type, msg_len);
			}
			for ( ; SS_NORMAL == status && 0 < pending_msg_size; pending_msg_size -= gtmrecv_max_repl_msglen)
			{
				temp_len = (pending_msg_size < gtmrecv_max_repl_msglen)? pending_msg_size : gtmrecv_max_repl_msglen;
				REPL_RECV_LOOP(gtmrecv_sock_fd, gtmrecv_msgp, temp_len, FALSE, &gtmrecv_poll_interval)
					; /* Empty Body */
			}
			*buff_unprocessed = 0; *pending_data_len = 0;
			if (SS_NORMAL == status && info && !upd_shut_too_early_logged)
			{
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "ALERT : User initiated shutdown of Update Process done "
						"when there was data in the replication pipe\n");
				upd_shut_too_early_logged = TRUE;
			}
			return_status = CONTINUE_POLL;
		}
		if (SS_NORMAL != status)
		{
			if (EREPL_RECV == repl_errno)
			{
				if (REPL_CONN_RESET(status))
				{
					repl_log(gtmrecv_log_fp, TRUE, TRUE, "Connection reset while receiving XOFF_ACK. "
							"Status = %d ; %s\n", status, STRERROR(status));
					repl_close(&gtmrecv_sock_fd);
					repl_connection_reset = TRUE;
					xoff_sent = FALSE;
					send_badtrans = FALSE;
					return_status = STOP_POLL;
				} else
					rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
						RTS_ERROR_LITERAL("Error while draining replication pipe. Error in recv"), status);
			} else
			{
				assert(EREPL_SELECT == repl_errno);
				rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
					RTS_ERROR_LITERAL("Error while draining replication pipe. Error in select"), status);
			}
		}
	} else
		return_status = STOP_POLL;
	/* Like was done before for the XOFF_ACK_ME message, send a BADTRANS/CMP2UNCMP message only if we know
	 * the endianness of the other side. If not, no point in sending one anyways and saves us trouble in
	 * case of cross-endian replication connections.
	 */
	if ((STOP_POLL == return_status) && (send_badtrans || send_cmp2uncmp)
		&& (FD_INVALID != gtmrecv_sock_fd) && remote_side->endianness_known)
	{	/* Send REPL_BADTRANS or REPL_CMP2UNCMP message */
		if (!remote_side->cross_endian)
		{
			bad_trans_msg.type = send_cmp2uncmp ? REPL_CMP2UNCMP : REPL_BADTRANS;
			bad_trans_msg.len  = MIN_REPL_MSGLEN;
			bad_trans_msg.start_seqno = send_seqno;
		} else
		{
			bad_trans_msg.type = send_cmp2uncmp ? GTM_BYTESWAP_32(REPL_CMP2UNCMP) : GTM_BYTESWAP_32(REPL_BADTRANS);
			bad_trans_msg.len  = GTM_BYTESWAP_32(MIN_REPL_MSGLEN);
			bad_trans_msg.start_seqno = GTM_BYTESWAP_64(send_seqno);
		}
		/* Use the host-order length here: bad_trans_msg.len may have been byte-swapped above */
		REPL_SEND_LOOP(gtmrecv_sock_fd, &bad_trans_msg, MIN_REPL_MSGLEN, FALSE, &gtmrecv_poll_immediate)
			; /* Empty Body */
		if (SS_NORMAL == status)
		{
			if (send_cmp2uncmp)
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL_CMP2UNCMP message sent with seqno %llu\n", send_seqno);
			else
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL_BADTRANS message sent with seqno %llu\n", send_seqno);
		} else
		{
			if (REPL_CONN_RESET(status) && EREPL_SEND == repl_errno)
			{
				if (send_cmp2uncmp)
				{
					repl_log(gtmrecv_log_fp, TRUE, TRUE, "Connection reset while sending REPL_CMP2UNCMP. "
							"Status = %d ; %s\n", status, STRERROR(status));
				} else
				{
					repl_log(gtmrecv_log_fp, TRUE, TRUE, "Connection reset while sending REPL_BADTRANS. "
							"Status = %d ; %s\n", status, STRERROR(status));
				}
				repl_close(&gtmrecv_sock_fd);
				repl_connection_reset = TRUE;
				return_status = STOP_POLL;
			} else if (EREPL_SEND == repl_errno)
				rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
					RTS_ERROR_LITERAL("Error sending REPL_BADTRANS/REPL_CMP2UNCMP. Error in send"), status);
			else
			{
				assert(EREPL_SELECT == repl_errno);
				rts_error(VARLSTCNT(7) ERR_REPLCOMM, 0, ERR_TEXT, 2,
					RTS_ERROR_LITERAL("Error sending REPL_BADTRANS/REPL_CMP2UNCMP. Error in select"), status);
			}
		}
		send_badtrans = FALSE;
		if (send_cmp2uncmp)
		{
			REPL_DPRINT1("gtmrecv_poll_actions : Setting gtmrecv_wait_for_jnl_seqno to TRUE because this receiver"
				"server requested a fall-back from compressed to uncompressed operation\n");
			gtmrecv_wait_for_jnl_seqno = TRUE;/* set this to TRUE to break out and go back to a fresh "do_main_loop" */
			gtmrecv_bad_trans_sent = TRUE;
			gtmrecv_send_cmp2uncmp = FALSE;
			send_cmp2uncmp = FALSE;
		}
	}
	if ((upd_proc_local->bad_trans && bad_trans_detected) || onln_rlbk_flg_set
		|| ((UPDPROC_START == upd_proc_local->start_upd) && (1 != report_cnt)))
	{
		if (UPDPROC_START == upd_proc_local->start_upd)
		{
			assert(is_updproc_alive() != SRV_ALIVE);
			upd_proc_local->upd_proc_shutdown = NO_SHUTDOWN;
		}
		recvpool_ctl->wrapped = FALSE;
		recvpool_ctl->write_wrap = recvpool_ctl->recvpool_size;
		recvpool_ctl->write = 0;
		/* Reset last_rcvd_histinfo, last_valid_histinfo etc. as they reflect context from unprocessed data
		 * in the receive pool and those are no longer valid because we have drained the receive pool.
		 */
		GTMRECV_CLEAR_CACHED_HISTINFO(recvpool.recvpool_ctl, jnlpool, jnlpool_ctl, INSERT_STRM_HISTINFO_FALSE);
		if (UPDPROC_START == upd_proc_local->start_upd)
		{
			/* Attempt starting the update process */
			for (upd_start_attempts = 0;
			     UPDPROC_START_ERR == (upd_start_status = gtmrecv_upd_proc_init(FALSE)) &&
			     GTMRECV_MAX_UPDSTART_ATTEMPTS > upd_start_attempts;
			     upd_start_attempts++)
			{
				if (EREPL_UPDSTART_SEMCTL == repl_errno || EREPL_UPDSTART_BADPATH == repl_errno)
				{
					gtmrecv_autoshutdown();
				} else if (EREPL_UPDSTART_FORK == repl_errno)
				{
					/* Couldn't start up update now, can try later */
					LONG_SLEEP(GTMRECV_WAIT_FOR_PROC_SLOTS);
					continue;
				} else if (EREPL_UPDSTART_EXEC == repl_errno)
				{
					/* In forked child, could not exec, should exit */
					gtmrecv_exit(ABNORMAL_SHUTDOWN);
				}
			}
			if (UPDPROC_STARTED == (upd_proc_local->start_upd = upd_start_status))
			{
				REPL_DPRINT1("gtmrecv_poll_actions : Setting gtmrecv_wait_for_jnl_seqno to TRUE because of "
					     "upd restart\n");
				gtmrecv_wait_for_jnl_seqno = TRUE;
				report_cnt = next_report_at = 1;
				if (send_xoff && (FD_INVALID == gtmrecv_sock_fd))
				{
					/* Update start command was issued before connection was established,
					 * no point in sending XOFF.  */
					send_xoff = FALSE;
				}
			} else
			{
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "%d failed attempts to fork update process. Try later\n",
					 upd_start_attempts);
			}
		} else
		{
			gtmrecv_wait_for_jnl_seqno = TRUE;/* set this to TRUE to break out and go back to a fresh "do_main_loop" */
			if (onln_rlbk_flg_set)
			{
				assert(NULL != jnlpool_ctl);
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "Closing connection due to ONLINE ROLLBACK\n");
 				repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL INFO - Current Jnlpool Seqno : %llu\n",
 						jnlpool_ctl->jnl_seqno);
				repl_log(gtmrecv_log_fp, TRUE, TRUE, "REPL INFO - Current Receive Pool Seqno : %llu\n",
						recvpool_ctl->jnl_seqno);
				repl_close(&gtmrecv_sock_fd);
				repl_connection_reset = TRUE;
				xoff_sent = FALSE;
				send_badtrans = FALSE;
				upd_proc_local->onln_rlbk_flg = FALSE;
				/* Before restarting afresh, sync the online rollback cycles. This way any future grab_lock that
				 * we do after restarting should not realize an unhandled online rollback.  For receiver, it is
				 * just syncing the journal pool cycles as the databases are not opened. But, to be safe, grab
				 * the lock and sync the cycles.
				 */
				GRAB_LOCK(jnlpool.jnlpool_dummy_reg, GRAB_LOCK_ONLY);
				SYNC_ONLN_RLBK_CYCLES;
				rel_lock(jnlpool.jnlpool_dummy_reg);
				return_status = STOP_POLL;
				recvpool_ctl->jnl_seqno = 0;
			} else
			{
				REPL_DPRINT1("gtmrecv_poll_actions : Setting gtmrecv_wait_for_jnl_seqno to TRUE because bad trans"
						"sent\n");
				gtmrecv_bad_trans_sent = TRUE;
				upd_proc_local->bad_trans = FALSE;
				recvpool_ctl->jnl_seqno = upd_proc_local->read_jnl_seqno;
			}
		}
	}
	if ((0 == *pending_data_len) && (0 != gtmrecv_local->changelog))
	{
		if (gtmrecv_local->changelog & REPLIC_CHANGE_LOGINTERVAL)
		{
			repl_log(gtmrecv_log_fp, TRUE, TRUE, "Changing log interval from %u to %u\n",
					log_interval, gtmrecv_local->log_interval);
			log_interval = gtmrecv_local->log_interval;
			gtmrecv_reinit_logseqno(); /* will force a LOG on the first recv following the interval change */
		}
		if (gtmrecv_local->changelog & REPLIC_CHANGE_LOGFILE)
		{
			repl_log(gtmrecv_log_fp, TRUE, TRUE, "Changing log file to %s\n", gtmrecv_local->log_file);
			repl_log_init(REPL_GENERAL_LOG, &gtmrecv_log_fd, NULL, gtmrecv_local->log_file, NULL);
			repl_log_fd2fp(&gtmrecv_log_fp, gtmrecv_log_fd);
			repl_log(gtmrecv_log_fp, TRUE, TRUE, "Change log to %s successful\n",gtmrecv_local->log_file);
		}
		upd_proc_local->changelog = gtmrecv_local->changelog; /* Pass changelog request to the update process */
		/* NOTE: update process and receiver each ignore any setting specific to the other (REPLIC_CHANGE_UPD_LOGINTERVAL,
		 * REPLIC_CHANGE_LOGINTERVAL) */
		gtmrecv_local->changelog = 0;
	}
	if (0 == *pending_data_len && !gtmrecv_logstats && gtmrecv_local->statslog)
	{
		gtmrecv_logstats = TRUE;
		repl_log_init(REPL_STATISTICS_LOG, &gtmrecv_log_fd, &gtmrecv_statslog_fd, gtmrecv_local->log_file,
				gtmrecv_local->statslog_file);
		repl_log_fd2fp(&gtmrecv_statslog_fp, gtmrecv_statslog_fd);
		repl_log(gtmrecv_log_fp, TRUE, TRUE, "Starting stats log to %s\n", gtmrecv_local->statslog_file);
		repl_log(gtmrecv_statslog_fp, TRUE, TRUE, "Begin statistics logging\n");
	} else if (0 == *pending_data_len && gtmrecv_logstats && !gtmrecv_local->statslog)
	{
		gtmrecv_logstats = FALSE;
		repl_log(gtmrecv_log_fp, TRUE, TRUE, "Stopping stats log\n");
		/* Force all data out to the file before closing the file */
		repl_log(gtmrecv_statslog_fp, TRUE, TRUE, "End statistics logging\n");
		CLOSEFILE_RESET(gtmrecv_statslog_fd, status);	/* resets "gtmrecv_statslog_fd" to FD_INVALID */
		/* We need to FCLOSE because a later open() in repl_log_init() might return the same file descriptor as the one
		 * that we just closed. In that case, FCLOSE done in repl_log_fd2fp() affects the newly opened file and
		 * FDOPEN will fail returning NULL for the file pointer. So, we close both the file descriptor and file pointer.
		 * Note the same problem does not occur with GENERAL LOG because the current log is kept open while opening
		 * the new log and hence the new file descriptor will be different (we keep the old log file open in case there
		 * are errors during DUPing. In such a case, we do not switch the log file, but keep the current one).
		 * We can FCLOSE the old file pointer later in repl_log_fd2fp() */
		FCLOSE(gtmrecv_statslog_fp, status);
		gtmrecv_statslog_fp = NULL;
	}
	if (0 == *pending_data_len)
	{
		if (upd_helper_ctl->start_helpers)
		{
			gtmrecv_helpers_init(upd_helper_ctl->start_n_readers, upd_helper_ctl->start_n_writers);
			upd_helper_ctl->start_helpers = FALSE;
		}
		if (HELPER_REAP_NONE != (status = upd_helper_ctl->reap_helpers) ||
			(double)GTMRECV_REAP_HELPERS_INTERVAL <= difftime(gtmrecv_now, last_reap_time))
		{
			gtmrecv_reap_helpers(HELPER_REAP_WAIT == status);
			last_reap_time = gtmrecv_now;
		}
	}
	return (return_status);
}
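Both the drain loop and the BADTRANS/CMP2UNCMP send above follow one rule: every multi-byte header field coming from (or going to) a cross-endian peer is byte-swapped before use. The standalone sketch below isolates the receive side of that rule; the struct and the macro are illustrative stand-ins for repl_msg_t and GTM_BYTESWAP_32, not the real definitions.

/* Minimal sketch of cross-endian header handling: swap the 4-byte type/len
 * fields only when the remote side's byte order differs. The types below are
 * stand-ins for the real repl_msg definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define BSWAP_32(x)	(((x) >> 24) | (((x) >> 8) & 0x0000ff00u) | \
			 (((x) << 8) & 0x00ff0000u) | ((x) << 24))

typedef struct
{
	uint32_t	type;
	uint32_t	len;
} msg_hdr;	/* illustrative stand-in for the repl_msg_t header */

static void read_hdr(const msg_hdr *m, int cross_endian, uint32_t *type, uint32_t *len)
{
	*type = m->type;
	*len = m->len;
	if (cross_endian)
	{	/* peer stores multi-byte fields in the opposite byte order */
		*type = BSWAP_32(*type);
		*len = BSWAP_32(*len);
	}
}

int main(void)
{
	msg_hdr		m = { 0x01000000u, 0x20000000u };	/* as written by a byte-swapped peer */
	uint32_t	type, len;

	read_hdr(&m, 1, &type, &len);
	printf("type=%u len=%u\n", type, len);	/* prints type=1 len=32 */
	return 0;
}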
Example #3
0
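/* Return the 'size'-byte block at 'mem' to the TCM heap: insert it into the
 * address-ordered free list, merging with the previous and/or following chunk
 * when they are contiguous.
 */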
void tcm_heap_freemem(void *mem, int size)
{
	MemChunk *prev;
	//ASSERT(mem);
	struct Heap* h = &g_tcm_heap;
	_irqL 	irqL;

	rtw_enter_critical(&tcm_lock, &irqL);	
	
	if(!g_heap_inited)	tcm_heap_init();

#ifdef _DEBUG
	memset(mem, FREE_FILL_CODE, size);
#endif

	/* Round size up to the allocation granularity */
	size = ROUND_UP2(size, sizeof(MemChunk));

	/* Handle allocations of 0 bytes */
	if (!size)
		size = sizeof(MemChunk);

	/* Special cases: first chunk in the free list or memory completely full */
	//ASSERT((uint8_t*)mem != (uint8_t*)h->FreeList);
	if (((uint8_t *)mem) < ((uint8_t *)h->FreeList) || !h->FreeList)
	{
		/* Insert memory block before the current free list head */
		prev = (MemChunk *)mem;
		prev->next = h->FreeList;
		prev->size = size;
		h->FreeList = prev;
	}
	else /* Normal case: not the first chunk in the free list */
	{
		/*
		 * Walk on the free list. Stop at the insertion point (when mem
		 * is between prev and prev->next)
		 */
		prev = h->FreeList;
		while (prev->next < (MemChunk *)mem && prev->next)
			prev = prev->next;

		/* Make sure mem is not *within* prev */
		//ASSERT((uint8_t*)mem >= (uint8_t*)prev + prev->size);

		/* Should it be merged with previous block? */
		if (((uint8_t *)prev) + prev->size == ((uint8_t *)mem))
		{
			/* Yes */
			prev->size += size;
		}
		else /* not merged with previous chunk */
		{
			MemChunk *curr = (MemChunk*)mem;

			/* insert it after the previous node
			 * and move the 'prev' pointer forward
			 * for the following operations
			 */
			curr->next = prev->next;
			curr->size = size;
			prev->next = curr;

			/* Adjust for the following test */
			prev = curr;
		}
	}

	/* Also merge with next chunk? */
	if (((uint8_t *)prev) + prev->size == ((uint8_t *)prev->next))
	{
		prev->size += prev->next->size;
		prev->next = prev->next->next;

		/* There should be only one merge opportunity, because we always merge on free */
		//ASSERT((uint8_t*)prev + prev->size != (uint8_t*)prev->next);
	}
	
	rtw_exit_critical(&tcm_lock, &irqL);	
	//printf("---FREE %x--\n\r", mem);
	//tcm_heap_dump();
	//printf("--------------\n\r");
	
}
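The free list above is kept sorted by address so a freed block can be coalesced in one pass: merge backward into prev when contiguous, then merge forward with prev->next. Below is a self-contained toy version of that insert-and-coalesce step; the chunk type and the pool in main() are illustrative assumptions, and the SDK's locking, debug fill and lazy init are omitted.

/* Toy address-ordered free list with backward/forward coalescing, mirroring the
 * structure of tcm_heap_freemem() above. Standalone and simplified.
 */
#include <stdint.h>
#include <stddef.h>

typedef struct chunk
{
	struct chunk	*next;
	size_t		size;		/* size of this free block in bytes */
} chunk;

static chunk *free_list;

static void toy_free(void *mem, size_t size)
{
	chunk *prev, *cur = (chunk *)mem;

	if (!free_list || (uint8_t *)mem < (uint8_t *)free_list)
	{	/* becomes the new head of the address-ordered list */
		cur->next = free_list;
		cur->size = size;
		free_list = cur;
		prev = cur;
	} else
	{	/* walk to the insertion point: prev < mem <= prev->next */
		for (prev = free_list; prev->next && (uint8_t *)prev->next < (uint8_t *)mem; )
			prev = prev->next;
		if ((uint8_t *)prev + prev->size == (uint8_t *)mem)
			prev->size += size;	/* contiguous: merge backward into prev */
		else
		{
			cur->next = prev->next;
			cur->size = size;
			prev->next = cur;
			prev = cur;		/* adjust for the forward-merge test */
		}
	}
	if ((uint8_t *)prev + prev->size == (uint8_t *)prev->next)
	{	/* contiguous with the next free block: merge forward */
		prev->size += prev->next->size;
		prev->next = prev->next->next;
	}
}

int main(void)
{
	static chunk pool[16];

	toy_free(pool, 8 * sizeof(chunk));	/* seed the free list */
	toy_free(pool + 8, 8 * sizeof(chunk));	/* contiguous: coalesces with the first block */
	return (16 * sizeof(chunk) == free_list->size) ? 0 : 1;
}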
Example #4
0
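/* First-fit allocation from the TCM heap: walk the free list for a chunk of at
 * least 'size' bytes, unlink it on an exact fit, otherwise carve the allocation
 * from the end of the chunk. Returns NULL when no chunk is large enough.
 */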
void *tcm_heap_allocmem(int size)
{
	MemChunk *chunk, *prev;
	struct Heap* h = &g_tcm_heap;
	_irqL 	irqL;

	rtw_enter_critical(&tcm_lock, &irqL);
	
	if(!g_heap_inited)	tcm_heap_init();

	/* Round size up to the allocation granularity */
	size = ROUND_UP2(size, sizeof(MemChunk));

	/* Handle allocations of 0 bytes */
	if (!size)
		size = sizeof(MemChunk);

	/* Walk on the free list looking for any chunk big enough to
	 * fit the requested block size.
	 */
	for (prev = (MemChunk *)&h->FreeList, chunk = h->FreeList;
		chunk;
		prev = chunk, chunk = chunk->next)
	{
		if (chunk->size >= size)
		{
			if (chunk->size == size)
			{
				/* Just remove this chunk from the free list */
				prev->next = chunk->next;
				#ifdef _DEBUG
					memset(chunk, ALLOC_FILL_CODE, size);
				#endif
				
				rtw_exit_critical(&tcm_lock, &irqL);
				//printf("----ALLOC1-----\n\r");
				//tcm_heap_dump();
				//printf("--------------\n\r");
				return (void *)chunk;
			}
			else
			{
				/* Allocate from the END of an existing chunk */
				chunk->size -= size;
				#ifdef _DEBUG
					memset((uint8_t *)chunk + chunk->size, ALLOC_FILL_CODE, size);
				#endif
				rtw_exit_critical(&tcm_lock, &irqL);
				//printf("----ALLOC2-----\n\r");
				//tcm_heap_dump();
				//printf("--------------\n\r");
				
				return (void *)((uint8_t *)chunk + chunk->size);
			}
		}
	}
	
	rtw_exit_critical(&tcm_lock, &irqL);
	//printf("----ALLOC3-----\n\r");
	//tcm_heap_dump();
	//printf("--------------\n\r");
	return NULL; /* fail */
}
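Note that this allocator stores no per-allocation header: tcm_heap_allocmem() returns a raw region, so the caller must remember the length and pass the same value back on free. A minimal usage sketch follows, assuming a header (name hypothetical) that declares the two functions above.

/* Usage sketch for the allocmem/freemem pair above. "tcm_heap.h" is a
 * hypothetical header name; the two signatures come from the code above.
 */
#include "tcm_heap.h"

void demo(void)
{
	int len = 128;
	void *buf = tcm_heap_allocmem(len);

	if (!buf)
		return;			/* no free chunk of at least 'len' bytes */
	/* ... use the buffer ... */
	tcm_heap_freemem(buf, len);	/* heap keeps no size header: pass 'len' back */
}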
Example #5
0
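/* Build a pseudo routine header plus generated code for direct mode (DM_MODE) or
 * call-in mode (CI_MODE): a single allocation laid out as routine header, code
 * buffer, line-number table and label table, with platform-specific call stubs
 * generated into the code buffer.
 */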
rhdtyp *make_mode (int mode_index)
{
	rhdtyp		*base_address;
	lab_tabent	*lbl;
	lnr_tabent	*lnr;
	CODEBUF_TYPE	*code;
	dyn_modes	*dmode;
	int algnd_rtnhdr_size = (int)ROUND_UP2(SIZEOF(rhdtyp), SECTION_ALIGN_BOUNDARY);
	int algnd_code_size   = (int)ROUND_UP2(CODE_SIZE, NATIVE_WSIZE);
	int algnd_lbltab_size = (int)ROUND_UP2(SIZEOF(lab_tabent), NATIVE_WSIZE);
	int algnd_lnrtab_size = (int)ROUND_UP2(CODE_LINES * SIZEOF(lnr_tabent), NATIVE_WSIZE);

	assert((DM_MODE == mode_index) || (CI_MODE == mode_index));
	base_address = (rhdtyp *)GTM_TEXT_ALLOC(algnd_rtnhdr_size + algnd_code_size + algnd_lbltab_size + algnd_lnrtab_size);
	memset(base_address, 0, algnd_rtnhdr_size + algnd_code_size + algnd_lbltab_size + algnd_lnrtab_size);
	dmode = &our_modes[mode_index];
	base_address->routine_name.len = dmode->rtn_name_len;
	base_address->routine_name.addr = dmode->rtn_name;
	base_address->ptext_adr = (unsigned char *)base_address + algnd_rtnhdr_size;
	base_address->ptext_end_adr = (unsigned char *)base_address->ptext_adr + algnd_code_size;
	base_address->lnrtab_adr = (lnr_tabent *)base_address->ptext_end_adr;
	base_address->labtab_adr = (lab_tabent *)((unsigned char *)base_address + algnd_rtnhdr_size +
						  algnd_code_size + algnd_lnrtab_size);
	base_address->lnrtab_len = CODE_LINES;
	base_address->labtab_len = 1;
	code = (CODEBUF_TYPE *)base_address->ptext_adr;	/* start of executable code */
#ifdef __ia64
	if (dyn_modes_type[mode_index][0] == 'C')
	{
		GEN_CALL_C(CODE_ADDRESS(dmode->func_ptr1))		/* line 0,1 */
	} else
	{
		GEN_CALL_ASM(CODE_ADDRESS(dmode->func_ptr1))		/* line 0,1 */
	}
#else
	GEN_CALL(dmode->func_ptr1);			/* line 0,1 */
#endif /* __ia64 */

#ifdef _AIX
	if (CI_MODE == mode_index)
	{
		/* Following 2 instructions are generated to call the routine stored in GTM_REG_ACCUM.
		 * ci_restart would have loaded this register with the address of op_extcall/op_extexfun.
		 * On other platforms, ci_start usually invokes op_ext* which will return directly
		 * to the generated code. Since RS6000 doesn't support call instruction without altering
		 * return address register (LR), the workaround is to call op_ext* not from ci_restart
		 * but from this dummy code
		 */
		*code++ = RS6000_INS_MTLR | GTM_REG_ACCUM << RS6000_SHIFT_RS;
		*code++ = RS6000_INS_BRL;
	}
#endif

#ifdef __ia64
	if (dyn_modes_type[mode_index][1] == 'C')
	{
		GEN_CALL_C(CODE_ADDRESS(dmode->func_ptr2))
	} else
	{
		GEN_CALL_ASM(CODE_ADDRESS(dmode->func_ptr2))
	}
#else
	GEN_CALL(dmode->func_ptr2);
#endif /* __ia64 */

#if defined (__ia64)
	if (DM_MODE == mode_index)
	{
		GEN_UNCOD_JUMP(-(2 * 5)); /* branch to dm_setup which is at the top of the direct mode frame. */
	}
#elif defined(__hpux)
	if (DM_MODE == mode_index)
	{
		*code++ = HPPA_INS_BEQ | (MAKE_COND_BRANCH_TARGET(-8) << HPPA_SHIFT_OFFSET); /* BEQ r0,r0, -8 */
		*code++ = HPPA_INS_NOP;
	}
#endif /* __ia64 */

#ifdef __ia64
	if (dyn_modes_type[mode_index][2] == 'C')
	{
		GEN_CALL_C(CODE_ADDRESS(dmode->func_ptr3));	/* line 2 */
	} else
	{
		GEN_CALL_ASM(CODE_ADDRESS(dmode->func_ptr3));	/* line 2 */
	}
#else
	GEN_CALL(dmode->func_ptr3);			/* line 2 */
#endif /* __ia64 */
	lnr = LNRTAB_ADR(base_address);
	*lnr++ = 0;								/* line 0 */
	*lnr++ = 0;								/* line 1 */
	IA64_ONLY(*lnr++ = 2 * CALL_SIZE + EXTRA_INST_SIZE;)			/* line 2 */