void ccp_exitwm1( ccp_db_header	*db)
{
	sgmnt_addrs	*cs_addrs;
	uint4	status;


	assert(lib$ast_in_prog());

	if ((db->qio_iosb.cond & 1) == 0)
		ccp_signal_cont(db->qio_iosb.cond);	/***** Is this reasonable? *****/

	cs_addrs = db->segment;
	if (db->last_lk_sequence < cs_addrs->ti->lock_sequence)
	{
		status = sys$qio(0, FILE_INFO(db->greg)->fab->fab$l_stv, IO$_WRITEVBLK, &db->qio_iosb, ccp_exitwm1a, db,
				 cs_addrs->lock_addrs[0], db->glob_sec->lock_space_size, LOCK_BLOCK(db->glob_sec) + 1, 0, 0, 0);
		if ((status & 1) == 0)
			ccp_signal_cont(status);	/***** Is this reasonable? *****/
		db->last_lk_sequence = cs_addrs->ti->lock_sequence;
	}
	else
		ccp_exitwm2(db);

	return;
}
void	ccp_exitwm3( ccp_db_header *db)
{
	sgmnt_addrs		*csa;
	bt_rec			*que_base, *que_top, *p;
	ccp_action_record	request;
	uint4		status;


	assert(lib$ast_in_prog());

	csa = db->segment;
	assert(csa->nl->ccp_state == CCST_WMXREQ);
	assert(csa->ti->curr_tn == csa->ti->early_tn);

	db->wm_iosb.valblk[CCP_VALBLK_TRANS_HIST] = csa->ti->curr_tn + csa->ti->lock_sequence;
	if (JNL_ENABLED(csa->hdr)  &&  csa->jnl != NULL)
	{
		assert(csa->jnl->channel != 0);
		db->wm_iosb.valblk[CCP_VALBLK_JNL_ADDR] = csa->jnl->jnl_buff->freeaddr;
		db->wm_iosb.valblk[CCP_VALBLK_EPOCH_TN] = csa->jnl->jnl_buff->epoch_tn;
		/* lastaddr is no longer a field in jnl_buff
		 *	db->wm_iosb.valblk[CCP_VALBLK_LST_ADDR] = csa->jnl->jnl_buff->lastaddr;
		 */
	}

	/* Convert Write-mode lock from Protected Write to Concurrent Read, writing the lock value block */
	status = ccp_enqw(EFN$C_ENF, LCK$K_CRMODE, &db->wm_iosb, LCK$M_CONVERT | LCK$M_VALBLK, NULL, 0,
			  NULL, 0, NULL, PSL$C_USER, 0);
	/***** Check error status here? *****/

	for (que_base = csa->bt_header, que_top = que_base + csa->hdr->bt_buckets;
	     que_base < que_top;
	     ++que_base)
	{
		assert(que_base->blk == BT_QUEHEAD);

		for (p = (bt_rec *)((char *)que_base + que_base->blkque.fl);
		     p != que_base;
		     p = (bt_rec *)((char *)p + p->blkque.fl))
		{
			if (((int4)p & 3) != 0)
				ccp_signal_cont(ERR_GTMCHECK);	/***** Is this reasonable? *****/
			p->flushing = FALSE;
		}
	}

	db->blocking_ast_received = FALSE;
	db->wmexit_requested = FALSE;
	csa->nl->ccp_state = CCST_WMXGNT;
	db->wc_rover = 0;

	request.action = CCTR_EWMWTBF;
	request.pid = 0;
	request.v.h = db;
	ccp_act_request(&request);

	return;
}
Example #3
/*
 * ---------------------------------------------
 * System call to cancel timer.
 * ---------------------------------------------
 */
void cancel_timer(TID tid)
{
	/* An interrupt should never cancel a timer that has been started in the mainline code.
	 * Otherwise the mainline code might hibernate forever.
	 * In VMS, an interrupt is equivalent to being in an AST, hence assert that we are never in an AST here.
	 * The only exception is if we are exiting, in which case we are not going to hibernate, so it is ok.
	 */
	assert(!lib$ast_in_prog() || process_exiting);
	sys$cantim(tid, 0);
}
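
/* Illustrative sketch only, not from the original source: a mainline caller that starts a timer whose
 * handler will wake it, hibernates, and then cancels the timer itself.  If an AST cancelled the timer
 * instead, the sys$hiber() below could sleep forever, which is what the assert above guards against.
 * start_timer() and example_wake_handler() (names, signatures) are assumptions made for this sketch.
 */
static void example_wake_handler(void)
{
	sys$wake(0, 0);			/* wake the hibernating mainline code */
}

static void example_mainline_wait(TID tid, int4 time_to_expir)
{
	start_timer(tid, time_to_expir, example_wake_handler, 0, NULL);	/* assumed signature */
	sys$hiber();			/* mainline hibernates until the timer handler wakes it */
	cancel_timer(tid);		/* cancelled from mainline code, never from an AST */
}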
void ccp_exitwm1a( ccp_db_header *db)
{
	assert(lib$ast_in_prog());

	if ((db->qio_iosb.cond & 1) == 0)
		ccp_signal_cont(db->qio_iosb.cond);	/***** Is this reasonable? *****/

	ccp_exitwm2(db);

	return;
}
Example #5
/* make sure that the journal file is available if appropriate */
uint4   jnl_ensure_open(void)
{
	uint4			jnl_status;
	jnl_private_control	*jpc;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	boolean_t		first_open_of_jnl, need_to_open_jnl;
	int			close_res;
#       if defined(VMS)
	static const gds_file_id	file;
	uint4				status;
#       endif

	error_def(ERR_JNLFILOPN);

	csa = cs_addrs;
	csd = csa->hdr;
	assert(csa->now_crit);
	jpc = csa->jnl;
	assert(NULL != jpc);
	assert(JNL_ENABLED(csa->hdr));
	/* The goal is to change the code below to do only one JNL_FILE_SWITCHED(jpc) check instead of the additional
	 * (NOJNL == jpc->channel) check done below. The assert below ensures that the NOJNL check can indeed
	 * be subsumed by the JNL_FILE_SWITCHED check (with the exception of the source-server which has a special case that
	 * needs to be fixed in C9D02-002241). Over time, this has to be changed to one check.
	 */
	assert((NOJNL != jpc->channel) || JNL_FILE_SWITCHED(jpc) || is_src_server);
	need_to_open_jnl = FALSE;
	jnl_status = 0;
	if (NOJNL == jpc->channel)
	{
#               ifdef VMS
		if (NOJNL != jpc->old_channel)
		{
			if (lib$ast_in_prog())          /* called from wcs_wipchk_ast */
				jnl_oper_user_ast(gv_cur_region);
			else
			{
				status = sys$setast(DISABLE);
				jnl_oper_user_ast(gv_cur_region);
				if (SS$_WASSET == status)
					ENABLE_AST;
			}
		}
#               endif
		need_to_open_jnl = TRUE;
	} else if (JNL_FILE_SWITCHED(jpc))
	{       /* The journal file has been changed "on the fly"; close the old one and open the new one */
		VMS_ONLY(assert(FALSE);)        /* everyone having older jnl open should have closed it at time of switch in VMS */
		JNL_FD_CLOSE(jpc->channel, close_res);  /* sets jpc->channel to NOJNL */
		need_to_open_jnl = TRUE;
	}
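
/* Illustrative sketch, not part of the excerpt above: the typical calling pattern.  jnl_ensure_open()
 * is called while holding crit (see the assert on csa->now_crit) and its return status decides whether
 * journal records may be written.  The surrounding error handling shown here is an assumption for the
 * sketch, not code taken from this file.
 */
static void example_jnl_caller(sgmnt_addrs *csa, sgmnt_data_ptr_t csd)
{
	uint4	jnl_status;

	assert(csa->now_crit);			/* jnl_ensure_open() requires crit to be held */
	if (JNL_ENABLED(csd))
	{
		jnl_status = jnl_ensure_open();	/* opens or reopens the journal file if necessary */
		if (0 != jnl_status)
			rts_error(VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(csd), DB_LEN_STR(gv_cur_region));
		/* ... safe to write journal records here ... */
	}
}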
Example #6
void ccp_lkrqwake1( ccp_db_header *db)
{
	uint4	status;


	assert(lib$ast_in_prog());

	if (db->lock_iosb.cond == SS$_NORMAL)
		return;

	ccp_signal_cont(db->lock_iosb.cond);	/***** Is this reasonable? *****/

	if (db->lock_iosb.cond == SS$_DEADLOCK)
	{
		/* Just try again */
		status = ccp_enq(0, LCK$K_CRMODE, &db->lock_iosb, LCK$M_CONVERT | LCK$M_SYNCSTS, NULL, 0,
				 ccp_lkrqwake1, db, ccp_lkdowake_blkast, PSL$C_USER, 0);
		/***** Check error status here? *****/
	}

	return;
}
void ccp_reqdrtbuf_interrupt( ccp_db_header *db)
{
	uint4		status;
	ccp_action_record	request;


	assert(lib$ast_in_prog());

	if (db->flush_iosb.cond == SS$_NORMAL)
	{
		request.action = CCTR_GOTDRT;
		request.pid = 0;
		request.v.h = db;
		ccp_act_request(&request);
		return;
	}

	ccp_signal_cont(db->flush_iosb.cond);	/***** Is this reasonable? *****/

	if (db->flush_iosb.cond == SS$_DEADLOCK)
	{
		/* Just try again */
		status = ccp_enq(0, LCK$K_EXMODE, &db->flush_iosb, LCK$M_CONVERT | LCK$M_SYNCSTS, NULL, 0,
				 ccp_reqdrtbuf_interrupt, db, NULL, PSL$C_USER, 0);
		if (status == SS$_SYNCH)
		{
			request.action = CCTR_GOTDRT;
			request.pid = 0;
			request.v.h = db;
			ccp_act_request(&request);
		}
		/***** Check error status here? *****/
	}

	return;
}
Example #8
void	grab_crit(gd_region *reg)
{
	unsigned short		cycle_count, cycle;
	ccp_action_aux_value	msg;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	node_local_ptr_t	cnl;
	enum cdb_sc		status;

	csa = &FILE_INFO(reg)->s_addrs;
	csd = csa->hdr;
	cnl = csa->nl;

	assert(!lib$ast_in_prog());

	if (!csa->now_crit)
	{
		assert(0 == crit_count);
		crit_count++;
		if (csd->clustered)
		{
			/* For an explanation of the code dealing with clusters, see CCP_EXITWM_ATTEMPT.C.
			   Please do not change this code without updating the comments in that file. */
			cycle = cnl->ccp_cycle;
			while (!CCP_SEGMENT_STATE(cnl, CCST_MASK_WRITE_MODE))
			{
				(void)ccp_sendmsg(CCTR_WRITEDB, &FILE_INFO(reg)->file_id);
				(void)ccp_userwait(reg, CCST_MASK_WRITE_MODE, 0, cycle);
				cycle = cnl->ccp_cycle;
			}
		}

		if (cdb_sc_normal !=
			(status = MUTEX_LOCKW(csa->critical, crash_count, &csa->now_crit, &csd->mutex_spin_parms)))
		{
			crit_count = 0;
			switch (status)
			{
			case cdb_sc_critreset:
				rts_error(ERR_CRITRESET, 2, REG_LEN_STR(reg));
			case cdb_sc_dbccerr:
				rts_error(ERR_DBCCERR, 2, REG_LEN_STR(reg));
			default:
				GTMASSERT;
			}
			return;
		}

		assert(cnl->in_crit == 0);
		cnl->in_crit = process_id;

		CRIT_TRACE(crit_ops_gw);		/* see gdsbt.h for comment on placement */

		if (csd->clustered)
		{
			cycle = cnl->ccp_cycle;
			if (cnl->ccp_crit_blocked)
			{
				msg.exreq.fid = FILE_INFO(reg)->file_id;
				msg.exreq.cycle = cycle;
				(void)ccp_sendmsg(CCTR_EXITWM, &msg);
				(void)ccp_userwait(reg, ~(CCST_MASK_WRITE_MODE), 0, msg.exreq.cycle);
				while (cnl->ccp_crit_blocked  &&  cnl->ccp_cycle == msg.exreq.cycle  ||
				       !CCP_SEGMENT_STATE(cnl, CCST_MASK_WRITE_MODE))
				{
					cycle = cnl->ccp_cycle;
					(void)ccp_sendmsg(CCTR_WRITEDB, &FILE_INFO(reg)->file_id);
					(void)ccp_userwait(reg, CCST_MASK_WRITE_MODE, 0, cycle);
				}
			}
		}
		crit_count = 0;
	}
	if (cnl->wc_blocked)
		wcs_recover(reg);
}
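
/* Illustrative sketch, not from the original file: the usual bracketing of a critical section around
 * shared-memory updates.  The rel_crit() companion call and the body are assumptions for the sketch.
 */
static void example_update_under_crit(gd_region *reg)
{
	sgmnt_addrs	*csa;

	csa = &FILE_INFO(reg)->s_addrs;
	grab_crit(reg);			/* returns only once this process owns the region's crit */
	assert(csa->now_crit);
	/* ... update shared-memory structures guarded by crit ... */
	rel_crit(reg);			/* release so other processes can acquire it */
}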
Example #9
/* ------------------------------------------------------------------
 * *** INTERRUPT HANDLER ***
 * Sets up transfer table changes needed for:
 *   - Synchronous handling of asynchronous events.
 *   - Single-stepping and breakpoints
 * Note:
 *   - Call here from a routine specific to each event type.
 *   - Pass in a single value to pass on to xfer_table set function
 *     for that type. Calling routine should record any other event
 *     info, if needed, in volatile global variables.
 *   - If this is first event logged, will call back to the function
 *     provided and pass along parameter.
 * Future:
 *   - mdb_condition_handler does not call here -- should change it.
 *   - Ditto for routines related to zbreak and zstep.
 *   - Should put handler prototypes in a header file & include it here,
 *     if can use with some way to ensure type checking.
 *   - A higher-level interface (e.g. change sets) might be better.
 * ------------------------------------------------------------------
 */
boolean_t xfer_set_handlers(int4  event_type, void (*set_fn)(int4 param), int4 param_val)
{
	boolean_t 	is_first_event = FALSE;

	/* ------------------------------------------------------------
	 * Keep track of what event types have come in.
	 * - Get and set value atomically in case of concurrent
	 *   events and/or resetting while setting.
	 * ------------------------------------------------------------------
	 * Use interlocked operations to prevent races between set and reset,
	 * and to avoid missing overlapping sets.
	 * On HPUX-HPPA:
	 *    OK only if there is no risk that a conflicting operation is
	 *    in progress (can deadlock in the micro-lock).
	 * On all platforms:
	 *    Don't want I/O from a sensitive area.
	 * Avoid both by testing fast_lock_count, and doing interlocks and
	 * I/O only if it is non-zero. Can't be resetting then, so worst
	 * risk is missing an event when there's already one happening.
	 * ------------------------------------------------------------------
	 */
	VMS_ONLY(assert(lib$ast_in_prog()));
	if (fast_lock_count == 0)
	{
		DBGDFRDEVNT((stderr, "xfer_set_handlers: Before interlocked operations:  "
			     "xfer_table_events[%d]=%d, first_event=%s, num_deferred=%d\n",
			     event_type, xfer_table_events[event_type], (is_first_event ? "TRUE" : "FALSE"),
			     num_deferred));
		if (1 == INCR_CNT_SP(&xfer_table_events[event_type], &defer_latch))
			/* Concurrent events can collide here, too */
			is_first_event =  (1 == INCR_CNT_SP(&num_deferred, &defer_latch));
		DBGDFRDEVNT((stderr, "xfer_set_handlers: After interlocked operations:   "
			     "xfer_table_events[%d]=%d, first_event=%s, num_deferred=%d\n",
			     event_type,xfer_table_events[event_type], (is_first_event ? "TRUE" : "FALSE"),
			     num_deferred));
	} else if (1 == ++xfer_table_events[event_type])
		is_first_event = (1 == ++num_deferred);
	if (is_first_event)
	{
		first_event = event_type;
#		ifdef DEBUG_DEFERRED_EVENT
		if (0 != fast_lock_count)
			DBGDFRDEVNT((stderr, "xfer_set_handlers: Setting xfer_table for event type %d\n",
				     event_type));
#		endif
		/* -------------------------------------------------------
		 * If table changed, it was not synchronized.
		 * (Assumes these entries are all that would be changed)
		 * Note asserts are bypassed for Itanium because the nature of the
		 * fixed-up addresses makes direct comparisons non-trivial.
		 * --------------------------------------------------------
		 */
#		ifndef __ia64
		assert((xfer_table[xf_linefetch] == op_linefetch) ||
		       (xfer_table[xf_linefetch] == op_zstepfetch) ||
		       (xfer_table[xf_linefetch] == op_zst_fet_over) ||
		       (xfer_table[xf_linefetch] == op_mproflinefetch));
		assert((xfer_table[xf_linestart] == op_linestart) ||
		       (xfer_table[xf_linestart] == op_zstepstart) ||
		       (xfer_table[xf_linestart] == op_zst_st_over) ||
		       (xfer_table[xf_linestart] == op_mproflinestart));
		assert((xfer_table[xf_zbfetch] == op_zbfetch) ||
		       (xfer_table[xf_zbfetch] == op_zstzb_fet_over) ||
		       (xfer_table[xf_zbfetch] == op_zstzbfetch));
		assert((xfer_table[xf_zbstart] == op_zbstart) ||
		       (xfer_table[xf_zbstart] == op_zstzb_st_over) ||
		       (xfer_table[xf_zbstart] == op_zstzbstart));
		assert((xfer_table[xf_forchk1] == op_forchk1) ||
		       (xfer_table[xf_forchk1] == op_mprofforchk1));
		assert((xfer_table[xf_forloop] == op_forloop));
		assert(xfer_table[xf_ret] == opp_ret ||
		       xfer_table[xf_ret] == opp_zst_over_ret ||
		       xfer_table[xf_ret] == opp_zstepret);
		assert(xfer_table[xf_retarg] == op_retarg ||
		       xfer_table[xf_retarg] == opp_zst_over_retarg ||
		       xfer_table[xf_retarg] == opp_zstepretarg);
#		endif /* !IA64 */
		/* -----------------------------------------------
		 * Now call the specified set function to swap in
		 * the desired handlers (and set flags or whatever).
		 * -----------------------------------------------
		 */
		DBGDFRDEVNT((stderr, "xfer_set_handlers: Driving event setup handler\n"));
		set_fn(param_val);
	}
#	ifdef DEBUG_DEFERRED_EVENT
	else if (0 != fast_lock_count)
	{
		DBGDFRDEVNT((stderr, "xfer_set_handlers: ---Multiple deferred events---\n"
			     "Event type %d occurred while type %d was pending\n", event_type, first_event));
	} else
	{
		DBGDFRDEVNT((stderr, "xfer_set_handlers: Event bypassed -- was not first event\n"));
	}
#	endif
 	assert(no_event != first_event);
	return is_first_event;
}
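
/* Illustrative sketch, not from the original file: the shape of an event-specific routine that calls
 * xfer_set_handlers(), per the header comment above.  The event-type argument, the set function and
 * the volatile global used to record extra event information are all assumptions for this sketch.
 */
static volatile int4	example_event_data;		/* hypothetical: extra info recorded for the event */

static void example_set_xfer(int4 param)		/* hypothetical xfer_table set function */
{
	/* swap the relevant xfer_table entries to their deferred-event versions here */
}

void example_event_interrupt(int4 event_type, int4 sig_value)	/* hypothetical AST-level entry point */
{
	example_event_data = sig_value;			/* record extra info in a volatile global */
	(void)xfer_set_handlers(event_type, example_set_xfer, sig_value);
}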
Example #10
/*
** Name: CScp_resume	- Resume (a thread in) a process
**
** Description:
**	This routine resumes the indicated thread in the indicated process.
**
**	If the indicated process is this process, then this is a simple
**	CSresume operation. If the indicated process is another process, then
**	that process must be notified that it should CSresume the indicated
**	thread.
**
** Inputs:
**	cpid		pointer to CS_CPID with
**	   .pid		- the indicated process
**	   .sid		- the indicated session
**	   .iosb	- a thread-safe IOSB
**	   .data	- where we'll place a pointer
**			  to the CPchan written to.
**
** Outputs:
**	None
**
** Returns:
**	void
**
** History:
**	Summer, 1992 (bryanp)
**	    Working on the new portable logging and locking system.
**	9-oct-1992 (bryanp)
**	    Use global IOSB, not stack IOSB, so that when QIO completes
**	    some time from now it will not overwrite arbitrary stack stuff.
**	19-oct-1992 (bryanp)
**	    CSresume expects to be called at AST level. Oblige it by invoking it
**	    via sys$dclast().
**	20-oct-1992 (bryanp)
**	    Back out the DCLAST change; CSresume can now be called at normal
**	    level.
**	14-dec-1992 (bryanp)
**	    ERsend() calls should be ERlog() calls.
**	29-sep-1993 (walt)
**	    Get an event flag number from lib$get_ef rather than use event flag
**	    zero in the sys$qio call.  
**	18-oct-1993 (rachael)
**	    Call lib$signal(SS$_DEBUG) only when compiled with xDEBUG flag.
**	16-Nov-1998 (jenjo02)
**	    Prototype changed to pass CS_CPID * instead of PID, SID.
**	08-Nov-2007 (jonj)
**	    Write with IO$M_NOW and IO$M_READERCHECK, check for dead reader
**	    process (NOREADER), mark this PID/channel as dead for subsequent
**	    resumers to ignore. IO$M_NOW does not wait for the reader to
**	    read.
**	04-Apr-2008 (jonj)
**	    Embed IOSB in CS_CPID and reinstate lib$get_ef() to assure
**	    thread-safeness.
**	    Disable/reenable ASTs to prevent duplicate reads being seen
**	    on the other end.
**	    Supply cpres_mbx_write_complete() AST to check IOSB status
**	    for NOREADER.
*/
void
CScp_resume( CS_CPID *cpid )
{
    i4		    	vms_status;
    CS_CP_WAKEUP_MSG	wakeup_msg;
    i4		    	mbox_chan;
    char		msg_buf[100];
    CL_ERR_DESC		local_sys_err;
    CP_CHANNEL		*CPchan;
    II_VMS_EF_NUMBER	efn;
    i4			ReenableASTs;

    if (cpid->pid == Cs_srv_block.cs_pid)
    {
	CSresume(cpid->sid);
    }
    else
    {
	/* Disable AST delivery */
	ReenableASTs = (sys$setast(0) == SS$_WASSET);

	/* Initialize to success */
	vms_status = SS$_NORMAL;

	if ( cpres_mbx_assign(cpid->pid, &CPchan) == OK )
	{
	    /* If reader is not alive, do nothing */
	    if ( CPchan->state == CPchanIsAlive )
	    {
		/* The SID of the session to resume */
		wakeup_msg.wakeup_sid = cpid->sid;

		/* horda03 - Fill in details to help track Cross-Process
		**           ACCVIO problem.
		*/
		wakeup_msg.wakeup_pid    = cpid->pid;
		wakeup_msg.from_pid      = Cs_srv_block.cs_pid;

		/* If from AST, "from_sid" is meaningless */
		if ( (wakeup_msg.sender_in_ast = lib$ast_in_prog()) )
		    wakeup_msg.from_sid = 0;
		else
		    wakeup_msg.from_sid = (CS_SID)Cs_srv_block.cs_current;

		/*
		** Plunk message, don't wait for reader to read it.
		**
		** Use IOSB embedded in CS_CPID, pass CS_CPID* to
		** AST completion.
		*/

		/* Set CPchan in the CS_CPID for AST's use */
		cpid->data = (PTR)CPchan;

		vms_status = sys$qio(EFN$C_ENF, CPchan->chan, 
				    IO$_WRITEVBLK | IO$M_NOW | IO$M_READERCHECK,
				    &cpid->iosb,
				    cpres_mbx_write_complete, 
				    cpid, 
				    &wakeup_msg, sizeof(wakeup_msg),
				    0, 0, 0, 0);

		if ( vms_status != SS$_NORMAL )
		{
		    STprintf(msg_buf, "[%x.%x] Error (%x) queueing write to %x on channel %d",
			    wakeup_msg.from_pid,
			    wakeup_msg.from_sid,
			    vms_status, CPchan->pid, CPchan->chan);
		    ERlog(msg_buf, STlength(msg_buf), &local_sys_err);
		}
	    }
	}
	else
	{
	    STprintf(msg_buf, "Unable to assign channel to %x", cpid->pid);
	    ERlog(msg_buf, STlength(msg_buf), &local_sys_err);

	    STprintf(msg_buf, "Ignoring error in assigning mailbox for PID %x", 
			cpid->pid);
	    ERlog(msg_buf, STlength(msg_buf), &local_sys_err);
	    /*
	    ** The process we were going to send this message to will probably
	    ** "hang", which at least allows some sort of diagnosis. Killing
	    ** ourselves at this point is less useful, since it tends to crash
	    ** the entire installation.
	    */
	}
	
	if ( vms_status != SS$_NORMAL )
	{
	    STprintf(msg_buf, "CScp_resume QIO to %x failed with status %x",
		    cpid->pid, vms_status);
	    ERlog(msg_buf, STlength(msg_buf), &local_sys_err);
#ifdef xDEBUG
	    lib$signal(SS$_DEBUG);
#endif
	    PCexit(FAIL);
	}

	if ( ReenableASTs )
	    sys$setast(1);
    }
    return;
}
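
/* Illustrative sketch, not from the original file: the suspend side of the handshake that CScp_resume()
** completes.  CSsuspend() is used per its documented role; the CSget_cpid() helper, the mask argument
** and the publication step are assumptions made for this sketch.
*/
static void example_cross_process_wait( CS_CPID *my_cpid )
{
    CSget_cpid(my_cpid);		/* assumed helper: fill in our pid/sid */
    /* ... publish my_cpid where the peer process can find it (e.g. in a shared lock block) ... */
    CSsuspend(CS_LOCK_MASK, 0, 0);	/* sleep until the peer process calls CScp_resume(my_cpid) */
}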
Example #11
void ccp_reqwm_interrupt(ccp_db_header **pdb)
{
	ccp_db_header	*db;
	sgmnt_addrs	*csa;
	uint4	status;


	assert(lib$ast_in_prog());

	db = *pdb;

	csa = db->segment;
	if (csa == NULL  ||  csa->nl->ccp_state == CCST_CLOSED)
		return;

	switch (db->wm_iosb.cond)
	{
	case SS$_DEADLOCK:
		ccp_signal_cont(SS$_DEADLOCK);
		/* Just try again */
		ccp_request_write_mode(db);
		return;

	case SS$_CANCEL:
		/* Lock cancelled by close */
		return;

	case SS$_VALNOTVALID:
		/* Force reads from disk */
		db->wm_iosb.valblk[CCP_VALBLK_TRANS_HIST] = 0;
		db->last_lk_sequence = db->master_map_start_tn
				     = 0;
		/* Drop through ... */

	case SS$_NORMAL:
		if (db->wm_iosb.valblk[CCP_VALBLK_TRANS_HIST] == csa->ti->curr_tn + csa->ti->lock_sequence)
		{
			/* No change to current tn, do not need to update header */
			if (csa->now_crit)
			{
				assert (csa->nl->in_crit == process_id);
				csa->nl->in_crit = 0;
				(void)mutex_unlockw(csa->critical, csa->critical->crashcnt, &csa->now_crit);
				/***** Check error status here? *****/
			}
			ccp_writedb5(db);
		}
		else
		{
			if (csa->nl->in_crit == 0)
			{
				if (mutex_lockwim(csa->critical, csa->critical->crashcnt, &csa->now_crit) == cdb_sc_normal)
					csa->nl->in_crit = process_id;		/* now_crit was set by mutex_lockwim */
				else
					if (csa->nl->in_crit == 0)		/***** Why is this re-tested? *****/
					{
						status = sys$setimr(0, delta_100_msec, ccp_reqwm_interrupt, &db->wmcrit_timer_id,
								    0);
						if (status != SS$_NORMAL)
							ccp_signal_cont(status);	/***** Is this reasonable? *****/
						return;
					}
			}
			status = sys$qio(0, FILE_INFO(db->greg)->fab->fab$l_stv, IO$_READVBLK, &db->qio_iosb, ccp_writedb2, db,
					 &db->glob_sec->trans_hist, BT_SIZE(csa->hdr) + SIZEOF(th_index), TH_BLOCK, 0, 0, 0);
			if (status != SS$_NORMAL)
				ccp_signal_cont(status);	/***** Is this reasonable? *****/
		}
		return;

	default:
		ccp_signal_cont(db->wm_iosb.cond);		/***** Is this reasonable? *****/
		return;
	}
}
gtcm_server()
{
	static readonly int4	reptim[2] = {-100000, -1};	/* 10ms */
       	static readonly int4	wait[2] =  {-1000000, -1};	/* 100ms */
	void		gtcm_ch(), gtcm_exi_handler(), gtcm_init_ast(), gtcm_int_unpack(), gtcm_mbxread_ast(),
			gtcm_neterr(), gtcm_read_ast(), gtcm_remove_from_action_queue(), gtcm_shutdown_ast(), gtcm_write_ast(),
			la_freedb();
	bool		gtcm_link_accept();
	bool		alid;
	char		buff[512];
	char		*h = NULL;
	char		*la_getdb();
	char		nbuff[256];
	char		*pak = NULL;
	char		reply;
	unsigned short	outlen;
	int4		closewait[2] = {0, -1};
	int4		inid = 0, mdl = 0, nid = 0, days = 0;
	int4		lic_status;
	int4		lic_x;
	int4		lm_mdl_nid();
	uint4		status;
	int		i, receive(), value;
	mstr		name1, name2;
	struct NTD	*cmu_ntdroot();
	connection_struct *prev_curr_entry;
	struct	dsc$descriptor_s	dprd;
	struct	dsc$descriptor_s	dver;
	$DESCRIPTOR(node_name, nbuff);
	$DESCRIPTOR(proc_name, "GTCM_SERVER");
	$DESCRIPTOR(timout, buff);
	DCL_THREADGBL_ACCESS;

	GTM_THREADGBL_INIT;
        assert(0 == EMPTY_QUEUE);       /* check so we don't need gdsfhead everywhere */
	common_startup_init(GTCM_GNP_SERVER_IMAGE); /* Side-effect: Sets skip_dbtriggers to TRUE for non-trigger platforms */
	gtm_env_init();	/* read in all environment variables */
	name1.addr = "GTCMSVRNAM";
	name1.len = SIZEOF("GTCMSVRNAM") - 1;
	status = trans_log_name(&name1, &name2, nbuff);
	if (SS$_NORMAL == status)
	{
		proc_name.dsc$a_pointer = nbuff;
		proc_name.dsc$w_length = node_name.dsc$w_length = name2.len;
	} else if (SS$_NOLOGNAM == status)
	{
		MEMCPY_LIT(nbuff, "GTCMSVR");
		node_name.dsc$w_length = SIZEOF("GTCMSVR") - 1;
	} else
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) status);
	sys$setprn(&proc_name);
	status = lib$get_foreign(&timout, 0, &outlen, 0);
	if ((status & 1) && (6 > outlen))
	{
		value = 0;	/* accumulate the decimal timeout from the foreign command line */
		for (i = 0;  i < outlen;  i++)
		{
			value = value * 10;
			if (buff[i] <= '9' && buff[i] >= '0')
				value += buff[i] - 48;
			else
				break;
		}
		if (outlen && (i == outlen))
		{
			cm_timeout = TRUE;
			closewait[0] = value * -10000000;
		}
	}
	dprd.dsc$w_length = cm_prd_len;
	dprd.dsc$b_dtype  = DSC$K_DTYPE_T;
	dprd.dsc$b_class  = DSC$K_CLASS_S;
	dprd.dsc$a_pointer= cm_prd_name;
	dver.dsc$w_length = cm_ver_len;
	dver.dsc$b_dtype  = DSC$K_DTYPE_T;
	dver.dsc$b_class  = DSC$K_CLASS_S;
	dver.dsc$a_pointer= cm_ver_name;
	ast_init();
	licensed = TRUE;
	lkid = 2;
#	ifdef NOLICENSE
	lid = 1;
#	else
	/* this code used to be scattered to discourage reverse engineering, but since it is now disabled, that seems pointless */
	lic_status = ((NULL == (h = la_getdb(LMDB))) ? LP_NOCNFDB : SS$_NORMAL);
	lic_status = ((1 == (lic_status & 1)) ? lm_mdl_nid(&mdl, &nid, &inid) : lic_status);
	lic_status = ((1 == (lic_status & 1)) ? lp_licensed(h, &dprd, &dver, mdl, nid, &lid, &lic_x, &days, pak) : lic_status);
	if (LP_NOCNFDB != lic_status)
		la_freedb(h);
	if (1 == (lic_status & 1))
	{
		licensed = TRUE;
		if (days < 14)
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_WILLEXPIRE);
	} else
	{
		licensed = FALSE;
		sys$exit(lic_status);
	}
#	endif
	gtcm_ast_avail = astq_dyn_avail - GTCM_AST_OVRHD;
	stp_init(STP_INITSIZE);
	rts_stringpool = stringpool;
	cache_init();
	procnum = 0;
	get_proc_info(0, TADR(login_time), &image_count);
        memset(proc_to_clb, 0, SIZEOF(proc_to_clb));
	status = cmi_init(&node_name, 0, 0, gtcm_init_ast, gtcm_link_accept);
	if (!(status & 1))
	{
		rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ((status ^ 3) | 4));
		sys$exit(status);
	}
	ntd_root = cmu_ntdroot();
	ntd_root->mbx_ast =  gtcm_mbxread_ast;
	ntd_root->err = gtcm_neterr;
	gtcm_connection = FALSE;
	lib$establish(gtcm_ch);
	gtcm_exi_blk.exit_hand = &gtcm_exi_handler;
	gtcm_exi_blk.arg_cnt = 1;
	gtcm_exi_blk.cond_val = &gtcm_exi_condition;
	sys$dclexh(&gtcm_exi_blk);
	INVOKE_INIT_SECSHR_ADDRS;
	initialize_pattern_table();
	assert(run_time); /* Should have been set by common_startup_init */
	while (!cm_shutdown)
	{
		if (blkdlist)
			gtcml_chkreg();

		assert(!lib$ast_in_prog());
		status = sys$dclast(&gtcm_remove_from_action_queue, 0, 0);
		if (SS$_NORMAL != status)
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(4) CMERR_CMSYSSRV, 0, status, 0);
		if (INTERLOCK_FAIL == curr_entry)
			rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) CMERR_CMINTQUE);
		if (EMPTY_QUEUE != curr_entry)
		{
			switch (*curr_entry->clb_ptr->mbf)
			{
				case CMMS_L_LKCANALL:
					reply = gtcmtr_lkcanall();
					break;
				case CMMS_L_LKCANCEL:
					reply = gtcmtr_lkcancel();
					break;
				case CMMS_L_LKREQIMMED:
					reply = gtcmtr_lkreqimmed();
					break;
				case CMMS_L_LKREQNODE:
					reply = gtcmtr_lkreqnode();
					break;
				case CMMS_L_LKREQUEST:
					reply = gtcmtr_lkrequest();
					break;
				case CMMS_L_LKRESUME:
					reply = gtcmtr_lkresume();
					break;
				case CMMS_L_LKACQUIRE:
					reply = gtcmtr_lkacquire();
					break;
				case CMMS_L_LKSUSPEND:
					reply = gtcmtr_lksuspend();
					break;
				case CMMS_L_LKDELETE:
					reply = gtcmtr_lkdelete();
					break;
				case CMMS_Q_DATA:
					reply = gtcmtr_data();
					break;
				case CMMS_Q_GET:
					reply = gtcmtr_get();
					break;
				case CMMS_Q_KILL:
					reply = gtcmtr_kill();
					break;
				case CMMS_Q_ORDER:
					reply = gtcmtr_order();
					break;
				case CMMS_Q_PREV:
					reply = gtcmtr_zprevious();
					break;
				case CMMS_Q_PUT:
					reply = gtcmtr_put();
					break;
				case CMMS_Q_QUERY:
					reply = gtcmtr_query();
					break;
				case CMMS_Q_ZWITHDRAW:
					reply = gtcmtr_zwithdraw();
					break;
				case CMMS_S_INITPROC:
					reply = gtcmtr_initproc();
					break;
				case CMMS_S_INITREG:
					reply = gtcmtr_initreg();
					break;
				case CMMS_S_TERMINATE:
					reply = gtcmtr_terminate(TRUE);
					break;
				case CMMS_E_TERMINATE:
					reply = gtcmtr_terminate(FALSE);
					break;
				case CMMS_U_LKEDELETE:
					reply = gtcmtr_lke_clearrep(curr_entry->clb_ptr, curr_entry->clb_ptr->mbf);
					break;
				case CMMS_U_LKESHOW:
					reply = gtcmtr_lke_showrep(curr_entry->clb_ptr, curr_entry->clb_ptr->mbf);
					break;
				case CMMS_B_BUFRESIZE:
					reply = CM_WRITE;
					value = *(unsigned short *)(curr_entry->clb_ptr->mbf + 1);
					if (value > curr_entry->clb_ptr->mbl)
					{
						free(curr_entry->clb_ptr->mbf);
						curr_entry->clb_ptr->mbf = malloc(value);
					}
					*curr_entry->clb_ptr->mbf = CMMS_C_BUFRESIZE;
					curr_entry->clb_ptr->mbl = value;
					curr_entry->clb_ptr->cbl = 1;
					break;
				case CMMS_B_BUFFLUSH:
					reply = gtcmtr_bufflush();
					break;
				case CMMS_Q_INCREMENT:
					reply = gtcmtr_increment();
					break;
				default:
					reply = FALSE;
					if (SS$_NORMAL == status)
                                                rts_error_csa(CSA_ARG(NULL)
							VARLSTCNT(3) ERR_BADGTMNETMSG, 1, (int)*curr_entry->clb_ptr->mbf);
					break;
			}
			if (curr_entry)		/* curr_entry can be NULL if went through gtcmtr_terminate */
			{
				status = sys$gettim(&curr_entry->lastact[0]);
				if (SS$_NORMAL != status)
					rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) status);
				/* curr_entry is used by gtcm_mbxread_ast to determine if it needs to defer the interrupt message */
				prev_curr_entry = curr_entry;
				if (CM_WRITE == reply)
				{	/* if ast == gtcm_write_ast, let it worry */
					curr_entry->clb_ptr->ast = gtcm_write_ast;
					curr_entry = EMPTY_QUEUE;
					cmi_write(prev_curr_entry->clb_ptr);
				} else
				{
					curr_entry = EMPTY_QUEUE;
					if (1 == (prev_curr_entry->int_cancel.laflag & 1))
					{  /* valid interrupt cancel msg, handle in gtcm_mbxread_ast */
						status = sys$dclast(gtcm_int_unpack, prev_curr_entry, 0);
						if (SS$_NORMAL != status)
							rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) status);
					} else  if (CM_READ == reply)
					{
						prev_curr_entry->clb_ptr->ast = gtcm_read_ast;
						cmi_read(prev_curr_entry->clb_ptr);
					}
				}
			}
		} else  if (1 < astq_dyn_avail)
		{
#			ifdef GTCM_REPTIM
			/* if reptim is not needed - and smw doesn't know why it would be - remove this	*/
			status = sys$schdwk(0, 0, &wait[0], &reptim[0]);
#			else
			status = sys$schdwk(0, 0, &wait[0], 0);
#			endif
			sys$hiber();
			sys$canwak(0, 0);
		}
		if (cm_timeout && (0 == gtcm_users))
                        sys$setimr(efn_ignore, closewait, gtcm_shutdown_ast, &cm_shutdown, 0);
	}
}