Example #1
GSTATUS 
DDFSemOpen (
	PSEMAPHORE sem, 
	bool exclusive) 
{
    GSTATUS err = GSTAT_OK;
    STATUS	status;

    if ((status = CSp_semaphore(TRUE, &sem->semaphore)) == OK)
    {
	while (status == OK && 
	       ((exclusive == TRUE) ? sem->have != 0 : sem->have < 0))
	    status = CScnd_wait(&sem->cond, &sem->semaphore);

	if (status == OK) 
	{
	    if (exclusive == TRUE)
		sem->have--;
	    else
		sem->have++;
	    status = CSv_semaphore(&sem->semaphore);
	}
	else if (status != E_CS000F_REQUEST_ABORTED)
	{
	    CSv_semaphore(&sem->semaphore);
	    err = DDFStatusAlloc (E_DF0008_SEM_CANNOT_OPEN);
	}
    }
    return(err);
}
Example #2
GSTATUS 
DDFSemClose (
	PSEMAPHORE sem) 
{
    GSTATUS err = GSTAT_OK;
    STATUS	status;

    G_ASSERT(!sem, E_DF0010_SEM_BAD_INIT);

    if ((status = CSp_semaphore(TRUE, &sem->semaphore)) == OK) 
    {
	if (sem->have < 0) 
	{
	    sem->have = 0;
	    /* when releasing an exclusive - several shares may wake */
	    status = CScnd_broadcast(&sem->cond);
	} 
	else 
	{
	    sem->have--;
	    if (sem->have == 0) 
	    {
		/* when releasing a share - at most one exclusive may wake */
		status = CScnd_signal(&sem->cond, (CS_SID)NULL);
	    }
	}
	if (status == OK) 
	{
	    if (CSv_semaphore(&sem->semaphore) != OK)
		err = DDFStatusAlloc (E_DF0009_SEM_CANNOT_CLOSE);
	} 
	else 
	{
	    CSv_semaphore(&sem->semaphore);
	}
    }
    return(err);
}
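
DDFSemOpen and DDFSemClose together implement a shared/exclusive lock on top of a CS semaphore and a condition variable: an exclusive open waits until nobody holds the lock and drives the have counter negative, a shared open increments it, and the close broadcasts after an exclusive release or signals when the last share is returned. The caller sketch below is illustrative only; protected_access and the work it brackets are assumptions, not part of the original source.

GSTATUS
protected_access(PSEMAPHORE sem, bool write_access)
{
    GSTATUS err;

    /* TRUE = exclusive (single writer); FALSE = shared (many readers) */
    err = DDFSemOpen(sem, write_access);
    if (err == GSTAT_OK)
    {
        /* ... read or update the structure guarded by sem here ... */

        /* release the share or the exclusive hold and wake any waiters */
        err = DDFSemClose(sem);
    }
    return(err);
}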
Example #3
/*{
** Name: ops_unlock	- release exclusive access to server control block
**
** Description:
{@comment_line@}...
**
** Inputs:
[@PARAM_DESCR@]...
**
** Outputs:
[@PARAM_DESCR@]...
**	Returns:
**	    {@return_description@}
**	Exceptions:
**	    [@description_or_none@]
**
** Side Effects:
**	    [@description_or_none@]
**
** History:
**	09-Oct-1998 (jenjo02)
**	    Removed SCF semaphore functions, inlining the CS calls instead.
[@history_template@]...
*/
DB_STATUS
ops_unlock(
    OPF_CB             *opf_cb,
    SCF_SEMAPHORE      *semaphore)
{
    STATUS		status;

    status = CSv_semaphore(semaphore);

    if (status != OK)
    {
# ifdef E_OP0092_SEMWAIT
        opx_rverror( opf_cb, E_DB_ERROR, E_OP0092_SEMWAIT, status);
# endif
        return(E_DB_ERROR);
    }
    return(E_DB_OK);
}
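
ops_unlock above is only the release half; the acquire side would take the same semaphore exclusively before the server control block is touched. The routine below is a sketch, not the actual OPF source: ops_lock_sketch is an invented name, and it reuses E_OP0092_SEMWAIT purely for symmetry with ops_unlock.

DB_STATUS
ops_lock_sketch(
    OPF_CB             *opf_cb,
    SCF_SEMAPHORE      *semaphore)
{
    STATUS		status;

    /* first argument of 1 requests exclusive access */
    status = CSp_semaphore(1, semaphore);

    if (status != OK)
    {
# ifdef E_OP0092_SEMWAIT
        opx_rverror( opf_cb, E_DB_ERROR, E_OP0092_SEMWAIT, status);
# endif
        return(E_DB_ERROR);
    }
    return(E_DB_OK);
}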
Example #4
/* release a semaphore */
STATUS
Vsem(
CS_SEMAPHORE *sp)
{
    STATUS rv;
#ifdef EX_DEBUG
    EX_CONTEXT context;

    if (EXdeclare(ex_handler, &context) != OK) {
	/* some exception was raised */
	SIfprintf( stderr,"Error: unexpected exception in Vsem()...");
	EXdelete();
	return FAIL;
    }
#endif

    if( rv = CSv_semaphore( sp ) )
	semerr( rv, sp, "Vsem" );

#ifdef EX_DEBUG
    EXdelete();
#endif
    return( rv );
}
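
Vsem is the release half of a Psem/Vsem pair; an acquire wrapper would mirror it with CSp_semaphore in place of CSv_semaphore. The version below is a sketch under that assumption rather than the actual source.

/* acquire a semaphore (illustrative counterpart to Vsem above) */
STATUS
Psem(
CS_SEMAPHORE *sp)
{
    STATUS rv;
#ifdef EX_DEBUG
    EX_CONTEXT context;

    if (EXdeclare(ex_handler, &context) != OK) {
	/* some exception was raised */
	SIfprintf( stderr,"Error: unexpected exception in Psem()...");
	EXdelete();
	return FAIL;
    }
#endif

    /* TRUE requests exclusive ownership; blocks until it is granted */
    if( rv = CSp_semaphore( TRUE, sp ) )
	semerr( rv, sp, "Psem" );

#ifdef EX_DEBUG
    EXdelete();
#endif
    return( rv );
}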
Example #5
/*
** Name: QEN_PRINT_ROW	- This routine prints the current row for a given 
**								    QEN_NODE
**
** Description:
**      This routine prints out the current row for the given QEN_NODE. 
**      This is done with the help of ADF, who converts data into a 
**      printable form, similar to what the terminal monitor needs to do. 
**
**	FIXME this really belongs in qenutl since it applies to any node,
**	not just fsm-joins.
**
** Inputs:
**	node -
**	    The qen_node that refers to the row.
**	qef_rcb -
**	    The request control block that we get most of our query common 
**	    info from, like cbs, the dsh, dsh rows, etc.
**
** Outputs:
**
**	Returns:
**	    DB_STATUS
**	Exceptions:
**	    none
**
** Side Effects:
**	    causes a row to be printed to the FE (via SCC_TRACE).
**
** History:
**      17-may-89 (eric)
**          created
**	6-aug-04 (inkdo01)
**	    Drop increment of node_rcount - it's already incremented in the 
**	    node handlers and this simply doubles its value.
**	13-Dec-2005 (kschendel)
**	    Can count on qen-ade-cx now.
**      11-Mar-2010 (hanal04) Bug 123415 
**          Make sure QE99 and OP173 do not interleave output when
**          using parallel query.
**	12-May-2010 (kschendel) Bug 123720
**	    Above doesn't quite go far enough, mutex the buffer.
**          Take qef_trsem for valid parallel-query output.
*/
DB_STATUS
qen_print_row(
QEN_NODE	*node,
QEF_RCB		*qef_rcb,
QEE_DSH		*dsh )
{
    QEN_STATUS	    *qen_status = dsh->dsh_xaddrs[node->qen_num]->qex_status;
    QEN_ADF	    *qen_adf;
    DB_STATUS	    status = E_DB_OK;
    char	    *cbuf = dsh->dsh_qefcb->qef_trfmt;
    i4		    cbufsize = dsh->dsh_qefcb->qef_trsize;

    qen_adf = node->qen_prow;		/* Checked non-NULL by caller */
    if (qen_status->node_rcount > 0)
    {
	/* process the row print expression */
	status = qen_execute_cx(dsh, dsh->dsh_xaddrs[node->qen_num]->qex_prow);
	if (status != E_DB_OK)
	{
# ifdef	xDEBUG
	    (VOID) qe2_chk_qp(dsh);
# endif
	    return (status);
	
	}

	/* Print in one operation to avoid interleaved output */
	CSp_semaphore(1, &dsh->dsh_qefcb->qef_trsem);
	STprintf(cbuf, "Row %d of node %d\n%s\n\n", 
			    qen_status->node_rcount, node->qen_num,
			    dsh->dsh_row[qen_adf->qen_output]);
        qec_tprintf(qef_rcb, cbufsize, cbuf);
	CSv_semaphore(&dsh->dsh_qefcb->qef_trsem);
    }			
    return(E_DB_OK);
}
Example #6
/*{
** Name: psf_debug	- Standard entry point for debugging PSF.
**
** Description:
**      This function is the standard entry point to PSF for setting and
**	clearing tracepoints.
**
** Inputs:
**      debug_cb                        Pointer to a DB_DEBUG_CB
**        .db_trswitch                    What operation to perform
**	    DB_TR_NOCHANGE		    None
**	    DB_TR_ON			    Turn on a tracepoint
**	    DB_TR_OFF			    Turn off a tracepoint
**	  .db_trace_point		  The number of the tracepoint to be
**					  effected
**	  .db_vals[2]			  Optional values, to be interpreted
**					  differently for each tracepoint
**	  .db_value_count		  The number of values specified in
**					  the above array
**
** Outputs:
**      None
**	Returns:
**	    E_DB_OK			Success
**	    E_DB_WARN			Operation completed with warning(s)
**	    E_DB_ERROR			Function failed; non-catastrophic error
**	    E_DB_FATAL			Function failed; catastrophic error
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	17-apr-86 (jeff)
**          written
**	13-feb-90 (andre)
**	    set scf_stype to SCU_EXCLUSIVE before calling scu_swait.
**	14-jul-93 (ed)
**	    replacing <dbms.h> by <gl.h> <sl.h> <iicommon.h> <dbdbms.h>
**	10-aug-93 (andre)
**	   removed declaration of scf_call()
**	08-oct-93 (rblumer)
**	   In order to allow session trace points that take a value,
**	   changed ult_set_macro call to use firstval and secondval
**	   instead of hard-coding zeros for the values.
**	09-Oct-1998 (jenjo02)
**	    Removed SCF semaphore functions, inlining the CS calls instead.
**      04-may-1999 (hanch04)
**          Change TRformat's print function to pass a PTR not an i4.
**      21-Feb-2007 (hanal04) Bug 117736
**          Added trace point PS503 to dump PSF's ULM memory usage to the
**          DBMS log.
*/
DB_STATUS
psf_debug(
	DB_DEBUG_CB        *debug_cb)
{
    i4                  flag;
    i4		firstval;
    i4		secondval;
    PSS_SESBLK		*sess_cb;
    extern PSF_SERVBLK	*Psf_srvblk;

    /* Get the session control block */
    sess_cb = psf_sesscb();

    /* Flags 0 - PSF_TBITS-1 are for the server; all others are for sessions */
    flag = debug_cb->db_trace_point;
    if (flag >= PSF_TBITS)
    {
	flag = flag - PSF_TBITS;
	if (flag >= PSS_TBITS)
	    return (E_DB_ERROR);
    }

    /* There can be UP TO two values, but maybe they weren't given */
    if (debug_cb->db_value_count > 0)
	firstval = debug_cb->db_vals[0];
    else
	firstval = 0L;

    if (debug_cb->db_value_count > 1)
	secondval = debug_cb->db_vals[1];
    else
	secondval = 0L;

    /*
    ** Three possible actions: Turn on flag, turn it off, or do nothing.
    */
    switch (debug_cb->db_trswitch)
    {
    case DB_TR_ON:
	/* First PSF_TBITS flags belong to server, others to session */
	if (debug_cb->db_trace_point < PSF_TBITS)
	{
	    CSp_semaphore(1, &Psf_srvblk->psf_sem); /* exclusive */
	    ult_set_macro(&Psf_srvblk->psf_trace, flag, firstval, secondval);
	    CSv_semaphore(&Psf_srvblk->psf_sem);
	}
	else
	{
	    /* Do nothing if couldn't get session control block */
	    if (sess_cb != (PSS_SESBLK *) NULL)
	    {
		if(flag == PSS_ULM_DUMP_POOL)
		{
		    ULM_RCB	ulm_rcb;
		    char	buf[512];
		    SCF_CB	scf_cb;

		    ulm_rcb.ulm_poolid = Psf_srvblk->psf_poolid;
		    ulm_rcb.ulm_facility = DB_PSF_ID;
		    _VOID_ ulm_mappool(&ulm_rcb);
		    ulm_print_pool(&ulm_rcb);
		    STprintf(buf, "ULM Memory Pool Map and ULM Memory Print Pool for PSF has been \nwritten to the DBMS log file.");
		    
		    scf_cb.scf_length = sizeof(scf_cb);
		    scf_cb.scf_type = SCF_CB_TYPE;
		    scf_cb.scf_facility = DB_PSF_ID;
		    scf_cb.scf_session = DB_NOSESSION;
		    scf_cb.scf_nbr_union.scf_local_error = 0;
		    scf_cb.scf_len_union.scf_blength = STlength(buf);
		    scf_cb.scf_ptr_union.scf_buffer = buf;
		    _VOID_ scf_call(SCC_TRACE, &scf_cb);
		}
		else
		{
		    ult_set_macro(&sess_cb->pss_trace, flag, firstval, 
				secondval);

		    /* Yacc debugging requires a special call */
		    if (flag == PSS_YTRACE)
		        psl_trace((PTR) sess_cb->pss_yacc, TRUE);
		}
	    }
	}
	break;

    case DB_TR_OFF:
	/* First PSF_TBITS flags belong to server, others to session */
	if (debug_cb->db_trace_point < PSF_TBITS)
	{
	    CSp_semaphore(1, &Psf_srvblk->psf_sem); /* exclusive */
	    ult_clear_macro(&Psf_srvblk->psf_trace, flag);
	    CSv_semaphore(&Psf_srvblk->psf_sem);
	}
	else
	{
	    /* Do nothing if couldn't get session control block */
	    if (sess_cb != (PSS_SESBLK *) NULL)
	    {
		ult_clear_macro(&sess_cb->pss_trace, flag);
		/* Yacc debugging requires a special call */
		if (flag == PSS_YTRACE)
		    psl_trace((PTR) sess_cb->pss_yacc, FALSE);
	    }
	}
	break;

    case DB_TR_NOCHANGE:
	/* Do nothing */
	break;

    default:
	return (E_DB_ERROR);
    }

    return (E_DB_OK);
}
Example #7
DB_STATUS
dmx_secure(
DMX_CB    *dmx_cb)
{
    DML_SCB             *scb;
    DMX_CB		*dmx = dmx_cb;
    DML_XCB		*xcb;
    DB_STATUS		status = E_DB_ERROR;
    i4		error,local_error;
    DMP_RCB		*rcb;
    CL_ERR_DESC		sys_err;
    STATUS		cl_stat;
    i4			qnext;
    DMC_REPQ		*repq = (DMC_REPQ *)(Dmc_rep + 1);
    i4			rep_iq_lock;
    i4			rep_maxlocks;
    DB_TAB_TIMESTAMP	timestamp;
    LK_EVENT		lk_event;

    CLRDBERR(&dmx->error);

    for (;;)
    {
	xcb = (DML_XCB *)dmx->dmx_tran_id;
	if (dm0m_check((DM_OBJECT *)xcb, (i4)XCB_CB) != E_DB_OK)
	{
	    SETDBERR(&dmx->error, 0, E_DM003B_BAD_TRAN_ID);
	    break;
	}

	if (dmx->dmx_dis_tran_id.db_dis_tran_id_type == 0)
	{
	    SETDBERR(&dmx->error, 0, E_DM002A_BAD_PARAMETER);
	    break;
	}
	if (xcb->xcb_state & (XCB_STMTABORT | XCB_TRANABORT))
	{
	    SETDBERR(&dmx->error, 0, E_DM0064_USER_ABORT);
	    break;
	}
	if (xcb->xcb_state & XCB_WILLING_COMMIT)
	{
	    SETDBERR(&dmx->error, 0, E_DM0132_ILLEGAL_STMT);
	    break;
	}
        scb = xcb->xcb_scb_ptr;

	/*  Clear user interrupt state in SCB. */

	scb->scb_ui_state &= ~SCB_USER_INTR;

	/* Clear the force abort state of the SCB. */

	if (xcb->xcb_state & XCB_FORCE_ABORT)
	    scb->scb_ui_state &= ~SCB_FORCE_ABORT;

	/*
	** If the transaction has written no Begin Transaction record,
	** then force one to the log file now.
	*/
	if (xcb->xcb_flags & XCB_DELAYBT)
	{
	    status = dmxe_writebt(xcb, TRUE, &dmx->error);
	    if (status != E_DB_OK)
	    {
		xcb->xcb_state |= XCB_TRANABORT;
		break;
	    }
	}

	/*
	** If the database is replicated, the input queue must
	** be processed before putting the transaction in a
	** willing commit state.
	**
	** All of this transaction's tables, including those
	** associated with replication, must be closed and
	** their pages tossed from the cache.
	**
	** This is the same processing as that for a 
	** "normal" commit.
	*/
	if ((xcb->xcb_odcb_ptr->odcb_dcb_ptr->dcb_status & DCB_S_REPLICATE) &&
	    (DMZ_SES_MACRO(32) == 0 || dmd_reptrace() == FALSE) &&
	    ((xcb->xcb_rep_input_q == NULL) || 
		(xcb->xcb_rep_input_q == (DMP_RCB *)-1)))
	{
	    if (dmf_svcb->svcb_rep_iqlock == DMC_C_ROW)
		rep_iq_lock = DM2T_RIX;
	    else if (dmf_svcb->svcb_rep_iqlock == DMC_C_TABLE)
		rep_iq_lock = DM2T_X;
	    else
		rep_iq_lock = DM2T_IX;
 
	    /* allow a minimum maxlocks value of 50 ... */
	    if (dmf_svcb->svcb_rep_dtmaxlock > 50)
		rep_maxlocks = dmf_svcb->svcb_rep_dtmaxlock;
	    /* ...but default to 100 */
	    else if (dmf_svcb->svcb_lk_maxlocks > 100)
		rep_maxlocks = dmf_svcb->svcb_lk_maxlocks;
	    else
		rep_maxlocks = 100;
	    
	    status = dm2t_open(xcb->xcb_odcb_ptr->odcb_dcb_ptr, 
		&xcb->xcb_odcb_ptr->odcb_dcb_ptr->rep_input_q, rep_iq_lock,
                DM2T_UDIRECT, DM2T_A_WRITE, (i4)0, rep_maxlocks,
                (i4)0, xcb->xcb_log_id, xcb->xcb_lk_id, (i4)0, 
		(i4)0, (i4)0, &(xcb->xcb_tran_id), &timestamp, 
		&(xcb->xcb_rep_input_q), (DML_SCB *)0, &dmx->error);
	    if (status != E_DB_OK)
	    {
		xcb->xcb_state |= XCB_TRANABORT;
		break;
	    }
	}
        /*
        ** clean up replication input queue RCB
        */
        if (xcb->xcb_rep_input_q)
        {
	    HRSYSTIME		curtime;
	    bool	list_ok = TRUE;
	    i4		semaphore_status = -1;

	    (VOID)TMhrnow(&curtime);

	    /* If rep_txq_size=0, Dmc_rep will be NULL and we have to
	    ** do the distribution synchronously now (kibro01) b118566
	    */
	    if (Dmc_rep == NULL)
		list_ok = FALSE;

	    /*
	    ** if we had the input queue open, then we need to distribute
	    */
	    if (list_ok)
	    {
	        semaphore_status = CSp_semaphore(TRUE, &Dmc_rep->rep_sem);
		if (semaphore_status == E_DB_OK)
		{
		    qnext = (Dmc_rep->queue_start == Dmc_rep->queue_end &&
			repq[Dmc_rep->queue_start].active == 0 &&
			repq[Dmc_rep->queue_start].tx_id == 0) ?
			  Dmc_rep->queue_end :
			  (Dmc_rep->queue_end + 1) % Dmc_rep->seg_size;
		    if (qnext == Dmc_rep->queue_start && 
			Dmc_rep->queue_start != Dmc_rep->queue_end)
		    {
			list_ok = FALSE;
		    }
		} else
		{
	            TRdisplay("%@ dmxsecure() CSp_semaphore failed %d\n",
                        semaphore_status);

		    /* Error getting the semaphore, can't rely on the list */
		    list_ok = FALSE;
		}
	    }
	    if (!list_ok)
	    {
		/* queue is full, print warning and distribute manually */
		if (semaphore_status == E_DB_OK)
			CSv_semaphore(&Dmc_rep->rep_sem);

		/* If we have a queue and it's full, log that fact */
		if (Dmc_rep)
		{
            	    uleFormat(NULL, W_DM9561_REP_TXQ_FULL, NULL, ULE_LOG, NULL, 
		        (char *)NULL, (i4)0, (i4 *)NULL, &local_error, 0);
		}

		status = dm2rep_qman(xcb->xcb_odcb_ptr->odcb_dcb_ptr,
		    xcb->xcb_rep_remote_tx ? xcb->xcb_rep_remote_tx :
			(i4)xcb->xcb_tran_id.db_low_tran,
		    &curtime, xcb->xcb_rep_input_q, xcb->xcb_lk_id, 
		    &dmx->error, FALSE);
		if (status != E_DB_OK)
		{
		    if (dmx->error.err_code > E_DM004A_INTERNAL_ERROR)
		    {
		        uleFormat( &dmx->error, 0, NULL, ULE_LOG , NULL, 
				    (char * )NULL, 0L, (i4 *)NULL, 
				    &local_error, 0);
			SETDBERR(&dmx->error, 0, E_DM957F_REP_DISTRIB_ERROR);
		    }
		    xcb->xcb_state |= XCB_TRANABORT;
		    break;
		}
	    }
	    else
	    {
	        /* we have the mutex and an entry we can use, so fill out the entry */
	        /* we assume we are only accessing one db in this tx */
	        STRUCT_ASSIGN_MACRO(xcb->xcb_odcb_ptr->odcb_dcb_ptr->dcb_name,
		    repq[qnext].dbname);
	        /*
	        ** use remote TX if it's set (by the replication server)
	        */
	        if (xcb->xcb_rep_remote_tx)
		    repq[qnext].tx_id = xcb->xcb_rep_remote_tx;
	        else
	            repq[qnext].tx_id = (i4)xcb->xcb_tran_id.db_low_tran;
	        repq[qnext].active = FALSE;
	        MEcopy((char *)&curtime, sizeof(HRSYSTIME), 
		       (char *)&repq[qnext].trans_time);
	        Dmc_rep->queue_end = qnext;
	        CSv_semaphore(&Dmc_rep->rep_sem);
	        /*
	        ** signal the queue management thread(s)
	        */
		lk_event.type_high = REP_READQ;
		lk_event.type_low = 0;
		lk_event.value = REP_READQ_VAL;
		
	        cl_stat = LKevent(LK_E_CLR | LK_E_CROSS_PROCESS, xcb->xcb_lk_id,
		    &lk_event, &sys_err);
                if (cl_stat != OK)
                {
                    uleFormat(NULL, cl_stat, &sys_err, ULE_LOG, NULL,
                        (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
                    uleFormat(&dmx->error, E_DM904B_BAD_LOCK_EVENT, &sys_err, ULE_LOG, 
			NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 3,
                        0, LK_E_CLR, 0, REP_READQ, 0, xcb->xcb_lk_id);
                    status = E_DB_ERROR;
                    break;
                }
	    }
	    /*
	    ** user updates to the input queue will cause xcb_rep_input_q to
	    ** be set to -1, the table will already be closed by the user in
	    ** this case
	    */
	    if (xcb->xcb_rep_input_q != (DMP_RCB *)-1)
	    {
                status = dm2t_close(xcb->xcb_rep_input_q, (i4)0, &dmx->error);

		if (status != E_DB_OK)
		{
		    if (dmx->error.err_code != E_DM004A_INTERNAL_ERROR)
		    {
			uleFormat( &dmx->error, 0, NULL, ULE_LOG , NULL,
			    (char * )NULL, 0L, (i4 *)NULL,
			    &local_error, 0);
			SETDBERR(&dmx->error, 0, E_DM009A_ERROR_SECURE_TRAN);
		    }
		    xcb->xcb_state |= XCB_TRANABORT;
		    return (status);
		}
		xcb->xcb_rep_input_q = (DMP_RCB*)NULL;
	    }
        }

	/*  Close all open tables and destroy all open temporary tables.    */

	while (xcb->xcb_rq_next != (DMP_RCB*) &xcb->xcb_rq_next)
	{
	    /*	Get next RCB. */

	    rcb = (DMP_RCB *)((char *)xcb->xcb_rq_next -
		(char *)&(((DMP_RCB*)0)->rcb_xq_next));

	    /*	Remove from the XCB. */

	    rcb->rcb_xq_next->rcb_q_prev = rcb->rcb_xq_prev;
	    rcb->rcb_xq_prev->rcb_q_next = rcb->rcb_xq_next;

	    /*	Deallocate the RCB. */

	    status = dm2t_close(rcb, (i4)0, &dmx->error);
	    if (status != E_DB_OK)
	    {
		if (dmx->error.err_code != E_DM004A_INTERNAL_ERROR)
		{
		    uleFormat( &dmx->error, 0, NULL, ULE_LOG , NULL, 
			(char * )NULL, 0L, (i4 *)NULL, 
			&local_error, 0);
		    SETDBERR(&dmx->error, 0, E_DM009A_ERROR_SECURE_TRAN);
		}
		xcb->xcb_state |= XCB_TRANABORT;
		return (status);
	    }
	}

	/*  Now prepare to commit the transaction. */

	STRUCT_ASSIGN_MACRO(dmx->dmx_dis_tran_id, xcb->xcb_dis_tran_id);

	status = dmxe_secure(&xcb->xcb_tran_id, &xcb->xcb_dis_tran_id,
				xcb->xcb_log_id, xcb->xcb_lk_id, &dmx->error);
	if (status != E_DB_OK)
	{
	    if (dmx->error.err_code > E_DM_INTERNAL)
	    {
		uleFormat(&dmx->error, 0, NULL, ULE_LOG, NULL, 
			    (char * )NULL, (i4)0, (i4 *)NULL, &error, 0);
		SETDBERR(&dmx->error, 0, E_DM009A_ERROR_SECURE_TRAN);
	    }

	    xcb->xcb_state |= XCB_TRANABORT;
	    return (E_DB_ERROR);
	}

	/* Mark the state of the transaction in xcb to WILLING COMMIT. */

	xcb->xcb_x_type |= XCB_DISTRIBUTED;
	xcb->xcb_state |= XCB_WILLING_COMMIT;
	return (E_DB_OK);
    }
    return (status);
}
Example #8
/*{
** Name: LGK_initialize()	-  initialize the lg/lk shared mem segment.
**
** Description:
**	This routine is called by the LGinitialize or LKinitialize routine.  It
**	assumes that a previous caller has allocated the shared memory segment.
**
**	If it discovers that the shared memory segment has not yet been
**	initialized, it calls the LG and LK initialize-memory routines to do so.
**
** Inputs:
**	flag		- bit mask of:
**			  LOCK_LGK_MEMORY to lock the shared data segment
**			  LGK_IS_CSP if process is CSP process this node.
**
** Outputs:
**	sys_err		- place for system-specific error information.
**
**	Returns:
**	    OK	- success
**	    !OK - failure (CS*() routine failure, segment not mapped, ...)
**	
**  History:
**	Summer, 1992 (bryanp)
**	    Working on the new portable logging and locking system.
**	19-oct-1992 (bryanp)
**	    Check memory version number when attaching.
**	22-oct-1992 (bryanp)
**	    Change LGLKDATA.MEM to lglkdata.mem.
**	23-Oct-1992 (daveb)
**	    name the semaphore too.
**	13-feb-1993 (keving)
**	    Remove support for II_LGK_MEMORY_SIZE. If II_LG_MEMSIZE
**	    is not set then calculate memory size from PM values. 
**	24-may-1993 (bryanp)
**	    If the shared memory is the wrong version, don't install the
**	    at_exit handlers (the rundown routines won't be able to interpret
**	    the memory properly).
**	26-jul-1993 (jnash)
**	    Add 'flag' param lock the LGK data segment.
**	20-sep-1993 (bryanp)
**	    In addition to calling PCatexit, call (on VMS) sys$dclexh, since
**		there are some situations (image death and image rundown without
**		process rundown) which are caught neither by PCatexit (since
**		PCexit isn't run) nor by check-dead threads (since process
**		rundown never happened). This fixes a hole where an access-
**		violating ckpdb or auditdb command never got cleaned up.
**	31-jan-1994 (bryanp)
**	    Back out a few "features" which are proving counterproductive:
**	    1) Don't bother checking mem_creator_pid to see if the previous
**		creator of the shared memory has died. This was an attempt to
**		gracefully re-use sticky shared memory following a system crash,
**		but it is suspected as being the culprit in a series of system
**		failures by re-initializing the shared memory at inopportune
**		times.
**	    2) Don't complain if the shared memory already exists but is of a
**		different size than you expected. Just go ahead and try to use
**		it anyway.
**	21-feb-1994 (bryanp)
**	    Reverse item (1) of the above 31-jan-1994 change and re-enable the
**		graceful re-use of shared memory. People weren't happy with
**		having to run ipcclean and csinstall all the time.
**	23-may-1994 (bryanp)
**	    On VMS, disable ^Y for LG/LK-aware processes. We don't want to allow
**		^Y because you might interrupt the process right in the middle
**		of an LG or LK operation, while holding the shared memory
**		semaphore, and this would then wedge the whole installation.
**          
**      17-May-1994 (daveb) 59127
**          Attach lgk_mem semaphore if we're attaching to the segment.
**      30-jan-1995 (lawst01) bug 61984
**          Use memory needed calculation from the 'lgk_calculate_size'
**          function to determine the size of the shared memory pool for
**          logging and locking. If the II_LG_MEMSIZE variable is specified
**          with a value larger than needed use the supplied value. If
**          lgk_calculate_size is unable to calculate a size then use the
**          magic number of 400000.  In addition issue a warning message
**          and continue executing in the event the number of pages
**          allocated is less than the number requested. 
**	24-apr-1997 (nanpr01)
**	    Reinstate Bryanp's change. In the process of fixing bug 61984
**	    by Steve Lawrence and subsequent undo of Steve's fix by Nick
**	    Ireland on 25-jun-96 (nick) caused the 'if 0' code to be removed.
**	    Part of Steve's change was not reinstated, such as not returning
**	    the status and exit and continue.
**	    1. Don't complain if the shared memory already exists but is of a
**	    different size than you expected. Just go ahead and try to use
**	    it.
**     18-aug-1998 (hweho01)
**          Reclaim the kernel resource if LG/LK shared memory segment is  
**          reinitialized. If the shared segment is re-used (the previous creator 
**          of the shared segment has died), the cross-process semaphores get 
**          initialized more than once at the same locations. That causes
**          kernel resource leaks on DG/UX (OS release 4.11MU04). To fix the 
**          problem, CS_cp_sem_cleanup() is called to destroy all the 
**          semaphores before the LG/LK shared segment gets recreated. 
**          CS_cp_sem_cleanup() is made dependent on xCL_NEED_SEM_CLEANUP and
**          OS_THREADS_USED, it returns immediately for most platforms.  
**	27-Mar-2000 (jenjo02)
**	    Added test for crossed thread types, refuse connection
**	    to LGK memory with E_DMA811_LGK_MT_MISMATCH.
**	18-apr-2001 (devjo01)
**	    s103715 (Portable cluster support)
**	    - Add CX mem requirement calculations.
**	    - Add LGK_IS_CSP flag to indicate that LGK memory is being
**	      initialized for a CSP process.
**	    - Add basic CX initialization.
**      19-sep-2002 (devjo01)
**          If running NUMA clustered allocate memory out of local RAD.
**	30-Apr-2003 (jenjo02)
**	    Rearchitected to silence long-tolerated race conditions.
**	    BUG 110121.
**	27-feb-2004 (devjo01)
**	    Rework allocation of CX shared memory to be compatible
**	    with race condition fix introduced for bug 110121.
**	29-Dec-2008 (jonj)
**	    If lgk_calculate_size() returns FAIL, the total memory
**	    needed exceeds MAX_SIZE_TYPE and we can't continue, but
**	    tell what we can about the needs of the various bits of
**	    memory before quitting.
**	06-Aug-2009 (wanfr01)
**	    Bug 122418 - Return E_DMA812 if LOCK_LGK_MUST_ATTACH is
**	    is passed in and memory segment does not exist
**      20-Nov-2009 (maspa05) bug 122642
**          In order to synchronize creation of UUIDs across servers added
**          a semaphore and a 'last time' variable into LGK memory. 
**      14-Dec-2009 (maspa05) bug 122642
**          #ifdef out the above change for Windows. The rest of the change
**          does not apply to Windows so the variables aren't defined.
*/
STATUS
LGK_initialize(
i4	  	flag,
CL_ERR_DESC	*sys_err,
char		*lgk_info)
{
    PTR		ptr;
    SIZE_TYPE	memleft;
    SIZE_TYPE	size;
    STATUS	ret_val;
    STATUS	mem_exists;
    char	mem_name[15];
    SIZE_TYPE	allocated_pages;
    i4		me_flags;
    i4		me_locked_flag;
    SIZE_TYPE	memory_needed;
    char	*nm_string;
    SIZE_TYPE	pages;
    LGK_MEM	*lgk_mem;
    i4		err_code;
    SIZE_TYPE   min_memory;
    i4		retries;
    i4		i;
    i4		attached;
    PID		*my_pid_slot;
    i4		clustered;
    u_i4	nodes;
    SIZE_TYPE	cxmemreq;
    PTR		pcxmem;
    LGLK_INFO	lgkcount;
    char	instid[4];

    CL_CLEAR_ERR(sys_err);

    /*
    ** if LGK_base is set then this routine has already been called.  It is
    ** set up so that both LGinitialize and LKinitialize call it, but only
    ** the first call does anything.
    */

    if (LGK_base.lgk_mem_ptr)
	return(OK);

    PCpid( &LGK_my_pid );

    memory_needed = 0;
    NMgtAt("II_LG_MEMSIZE", &nm_string);
    if (nm_string && *nm_string)
#if defined(LP64)
	if (CVal8(nm_string, (long*)&memory_needed))
#else
	if (CVal(nm_string, (i4 *)&memory_needed))
#endif /* LP64 */
	    memory_needed = 0;

    /* Always calculate memory needed from PM resource settings  */
    /* and compare with supplied value, if supplied value is less */
    /* than minimum then use minimum                             */

    min_memory = 0;
    if ( OK == lgk_get_counts(&lgkcount, FALSE))
    {
	if ( lgk_calculate_size(FALSE, &lgkcount, &min_memory) )
	{
	    /*
	    ** Memory exceeds MAX_SIZE_TYPE, can't continue.
	    ** 
	    ** Do calculation again, this time with "wordy"
	    ** so user can see allocation bits, then quit.
	    */
	    lgk_calculate_size(TRUE, &lgkcount, &min_memory);
	    return (E_DMA802_LGKINIT_ERROR); 
	}
    }
    if (min_memory)
       memory_needed = (memory_needed < min_memory) ? min_memory
                                                    : memory_needed;
    else
       memory_needed = (memory_needed < 400000 ) ? 400000 
                                                 : memory_needed;

    clustered = (i4)CXcluster_enabled();
    cxmemreq = 0;
    if ( clustered )
    {

	if ( OK != CXcluster_nodes( &nodes, NULL ) )
	    nodes = 0;
	cxmemreq = CXshm_required( 0, nodes, lgkcount.lgk_max_xacts,
		    lgkcount.lgk_max_locks, lgkcount.lgk_max_resources );
	if ( MAX_SIZE_TYPE - memory_needed < cxmemreq )
	{
	    /*
	    ** Memory exceeds MAX_SIZE_TYPE, can't continue.
	    ** 
	    ** Do calculation again, this time with "wordy"
	    ** so user can see allocation bits, then quit.
	    */
	    SIprintf("Total LG/LK/CX allocation exceeds max of %lu bytes by %lu\n"
	    	     "Adjust logging/locking configuration values and try again\n",
		         MAX_SIZE_TYPE, cxmemreq - (MAX_SIZE_TYPE - memory_needed));
	    lgk_calculate_size(TRUE, &lgkcount, &min_memory);
	    return (E_DMA802_LGKINIT_ERROR); 
	}
	memory_needed += cxmemreq;
    }

    if ( memory_needed < MAX_SIZE_TYPE - ME_MPAGESIZE )
	pages = (memory_needed + ME_MPAGESIZE - 1) / ME_MPAGESIZE;
    else
        pages = memory_needed / ME_MPAGESIZE;

    /*
    ** Lock the LGK segment if requested to do so
    */
    if (flag & LOCK_LGK_MEMORY)
	me_locked_flag = ME_LOCKED_MASK;
    else
	me_locked_flag = 0;

    me_flags = (me_locked_flag | ME_MSHARED_MASK | ME_IO_MASK | 
		ME_CREATE_MASK | ME_NOTPERM_MASK | ME_MZERO_MASK);
    if (CXnuma_user_rad())
        me_flags |= ME_LOCAL_RAD;

    STcopy("lglkdata.mem", mem_name);

    /*
    ** In general, we just want to attach to the shared memory and detect if
    ** we are the first process to do so. However, there are ugly race
    ** conditions to consider, as well as complications because the shared
    ** memory may be left around following a system crash.
    **
    ** First we attempt to create the shared memory. Usually it already exists,
    ** so we check for and handle the case of "already exists".
    */

    /*
    ** (jenjo02)
    **
    ** Restructured to better handle all those ugly race conditions
    ** which are easily reproduced by running two scripts, one that
    ** continuously executes "lockstat" while the other is starting
    ** and stopping Ingres.
    **
    ** For example,
    **
    **		lockstat A	acquires and init's the memory
    **		RCP		attaches to "A" memory
    **		lockstat A	terminates normally
    **		lockstat B	attaches to "A" memory, sees that
    **				"A"s pid is no longer alive, and
    **				"A"'s pid is no longer alive, and
    **				the RCP's chagrin.
    ** or (more commonly)
    **
    **		lockstat A	acquires and begins to init the mem
    **		RCP		attaches to "A" memory which is
    **				still being zero-filled by lockstat,
    **				checks the version number (zero),
    **				and fails with a E_DMA434 mismatch.
    **
    ** The fix utilizes the mem_ext_sem to synchronize multiple
    ** processes; if the semaphore hasn't been initialized or
    ** if mem_version_no is zero, we'll wait one second and retry,
    ** up to 60 seconds before giving up. This gives the creating
    ** process time to complete initialization of the memory.
    **
    ** Up to LGK_MAX_PIDS are allowed to attach to the shared
    ** memory. When a process attaches it sets its PID in the
    ** first vacant slot in lgk_mem->mem_pid[]; if there are
    ** no vacant slots, the attach is refused. When the process
    ** terminates normally by calling LGK_rundown(), it zeroes
    ** its PID slot.
    **
    ** When attaching to an existing segment, we check if  
    ** there are any live processes still using the memory;
    ** if so, we can't destroy it (no matter who created it).
    ** If there are no live processes attached to the memory,
    ** we destroy and reallocate it (based on current config.dat
    ** settings).
    */

    for ( retries = 0; ;retries++ )
    {
	LGK_base.lgk_mem_ptr = (PTR)NULL;
	
	/* Give up if unable to get memory in one minute */
#if defined(conf_CLUSTER_BUILD)
        if (retries > 1)
#else
	if ( retries )
#endif
	{
	    if ( retries < 60 )
		PCsleep(1000);
	    else
	    {
		/* Another process has it blocked way too long */
		uleFormat(NULL, E_DMA800_LGKINIT_GETMEM, (CL_ERR_DESC *)NULL,
				ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0);
		/* Unable to attach allocated shared memory segment. */
		return (E_DMA802_LGKINIT_ERROR); 
	    }
	}

	ret_val = MEget_pages(me_flags,
				pages, mem_name, (PTR*)&lgk_mem,
				&allocated_pages, sys_err);

	if ( mem_exists = ret_val )
	{
	    if (ret_val == ME_ALREADY_EXISTS)
	    {
		ret_val = MEget_pages((me_locked_flag | 
				       ME_MSHARED_MASK | ME_IO_MASK),
				      pages, mem_name, (PTR*)&lgk_mem,
				      &allocated_pages, sys_err);
#if defined(conf_CLUSTER_BUILD)
                if (ret_val && !retries)
                    continue;  /* try one more time */
#endif
	    }
	    if (ret_val)
	    {
		uleFormat(NULL, ret_val, sys_err,
				ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0);
		uleFormat(NULL, E_DMA800_LGKINIT_GETMEM, (CL_ERR_DESC *)NULL,
				ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0);
		/* Unable to attach allocated shared memory segment. */
		return (E_DMA802_LGKINIT_ERROR); 
	    }
	}
	else if (flag & LOCK_LGK_MUST_ATTACH)
	{	
	    /* Do not use the shared segment you just allocated */
	    MEfree_pages((PTR)lgk_mem, allocated_pages, sys_err);
	    return (E_DMA812_LGK_NO_SEGMENT); 
	}

	size = allocated_pages * ME_MPAGESIZE;

	/* Expose this process to the memory */
	LGK_base.lgk_mem_ptr = (PTR)lgk_mem;

	if ( mem_exists )
	{
	    /*
	    ** Memory exists.
	    **
	    ** Try to acquire the semaphore. If it's
	    ** uninitialized, retry from the top.
	    **
	    ** If the version is zero, then another
	    ** process is initializing the memory;
	    ** keep retrying until the version is 
	    ** filled in.
	    **
	    */
	    if ( ret_val = CSp_semaphore(1, &lgk_mem->mem_ext_sem) )
	    {
		if ( ret_val != E_CS000A_NO_SEMAPHORE )
		{
		    uleFormat(NULL, ret_val, sys_err,
				ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0);
		    ret_val = E_DMA802_LGKINIT_ERROR;
		    break;
		}
		continue;
	    }

	    /* Retry if still being init'd by another process */
	    if ( !lgk_mem->mem_version_no )
	    {
		CSv_semaphore(&lgk_mem->mem_ext_sem);
		continue;
	    }

	    /*
	    ** Check pids which appear to be attached to
	    ** the memory:
	    **
	    ** If any process is still alive, then we
	    ** assume the memory is consistent and use it.
	    **
	    ** If a process is now dead, it terminated
	    ** without going through LGK_rundown
	    ** to zero its PID slot, zero it now.
	    **
	    ** If there are no live PIDs attached to 
	    ** the memory, we destroy and recreate it.
	    */
	    my_pid_slot = (PID*)NULL;
	    attached = 0;

	    for ( i = 0; i < LGK_MAX_PIDS; i++ )
	    {
		if ( lgk_mem->mem_pid[i] && 
		     PCis_alive(lgk_mem->mem_pid[i]) )
		{
		    attached++;
		}
		else
		{
		    /* Vacate the slot */
		    if (lgk_mem->mem_pid[i])
		    {
			uleFormat(NULL, E_DMA499_DEAD_PROCESS_INFO, (CL_ERR_DESC *)NULL,
				ULE_LOG, NULL, NULL, 0, NULL, &err_code, 2,
				0, lgk_mem->mem_pid[i],
				0, lgk_mem->mem_info[i].info_txt);
		    }
		    lgk_mem->mem_pid[i] = (PID)0;
		    lgk_mem->mem_info[i].info_txt[0] = EOS;

		    /* Use first vacant slot for this process */
		    if ( !my_pid_slot )
		    {
			my_pid_slot = &lgk_mem->mem_pid[i];
			LGK_base.lgk_pid_slot = i;
		    }
		}
		/* Quit when both questions answered */
		if ( attached && my_pid_slot )
		    break;
	    }

	    /* If no living pids attached, destroy/reallocate */
	    if ( !attached )
	    {
		CSv_semaphore(&lgk_mem->mem_ext_sem);
		if ( LGK_destroy(allocated_pages, sys_err) )
		{
		    ret_val = E_DMA802_LGKINIT_ERROR;
		    break;
		}
		continue;
	    }

	    /* All attached pids alive? */
	    if ( !my_pid_slot )
	    {
		/* ... then there's no room for this process */
		uleFormat(NULL, E_DMA80A_LGK_ATTACH_LIMIT, (CL_ERR_DESC *)NULL,
		    ULE_LOG, NULL, NULL, 0, NULL, &err_code, 1,
		    0, attached);
	        ret_val = E_DMA802_LGKINIT_ERROR;
	    }
	    else if (lgk_mem->mem_version_no != LGK_MEM_VERSION_CURRENT)
	    {
		uleFormat(NULL, E_DMA434_LGK_VERSION_MISMATCH, (CL_ERR_DESC *)NULL,
		    ULE_LOG, NULL, NULL, 0, NULL, &err_code, 2,
		    0, lgk_mem->mem_version_no, 0, LGK_MEM_VERSION_CURRENT);
		ret_val = E_DMA435_WRONG_LGKMEM_VERSION;
	    }
	    /*
	    ** Don't allow mixed connections of MT/non-MT processes.
	    ** Among other things, the mutexing mechanisms are 
	    ** incompatible!
	    */
	    else if ( (CS_is_mt() && (lgk_mem->mem_status & LGK_IS_MT) == 0) ||
		     (!CS_is_mt() &&  lgk_mem->mem_status & LGK_IS_MT) )
	    {
		uleFormat(NULL, E_DMA811_LGK_MT_MISMATCH, (CL_ERR_DESC *)NULL,
		    ULE_LOG, NULL, NULL, 0, NULL, &err_code, 2,
		    0, (lgk_mem->mem_status & LGK_IS_MT) ? "OS"
							 : "INTERNAL",
		    0, (CS_is_mt()) ? "OS"
				    : "INTERNAL");
		ret_val = E_DMA802_LGKINIT_ERROR;
	    }
	    else
	    {
		/*
		** CX memory (if any) will lie immediately past LGK header.
		*/
		pcxmem = (PTR)(lgk_mem + 1);
		pcxmem = (PTR)ME_ALIGN_MACRO(pcxmem, sizeof(ALIGN_RESTRICT));

		LGK_base.lgk_lkd_ptr = (char *)LGK_base.lgk_mem_ptr +
					lgk_mem->mem_lkd;
		LGK_base.lgk_lgd_ptr = (char *)LGK_base.lgk_mem_ptr +
					lgk_mem->mem_lgd;
		
		/* Stuff our pid in first vacant slot */
		*my_pid_slot = LGK_my_pid;
		STlcopy(lgk_info, lgk_mem->mem_info[i].info_txt, LGK_INFO_SIZE-1);
	    }

#if defined(VMS) || defined(UNIX)
	    /* set up pointers to reference the uuid mutex and last time
	     * variable */

	    if (!ID_uuid_sem_ptr)
           	ID_uuid_sem_ptr=&lgk_mem->id_uuid_sem;

	    if (!ID_uuid_last_time_ptr)
                ID_uuid_last_time_ptr=&lgk_mem->uuid_last_time;

	    if (!ID_uuid_last_cnt_ptr)
                ID_uuid_last_cnt_ptr=&lgk_mem->uuid_last_cnt;
#endif

	    CSv_semaphore(&lgk_mem->mem_ext_sem);
	}
	else
	{

	    /* Memory did not exist */
	    /* Zero the version to keep other processes out */
	    lgk_mem->mem_version_no = 0;

#if defined(VMS) || defined(UNIX)
	    /* set up the uuid mutex and last time pointers to
	     * reference the objects in shared memory */

	    {
	        STATUS id_stat;

	        ID_uuid_sem_ptr=&lgk_mem->id_uuid_sem;
                ID_uuid_last_time_ptr=&lgk_mem->uuid_last_time;
                ID_uuid_last_cnt_ptr=&lgk_mem->uuid_last_cnt;
	        *ID_uuid_last_cnt_ptr=0;
	        ID_UUID_SEM_INIT(ID_uuid_sem_ptr,CS_SEM_MULTI,"uuid sem",
				&id_stat);
	    }
#endif

	    /* ... then initialize the mutex */
	    CSw_semaphore(&lgk_mem->mem_ext_sem, CS_SEM_MULTI,
	    			    "LGK mem ext sem" );

	    /* Record if memory created for MT or not */
	    if ( CS_is_mt() )
		lgk_mem->mem_status = LGK_IS_MT;

	    /*
	    ** memory is as follows:
	    **
	    **	-----------------------------------------------------------|
	    **	| LGK_MEM struct (keep track of this mem)	           |
	    **	|							   |
	    **	-----------------------------------------------------------|
	    **	| If a clustered installation memory reserved for CX       |
	    **	|							   |
	    **	------------------------------------------------------------
	    **	| LKD - database of info for lk system			   |
	    **	|							   |
	    **	------------------------------------------------------------
	    **	| LGD - database of info for lg system			   |
	    **	|							   |
	    **	------------------------------------------------------------
	    **	| memory manipulated by LGKm_* routines for structures used |
	    **	| by both the lk and lg systems.			   |
	    **	|							   |
	    **	------------------------------------------------------------
	    */

	    /* put the LGK_MEM struct at head of segment leaving ptr pointing 
	    ** at next aligned piece of memory
	    */

	    /*
	    ** CX memory (if any) will lie immediately past LGK header.
	    */
	    pcxmem = (PTR)(lgk_mem + 1);
	    pcxmem = (PTR)ME_ALIGN_MACRO(pcxmem, sizeof(ALIGN_RESTRICT));

	    LGK_base.lgk_lkd_ptr = pcxmem + cxmemreq;
	    LGK_base.lgk_lkd_ptr = (PTR) ME_ALIGN_MACRO(LGK_base.lgk_lkd_ptr,
						sizeof(ALIGN_RESTRICT));
	    lgk_mem->mem_lkd = (i4)((char *)LGK_base.lgk_lkd_ptr -
					 (char *)LGK_base.lgk_mem_ptr);

	    LGK_base.lgk_lgd_ptr = (PTR) ((char *) LGK_base.lgk_lkd_ptr +
					    sizeof(LKD));
	    LGK_base.lgk_lgd_ptr = (PTR) ME_ALIGN_MACRO(LGK_base.lgk_lgd_ptr,
						sizeof(ALIGN_RESTRICT));
	    lgk_mem->mem_lgd = (i4)((char *)LGK_base.lgk_lgd_ptr -
					 (char *)LGK_base.lgk_mem_ptr);

	    /* now initialize the rest of memory for allocation */

	    /* how much memory is left? */

	    ptr = ((char *)LGK_base.lgk_lgd_ptr + sizeof(LGD));
	    memleft = size - (((char *) ptr) - ((char *) LGK_base.lgk_mem_ptr));

	    if ( (ret_val = lgkm_initialize_mem(memleft, ptr)) == OK &&
		 (ret_val = LG_meminit(sys_err)) == OK &&
		 (ret_val = LK_meminit(sys_err)) == OK )
	    {
		/* Clear array of attached pids and pid info */
		for ( i = 0; i < LGK_MAX_PIDS; i++ )
		{
		    lgk_mem->mem_pid[i] = (PID)0;
		    lgk_mem->mem_info[i].info_txt[0] = EOS;
		}

		/* Set the creator pid */
		LGK_base.lgk_pid_slot = 0;
		lgk_mem->mem_creator_pid = LGK_my_pid;

		/* Set the version, releasing other processes */
		lgk_mem->mem_version_no = LGK_MEM_VERSION_CURRENT;
	    }
	    else
	    {
		uleFormat(NULL, ret_val, (CL_ERR_DESC *)NULL,
			    ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0);
		ret_val = E_DMA802_LGKINIT_ERROR;

		/* Destroy the shared memory */
		LGK_destroy(allocated_pages, sys_err);
	    }
	}

	if ( ret_val == OK )
	{
	    PCatexit(LGK_rundown);

	    if ( clustered )
	    {
		/*
		** Perform preliminary cluster connection and CX memory init.
		*/

		/* Get installation code */
		NMgtAt("II_INSTALLATION", &nm_string);
		if ( nm_string )
		{
		    instid[0] = *(nm_string);
		    instid[1] = *(nm_string+1);
		}
		else
		{
		    instid[0] = 'A';
		    instid[1] = 'A';
		}
		instid[2] = '\0';
		ret_val = CXinitialize( instid, pcxmem, flag & LGK_IS_CSP );
		if ( ret_val )
		{
		    /* Report error returned from CX */
		    uleFormat(NULL, ret_val, (CL_ERR_DESC *)NULL,
			ULE_LOG, NULL, NULL, 0, NULL, &err_code, 0 );
		    break;
		}
	    }

#ifdef VMS
	    {
	    static $EXHDEF	    exit_block;
	    i4			ctrl_y_mask = 0x02000000;

	    /*
	    ** On VMS, programs like the dmfjsp and logstat run as images in
	    ** the shell process. That is, the system doesn't start and stop
	    ** a process for each invocation of the program, it just starts
	    ** and stops an image in the same process. This means that if
	    ** the program should die, the image may be rundown but the process
	    ** will remain, which means that the check-dead threads of other
	    ** processes in the installation will not feel that they need to
	    ** rundown this process, since it's still alive.
	    **
	    ** By declaring an exit handler, which will get a chance to run
	    ** even if PCexit isn't called, we improve our chances of getting
	    ** to perform rundown processing if we should die unexpectedly.
	    **
	    ** Furthermore, we ask DCL to disable its ^Y processing, which
	    ** lessens the chance that the user will interrupt us while we
	    ** are holding the semaphore.
	    */
	    exit_block.exh$g_func = LGK_rundown;
	    exit_block.exh$l_argcount = 1;
	    exit_block.exh$gl_value = &exit_block.exh$l_status;

	    if (sys$dclexh(&exit_block) != SS$_NORMAL)
		ret_val = FAIL;

	    lib$disable_ctrl(&ctrl_y_mask, 0);
	    }
#endif
	}
	break;
    }

    if ( ret_val )
	LGK_base.lgk_mem_ptr = NULL;

    return(ret_val);
}
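
The attach-side retry protocol described in the long comment inside LGK_initialize (take mem_ext_sem, treat a missing semaphore or a zero mem_version_no as "still being initialized", retry for up to a minute) can be condensed into a small helper. The sketch below is illustrative only; lgk_attach_wait_sketch is not part of the Ingres sources, and it simply assumes the LGK_MEM fields and error codes used above.

static STATUS
lgk_attach_wait_sketch(LGK_MEM *lgk_mem)
{
    STATUS	ret_val;
    i4		retries;

    for (retries = 0; ; retries++)
    {
	if (retries >= 60)
	    return (E_DMA802_LGKINIT_ERROR);	/* creator blocked too long */
	if (retries)
	    PCsleep(1000);			/* wait a second, then retry */

	/* The creator zeroes mem_version_no before initializing and only
	** publishes a non-zero version once the LG/LK memory is usable.
	*/
	ret_val = CSp_semaphore(1, &lgk_mem->mem_ext_sem);
	if (ret_val == E_CS000A_NO_SEMAPHORE)
	    continue;				/* semaphore not built yet */
	if (ret_val)
	    return (E_DMA802_LGKINIT_ERROR);	/* unexpected mutex failure */

	if (lgk_mem->mem_version_no)
	    return (OK);			/* caller still holds mem_ext_sem */

	CSv_semaphore(&lgk_mem->mem_ext_sem);	/* still being init'd; retry */
    }
}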
Example #9
DB_STATUS
qss_end_session( QSF_RCB *qsf_rb)
{
    STATUS		 status = E_DB_OK;
    QSF_CB		*scb = qsf_rb->qsf_scb;
    QSF_RCB		int_qsf_rb;
    QSO_OBJ_HDR		*objects = (QSO_OBJ_HDR *) scb->qss_obj_list;
    QSO_OBJ_HDR		*obj;
    i4		 error;
    i4		 count;
#ifdef    xDEBUG
    QSF_CB		*prevses;
    QSF_CB		*nextses;
    i4			 trace_003;
#endif /* xDEBUG */

    CLRDBERR(&qsf_rb->qsf_error);

    /* Toss any uncommitted named session-owned objects first.
    ** There shouldn't be any, if QEF did its job right, but who knows.
    ** The important thing is that we NOT have objects pretending to be
    ** on a session list when the session has ended.
    */
    int_qsf_rb.qsf_sid = qsf_rb->qsf_sid;
    int_qsf_rb.qsf_scb = scb;
    CLRDBERR(&int_qsf_rb.qsf_error);
    (void) qsf_clrsesobj(&int_qsf_rb);

    /*
    ** Look for orphaned objects that should not exist and destroy unshareable
    ** named objects.
    */
    count = QSF_MAX_ORPHANS;
    while (objects != (QSO_OBJ_HDR *) NULL && --count > 0)
    {
        STATUS		status;
        QSO_OBJ_HDR	*current_obj = objects;

        objects = objects->qso_obnext;

        if (current_obj->qso_obid.qso_lname == 0)
        {
            char	*type;

            switch (current_obj->qso_obid.qso_type)
            {
            case QSO_QTEXT_OBJ:
                type = "Query Text";
                break;
            case QSO_QTREE_OBJ:
                type = "Query Tree";
                break;
            case QSO_QP_OBJ:
                type = "Query Plan";
                break;
            case QSO_SQLDA_OBJ:
                type = "SQLDA";
                break;
            case QSO_ALIAS_OBJ:
                type = "Alias";
                break;
            default:
                type = "Unknown";
                break;
            }

            /* Report the orphaned object. */
            uleFormat( &int_qsf_rb.qsf_error, E_QS001E_ORPHANED_OBJ,
                       NULL, (i4) ULE_LOG, NULL,
                       NULL, (i4) 0, NULL, &error, 1, 0, type);
        }

        int_qsf_rb.qsf_obj_id = current_obj->qso_obid;
        int_qsf_rb.qsf_lk_state = QSO_EXLOCK;
        int_qsf_rb.qsf_sid = qsf_rb->qsf_sid;
        int_qsf_rb.qsf_scb = scb;
        status = qso_lock(&int_qsf_rb);
        if (DB_FAILURE_MACRO(status))
        {
            uleFormat( &int_qsf_rb.qsf_error, 0, NULL, (i4) ULE_LOG,
                       NULL, NULL, (i4) 0, NULL, &error, 0);
        }

        int_qsf_rb.qsf_lk_id = current_obj->qso_lk_id;
        status = qso_destroy(&int_qsf_rb);
        if (DB_FAILURE_MACRO(status))
        {
            uleFormat( &int_qsf_rb.qsf_error, 0, NULL, (i4) ULE_LOG,
                       NULL, NULL, (i4) 0, NULL, &error, 0);
        }
    }

    if (count <= 0)
    {
        uleFormat( &int_qsf_rb.qsf_error, E_QS001F_TOO_MANY_ORPHANS,
                   NULL, (i4) ULE_LOG,
                   NULL, NULL, (i4) 0, NULL, &error, 0);
    }

#ifdef    xDEBUG
    if (Qsr_scb->qsr_tracing && qst_trcheck(&scb, QSF_001_ENDSES_OBJQ))
    {
        DB_ERROR		save_err = qsf_rb->qsf_error;

        TRdisplay("<<< Dumping QSF object queue before ending session >>>\n");
        (void) qsd_obq_dump(qsf_rb);

        qsf_rb->qsf_error = save_err;
    }


    trace_003 = (   Qsr_scb->qsr_tracing
                    && qst_trcheck(&scb, QSF_003_CHK_SCB_LIST)
                );					    /* Must check before
							    ** we blow away the
							    ** session CB.
							    */
#endif /* xDEBUG */


    /* First, wait to get exclusive access to QSF's SERVER CONTROL BLOCK */
    /* ----------------------------------------------------------------- */
    if (CSp_semaphore((i4) TRUE, &Qsr_scb->qsr_psem))
    {
        SETDBERR(&qsf_rb->qsf_error, 0, E_QS0008_SEMWAIT);
        /* Return now, instead of attempting to do a v() */
        return (E_DB_ERROR);
    }

    /* Do a quick run through the global LRU-able (persistent) object list
    ** and clear the session if it's us.  This indicates that the object
    ** is not on any session list, and prevents someone else from believing
    ** a stale qso_session.
    */

    obj = Qsr_scb->qsr_1st_lru;
    while (obj != NULL)
    {
        if (obj->qso_session == scb)
            obj->qso_session = NULL;
        obj = obj->qso_lrnext;
    }

    /* Now remove this session from QSF's server CB */
    /* -------------------------------------------- */
    if (scb == Qsr_scb->qsr_se1st)
        Qsr_scb->qsr_se1st = scb->qsf_next;
    else
        scb->qsf_prev->qsf_next = scb->qsf_next;

    if (scb->qsf_next != NULL)
        scb->qsf_next->qsf_prev = scb->qsf_prev;

    scb->qsf_prev = scb->qsf_next = NULL;
    Qsr_scb->qsr_nsess--;


#ifdef    xDEBUG
    if (trace_003)
    {
        /* This code just verifies that the session CB list is intact */
        /* ---------------------------------------------------------- */
        count = Qsr_scb->qsr_nsess;
        prevses = NULL;
        nextses = Qsr_scb->qsr_se1st;

        while (count-- > 0)
        {
            if (nextses == NULL)
            {
                TRdisplay("*** Not enough QSF session CBs found");
                TRdisplay(" while ending a session.\n");
                SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
                return (E_DB_FATAL);
            }

            if (	(nextses->qsf_prev != prevses)
                    ||	(nextses->qsf_ascii_id != QSFCB_ASCII_ID)
                    ||	(nextses->qsf_type != QSFCB_CB)
                    ||	(nextses->qsf_length != sizeof(QSF_CB))
               )
            {
                TRdisplay("*** QSF's session CB list found trashed");
                TRdisplay(" while ending a session.\n");
                SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
                return (E_DB_FATAL);
            }
            prevses = nextses;
            nextses = nextses->qsf_next;
        }

        if (nextses != NULL)
        {
            TRdisplay("*** Too many QSF session CBs detected");
            TRdisplay(" while ending a session.\n");
            SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
            return (E_DB_FATAL);
        }
    }
#endif /* xDEBUG */


    status = E_DB_OK;

    /* Release exclusive access to QSF's SERVER CONTROL BLOCK */
    /* ------------------------------------------------------ */
    if (CSv_semaphore(&Qsr_scb->qsr_psem))
    {
        status = E_DB_ERROR;
        SETDBERR(&qsf_rb->qsf_error, 0, E_QS0004_SEMRELEASE);
    }

    return (status);
}
Example #10
DB_STATUS
qss_bgn_session( QSF_RCB *qsf_rb )
{
    STATUS		status;
    QSF_CB		*scb = qsf_rb->qsf_scb;
#ifdef    xDEBUG
    i4		n;
    QSF_CB		*prevses;
    QSF_CB		*nextses;
#endif /* xDEBUG */

    CLRDBERR(&qsf_rb->qsf_error);

    Qsr_scb = (QSR_CB *) qsf_rb->qsf_server;

    if (scb == NULL)
    {
        SETDBERR(&qsf_rb->qsf_error, 0, E_QS001B_NO_SESSION_CB);
        return (E_DB_SEVERE);
    }

    /* Before anything, set up the session CB's standard header portion */
    /* ---------------------------------------------------------------- */
    scb->qsf_ascii_id = QSFCB_ASCII_ID;
    scb->qsf_type = QSFCB_CB;
    scb->qsf_length = sizeof(QSF_CB);
    scb->qsf_prev = NULL;

    /* Update the other structure information.  */
    /* ---------------------------------------- */
    scb->qss_obj_list = (QSO_OBJ_HDR *) NULL;
    scb->qss_master = (QSO_MASTER_HDR *) NULL;
    scb->qss_snamed_list = (QSO_OBJ_HDR *) NULL;

    /* Init the tracing vector */
    /* ----------------------- */
    ult_init_macro(&scb->qss_trstruct.trvect, 128, 0, 8);

    /* Get exclusive access to QSF's SERVER CONTROL BLOCK */
    /* -------------------------------------------------- */
    if (CSp_semaphore((i4) TRUE, &Qsr_scb->qsr_psem))
    {
        SETDBERR(&qsf_rb->qsf_error, 0, E_QS0008_SEMWAIT);
        /* Return now, instead of attempting to do a v() */
        return (E_DB_ERROR);
    }

    /* Make the session known to QSF's server CB */
    /* ----------------------------------------- */
    if (Qsr_scb->qsr_se1st != (QSF_CB *) NULL)
        Qsr_scb->qsr_se1st->qsf_prev = scb;
    scb->qsf_next = Qsr_scb->qsr_se1st;
    Qsr_scb->qsr_se1st = scb;
    Qsr_scb->qsr_nsess++;
    if (Qsr_scb->qsr_nsess > Qsr_scb->qsr_mxsess)
        Qsr_scb->qsr_mxsess = Qsr_scb->qsr_nsess;

#ifdef    xDEBUG
    if (Qsr_scb->qsr_tracing && qst_trcheck(&scb, QSF_003_CHK_SCB_LIST))
    {
        /* This code just verifies that the session CB list is intact */
        /* ---------------------------------------------------------- */
        n = Qsr_scb->qsr_nsess;
        prevses = NULL;
        nextses = Qsr_scb->qsr_se1st;

        while (n-- > 0)
        {
            if (nextses == NULL)
            {
                TRdisplay("*** Not enough QSF session CBs found");
                TRdisplay(" while beginning a session.\n");
                SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
                return (E_DB_FATAL);
            }

            if (	(nextses->qsf_prev != prevses)
                    ||	(nextses->qsf_ascii_id != QSFCB_ASCII_ID)
                    ||	(nextses->qsf_type != QSFCB_CB)
                    ||	(nextses->qsf_length != sizeof(QSF_CB))
               )
            {
                TRdisplay("*** QSF's session CB list found trashed");
                TRdisplay(" while beginning a session.\n");
                SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
                return (E_DB_FATAL);
            }

            prevses = nextses;
            nextses = nextses->qsf_next;
        }

        if (nextses != NULL)
        {
            TRdisplay("*** Too many QSF session CBs detected");
            TRdisplay(" while beginning a session.\n");
            SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
            return (E_DB_FATAL);
        }
    }
#endif /* xDEBUG */


    status = E_DB_OK;

    /* Release exclusive access to QSF's SERVER CONTROL BLOCK */
    /* ------------------------------------------------------ */
    if (CSv_semaphore(&Qsr_scb->qsr_psem))
    {
        status = E_DB_ERROR;
        SETDBERR(&qsf_rb->qsf_error, 0, E_QS0004_SEMRELEASE);
    }

    return (status);
}
Example #11
/*{
** Name: psq_bgn_session	- Begin a parser session.
**
**  INTERNAL PSF call format: status = psq_bgn_session(&psq_cb, &sess_cb);
**
**  EXTERNAL call format: status = psq_call(PSQ_BGN_SESSION, &psq_cb, &sess_cb);
**
** Description:
**      The psq_bgn_session function begins a parser session.  It should be
**	called each time a new user connects to a server.  There may be 
**      many parser sessions per database server.  There should be one parser 
**      session for each invocation of the database system that is connected 
**      to the server.  When starting a parser session, one has to tell it
**	what query language to use, and other session parameters.
**
** Inputs:
**      psq_cb
**          .psq_qlang                  The query language to use.
**          .psq_decimal
**		.psf_decspec		TRUE indicates that the decimal marker
**					has been specified.  FALSE means use the
**					default (a ".").
**		.psf_decimal		The character to use as a decimal marker
**					(if specified).
**	    .psq_distrib		Indicator for whether distributed
**					statements and constructs should be
**					accepted.
**	    .psq_sessid			Session id 
**	    .psq_server			address of server control block
**	    .psq_adf_cb			Pointer to session's ADF_CB
**	    .psq_dbid			Database id for this session.
**	    .psq_user			User name of 
**	    .psq_dba			User name of dba
**	    .psq_group			Group id of session
**	    .psq_aplid			Application id of session
**	    .psq_flag			bitmask containing the following flags:
**	      .psq_catupd		  TRUE means catalogs updateable
**	      .psq_warnings		  Set to TRUE if user wishes to see
**					  warnings on unsupported commands
**	    .psq_idxstruct		Structure for creating new indexes
**					(e.g. DB_ISAM_STORE)
**	    .psq_udbid			Unique database id for this session.
**	    .psq_ustat	 		User status flags from SCS_ICS
**	    .psq_dbxlate		Case translation semantics for the db
**	sess_cb				Pointer to session control block
**					(Can be NULL)
**
** Outputs:
**      psq_cb
**          .psq_error                  Error information
**		.err_code		    What error occurred
**		    E_PS0000_OK			Success
**		    E_PS0001_INTERNAL_ERROR	    Internal PSF problem
**		    E_PS0201_BAD_QLANG		    Bad query language specifier
**		    E_PS0203_NO_DECIMAL		    No decimal marker specified
**		    E_PS0204_BAD_DISTRIB	    Bad distributed
**						    specification
**		    E_PS0205_SRV_NOT_INIT	    Server not initialized
**		    E_PS0206_TOO_MANY_SESS	    Too many sessions at one
**						    time
**	Returns:
**	    E_DB_OK			Function completed normally.
**	    E_DB_WARN			Function completed with warning(s)
**	    E_DB_ERROR			Function failed; non-catastrophic error
**	    E_DB_SEVERE			Session is to be aborted
**	    E_DB_FATAL			Function failed; catastrophic error
**	Exceptions:
**	    none
**
** Side Effects:
**	    Causes memory to be allocated.
**	    Increments the session count in the server control block.
**
** History:
**	01-oct-85 (jeff)
**          written
**	28-jul-86 (jeff)
**	    Added initialization of pss_catupd and pss_idxstruct
**      26-aug-86 (seputis)
**          Removed definition of yaccstream
**	13-apr-87 (puree)
**	    Initialize prototype list for dynamic SQL.
**	24-apr-87 (stec)
**	    init pss_project.
**	11-may-87 (stec)
**	    store psq_udbid to pss_dbid.
**	04-sep-87 (stec)
**	    Added critical region code where needed.
**	02-oct-87 (stec)
**	    Added pss_journaling initialization.
**	13-jun-88 (stec)
**	    Added initialization of pss_ruset for DB procs.
**	08-mar-89 (andre)
**	    Copy dba_drop_all from PSQ_CB to PSS_SESBLK.
**	15-mar-89 (ralph)
**	    GRANT Enhancements, Phase 1:
**	    Copy psq_aplid to pss_aplid;
**	    Copy psq_group to pss_group.
**	16-mar-89 (neil)
**	    Initialized rule field.
**	27-jul-89 (jrb)
**	    Copy numeric literals flag into session cb.
**	27-oct-89 (ralph)
**	    Copy user status flags to session control block.
**	11-oct-89 (ralph)
**	    Initialize pss_rgset and pss_raset.
**	28-dec-89 (andre)
**	    Copy fips_mode from PSQ_CB to PSS_SESBLK.
**	13-feb-90 (andre)
**	    set scf_stype to SCU_EXCLUSIVE before calling scu_swait.
**	12-sep-90 (sandyh)
**	    Added support for session memory value calculated from psf
**	    memory startup parameter.
**	15-nov-90 (andre)
**	    check the return status after calling SCF to acquire or to release a
**	    semaphore.
**	    If an error occurred when trying to acquire the semaphore, return
**	    E_DB_SEVERE to abort the session.
**	    If an error occurred when trying to release the semaphore, return
**	    E_DB_FATAL to bring down the server.
**	17-may-91 (andre)
**	    store DBA name into sess_cb->pss_dbaname and NULL-terminate.
**	08-nov-91 (rblumer)
**          merged from 6.4:  25-jul-91 (andre)
**		if (psq_cb->psq_flag & PSQ_STRIP_NL_IN_STRCONST), set bit
**		PSS_STRIP_NL_IN_STRCONST in sess_cb->pss_ses_flag.  this will
**		indicate that we are connected to an older FE, so the scanners
**		will continue to strip NLs inside quoted strings;
**		this is required to fix bug 38098
**	14-jan-92 (barbara)
**	    Included ddb.h for Star.  Updated to check for distributed
**	    specification.
**	26-feb-92 (andre)
**	    if PSQ_REPAIR_SYSCAT is set in psq_cb->psq_flag, set
**	    PSS_REPAIR_SYSCAT in sess_cb->pss_ses_flags
**	30-mar-1992 (bryanp)
**	    Fill in pss_sess_owner with a session-unique owner name for use
**	    by temporary tables which are owned by this session.
**	02-jun-92 (andre)
**	    initialize pss_dependencies_stream to NULL to avoid use of an illegal
**	    address throughout the parser.
**	24-nov-92 (ralph)
**	    CREATE SCHEMA:
**	    Initialize pss_prvgoval
**	22-dec-92 (rblumer)
**	    initialize pointer for statement-level rule list.
**	14-jan-93 (andre)
**	    remember whether we are running UPGRADEDB - this will enable us to
**	    decide whether IIDEVICES can be dropped - which is needed by
**	    UPGRADEDB
**	15-mar-93 (ralph)
**	    DELIM_IDENT: initialize pss_dbxlate to zero
**	08-apr-93 (andre)
**	    names of rule list headers in sess_cb have changed (and their
**	    number has doubled)
**	26-mar-93 (ralph)
**	    DELIM_IDENT: Must initialize pss_dbxlate from psq_cb.psq_dbxlate
**	    and pss_cat_owner from psq_cat_owner.
**	10-aug-93 (andre)
**	    fixed cause of a compiler warning
**	08-sep-93 (swm)
**	    Changed sizeof(DB_SESSID) to sizeof(CS_SID) to reflect recent CL
**	    interface revision.
**	20-sep-93 (rogerk)
**	    Changed default table create semantics to be WITH JOURNALING.
**	    Initialized the pss_ses_flag setting to include PSS_JOURNALING
**	    which mimics the user requesting "set journaling" to indicate that
**	    tables created should be journaled.
**	08-oct-93 (rblumer)
**	    increased values allowed in pss_trace vector, using PSS_TVALS.
**	18-oct-93 (rogerk)
**	    Added support for journal default override.  Check psf server
**	    control block flag for PSF_NO_JNL_DEFAULT override before setting
**	    the session parse flag to assume journaling on table creates.
**	15-nov-93 (andre)
**	    add code to initialize a newly added sess_cb->pss_flattening_flags
**	01-nov-93 (anitap)
**	    if PSQ_INGRES_PRIV is set in psq_cb->psq_flag, set
**	    PSS_INGRES_PRIV in sess_cb->pss_ses_flags.
**	17-dec-93 (rblumer)
**	    "FIPS mode" no longer exists.  It was replaced some time ago by
**	    several feature-specific flags (e.g. flatten_nosingleton and
**	    direct_cursor_mode).  So I removed all FIPS_MODE flags.
**	02-jan-94 (andre)
**	    if starting a local session, call DMF to determine whether the 
**	    database to which we are connected is being journaled and record 
**	    that information by setting (or not setting) PSS_JOURNALED_DB bit 
**	    in pss_ses_flags
**	 7-jan-94 (swm)
**	    Bug #58635
**	    Added PTR cast for pss_owner which has changed type to PTR.
**	17-mar-94 (robf)
**          Add support for PSQ_SELECT_ALL flag
**	13-Feb-1995 (canor01)
**	    initialize the pss_audit field in the session control block
**	09-Oct-1998 (jenjo02)
**	    Removed SCF semaphore functions, inlining the CS calls instead.
**	23-mar-1999 (thaju02)
**	    Modified '$Sess' to use #define DB_SESS_TEMP_OWNER. (B94067)
**      01-Dec-2000 (hanal04) Bug 100680 INGSRV 1123
**          If PSQ_RULE_UPD_PREFETCH is set turn on PSS_RULE_UPD_PREFETCH
**          in the session control block to signify that we should use
**          the prefetch strategy required to ensure consistent behaviour in
**          updating rules fired by updates.
**	10-Jan-2001 (jenjo02)
**	    Remove callback to SCF to get session id and ADF_CB;
**	    *ADF_CB now supplied by scsinit in PSQ_CB.
**	30-Jan-2004 (schka24)
**	    Get rid of a type-cast warning on adf cb.
**	3-Feb-2005 (schka24)
**	    Num-literals renamed to parser-compat, fix here.
**	15-june-06 (dougi)
**	    Add support for "before" triggers.
**	30-aug-06 (thaju02)
**	    If PSQ_RULE_DEL_PREFETCH is set turn on PSS_RULE_DEL_PREFETCH
**          in the session control block, for prefetch strategy to 
**          be applied for deletes. (B116355)
**	26-Oct-2009 (kiria01) SIR 121883
**	    Scalar sub-query support: Added copy of
**	    psq_flag.PSQ_NOCHK_SINGLETON_CARD to session flag
**	    for defaulting SET CARDINALITY_CHECK
**      November 2009 (stephenb)
**          Batch execution; initialization of new fields.
**      29-apr-2010 (stephenb)
**          Init batch_copy_optim.
**	04-may-2010 (miket) SIR 122403
**	    Init new sess_cb->pss_stmt_flags2.
**	19-May-2010 (kiria01) b123766
**	    Get cardinality check default from server block not psq_cb
**	21-Jul-2010 (kschendel) SIR 124104
**	    Initialize default compression from facility cb.
**	14-Oct-2010 (kschendel) SIR 124544
**	    Initialize default result structure from facility cb.
**	19-Nov-2010 (kiria01) SIR 124690
**	    Add support for setting installation wide collation defaults.
*/
DB_STATUS
psq_bgn_session(
	register PSQ_CB     *psq_cb,
	register PSS_SESBLK *sess_cb)
{
    i4		    err_code;
    i4			    i;
    DB_STATUS		    status = E_DB_OK;
    STATUS		    sem_status;
    i4		    sem_errno;
    bool		    leave_loop = TRUE;
    ULM_RCB		    ulm_rcb;

    /*
    ** No error to begin with.
    */
    psq_cb->psq_error.err_code = E_PS0000_OK;

    /*
    ** Do as much validity checking as possible before allocating any memory.
    ** That way, there won't be any cleaning up to do for the majority of
    ** errors.
    */

    /*
    ** Check for server initialized. This code could be placed within
    ** critical region, but this is not necessary, since this is a flag
    ** test.
    */
    if (!Psf_srvblk->psf_srvinit)
    {
	(VOID) psf_error(E_PS0205_SRV_NOT_INIT, 0L, PSF_CALLERR, &err_code,
	    &psq_cb->psq_error, 0);
	return (E_DB_ERROR);
    }

    /*
    ** Check for valid language spec.
    */
    if (psq_cb->psq_qlang != DB_QUEL && psq_cb->psq_qlang != DB_SQL)
    {
	(VOID) psf_error(E_PS0201_BAD_QLANG, 0L, PSF_CALLERR, &err_code,
	    &psq_cb->psq_error, 0);
	return (E_DB_ERROR);
    }
	
    /*
    ** Check whether language is allowed in this server.  This will be useful
    ** when we have configurable servers, where some query languages can be
    ** used and some can't. This code could be placed within a critical region
    ** but it is not necessary, since this is a flag test only.
    */
    if ((psq_cb->psq_qlang & Psf_srvblk->psf_lang_allowed) == 0)
    {
	(VOID) psf_error(E_PS0202_QLANG_NOT_ALLOWED, 0L, PSF_CALLERR, &err_code,
	    &psq_cb->psq_error, 0);
	return (E_DB_ERROR);
    }

    /*
    ** Make sure that the decimal character is actually specified.
    */
    if (!psq_cb->psq_decimal.db_decspec)
    {
	(VOID) psf_error(E_PS0203_NO_DECIMAL, 0L, PSF_CALLERR, &err_code,
	    &psq_cb->psq_error, 0);
	return (E_DB_ERROR);
    }

    /*
    ** Check distributed specification
    **
    ** a=local_server, b=distrib_server, c=distrib_session
    **
    **   a,b
    **
    **     00  01  11  10
    **    -----------------
    ** c  |   |   |   |   |
    **  0 | 1 | 1 | 0 | 0 |
    **    |   |   |   |   |
    **    -----------------  ==> ERROR
    **    |   |   |   |   |
    **  1 | 1 | 0 | 0 | 1 |
    **    |   |   |   |   |
    **    -----------------
    */
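    /*
    ** In words (restating the table above): reject the session if neither
    ** DB_1_LOCAL_SVR nor DB_3_DDB_SESS is set, or if DB_3_DDB_SESS is set
    ** without DB_2_DISTRIB_SVR.
    */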
    if (   !(psq_cb->psq_distrib & (DB_1_LOCAL_SVR | DB_3_DDB_SESS))
	|| ((~psq_cb->psq_distrib & DB_2_DISTRIB_SVR)
	    && (psq_cb->psq_distrib & DB_3_DDB_SESS))
	)
    {
	psf_error(E_PS0204_BAD_DISTRIB, 0L, PSF_CALLERR, &err_code,
	    &psq_cb->psq_error,0);
	return (E_DB_ERROR);
    }

    /*
    ** Check for too many sessions in server at one time.
    ** This code must be executed as a critical region.
    */

    do		    /* something to break out of */
    {
	/* get the semaphore */
	if (sem_status = CSp_semaphore(1, &Psf_srvblk->psf_sem)) /* exclusive */
	{
	    status = E_DB_SEVERE;	/* abort the session */
	    sem_errno = E_PS020A_BGNSES_GETSEM_FAILURE;
	    break;
	}

	if (Psf_srvblk->psf_nmsess >= Psf_srvblk->psf_mxsess)
	{
	    (VOID) psf_error(E_PS0208_TOO_MANY_SESS, 0L, PSF_CALLERR, &err_code,
		&psq_cb->psq_error, 0);
	    status = E_DB_ERROR;
	    break;
	}

	/* Increment the session count */
	Psf_srvblk->psf_nmsess++;
	sess_cb->pss_psessid = ++Psf_srvblk->psf_sess_num;

	/* leave_loop has already been set to TRUE */
    } while (!leave_loop);

    /* if semaphore has been successfully acquired, try to release it */
    if (sem_status == OK)
    {
	if (sem_status = CSv_semaphore(&Psf_srvblk->psf_sem))
	{
	    status = E_DB_FATAL;	/* bring down the server */
	    sem_errno = E_PS020B_BGNSES_RELSEM_FAILURE;
	}
    }

    /*
    ** if an error was encountered while trying to get or to release a
    ** semaphore, report it here
    */
    if (sem_status != OK)
    {
	(VOID) psf_error(sem_errno, sem_status, PSF_INTERR,
	    &err_code, &psq_cb->psq_error, 0);
    }

    if (DB_FAILURE_MACRO(status))
    {
	return(status);
    }

    /*
    ** Initialize the case translation semantics stuff
    */
    sess_cb->pss_dbxlate = psq_cb->psq_dbxlate;
    sess_cb->pss_cat_owner = psq_cb->psq_cat_owner;

    /*
    ** Copy the user name and dba name to the session control block.
    */
    STRUCT_ASSIGN_MACRO(psq_cb->psq_user.db_tab_own, sess_cb->pss_user);
    STRUCT_ASSIGN_MACRO(psq_cb->psq_dba, sess_cb->pss_dba);
    STRUCT_ASSIGN_MACRO(psq_cb->psq_group, sess_cb->pss_group);
    STRUCT_ASSIGN_MACRO(psq_cb->psq_aplid, sess_cb->pss_aplid);

    /* copy DBA name into sess_cb->pss_dbaname and NULL-terminate */
    {
	u_i2			dba_name_len;

	dba_name_len = (u_i2) psf_trmwhite((u_i4) sizeof(sess_cb->pss_dba),
			    (char *) &sess_cb->pss_dba);
	MEcopy((PTR) &sess_cb->pss_dba, dba_name_len,
	       (PTR) sess_cb->pss_dbaname);

	sess_cb->pss_dbaname[dba_name_len] = EOS;
    }

    /*
    ** Build a DB_OWN_NAME which contains a session-unique owner name. This
    ** owner name will be used for temporary tables which are owned by this
    ** session.
    */
    {
	char	    temp_sess_id[10];

	STmove(DB_SESS_TEMP_OWNER, ' ', sizeof(sess_cb->pss_sess_owner),
		(char *)&sess_cb->pss_sess_owner);
	/*
	** We can't convert directly into the sess_owner field because CVlx
	** null-terminates the result, and we don't want the trailing null
	*/
	CVlx(sess_cb->pss_psessid, temp_sess_id);
	MEcopy(temp_sess_id, 8, &sess_cb->pss_sess_owner.db_own_name[5]);
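	/*
	** Illustrative note (an assumption, not in the original source):
	** with DB_SESS_TEMP_OWNER being "$Sess" (per the 23-mar-1999
	** history entry), the resulting owner name is "$Sess" followed by
	** the 8-character hex form of pss_psessid, blank-padded to the
	** full DB_OWN_NAME length.
	*/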
    }

    /*
    ** Start with the per-user quota of memory.  Note that the user may have
    ** overridden the default value at server startup, in which case the
    ** calculated amount (pool/sessions) is used; otherwise, the default
    ** amount is used.
    */
    sess_cb->pss_memleft = (Psf_srvblk->psf_sess_mem) ? Psf_srvblk->psf_sess_mem
						      : PSF_SESMEM;

    /*
    ** Initialize the user range table.
    */
    if (pst_rginit(&sess_cb->pss_usrrange) != E_DB_OK)
    {
	return (E_DB_FATAL);
    }
	
    /*
    ** Initialize the auxiliary range table.
    */
    if (pst_rginit(&sess_cb->pss_auxrng) != E_DB_OK)
    {
	return (E_DB_FATAL);
    }

    /*
    ** Open a memory stream for the symbol table.  The symbol table is
    ** composed of a list of blocks.
    ** Allocate the symbol table at the same time.
    */
    ulm_rcb.ulm_facility = DB_PSF_ID;
    ulm_rcb.ulm_poolid = Psf_srvblk->psf_poolid;
    ulm_rcb.ulm_blocksize = sizeof(PSS_SYMBLK);
    ulm_rcb.ulm_memleft = &sess_cb->pss_memleft;
    /* Set pointer to stream handle for ULM */
    ulm_rcb.ulm_streamid_p = &sess_cb->pss_symstr;
    /* Open a private, thread-safe stream */
    ulm_rcb.ulm_flags = ULM_PRIVATE_STREAM | ULM_OPEN_AND_PALLOC;
    ulm_rcb.ulm_psize = sizeof(PSS_SYMBLK);
    if (ulm_openstream(&ulm_rcb) != E_DB_OK)
    {
	if (ulm_rcb.ulm_error.err_code == E_UL0005_NOMEM)
	{
	    (VOID) psf_error(E_PS0F02_MEMORY_FULL, 0L, PSF_CALLERR, 
		&err_code, &psq_cb->psq_error, 0);
	}
	else
	{
	    (VOID) psf_error(E_PS0A02_BADALLOC, ulm_rcb.ulm_error.err_code,
		PSF_INTERR, &err_code, &psq_cb->psq_error, 0);
	}

	return((ulm_rcb.ulm_error.err_code == E_UL0004_CORRUPT) ? E_DB_FATAL
								: E_DB_ERROR);
    }

    sess_cb->pss_symtab = (PSS_SYMBLK*) ulm_rcb.ulm_pptr;
    sess_cb->pss_symtab->pss_sbnext = (PSS_SYMBLK *) NULL;

    /*
    ** Allocate the YACC_CB.  
    */

    if ((status = psl_yalloc(sess_cb->pss_symstr, 
	&sess_cb->pss_memleft,
	(PTR *) &sess_cb->pss_yacc, &psq_cb->psq_error)) != E_DB_OK)
    {
	/*
	** If the allocation failed, remember to close the streams, so the
	** memory associated with it will be freed.
	*/
	(VOID) ulm_closestream(&ulm_rcb);
	return (status);
    }
	
    /*
    ** Fill in the control block header.
    */
    sess_cb->pss_next = (PSS_SESBLK *) NULL;
    sess_cb->pss_prev = (PSS_SESBLK *) NULL;
    sess_cb->pss_length = sizeof(PSS_SESBLK);
    sess_cb->pss_type = PSS_SBID;
    sess_cb->pss_owner = (PTR)DB_PSF_ID;
    sess_cb->pss_ascii_id = PSSSES_ID;

    /*
    ** Initialize the session control block.
    */
    /* Save the session id */
    sess_cb->pss_sessid	    = psq_cb->psq_sessid;

    /* Set pointer to session's ADF_CB */
    sess_cb->pss_adfcb	    = (ADF_CB *) psq_cb->psq_adfcb;

    /* No cursors yet */
    sess_cb->pss_numcursors = 0;

    /* Language has already been validated */
    sess_cb->pss_lang = psq_cb->psq_qlang;

    /* Decimal spec has already been validated */
    sess_cb->pss_decimal = psq_cb->psq_decimal.db_decimal;

    /* Distributed spec has already been validated */
    sess_cb->pss_distrib = psq_cb->psq_distrib;

    /* Save the database id */
    sess_cb->pss_dbid = psq_cb->psq_dbid;

    /* Save the unique database id */
    sess_cb->pss_udbid = psq_cb->psq_udbid;

    /* Initialize QSF_RCB for use by psfmem.c functions */
    sess_cb->pss_qsf_rcb.qsf_type = QSFRB_CB;
    sess_cb->pss_qsf_rcb.qsf_ascii_id = QSFRB_ASCII_ID;
    sess_cb->pss_qsf_rcb.qsf_length = sizeof(sess_cb->pss_qsf_rcb);
    sess_cb->pss_qsf_rcb.qsf_owner = (PTR)DB_PSF_ID;
    sess_cb->pss_qsf_rcb.qsf_sid = sess_cb->pss_sessid;

    /*
    **	New session, so reset all bit flags
    */
    sess_cb->pss_stmt_flags = sess_cb->pss_stmt_flags2 =
	sess_cb->pss_dbp_flags = sess_cb->pss_ses_flag = 0L;
    sess_cb->pss_flattening_flags = 0;

    /*
    ** Default table create semantics are to assume journaling unless
    ** the PSF_NO_JNL_DEFAULT override is set.
    */
    if ((Psf_srvblk->psf_flags & PSF_NO_JNL_DEFAULT) == 0)
	sess_cb->pss_ses_flag |= PSS_JOURNALING;

	    /* catalog update flag */
    if (psq_cb->psq_flag & PSQ_CATUPD)
	sess_cb->pss_ses_flag |= PSS_CATUPD;

	    /* warnings on unsupported commands */
    if (psq_cb->psq_flag & PSQ_WARNINGS)
	sess_cb->pss_ses_flag |= PSS_WARNINGS;

	/* INDICATE if the DBA may DROP everyone's tables */
    if (psq_cb->psq_flag & PSQ_DBA_DROP_ALL)
	sess_cb->pss_ses_flag |= PSS_DBA_DROP_ALL;

	/* INDICATE if the session may SELECT everyone's tables */
    if (psq_cb->psq_flag & PSQ_SELECT_ALL)
	sess_cb->pss_ses_flag |= PSS_SELECT_ALL;

	/*
	** indicate that the session is allowed to INSERT/DELETE/UPDATE an index
	** which is a catalog (but not an extended catalog)
	*/
    if (psq_cb->psq_flag & PSQ_REPAIR_SYSCAT)
	sess_cb->pss_ses_flag |= PSS_REPAIR_SYSCAT;

	/* 
	** indicate that the session allows $ingres to drop/add constraint on
	** tables owned by other users
	*/
    if (psq_cb->psq_flag & PSQ_INGRES_PRIV)
	sess_cb->pss_ses_flag |= PSS_INGRES_PRIV;	

    if (psq_cb->psq_flag & PSQ_ROW_SEC_KEY)
	sess_cb->pss_ses_flag |= PSS_ROW_SEC_KEY;

    /* See if passwords, roles allowed */
    if (psq_cb->psq_flag & PSQ_PASSWORD_NONE)
	sess_cb->pss_ses_flag |= PSS_PASSWORD_NONE;

    if (psq_cb->psq_flag & PSQ_ROLE_NONE)
	sess_cb->pss_ses_flag |= PSS_ROLE_NONE;

    if (psq_cb->psq_flag & PSQ_ROLE_NEED_PW)
	sess_cb->pss_ses_flag |= PSS_ROLE_NEED_PW;

	/* remember whether we are running UPGRADEDB */
    if (psq_cb->psq_flag & PSQ_RUNNING_UPGRADEDB)
	sess_cb->pss_ses_flag |= PSS_RUNNING_UPGRADEDB;

    /* Pick up serverwide default for card check */
    if (Psf_srvblk->psf_flags & PSF_NOCHK_SINGLETON_CARD)
	sess_cb->pss_ses_flag |= PSS_NOCHK_SINGLETON_CARD;

        /* Initialize pss_project. */
    sess_cb->pss_ses_flag |= PSS_PROJECT;	/* pss_project = TRUE */
    
    /* init last statement */
    sess_cb->pss_last_sname[0] = EOS;
    /* batch optimization switch starts undefined */
    sess_cb->batch_copy_optim = PSS_BATCH_OPTIM_UNDEF;

    /* 
    ** if starting a local session, determine whether the database is being 
    ** journaled
    */
    if (~psq_cb->psq_distrib & DB_3_DDB_SESS)
    {
        DMC_CB		    dmc_cb, *dmc = &dmc_cb;
        DMC_CHAR_ENTRY	    dmc_char;

	MEfill(sizeof(dmc_cb), (u_char) 0, (PTR) dmc);

        dmc->type = DMC_CONTROL_CB;
        dmc->length = sizeof(*dmc);
        dmc->dmc_op_type = DMC_DATABASE_OP;
	dmc->dmc_session_id = (PTR) sess_cb->pss_sessid;
        dmc->dmc_flags_mask = DMC_JOURNAL;
        dmc->dmc_char_array.data_address= (PTR) &dmc_char;
	dmc->dmc_char_array.data_out_size = sizeof(dmc_char);
	dmc->dmc_db_id = (char *) sess_cb->pss_dbid;
	
	status = dmf_call(DMC_SHOW, (PTR) dmc);
	if (DB_FAILURE_MACRO(status))
	{
	    (VOID) psf_error(E_PS020E_CANT_GET_DB_JOUR_STATUS, 
		dmc->error.err_code, PSF_INTERR, &err_code, 
		&psq_cb->psq_error, 0);
	    return(status);
	}

	if (dmc_char.char_value == DMC_C_ON)
	{
	    sess_cb->pss_ses_flag |= PSS_JOURNALED_DB;
	}
    }

    /* Save the storage structure for indexes */
    sess_cb->pss_idxstruct = psq_cb->psq_idxstruct;

    /* Make session copy of parser compatability settings */
    sess_cb->pss_parser_compat = psq_cb->psq_parser_compat;

    /* remember if NLs inside string constants need to be stripped */
    if (psq_cb->psq_flag & PSQ_STRIP_NL_IN_STRCONST)
	sess_cb->pss_ses_flag |= PSS_STRIP_NL_IN_STRCONST;

    /* no rule tree yet */
    sess_cb->pss_row_lvl_usr_rules  =
    sess_cb->pss_row_lvl_sys_rules  =
    sess_cb->pss_stmt_lvl_usr_rules =
    sess_cb->pss_stmt_lvl_sys_rules =
    sess_cb->pss_row_lvl_usr_before_rules  =
    sess_cb->pss_row_lvl_sys_before_rules  =
    sess_cb->pss_stmt_lvl_usr_before_rules =
    sess_cb->pss_stmt_lvl_sys_before_rules = (PST_STATEMENT *) NULL;

    if (psq_cb->psq_flag & PSQ_RULE_DEL_PREFETCH)
	sess_cb->pss_ses_flag |= PSS_RULE_DEL_PREFETCH;

    if(psq_cb->psq_flag2 & PSQ_RULE_UPD_PREFETCH)
        sess_cb->pss_ses_flag |= PSS_RULE_UPD_PREFETCH;

    /* copy user status flags to session control block */
    sess_cb->pss_ustat = psq_cb->psq_ustat;

    /*
    ** Initialize lots of pointers to NULL because nothing is happening yet.
    */
    sess_cb->pss_qbuf = sess_cb->pss_nxtchar = sess_cb->pss_prvtok =
	sess_cb->pss_bgnstmt = sess_cb->pss_endbuf =
	sess_cb->pss_prvgoval = (u_char *) NULL;

    /* initialize pss_audit */
    sess_cb->pss_audit = NULL;

    for (i = 0; i < PSS_CURTABSIZE; i++)
    {
	sess_cb->pss_curstab.pss_curque[i] = (PSC_CURBLK *) NULL;
    }

    /* initialize prototype list for dynamic SQL */
    sess_cb->pss_proto = (PST_PROTO *) NULL;

    /*
    ** pss_dependencies_stream, when not NULL, is expected to point at a valid
    ** stream descriptor.  After closing the stream we always reset
    ** pss_dependencies_stream to NULL, but in some cases we may end up checking
    ** pss_dependencies_stream before ever opening (and closing) it.  As a
    ** result, we could end up using an invalid address as a stream pointer.
    ** Initializing it here to NULL will ensure that it is non-NULL iff it
    ** points at a valid open stream descriptor.
    */
    sess_cb->pss_dependencies_stream = (PSF_MSTREAM *) NULL;

    /* No trace flags set */
    /* expect lint message */
    ult_init_macro(&sess_cb->pss_trace, PSS_TBITS, PSS_TVALS, PSS_TVAO);

    /* Cursor id set to 0, no cursors open yet */
    sess_cb->pss_crsid = 0;

    sess_cb->pss_create_compression = Psf_srvblk->psf_create_compression;

    /* SCF can pass a client requested result_structure, but if it
    ** doesn't, init from server default.
    */
    if (psq_cb->psq_result_struct != 0)
    {
	sess_cb->pss_result_struct = psq_cb->psq_result_struct;
	sess_cb->pss_result_compression = psq_cb->psq_result_compression;
    }
    else
    {
	sess_cb->pss_result_struct = Psf_srvblk->psf_result_struct;
	sess_cb->pss_result_compression = Psf_srvblk->psf_result_compression;
    }

    if (psq_cb->psq_def_coll > DB_NOCOLLATION)
	sess_cb->pss_def_coll = psq_cb->psq_def_coll;
    else
	sess_cb->pss_def_coll = Psf_srvblk->psf_def_coll;
    if (psq_cb->psq_def_unicode_coll > DB_NOCOLLATION)
	sess_cb->pss_def_unicode_coll = psq_cb->psq_def_unicode_coll;
    else
	sess_cb->pss_def_unicode_coll = Psf_srvblk->psf_def_unicode_coll;

    return (E_DB_OK);
}
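/*
** A minimal caller sketch (an assumption, not part of the original source),
** showing how a facility might start a parser session through the external
** call format documented above,
** "status = psq_call(PSQ_BGN_SESSION, &psq_cb, &sess_cb)".  The field
** values chosen here are illustrative only; a real caller also fills in
** psq_sessid, psq_adfcb, psq_dbid, psq_user, psq_dba and the other inputs
** listed in the header before making the call.  The helper name is
** hypothetical.
*/
static DB_STATUS
example_bgn_parser_session(void)
{
    PSQ_CB	psq_cb;
    PSS_SESBLK	sess_cb;
    DB_STATUS	status;

    /* Zero the control block so unused fields are in a known state */
    MEfill(sizeof(psq_cb), (u_char) 0, (PTR) &psq_cb);

    psq_cb.psq_qlang = DB_SQL;			/* query language */
    psq_cb.psq_decimal.db_decspec = TRUE;	/* decimal marker specified */
    psq_cb.psq_decimal.db_decimal = '.';
    psq_cb.psq_distrib = DB_1_LOCAL_SVR;	/* plain local session */

    status = psq_call(PSQ_BGN_SESSION, &psq_cb, &sess_cb);
    if (DB_FAILURE_MACRO(status))
    {
	/* psq_cb.psq_error.err_code identifies the problem, e.g.
	** E_PS0201_BAD_QLANG or E_PS0208_TOO_MANY_SESS.
	*/
    }
    return (status);
}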
Exemplo n.º 12
0
/*
** Name: SXC_END_SESSION - end a SXF session
**
** Description:
**	This routine is used to end an SXF session; it is called at
**	session end time via the sxf_call(SXC_END_SESSION) request.
**	This request must be made by a session that has previously been
**	registered with SXF via the sxc_bgn_session routine. All resources
**	allocated by this session will be released.
**
**	Overview of algorithm:-
**
**	Locate the SXF_SCB for this session. 
**	Call sxac_end_session to remove the session from the auditing system.
**	Unlink SXF_SCB from SCB list in the server control block.
**	Destroy the SXF_SCB.
**
** Inputs:
**	rcb
**	    .sxf_scb		Pointer to the session control block.
**
** Outputs:
**	rcb
**	    .sxf_error		error code returned to the caller
**
** Returns:
**	DB_STATUS
**
** History:
**	11-aug-92 (markg)
**	    Initial creation.
**	26-oct-1992 (markg)
**	    Updated error handling.
*/
DB_STATUS
sxc_end_session(
    SXF_RCB	*rcb)
{
    DB_STATUS		status = E_DB_OK;
    i4		err_code = E_SX0000_OK;
    i4		local_err;
    SXF_SCB		*scb;
    bool		scb_found = FALSE;

    for (;;)
    {
	/* Locate SCB and remove the session from the audit system */
	if (Sxf_svcb->sxf_svcb_status & SXF_SNGL_USER)
	{
	    if (Sxf_svcb->sxf_scb_list == NULL)
	    {
		err_code = E_SX000D_INVALID_SESSION;
		break;
	    }
	    else
	    {
	        scb = Sxf_svcb->sxf_scb_list;
	    }
	}
	else
	{
	    CSp_semaphore(TRUE, &Sxf_svcb->sxf_svcb_sem);
	    for (scb = Sxf_svcb->sxf_scb_list;
	        scb != NULL;
	        scb = scb->sxf_next)
	    {
	        if (scb == (SXF_SCB *) rcb->sxf_scb)
	        {
		    scb_found = TRUE;
		    break;
	        }
	    }
	    CSv_semaphore(&Sxf_svcb->sxf_svcb_sem);
	    if (scb_found == FALSE)
	    {
	        err_code = E_SX000D_INVALID_SESSION;
	        break;
	    }
	}

	status = sxac_end_session(scb, &local_err);
	if (status != E_DB_OK)
	{
	    err_code = local_err;
	    _VOID_ ule_format(err_code, NULL,
		ULE_LOG, NULL, NULL, 0L, NULL, &local_err, 0);
	}

	/* Remove SCB from list of active sessions and destroy it */
        CSp_semaphore(TRUE, &Sxf_svcb->sxf_svcb_sem);
	if (scb->sxf_prev != NULL)
	    scb->sxf_prev->sxf_next = scb->sxf_next;
	else
	    Sxf_svcb->sxf_scb_list = scb->sxf_next;
	if (scb->sxf_next != NULL)
	    scb->sxf_next->sxf_prev = scb->sxf_prev;
	Sxf_svcb->sxf_active_ses--;
        CSv_semaphore(&Sxf_svcb->sxf_svcb_sem);

	status = sxc_destroy_scb(scb, &local_err);
	if (status != E_DB_OK)
	{
	    err_code = local_err;
	    _VOID_ ule_format(err_code, NULL,
		ULE_LOG, NULL, NULL, 0L, NULL, &local_err, 0);
	}

	break;
    }

    /* Handle any errors */
    if (err_code != E_SX0000_OK)
    {
	if (err_code > E_SXF_INTERNAL)
	    err_code = E_SX000E_BAD_SESSION_END;

	rcb->sxf_error.err_code = err_code;
	if (status == E_DB_OK)
	    status = E_DB_ERROR;	
    }

    return (status);
}
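/*
** A minimal caller sketch (an assumption, not part of the original source):
** in the server this routine is reached through the
** sxf_call(SXC_END_SESSION) dispatch described above; the direct call below
** only illustrates the control-block contract.  The helper name and the
** "registered_scb" parameter are hypothetical placeholders for the SXF_SCB
** obtained when the session was registered via sxc_bgn_session.
*/
static DB_STATUS
example_end_sxf_session(
    SXF_SCB	*registered_scb)
{
    SXF_RCB	rcb;
    DB_STATUS	status;

    rcb.sxf_scb = (PTR) registered_scb;		/* session to be ended */

    status = sxc_end_session(&rcb);
    if (status != E_DB_OK)
    {
	/* rcb.sxf_error.err_code holds the reason, e.g.
	** E_SX000D_INVALID_SESSION or E_SX000E_BAD_SESSION_END.
	*/
    }
    return (status);
}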