Example #1
/*{
** Name: OPU_RSMEMORY_RECLAIM	- Return memory to the global memory pool
**
** Description:
**      This routine returns memory, from the given mark to the end of the
**	ULM memory stream, to the global memory pool.
[@comment_line@]...
**
** Inputs:
**	global -
**	    State info for the current query.
**	global->ops_mstate.ops_ulmrcb -
**	    The ULM control block.
**	global->ops_mstate.ops_sstreamid -
**	    The stream id.
**	mark -
**	    The mark stating where to start returning memory.
**
** Outputs:
**
**	Returns:
**	    none
**	Exceptions:
**	    none
**
** Side Effects:
**	    Memory is returned to the global memory pool
**
** History:
**      19-July-87 (eric)
**          written
**	11-oct-2006 (hayke02)
**	    Send E_OP0002_NOMEMORY to errlog.log. This change fixes bug 116309.
[@history_template@]...
*/
VOID
opu_Rsmemory_reclaim(
	OPS_STATE   *global,
	ULM_SMARK   *mark)
{
    DB_STATUS      ulmstatus;		    /* return status from ULM */

    /* store the stream id */
    global->ops_mstate.ops_ulmrcb.ulm_streamid_p = &global->ops_mstate.ops_sstreamid;

    /* store the mark indicating where reclamation is to begin */
    global->ops_mstate.ops_ulmrcb.ulm_smark = mark;

    if ( (ulmstatus = ulm_reclaim( &global->ops_mstate.ops_ulmrcb )) != E_DB_OK )
    {
	if (global->ops_mstate.ops_ulmrcb.ulm_error.err_code == E_UL0005_NOMEM)
	{
	    opx_lerror(E_OP0002_NOMEMORY, 0, 0, 0, 0, 0);
	    opx_error( E_OP0002_NOMEMORY);  /* out of memory */
	}
#ifdef E_OP0093_ULM_ERROR
	else
	    opx_verror( ulmstatus, E_OP0093_ULM_ERROR, 
		global->ops_mstate.ops_ulmrcb.ulm_error.err_code); /* check for error */
#endif
    }	
}
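
/* A hedged usage sketch (not part of the original source): it assumes a
** companion routine exists that records the current position of the stack
** stream in a ULM_SMARK; it is shown here under the hypothetical name
** opu_Xsmemory_mark.  Everything allocated from the stream after the mark
** is handed back to the global pool by opu_Rsmemory_reclaim.
*/
static VOID
example_mark_and_reclaim(
	OPS_STATE   *global)
{
    ULM_SMARK	mark;			    /* remembered stream position */

    opu_Xsmemory_mark(global, &mark);	    /* hypothetical mark routine */
    /* ... allocate scratch structures from the stack stream here ... */
    opu_Rsmemory_reclaim(global, &mark);    /* return everything allocated
					    ** after the mark to the pool */
}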
Example #2
/*{
** Name: opu_compare	- compare two values with the same type
**
** Description:
**      This routine will compare two values of the same type.
**
** Inputs:
**      global                          ptr to global state variable
**      vp1                             ptr to first value
**      vp2                             ptr to second value
**      datatype                        ptr to datatype info on values
**
** Outputs:
**	Returns:
**	    -1 if vp1 < vp2
**          0  if vp1 = vp2
**          1  if vp1 > vp2
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	21-jun-86 (seputis)
**          initial creation
[@history_line@]...
*/
i4
opu_compare(
	OPS_STATE          *global,
	PTR                vp1,
	PTR                vp2,
	DB_DATA_VALUE      *datatype)
{
    i4			adc_cmp_result;
    DB_DATA_VALUE	vpdv1;
    DB_DATA_VALUE	vpdv2;
    DB_STATUS		comparestatus;

    STRUCT_ASSIGN_MACRO(*datatype, vpdv1);
    STRUCT_ASSIGN_MACRO(vpdv1, vpdv2);
    vpdv2.db_data = vp2;
    vpdv1.db_data = vp1;
    comparestatus = adc_compare(global->ops_adfcb, &vpdv1, &vpdv2,
	&adc_cmp_result);
# ifdef E_OP078C_ADC_COMPARE
    if (comparestatus != E_DB_OK)
	opx_verror( comparestatus, E_OP078C_ADC_COMPARE, 
	    global->ops_adfcb->adf_errcb.ad_errcode);
# endif
    return (adc_cmp_result);
}
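
/* A hedged sketch (not part of the original source) of comparing two
** integer keys with opu_compare.  The DB_DATA_VALUE setup follows ordinary
** ADF conventions; DB_INT_TYPE and the 4-byte length are illustrative
** assumptions about the values being compared.
*/
static i4
example_compare_i4(
	OPS_STATE   *global,
	i4	    *left,
	i4	    *right)
{
    DB_DATA_VALUE   dt;			    /* shared type descriptor */

    dt.db_datatype = DB_INT_TYPE;	    /* both values are assumed i4s */
    dt.db_length = sizeof(i4);
    dt.db_prec = 0;
    dt.db_data = NULL;			    /* opu_compare supplies the data
					    ** pointers from vp1/vp2 */
    return (opu_compare(global, (PTR)left, (PTR)right, &dt));
					    /* -1, 0 or 1 per the header above */
}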
Example #3
/*{
** Name: OPU_GSMEMORY_GET	- Get memory from the stack ULM memory stream
**
** Description:
**      This routine allocates memory from the ULM memory stream that is used 
**      for the stack style memory usage.
[@comment_line@]...
**
** Inputs:
**	global -
**	    State info for the current query.
**	global->ops_mstate.ops_ulmrcb -
**	    The ULM control block.
**	global->ops_mstate.ops_sstreamid -
**	    The stream id.
**	size -
**	    size of the piece of memory to allocate.
**
** Outputs:
**
**	Returns:
**	    The address of the allocated memory.
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**      19-July-87 (eric)
**          written
**      21-feb-91 (seputis)
**          make non-zero initialization of memory an xDEBUG feature
**      16-sep-93 (smc)
**          Moved <cs.h> for CS_SID.
**	11-oct-2006 (hayke02)
**	    Send E_OP0002_NOMEMORY to errlog.log. This change fixes bug 116309.
[@history_line@]...
[@history_template@]...
*/
PTR
opu_Gsmemory_get(
	OPS_STATE   *global,
	i4	    size)
{
    DB_STATUS      ulmstatus;		    /* return status from ULM */

    /* store the stream id */
    global->ops_mstate.ops_ulmrcb.ulm_streamid_p = &global->ops_mstate.ops_sstreamid;

    /* store the size to be allocated */
    global->ops_mstate.ops_ulmrcb.ulm_psize = size;

    if ( (ulmstatus = ulm_palloc( &global->ops_mstate.ops_ulmrcb )) != E_DB_OK )
    {
	if (global->ops_mstate.ops_ulmrcb.ulm_error.err_code == E_UL0005_NOMEM)
	{
	    opx_lerror(E_OP0002_NOMEMORY, 0, 0, 0, 0, 0);
	    opx_error( E_OP0002_NOMEMORY);  /* out of memory */
	}
#ifdef E_OP0093_ULM_ERROR
	else
	    opx_verror( ulmstatus, E_OP0093_ULM_ERROR, 
		global->ops_mstate.ops_ulmrcb.ulm_error.err_code); /* check for error */
#endif
    }	
#ifdef xDEBUG
    MEfill( size, (u_char)127, (PTR)global->ops_mstate.ops_ulmrcb.ulm_pptr); /*FIXME
                                            ** remove this initialization after
                                            ** test for uninitialized memory
                                            ** is not required any more */
#endif
    /* return the allocated memory */
    return( global->ops_mstate.ops_ulmrcb.ulm_pptr );
}
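
/* A hedged sketch (not part of the original source) of a stack-style
** allocation: the 512-byte size is arbitrary, and the buffer lives until
** the surrounding mark is reclaimed or the stack stream is closed.
*/
static char *
example_stack_alloc(
	OPS_STATE   *global)
{
    char	*scratch;

    scratch = (char *) opu_Gsmemory_get(global, 512);
    /* ... use scratch as temporary working storage ... */
    return (scratch);
}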
Example #4
/*{
** Name: OPU_RELEASE	- Return memory to the global memory pool
**
** Description:
**      This routine returns memory, from the given mark to the end of the
**	ULM memory stream, to the global memory pool.
[@comment_line@]...
**
** Inputs:
**	global -
**	    State info for the current query.
**      ulmrcb -
**          address of local control block, or NULL if
**	    global->ops_mstate.ops_ulmrcb should be used
**	mark -
**	    The mark stating where to start returning memory.
**
** Outputs:
**
**	Returns:
**	    none
**	Exceptions:
**	    none
**
** Side Effects:
**	    Memory is returned to the global memory pool
**
** History:
**      18-apr-87 (seputis)
**          initial creation
**	11-oct-2006 (hayke02)
**	    Send E_OP0002_NOMEMORY to errlog.log. This change fixes bug 116309.
[@history_template@]...
*/
VOID
opu_release(
	OPS_STATE   *global,
	ULM_RCB     *ulmrcb,
	ULM_SMARK   *mark)
{
    DB_STATUS      ulmstatus;		    /* return status from ULM */

    if (!ulmrcb)
    {
	ulmrcb = &global->ops_mstate.ops_ulmrcb; /* use global ulmrcb if
					    ** control block is not defined */
	ulmrcb->ulm_streamid_p = &global->ops_mstate.ops_streamid; 
					    /* mark memory
					    ** in the global stream */
    }
    /* store the mark indicating where reclamation is to begin */
    ulmrcb->ulm_smark = mark;

    ulmstatus = ulm_reclaim( ulmrcb );
    if ( DB_FAILURE_MACRO(ulmstatus) )
    {
	if (ulmrcb->ulm_error.err_code == E_UL0005_NOMEM)
	{
	    opx_lerror(E_OP0002_NOMEMORY, 0, 0, 0, 0, 0);
	    opx_error( E_OP0002_NOMEMORY);  /* out of memory */
	}
#ifdef E_OP0093_ULM_ERROR
	else
	    opx_verror( ulmstatus, E_OP0093_ULM_ERROR, 
		ulmrcb->ulm_error.err_code); /* check for error */
#endif
    }	
}
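
/* A hedged sketch (not part of the original source) showing the NULL
** control-block convention described in the header above: passing NULL
** makes opu_release fall back to the global ulmrcb and the global stream.
*/
static VOID
example_release_to_mark(
	OPS_STATE   *global,
	ULM_SMARK   *mark)
{
    opu_release(global, (ULM_RCB *) NULL, mark); /* reclaim from the mark to
					    ** the end of the global stream */
}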
Example #5
/*{
** Name: opu_dcompare	- compare two values with different types
**
** Description:
**      This routine will compare two values of different types.
**
** Inputs:
**      global                          ptr to global state variable
**      vp1                             ptr to first value (a DB_DATA_VALUE,
**                                      which carries its own datatype info)
**      vp2                             ptr to second value (a DB_DATA_VALUE)
**
** Outputs:
**	Returns:
**	    -1 if vp1 < vp2
**          0  if vp1 = vp2
**          1  if vp1 > vp2
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	21-jun-86 (seputis)
**          initial creation to fix constant compare problem in opvctrees.c
[@history_line@]...
*/
i4
opu_dcompare(
	OPS_STATE          *global,
	DB_DATA_VALUE	   *vp1,
	DB_DATA_VALUE	   *vp2)
{
    i4			adc_cmp_result;
    DB_STATUS		comparestatus;

    comparestatus = adc_compare(global->ops_adfcb, vp1, vp2,
	&adc_cmp_result);
# ifdef E_OP078C_ADC_COMPARE
    if (comparestatus != E_DB_OK)
	opx_verror( comparestatus, E_OP078C_ADC_COMPARE, 
	    global->ops_adfcb->adf_errcb.ad_errcode);
# endif
    return (adc_cmp_result);
}
Example #6
/*{
** Name: opu_allocate	- allocate a new private memory stream
**
** Description:
**      This routine will allocate a new private memory stream from the ULM
**
** Inputs:
**      global                          ptr to global state variable
**
** Outputs:
**	Returns:
**	    PTR which represents the new memory stream
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	16-jun-86 (seputis)
**          initial creation
**	11-oct-2006 (hayke02)
**	    Send E_OP0002_NOMEMORY to errlog.log. This change fixes bug 116309.
[@history_line@]...
*/
PTR
opu_allocate(
    OPS_STATE          *global)
{
    DB_STATUS       ulmstatus;		/* return status from ulm */

    /* Tell ULM to return streamid into ulm_streamid */
    global->ops_mstate.ops_ulmrcb.ulm_streamid_p =
        &global->ops_mstate.ops_ulmrcb.ulm_streamid;
    ulmstatus = ulm_openstream(&global->ops_mstate.ops_ulmrcb);
    if (DB_FAILURE_MACRO(ulmstatus))
    {
        opx_lerror(E_OP0002_NOMEMORY, 0);
        opx_verror(ulmstatus, E_OP0002_NOMEMORY,
                   global->ops_mstate.ops_ulmrcb.ulm_error.err_code);
    }
    return ( global->ops_mstate.ops_ulmrcb.ulm_streamid );
}
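
/* A hedged sketch (not part of the original source): opening a private
** working stream and handing its id back to the caller.  How the stream is
** eventually closed is not shown in this listing; a matching close routine
** is assumed to exist.
*/
static PTR
example_private_stream(
	OPS_STATE   *global)
{
    PTR		streamid;

    streamid = opu_allocate(global);	    /* open a new private ULM stream */
    /* ... hang transient allocations off this stream ... */
    return (streamid);			    /* caller closes it when done */
}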
Example #7
/*{
** Name: opu_memory	- get joinop memory
**
** Description:
**      This routine will allocate the requested size of memory from the 
**      joinop memory stream.  Memory in this stream is not deallocated 
**      until the optimization has completed.  The allocated memory will
**      be aligned for any datatype.
**
** Inputs:
**      global                          ptr to global state variable
**      size                            size of memory block requested.
**
** Outputs:
**	Returns:
**	    PTR to aligned memory of "size" bytes
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	16-jun-86 (seputis)
**          initial creation
**	4-mar-91 (seputis)
**	    make initialization of memory an xDEBUG feature
**	11-oct-2006 (hayke02)
**	    Send E_OP0002_NOMEMORY to errlog.log. This change fixes bug 116309.
**	5-oct-2007 (dougi)
**	    Accumulate memory acquisition stats.
*/
PTR
opu_memory(
	OPS_STATE          *global,
	i4                size)
{
    DB_STATUS      ulmstatus;		    /* return status from ULM */

    global->ops_mstate.ops_countalloc++;
    if (size >= 2048)
	global->ops_mstate.ops_count2kalloc++;
    global->ops_mstate.ops_totalloc += size;

    global->ops_mstate.ops_ulmrcb.ulm_streamid_p = &global->ops_mstate.ops_streamid; /* allocate memory
					    ** from the global stream */
    global->ops_mstate.ops_ulmrcb.ulm_psize = size;    /* size of request */
    ulmstatus = ulm_palloc( &global->ops_mstate.ops_ulmrcb );
    if (DB_FAILURE_MACRO(ulmstatus))
    {
	if (global->ops_mstate.ops_ulmrcb.ulm_error.err_code == E_UL0005_NOMEM)
	{
	    opx_lerror(E_OP0002_NOMEMORY, 0, 0, 0, 0, 0);
	    opx_error( E_OP0002_NOMEMORY);  /* out of memory */
	}
#ifdef E_OP0093_ULM_ERROR
	else
	    opx_verror( ulmstatus, E_OP0093_ULM_ERROR, 
		global->ops_mstate.ops_ulmrcb.ulm_error.err_code); /* check for error */
#endif
    }	
#ifdef xDEBUG
    MEfill( size, (u_char)247, (PTR)global->ops_mstate.ops_ulmrcb.ulm_pptr); /*FIXME
                                            ** remove this initialization after
                                            ** test for uninitialized memory
                                            ** is not required any more */
#endif
    return( global->ops_mstate.ops_ulmrcb.ulm_pptr );  /* return the allocated
					    ** memory */
}
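
/* A hedged sketch (not part of the original source): carving a long-lived
** optimizer structure out of the joinop stream.  OPO_CO is borrowed from
** the later opn_prleaf example purely for illustration; any structure can
** be allocated this way, since the memory is aligned for all datatypes.
*/
static OPO_CO *
example_joinop_alloc(
	OPS_STATE   *global)
{
    OPO_CO	*cop;

    cop = (OPO_CO *) opu_memory(global, (i4) sizeof(OPO_CO));
    MEfill(sizeof(OPO_CO), (u_char)0, (PTR)cop); /* start from zeroes; the
					    ** stream is not released until
					    ** the optimization completes */
    return (cop);
}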
Example #8
/*{
** Name: OPU_OSMEMORY_OPEN	- Initialize the stack ULM memory stream
**
** Description:
**      This routine initializes the ULM memory stream that will be used 
**      for the stack style memory allocation and deallocation. 
[@comment_line@]...
**
** Inputs:
**	global -
**	    State info for the current query.
**	global->ops_mstate.ops_ulmrcb -
**	    The ULM control block.
**
** Outputs:
**	global->ops_mstate.ops_sstreamid -
**	    The new stream id.
**
**	Returns:
**	    Nothing
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**      19-July-87 (eric)
**          written
**	11-oct-2006 (hayke02)
**	    Send E_OP0002_NOMEMORY to errlog.log. This change fixes bug 116309.
[@history_template@]...
*/
VOID
opu_Osmemory_open(
	OPS_STATE   *global)
{
    DB_STATUS      ulmstatus;		    /* return status from ULM */

    /* Set the output streamid location for ULM */
    global->ops_mstate.ops_ulmrcb.ulm_streamid_p = &global->ops_mstate.ops_sstreamid;

    if ( (ulmstatus = ulm_openstream( &global->ops_mstate.ops_ulmrcb )) != E_DB_OK )
    {
	if (global->ops_mstate.ops_ulmrcb.ulm_error.err_code == E_UL0005_NOMEM)
	{
	    opx_lerror(E_OP0002_NOMEMORY, 0, 0, 0, 0, 0);
	    opx_error( E_OP0002_NOMEMORY);  /* out of memory */
	}
#ifdef E_OP0093_ULM_ERROR
	else
	    opx_verror( ulmstatus, E_OP0093_ULM_ERROR, 
		global->ops_mstate.ops_ulmrcb.ulm_error.err_code); /* check for error */
#endif
    }	
}
Example #9
/*{
** Name: OPU_CSMEMORY_CLOSE	- Close the stack ULM memory stream
**
** Description:
**      This routine closes the ULM memory stream that was used 
**      for the stack style memory allocation and deallocation. 
[@comment_line@]...
**
** Inputs:
**	global -
**	    State info for the current query.
**	global->ops_mstate.ops_ulmrcb -
**	    The ULM control block.
**	global->ops_mstate.ops_sstreamid -
**	    The stream id.
**
** Outputs:
**
**	Returns:
**	    Nothing
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**      19-July-87 (eric)
**          written
**	11-oct-2006 (hayke02)
**	    Send E_OP0002_NOMEMORY to errlog.log. This change fixes bug 116309.
[@history_template@]...
*/
VOID
opu_Csmemory_close(
	OPS_STATE   *global)
{
    DB_STATUS      ulmstatus;		    /* return status from ULM */

    /* store the stream id */
    global->ops_mstate.ops_ulmrcb.ulm_streamid_p = &global->ops_mstate.ops_sstreamid;
    /* ULM will nullify ops_sstreamid */

    if ( (ulmstatus = ulm_closestream( &global->ops_mstate.ops_ulmrcb )) != E_DB_OK )
    {
	if (global->ops_mstate.ops_ulmrcb.ulm_error.err_code == E_UL0005_NOMEM)
	{
	    opx_lerror(E_OP0002_NOMEMORY, 0, 0, 0, 0, 0);
	    opx_error( E_OP0002_NOMEMORY);  /* out of memory */
	}
#ifdef E_OP0093_ULM_ERROR
	else
	    opx_verror( ulmstatus, E_OP0093_ULM_ERROR, 
		global->ops_mstate.ops_ulmrcb.ulm_error.err_code); /* check for error */
#endif
    }	
}
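
/* A hedged sketch (not part of the original source) stitching together the
** stack-stream routines from Examples 3, 8 and 9: open the stream, take a
** stack-style allocation, then close the stream to release everything.
** The 256-byte size is arbitrary.
*/
static VOID
example_stack_stream_lifecycle(
	OPS_STATE   *global)
{
    PTR		buf;

    opu_Osmemory_open(global);		    /* create the stack stream; its id
					    ** is stored in ops_sstreamid */
    buf = opu_Gsmemory_get(global, 256);    /* stack-style allocation */
    /* ... use buf ... */
    opu_Csmemory_close(global);		    /* release the whole stream; ULM
					    ** nullifies ops_sstreamid */
}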
Example #10
/*{
** Name: opn_ceval	- evaluate the cost of the join operator tree
**
** Description:
**      Entry point for routines which evaluate cost of a join operator tree.
**      Contains checks for timeouts within the optimizer.
**
** Inputs:
**      subquery                        ptr to subquery being analyzed
**          ->ops_estate.opn_sroot      ptr to root of join operator tree
**
** Outputs:
**      subquery->ops_bestco            ptr to best plan found
**      subquery->ops_cost              cost of best plan found
**	Returns:
**	    VOID
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	11-jun-86 (seputis)
**          initial creation from costeval
**	2-nov-88 (seputis)
**          changed CSstatistics interface
**	2-nov-88 (seputis)
**          add trace flag to timeout on number of plans being evaluated
**	15-aug-89 (seputis)
**	    add trace flag to adjust timeout factor which converts cost to time
**	    used to help isolate early timeout problems
**	16-may-90 (seputis)
**	    - b21582, move timeout checking to opn_timeout routine so it can be
**	    called from opn_arl
**	17-oct-90 (seputis)
**	    - b33386 - print error if no qep is found
**	22-apr-91 (seputis)
**	    - fix floating point exception handling problems
**	19-nov-99 (inkdo01)
**	    Changes to remove EXsignal from opn_exit processing.
**	18-oct-02 (inkdo01)
**	    Changes to enable new enumeration.
**	30-mar-2004 (hayke02)
**	    Call opn_exit() with a FALSE longjump.
**	30-Jun-2006 (kiria01) b116309
**	    Capture memory exhaustion error to errlog.log. 
**	11-Mar-2008 (kschendel) b122118
**	    Remove unused ops_tempco.  Fix up bfexception stuff so that we
**	    stop calling recover pointlessly at the start of every query.
[@history_line@]...
*/
OPN_STATUS
opn_ceval(
	OPS_SUBQUERY       *subquery)
{
    EX_CONTEXT	    excontext;		/* context block for exception 
                                        ** handler*/
    OPN_SUBTREE     *opn_ssubtp;        /* dummy var ignored at this level
                                        ** - list of subtrees with possible cost
                                        ** orderings (including reformats)
                                        ** - all elements in the list have the same
                                        ** relations but in different order or
                                        ** use a different tree structure.
                                        */
    OPN_RLS         *opn_srlmp;         /* dummy var ignored at this level
                                        ** - list of list of subtrees, with a
                                        ** different set of relations for each
                                        ** OPN_RLS element
                                        */
    OPN_EQS         *opn_seqp;          /* dummy var ignored at this level
                                        ** - used to create a list of different
                                        ** OPN_SUBTREE (using indexes) if
                                        ** an index exists.
                                        */
    OPN_STATUS	    sigstat = OPN_SIGOK;

    if ( EXdeclare(opn_mhandler, &excontext) == EX_DECLARE ) /* set
				    ** up exception handler to 
				    ** recover from out of memory
				    ** errors */
    {
	/* this point will only be reached if the enumeration processing has
	** run out of memory.  The optimizer will try to continue by copying 
	** the best CO tree found so far, out of enumeration memory, and  
	** reinitializing the enumeration stream, and continuing.  
	** The out-of-memory error will be reported if a complete pass of 
	** cost evaluation cannot be completed. */
	(VOID)EXdelete();		    /* cancel exception handler prior
					    ** to exiting routine */
	if (subquery->ops_global->ops_gmask & OPS_FLINT)
	{
	    subquery->ops_global->ops_gmask &= ~OPS_FLINT; /* reset exception
					    ** indicator */
	    return(OPN_SIGOK);		    /* skip this plan if a float
					    ** or integer exception has occurred
					    ** during processing */
	}
	if ( EXdeclare(opn_mhandler, &excontext) == EX_DECLARE ) /* set
				    ** exception handler to catch case in
				    ** which no progress is made */
	{
	    (VOID)EXdelete();		    /* cancel exception handler prior
					    ** to exiting routine */
	    if (subquery->ops_bestco)
		opx_verror(E_DB_WARN, E_OP0400_MEMORY, (OPX_FACILITY)0); /* report 
					    ** warning message to caller */
	    else
	    {
		opx_lerror(E_OP0400_MEMORY, 0); /* Output to errlog.log as well kiria01-b116309 */
		opx_error(E_OP0400_MEMORY); /* exit with a user error if the query
					    ** cannot find at least one acceptable
					    ** query plan */
	    }
	    opn_exit(subquery, FALSE);		    /* exit with current query plan */
	    return(OPN_SIGEXIT);
	}
	opn_recover(subquery);              /* routine will copy best CO tree 
					    ** and reinitialize memory stream
					    */
    }

    subquery->ops_global->ops_gmask &= (~OPS_FPEXCEPTION); /* reset fp exception
					    ** indicator before evaluating
					    ** plan */
    if (subquery->ops_global->ops_gmask & OPS_BFPEXCEPTION
      && subquery->ops_bestco != NULL)
	opn_recover(subquery);		    /* if the current best plan occurred with
					    ** exceptions then enumeration memory needs
					    ** to be flushed, so that subsequent plans
					    ** do not use subtrees which may have
					    ** been created with exceptions, this means
					    ** that the OPF cache of sub-trees is not
					    ** used if one exception has occurred and
					    ** the current best plan was created with
					    ** exceptions */
    

    (VOID) opn_nodecost (  subquery, 
		    subquery->ops_global->ops_estate.opn_sroot,
		    (subquery->ops_mask & OPS_LAENUM) ? subquery->ops_laeqcmap :
		    	&subquery->ops_eclass.ope_maps.opo_eqcmap, 
		    &opn_ssubtp,
		    &opn_seqp,
		    &opn_srlmp, &sigstat);

    if (sigstat != OPN_SIGEXIT && 
	!(subquery->ops_mask & OPS_LAENUM) &&
	opn_timeout(subquery))    	    /* check for timeout (but only for 
					    ** old style enumeration) */
    {
	(VOID)EXdelete();		    /* cancel exception handler prior
					    ** to exiting routine, call after
					    ** opn_timeout in case out of memory
					    ** errors occur */
	opn_exit(subquery, FALSE);	    /* at this point we 
					    ** return the subquery->opn_bestco
					    ** tree, this
					    ** could also happen in freeco
					    ** and enumerate */
	return(OPN_SIGEXIT);
    }
    (VOID)EXdelete();			    /* cancel exception handler prior
					    ** to exiting routine */
    return(sigstat);
}
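
/* A hedged sketch (not part of the original source) of how a caller might
** interpret the OPN_STATUS result; the real enumeration driver is not shown
** in this listing, so this only illustrates the two status values used above.
*/
static VOID
example_drive_ceval(
	OPS_SUBQUERY	*subquery)
{
    OPN_STATUS	sigstat;

    sigstat = opn_ceval(subquery);
    if (sigstat == OPN_SIGEXIT)
    {
	/* timeout or unrecoverable memory pressure: stop enumerating and
	** use subquery->ops_bestco as the final plan */
    }
    else
    {
	/* OPN_SIGOK (including a plan skipped after a float/integer
	** exception): continue generating and costing join trees */
    }
}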
Example #11
/*{
** Name: opn_prleaf	- project restrict for leaf
**
** Description:
**
**      This routine generates a CO-tree forest for a single leaf.  The
**      forest will contain a tree with just an ORIG in it, and another
**      tree with PR + ORIG.  (unless the caller requests that the latter
**      be omitted, i.e. wants keyed strategies only.)  The bare ORIG
**      will typically be used for K-join or T-join plans, while the
**      PR/ORIG subtree can be used to cost out simple-join plans.
**
**      [the below apparently dates from the Jurassic age, and it's not
**      entirely clear to me how much of it is accurate or useful...]
**      
**       General:
**      
**       an index is useful if it is hash (and single attribute) or isam and
**	       a) it matches a joining clause
**          or b) it matches a boolean factor
**      
**       To match a joining clause-
**      	attribute of index must equal join attribute. if isam, the
**      	attribute must be the most significant. if hash, the rel must
**      	be keyed on only this attribute.
**      
**       To match a boolean factor-
**      	there must exist a boolean factor such that
**      	a) for hash, all preds are equality sargs on the attribute or
**      	b) for isam, all preds are sargs on the most significant 
**      	   attribute.  We do not worry about sort because it can 
**                 only exist as an intermediate relation and hence will 
**                 never need a project-restrict (since it would have 
**		   been done when the tuple was created).
**      
**       Operation of project-restrict when relation is isam or hash:
**      
**       a) hash: find a matching boolean factor (all preds are equality sargs)
**      	   then for each sarg hash its value and for all elements
**                 in the resulting linked list apply the whole predicate
**      
**       b) isam: if there is a boolean factor where all preds are equality 
**      	   sargs then process similar to hash. the only difference 
**      	   is that a directory search is performed instead of 
**      	   hashing to determine the beginning and ending of the 
**                 linked list.
**      
**      	otherwise, for all matching boolean factors find a max and 
**      		a min value as follows:
**      	. if all preds in this boolean factor are <, <= or = then set
**      	  mx to max of the constants. if mx is greater than Gmax, set
**      	  Gmax to mx.
**      	. if all preds in this bf are >, >= or = then set mn to min of 
**      	  the constants. if mn is greater than Gmin set Gmin to mn.
**      	. do a directory search with Gmin and Gmax.
**      	. for all elements in this linked list apply the whole 
**                predicate.
**      
**      
**       COST
**      
**       cost of project-restrict-reformat is composed of the
**      	project-restrict cost and
**      	reformat cost.
**      
**       for the reformat cost see procedure "costreformat.c"
**      
**       project restrict cost is:
**      	hash and matching boolean factor
**		    ((#_pgs)/(#_primary) + hashcost) * (#_sargs_in_bf)
**      			hashcost is usually 0.
**      	isam and matching boolean factor
**		    ((#_pgs)/(#_primary) + dirsearchcost) * (#_sargs_in_bf)
**      	isam and matching boolean factor with range (at least one
**      	inequality sarg)
**		    2 * (dirsearchcost) + integrate that part of hist to find
**      			that portion of relation which must be accessed
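**      
**       worked illustration (numbers invented here for clarity, not taken
**       from the source): an isam relation with 1000 pages, 100 primary
**       pages, a directory search cost of 2 and a matching boolean factor
**       containing 3 equality sargs costs roughly
**		    (1000/100 + 2) * 3 = 36
**       page accesses for the project restrict.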
**      
**      
**       SPECIFIC
**      
**       operation of prr-
**      
**       We are given a list of co's. this list contains one element if we are
**       working on a leaf. if we are working on the result of a join then
**       there may be zero or more co's. (there could be zero if for every co 
**       generated by calcost, there was a cheaper one in an associated co
**       list with the same opo_storage and ordeqc.)
**      
**       let stp refer to this node in the tree
**       if stp is a leaf then there is only one co. it represents the original
**       relation as it is stored on disk. there can be no associated
**       co lists because for a leaf there is only one structure and relation
**       assignment possible. so for this stp's co, do simple pr and
**       reformat to isam, hash and sort on all interesting orders.
**       the reformat co's point to the pr co which points to the original co
**       (the couter pointer does the pointing).
**      
**       If stp is the output of a join then the procedure is more complex.
**       If there are no co's then nothing is done, so return
**       otherwise find the minimum cost co in stp's co list.
**       Find the minimum cost from all the other associated co's, if any.
**       if there are other co's and a minimum cost is found which is less
**       than stp's minimum cost then do nothing, return
**       if there are other co's and they have a higher mincost, then
**       delete all reformat co's from them because they can be done cheaper here
**      	specifically:
**       using the minimum cost co as a base, do reformats to isam, hash
**       and sort on all interesting orders. for each structure/ordering 
**       check this stp and all associated co lists.  If this co is the best use
**       it and delete the other with same struct/ord. if this is not the
**       cheapest then do not use it, try next struct/ord combo.
**
**	 Default case will be to not do reformat to isam or hash unless the 
**       trace flags are turned on.  Also, do not reformat to isam or hash 
**       if the tuple is too wide (>MAXTUP) because modify cannot handle it.
** 
**	 ctups is multiplied by sel so QEP will print correctly and 
**       set_qep_lock will know how many result tuples there are so it can 
**       set result relation locking appropriately.
**
**	 Always create PR nodes so that the cost of their use can be evaluated.
**       This fixes a bug where cross products would become excessively large 
**       because the restriction on the outer node would only happen
**	 after the join.  In the query "ret (r.a)where s.a =5"
**	 s.a will be the outer. We should do the restriction before the 
**       cross product.
**
**       There will be no eqclasses in eqsp->eqcmp at this point if
**       this query is from a single variable integrity constraint (right
**       now all integrity constraints are single variable). This is because
**       integrity constraints have no target list because they just want to
**       see if there exists at least one element in the query and do not want
**       to return any attributes. We could do a
**       special case in the optimizer to find the fastest way to get the
**       first tuple (this could also be useful for QBF) but this is not done
**       now.
**
** Note on trace points op188/op215/op216.
**	Trace points op188/op215/op216 and op145 (set qep) both use opt_cotree().
**	op188/op215/op216 expect opt_conode to point to the fragment being
**	traced, and op145 expects it to be NULL (so that ops_bestco is used).
**	The NULL/non-NULL value of opt_conode also results in different
**	display behaviour. For these reasons opt_conode is also NULLed
**	after the call to opt_cotree().
**
** Inputs:
**      subquery                        ptr to subquery being analyzed
**      jsbtp                           set of cost orderings already calculated
**          .opn_coforw                 co tree representing leaf
**      eqsp                            equivalence class list header structure
**      rlsp                            represents the relation being restricted
**                                      and projected
**      blocks                          estimated number of blocks in relation
**                                      represented by this leaf after restrict
**                                      project applied
**      selapplied                      TRUE if the selectivity has already
**                                      been applied to rlsp->opn_reltups
**      selectivity                     selectivity of predicates on relation
**      varp                            ptr to range variable descriptor for
**                                      this leaf
**      keyed_only                      TRUE if only the bare ORIG node is
**                                      wanted, i.e. keyed strategies only
**
** Outputs:
**	Returns:
**	    VOID
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	28-may-86 (seputis)
**          initial creation
**	19-sep-88 (seputis)
**          fix double application of selectivity
**	17-nov-88 (seputis)
**          non-zero PR cpu cost required
**	29-mar-90 (seputis)
**	    fix metaphor performance problem - be smarter about search space
**	20-dec-90 (seputis)
**	    add top sort node removal checks
**	22-apr-91 (seputis)
**	    - fix float exception handling problem b36920
**      15-dec-91 (seputis)
**          fix b33551 make better guess for initial set of relations to enumerate
**	26-dec-91 (seputis)
**	    - 41526 - initialize sortrequired since opn_checksort now needs this
**	    initialized as part of bug fix
**	28-jan-91 (seputis)
**	    - fix 40923 - remove parameter to opn_prcost
**	1-apr-92 (seputis)
**	    - b40687 - part of tid join estimate fix.
**      8-may-92 (seputis)
**          - b44212, check for overflow in opo_pointercnt
**      5-jan-93 (ed)
**          - bug 48049 - move definition of ordering to make it more localized
**	30-mar-94 (ed)
**	    - bug 59461 - correct estimate of index pages in fanout causing
**	    overestimates and potential overflows
**	06-mar-96 (nanpr01)
**          - Changes for variable page size. Do not use DB_MAXTUP constant
**	      rather use the opn_maxreclen function to calculate the 
**	      max tuple size given the page size.
**	06-may-96 (nanpr01)
**	    Changes for New Page Format project - account for tuple header.
**	17-jun-96 (inkdo01)
**	    Treat Rtree like Btree for relprim computation.
**	21-jan-1997 (nanpr01)
**	    opo_pagesize was not getting set and consequently in opn_prcost/
**	    opn_readahead was getting floating point exception in VMS.
**	 1-apr-99 (hayke02)
**	    Use tbl_id.db_tab_index and tbl_storage_type rather than
**	    tbl_dpage_count <= 1 to check for a btree or rtree index. As of
**	    1.x, dmt_show() returns a tbl_dpage_count of 1 for single page
**	    tables, rather than 2 in 6.x. This change fixes bug 93436.
**	17-aug-99 (inkdo01)
**	    Minor, but ugly, change to detect descending top sort where 
**	    first column of multi-attr key struct is also in constant BF. 
**	    read backwards doesn't work in this case.
**	28-sep-00 (inkdo01)
**	    Code for composite histograms (avoid using them when attr no 
**	    is expected in histogram)
**	13-nov-00 (inkdo01)
**	    Added CO-tree addr to opn_checksort call. Seems no reason to
**	    omit it and new unique index top sort removal heuristic (100482)
**	    requires it.
**      12-aug-2002 (huazh01)
**          Do not mark ncop->opo_storage as sorted on a hash structure base
**          table. This fixes bug 107351, INGSRV 1727 
**      26-aug-2003 (wanfr01)
**          Bug 111062, INGSRV 2545
**          Do not mark storage sorted unless it is a btree.
**	5-july-05 (inkdo01)
**	    Call opn_modcost() to alter row count for agg queries.
**	30-Jun-2006 (kschendel)
**	    Remove modcost, done now after subquery enumerated so that we
**	    don't affect the agg-input co tree.
**      30-Aug-2006 (kschendel)
**          Eliminate redundant opo_topsort bool.
**	31-Aug-2006 (kschendel)
**	    Watch for HFAGG as well as RFAGG.
**	22-Oct-2007 (kibro01) b119313
**	    Ensure that pr_tuples does not fall below 1.
**      7-Mar-2008 (kschendel)
**          Always generate PR/ORIG co tree if new enumeration, regardless
**          of plan ops_cost considerations.  Failure to do this can cause
**          a new enum pass to not find any valid plans until it clears the
**          subtree cache with opn-recover.  See inline.
**	04-nov-08 (hayke02 for kschendel)
**	    Modify the previous change to prevent single var substitution index
**	    QEPs always being chosen, despite the base table QEP being cheaper.
**	    This change fixes bug 121051.
**	3-Jun-2009 (kschendel) b122118
**	    OJ filter cleanup, fix here.
**	26-feb-10 (smeke01) b123349 b123350
**	    Copy over the op255 and op188 trace point code from opn_corput().
**	    Amend opn_prleaf() to return OPN_STATUS in order to make trace point
**	    op255 work with set joinop notimeout.
**	01-mar-10 (smeke01) b123333 b123373
**	    Improve op188. Replace call to uld_prtree() with call to
**	    opt_cotree() so that the same formatting code is used in op188 as
**	    in op145 (set qep). Save away the current fragment number and
**	    the best fragment number so far for use in opt_cotree(). Also
**	    set ops_trace.opt_subquery to make sure we are tracing the current
**	    subquery.
**	13-may-10 (smeke01) b123727
**	    Add new trace point op215 to output trace for all query plan
**	    fragments and best plans considered by the optimizer. Add a new
**	    trace point op216 that prints out only the best plans as they are found.
**	    Also cleaned up some confused variable usage: replaced dsortcop
**	    with sortcop; and removed usage of ncop after it has been assigned
**	    to cop, replacing it with cop.
**	01-jun-10 (smeke01) b123838
**	    Prevent oph_origtuples/oph_odefined from being set for the
**	    histograms of a multi-attribute key when there are no boolean
**	    factors for the key.  This in turn prevents the
**	    'delayed multi-attribute' code in oph_jtuples() from being skewed
**	    by an unreasonably high number for oph_origtuples (up to 50% of
**	    the number of rows in the base table).
*/
OPN_STATUS
opn_prleaf(
	OPS_SUBQUERY       *subquery,
	OPN_SUBTREE        *jsbtp,
	OPN_EQS            *eqsp,
	OPN_RLS            *rlsp,
	OPO_BLOCKS	   blocks,
	bool		   selapplied,
	OPN_PERCENT        selectivity,
	OPV_VARS           *varp,
	bool               keyed_only)
{
    OPS_STATE		*global;
    OPO_BLOCKS          dio;        /* estimated number of disk block
                                    ** reads/writes to produce tuples
                                    ** represented by this cost ordering
                                    ** - after the project restrict */
    OPO_CPU             cpu;        /* estimate of cpu cost to produce
                                    ** this cost ordering */
    OPO_TUPLES		pr_tuples;  /* number of tuples in the project
				    ** restrict */
    OPB_SARG            sargtype;   /* type of keyed access which can be
                                    ** used on relation */
    OPO_BLOCKS		pblk;       /* estimated number of blocks read including
                                    ** blocks in primary index */
    OPH_HISTOGRAM       *hp;        /* histogram returned only if TID access */
    OPO_CO              *cop;       /* ptr to cost-ordering representing
                                    ** the leaf node */
    OPO_CO              *ncop;      /* ptr to new cost-ordering of a project
                                    ** - restrict of "*cop" */
    bool		pt_valid;   /* TRUE if pblk can be used as an estimate
				    ** for pages touched, which is used for
				    ** lock mode calculations */
    bool		imtid;      /* TRUE if implicit TID instead of
                                    ** keyed attributes were used */
    bool		distributed; /* TRUE if this is a distributed
				    ** thread */
    OPO_COST		prcost;	    /* project restrict cost of node */
    i4 		pagesize;   /* estimate the page size     */ 
    OPO_CO		*sortcop = NULL;	/* set to not NULL if a sort node is required */
    bool 		op188 = FALSE;
    bool 		op215 = FALSE;
    bool 		op216 = FALSE;

    global = subquery->ops_global;

    if (global->ops_cb->ops_check)
    {
	if (opt_strace( global->ops_cb, OPT_F060_QEP))
	    op188 = TRUE;
	if (opt_strace( global->ops_cb, OPT_F087_ALLFRAGS))
	    op215 = TRUE;
	if (opt_strace( global->ops_cb, OPT_F088_BESTPLANS))
	    op216 = TRUE;
    }

    distributed = global->ops_cb->ops_smask & OPS_MDISTRIBUTED;
    cop = jsbtp->opn_coback = jsbtp->opn_coforw = opn_cmemory( subquery ); /* 
				    ** get a new CO node */
    STRUCT_ASSIGN_MACRO(*(varp->opv_tcop), *(jsbtp->opn_coback));
				    /* copy the node since it may be 
				    ** changed */
    jsbtp->opn_coback->opo_coforw = jsbtp->opn_coback->opo_coback = 
	(OPO_CO *) jsbtp;		    /* ptr to jsbtp used to terminate list */
    jsbtp->opn_coback->opo_maps = &eqsp->opn_maps; /* save a ptr to the 
				    ** equivalence classes supplied by this
				    ** CO subtree (i.e. node) */
    if (varp->opv_grv->opv_relation)
    {
	DMT_TBL_ENTRY          *dmfptr;	    /* ptr to DMF table info */

	dmfptr = varp->opv_grv->opv_relation->rdr_rel; /* get ptr to RDF 
					** relation description info */
	pagesize = dmfptr->tbl_pgsize;

	/*
	** Note if distributed, the DMT_TBL_ENTRY is not totally initialized
	** (it was filled in by RDF rdd_alterdate -- which did 
	** 'select ... from iitables...' against the local database
	** However the information available by querying iitables does
	** not include all information put in the DMT_TBL_ENTRY by DMT_SHOW
	*/
	if (distributed &&
		((dmfptr->tbl_storage_type == DB_BTRE_STORE) ||
		 (dmfptr->tbl_storage_type == DB_RTRE_STORE) ||
		 (dmfptr->tbl_storage_type == DB_ISAM_STORE)) )
	{
	    i4  pgtype;

	    varp->opv_dircost = 1.0;

	    /*
	    ** Make assumptions about page type -> page/row overheads
	    ** The overheads used are data page, not btree index page overheads
	    ** That's okay - we're just looking for rough estimates
	    */
	    pgtype = (dmfptr->tbl_pgsize == DB_COMPAT_PGSIZE) ?
			DB_PG_V1: DB_PG_V3;
	    varp->opv_kpb = 
		(dmfptr->tbl_pgsize - DB_VPT_PAGE_OVERHEAD_MACRO(pgtype))
	       / (varp->opv_mbf.opb_keywidth + DB_VPT_SIZEOF_LINEID(pgtype));

	    if (varp->opv_kpb < 2.0)
		varp->opv_kpb = 2.0;
	    if (dmfptr->tbl_storage_type == DB_ISAM_STORE)
	    {
		varp->opv_kpleaf = 0;
		varp->opv_leafcount = 0;
	    }
	    else
	    {
		/* kpleaf calculation assumes no non-key columns */
		varp->opv_kpleaf = varp->opv_kpb;
		if (dmfptr->tbl_record_count)
		{
		    varp->opv_leafcount = 
				dmfptr->tbl_record_count/varp->opv_kpleaf;
		    if (varp->opv_leafcount < 1.0)
			varp->opv_leafcount = 1.0;
		}
		else
		    varp->opv_leafcount = 1.0;
	    }
	}
	else if ((dmfptr->tbl_storage_type == DB_BTRE_STORE) ||
	    (dmfptr->tbl_storage_type == DB_RTRE_STORE))
	{
	    /* 
	    ** Let dmf work this out, since it knows about page and row 
	    ** overhead and also takes into consideration potentially
	    ** different entry lengths for leaf and index pages
	    */
	    varp->opv_kpb = dmfptr->tbl_kperpage;
	    varp->opv_kpleaf = dmfptr->tbl_kperleaf;
	    varp->opv_dircost = dmfptr->tbl_ixlevels;
	    varp->opv_leafcount = dmfptr->tbl_lpage_count;
	}
	else if (dmfptr->tbl_storage_type == DB_ISAM_STORE)
	{
	    OPO_TUPLES	    tupleperblock; /* estimated number of tuples in an index
					** block where size of one tuple is the
					** length of the attribute being ordered
					** on ... plus the size of a block ptr */
	    OPO_BLOCKS      blocks;     /* number of blocks accessed in index */
	    OPO_BLOCKS      previous;   /* total blocks accessed in the index */
	    i4	    dirsearch;
	    OPO_BLOCKS	    indexpages;
	    OPO_BLOCKS	    root_blocks;  /* number of index pages which are not
					** on the final fanout level */
	    OPO_BLOCKS	    index_blocks; /* number of index pages for btree */
	    OPO_BLOCKS	    leaf_blocks; /* number of index and leaf pages for
					** btree */

	    indexpages = dmfptr->tbl_ipage_count;
	    if (indexpages < 1.0)
		indexpages = 1.0;

	    /* 
	    ** Let dmf work out keys-per-page since it know about
	    ** page and row overhead for different table types
	    */
	    varp->opv_kpb = dmfptr->tbl_kperpage;
	    tupleperblock = dmfptr->tbl_kperpage;

	    root_blocks =
	    index_blocks = 0.0;
	    previous = 1.0;
	    leaf_blocks =
	    blocks = tupleperblock;
	    for (dirsearch = 1; previous < indexpages; )
	    {
		root_blocks = index_blocks;
		index_blocks = previous;    /* previous level contains only index blocks */
		previous += blocks;	    /* calculate total blocks used so far */
		leaf_blocks = blocks;	    /* lowest level contains leaf pages for btree */
		blocks = blocks * blocks;
		dirsearch += 1;		    /* probably faster than using logs */
	    }
	    blocks = leaf_blocks;	    /* get previous value, i.e. leaf page
					    ** level for btree */
	    varp->opv_dircost = dirsearch - (previous - indexpages)/blocks;  /* 
					    ** calculate the average directory 
					    ** search cost; if fanout is incomplete
					    ** then subtract the probabilistic fraction
					    ** in which one less directory search would be
					    ** made - FIXME
					    ** get fill factor involved in this
					    ** calculation */
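	    /* worked illustration (invented numbers, not from the original
	    ** source): with tbl_kperpage = 100 and tbl_ipage_count = 60 the
	    ** loop above exits after one pass with dirsearch = 2,
	    ** previous = 101 and leaf_blocks = 100, giving
	    ** opv_dircost = 2 - (101 - 60)/100 = 1.59 probes on average */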
	    blocks = (dmfptr->tbl_dpage_count < 1.0) ? 1.0 :
						dmfptr->tbl_dpage_count;
	    varp->opv_leafcount = blocks;

					    /* opv_leafcount is probably not used anymore since DMF
					    ** returns a leaf page count in the tbl_dpage_count field
					    ** for a secondary btree index now */
					    /* used for BTREE secondary indexes which have no leaf pages
					    ** to put an upper bound on the number of pages that
					    ** can be hit on any particular level of the btree, which
					    ** can be thought of as data pages */
	}
    }
    else { 				    /* Temporary relation   */
      /* 
      ** Determine the best possible page size for this relation
      ** Try to fit at least 20 rows in the temp relation if possible.
      */
      pagesize = opn_pagesize(global->ops_cb->ops_server, eqsp->opn_relwid);
    }
    opo_orig(subquery, cop);	    /* initialize the ordeqc of the DB_ORIG
				    ** node */
    if (distributed)
	opd_orig(subquery, cop);    /* initialize the distributed portion of
				    ** the orig node */
    if (keyed_only)
	return(OPN_SIGOK);

    if (cop->opo_cost.opo_pagesize == 0)
      cop->opo_cost.opo_pagesize = pagesize;

    /* FIXME - is not opo_dio 0 here ? */
    {
        OPO_ISORT               ordering;   /* this is the ordering which can be
                                    ** assumed after keying has been done in
                                    ** the relation, - OPE_NOEQLCS if no
                                    ** ordering can be assumed
                                    ** not used here */
        opn_prcost (subquery, cop, &sargtype, &pblk, &hp,
            &imtid, FALSE, &ordering,
            &pt_valid, &dio, &cpu);
    }
    dio += cop->opo_cost.opo_dio; /* add disk i/o 
				    ** cost to restrict/project
                                    ** i.e. cost of reading tuples using
                                    ** keyed access or cost of reading
                                    ** entire relation */
    if (selapplied)
	pr_tuples = rlsp->opn_reltups; /* duplicate this info here
				    ** to help lock chooser.  We will 
				    ** use this to guess how many
				    ** pages in the result relation
				    ** will be touched.  It is also
				    ** printed out in the QEP */
    else
	pr_tuples = rlsp->opn_reltups * selectivity; /* do not
					    ** apply selectivity twice */

    if (pr_tuples < 1.0)
	pr_tuples = 1.0;

    /* FIXME - same thing */
    cpu += cop->opo_cost.opo_cpu; /* get CPU time used to project and
                                    ** restrict the node */

    prcost = opn_cost(subquery, dio, cpu);

    /* count another fragment for trace point op188/op215/op216 */
    subquery->ops_currfragment++;

    /* With traditional enum, the only way to have an ops_cost is to have
    ** a best plan, which implies we looked at all leaves already.  So
    ** presumably enum memory was dumped so that the search could continue.
    ** It's safe to assume that a PR/ORIG subtree that costs more than the
    ** entire best plan so far is not useful, and we might as well reduce
    ** the search space.
    **
    ** Things are very different with the new enumeration.  New enumeration
    ** can and will look for best-plans that do NOT encompass all leaves.
    ** An early pass may generate a very cheap best-fragment that does not
    ** involve some expensive leaf.  A later pass (that includes that
    ** fragment) may require the expensive leaf to be able to form a plan.
    ** If all we make is the ORIG, it makes it look like the var can only
    ** be keyed to, which may in fact not be right.  This could prevent
    ** new enum from finding a valid plan for the larger set of tables, until
    ** it flushes enumeration memory and tries again.  Avoid this by always
    ** saving both the ORIG and PR/ORIG CO-tree forest.
    **
    ** (If somehow we got here as root, act normally;  shouldn't happen
    ** during greedy enum.)
    */
    if ((global->ops_estate.opn_rootflg
	||
	(subquery->ops_mask & OPS_LAENUM) == 0)
	&&
	prcost >= subquery->ops_cost)
    {
	if (op215)
	{
	    global->ops_trace.opt_subquery = subquery;
	    global->ops_trace.opt_conode = cop;
	    opt_cotree(cop);
	    global->ops_trace.opt_conode = NULL; 
	}
	return(OPN_SIGOK);	    /* do not do anything if the cost is larger
                                    ** than the current best plan
                                    */
    }
    ncop = opn_cmemory(subquery);   /* get space for a project-restrict CO node 
                                    */
    ncop->opo_storage = DB_HEAP_STORE; /* no reformat for this CO node */
    {
	OPV_GRV		*grvp;	    /* ptr to global range var descriptor
				    ** for ORIG node */
	if ((cop->opo_storage == DB_HEAP_STORE)  
	    &&
	    (grvp = varp->opv_grv)
	    )
	{
	    if (grvp->opv_gsubselect
		&&
		(
		    (grvp->opv_gsubselect->opv_subquery->ops_sqtype == OPS_RFAGG)
		    ||
		    (grvp->opv_gsubselect->opv_subquery->ops_sqtype == OPS_HFAGG)
		    ||
		    (grvp->opv_gsubselect->opv_subquery->ops_sqtype == OPS_RSAGG)
		    ||
		    (grvp->opv_gsubselect->opv_subquery->ops_sqtype == OPS_VIEW)
		)
	       )
# if 0
	    check for noncorrelated rfagg removed,
	    since correlated aggregates need an SEJOIN and these need
            to be projected/restrict always because OPC has a restriction
            in which the parent node of an aggregate result needs to be a
            PST_BOP with a "subselect indicator"
# endif
	    {   /* eliminate node from list to reduce cost calculations needed */
		jsbtp->opn_coback =
		jsbtp->opn_coforw = (OPO_CO *)jsbtp;
	    }
	    if (grvp->opv_gquery 
		&& 
		(   (grvp->opv_created == OPS_RFAGG)
		    ||
		    (grvp->opv_created == OPS_FAGG)
		))
		ncop->opo_storage = DB_SORT_STORE; /* function aggregate has
					** ordered by lists */	    		
	}
    }
    /* create a new CO project restrict node ncop which points to cop */
    cop->opo_pointercnt++;          /* cop has one more CO pointer to it */
    if (!(cop->opo_pointercnt))
	opx_error(E_OP04A8_USAGEOVFL); /* report error if usage count overflows */
    ncop->opo_ordeqc = OPE_NOEQCLS;

    switch (sargtype)		    /* define adjacent duplicate factor and
                                    ** sortedness factor */
    {
    case ADC_KEXACTKEY:		    /* isam or hash or explicit equality TID 
                                    ** clause in the qualification, (assume the
				    ** boolfact sarg constants are sorted and 
                                    ** then used to access the relation, this is
				    ** done by opb_create) */
    {
	if (hp)
	    ncop->opo_cost.opo_adfactor = hp->oph_reptf; /* this may happen 
				    ** with tid access */
	else				
	    ncop->opo_cost.opo_adfactor = 1.0; /* we do not have an ordering
                                    ** equivalence class yet so worst case
                                    ** adjacent duplicate factor */
	ncop->opo_cost.opo_sortfact = rlsp->opn_reltups; /* number of
                                    ** tuples in the relation */
        /* b107351 */
        if ( varp->opv_grv->opv_relation->rdr_rel->tbl_storage_type ==
	     DB_HASH_STORE )
	     ncop->opo_storage = DB_HASH_STORE;
        else
	{
	  if (varp->opv_grv->opv_relation->rdr_rel->tbl_storage_type == DB_BTRE_STORE)
	     ncop->opo_storage = DB_SORT_STORE;  /* mark node as sorted on the
				    ** ordering equivalence class if we
				    ** can guarantee it, this will cause
				    ** opo_pr to lookup the ordering which
				    ** is contained in range var descriptor
                                    ** FIXME- need to check multi-attribute
                                    ** ordering portion which is not ordered
				    */
	  else
	    ncop->opo_storage = varp->opv_grv->opv_relation->rdr_rel->tbl_storage_type;
	}	
	break;
    }

    case ADC_KRANGEKEY:	    /* must be isam or btree */
    {
	/*static*/opn_adfsf (ncop, cop, blocks, pblk, rlsp->opn_reltups);
	break;
    }

      default: 				/* read whole relation */
	    /*static*/opn_adfsf (ncop, cop, blocks, cop->opo_cost.opo_reltotb, 
                rlsp->opn_reltups);
    }

    ncop->opo_outer	= cop;              /* outer node is leaf i.e. original
                                            ** relation */
    ncop->opo_sjpr	= DB_PR;	    /* project restrict node */
    ncop->opo_union.opo_ojid = OPL_NOOUTER; /* not an outer join */
    ncop->opo_maps	= &eqsp->opn_maps;  /* save ptr to equivalence class
                                            ** map of attributes which will
                                            ** be returned */
    ncop->opo_cost.opo_pagesize = pagesize;
    ncop->opo_cost.opo_tpb	= (i4)opn_tpblk(global->ops_cb->ops_server,
						 pagesize, eqsp->opn_relwid); 
					    /* tuples per block */
    ncop->opo_cost.opo_cvar.opo_cache = OPN_SCACHE; /* project restrict will use
					    ** the single page cache prior to
					    ** accessing the block reads */
    ncop->opo_cost.opo_reltotb	= blocks;   /* estimated number of blocks in
                                            ** relation after project restrict*/
    ncop->opo_cost.opo_dio	= dio;      /* disk i/o to do this project
                                            ** restrict */
    ncop->opo_cost.opo_cpu	= cpu;	    /* cpu required for project 
                                            ** restrict*/
    if (pt_valid)
	ncop->opo_cost.opo_pagestouched = pblk; /* number of primary blocks 
					    ** read is an accurate estimate of 
					    ** pages touched */
    else
	ncop->opo_cost.opo_pagestouched = 0.0; /* if it is not accurate then
					    ** estimate 0.0 pages touched so
					    ** that page level locking will
					    ** be used */
    ncop->opo_cost.opo_tups	= pr_tuples; /* number of tuples in PR result */
    if(	(cop->opo_storage == DB_BTRE_STORE) /* if a btree */
	||
    	(cop->opo_storage == DB_ISAM_STORE) /* if a isam, then ordering
                                            ** can be used for partial
                                            ** sort merge, note that
                                            ** ncop->opo_storage is
                                            ** DB_HEAP_STORE so that
                                            ** ordering is not used for
                                            ** a full sort merge */
	||
	(ncop->opo_storage == DB_SORT_STORE)) /* or sorted due to exact
					    ** keys */
	opo_pr(subquery, ncop);		    /* initialize the ordering
					    ** for the project restrict
                                            ** node */

    /* In the following condition we check first whether the relation:
    ** (1) has a multi-attribute key with matching attributes in the
    ** query (opb_count > 1).
    ** (2) has a multi-attribute key with at least one matching boolean
    ** factor in the query (opb_bfcount > 0).
    ** If either is false then there is either no multi-attribute key,
    ** or a file scan is preferable to using the multi-attribute key.
    */
    if ( varp->opv_mbf.opb_count > 1 && varp->opv_mbf.opb_bfcount > 0 )
    {	/* look at all histograms in the relation and determine which
	** have a requirement for special processing due to multi-attribute
	** index restrictivity rules, FIXME, this should be replaced
	** by TRUE multi-attribute processing */
	OPH_HISTOGRAM	    *histp;
	OPZ_AT                 *abase;  /* base of array of ptrs to joinop
					** attribute elements */
	abase = subquery->ops_attrs.opz_base; /* base of array of ptrs to
					    ** joinop attribute elements */
	for (histp = rlsp->opn_histogram; histp; histp = histp->oph_next)
	    if (!(histp->oph_mask & OPH_COMPOSITE) && 
		abase->opz_attnums[histp->oph_attribute]->opz_mask & OPZ_MAINDEX)
	    {
		histp->oph_origtuples = rlsp->opn_reltups;
		histp->oph_odefined = TRUE;
	    }
  
    }
    cop		= ncop;			    /* all reformats go from cop
					    ** (we must make a copy of the
					    ** original relation before
					    ** reformatting) */
    varp->opv_pr = ncop;		    /* save project-restrict for calculation 
					    ** of initial guess of best set of
					    ** relations */
    varp->opv_prtuples = pr_tuples;
    {
	bool		root_flag;

	root_flag = global->ops_estate.opn_rootflg;
	if (distributed)
	{
	    bool		useful;	    /* TRUE if distributed plan
					    ** is useful for at least one site */
	    if (root_flag)
	    {
		OPO_COST	cpu_sortcost;   /* total cpu cost for sort node */
		OPO_COST	dio_sortcost;   /* total dio cost for sort node */
		if (subquery->ops_msort.opo_mask & OPO_MTOPSORT)
		{
		    cpu_sortcost = eqsp->opn_cpu;
		    dio_sortcost = eqsp->opn_dio;
		}
		else
		{
		    cpu_sortcost = 0.0;
		    dio_sortcost = 0.0;
		}
		useful = opd_prleaf(subquery, rlsp, cop, eqsp->opn_relwid, 
			cpu_sortcost, dio_sortcost); /* add 
					    ** distributed costs for this node*/
		if (!useful 
		    ||
		    opd_bestplan(subquery, cop,
			((subquery->ops_msort.opo_mask & OPO_MTOPSORT) != 0),
			cpu_sortcost, dio_sortcost, &sortcop))
		{
		    if (op215)
		    {
			global->ops_trace.opt_subquery = subquery;
			global->ops_trace.opt_conode = cop;
			opt_cotree(cop);
			global->ops_trace.opt_conode = NULL; 
		    }
		    return(OPN_SIGOK);
		}
		if (subquery->ops_msort.opo_mask & OPO_MTOPSORT)
		    opn_createsort(subquery, sortcop, cop, eqsp); /* initialize the fields of
					    ** the top sort node, opd_bestplan has already
					    ** placed the top sort node in the appropriate
					    ** CO plan locations */
	    }
	    else
	    {
		op216 = FALSE; /* op216 traces only the best plans */
		useful = opd_prleaf(subquery, rlsp, cop, eqsp->opn_relwid, 
		    (OPO_CPU)0.0, (OPO_BLOCKS)0.0); /* add 
					    ** distributed costs for this node*/
	    }
	}
	else if (root_flag)
	{
	    OPO_COST		prcost1;

	    prcost1 = opn_cost(subquery, dio, cpu);
	    if (subquery->ops_msort.opo_mask & OPO_MTOPSORT)
	    {	/* FIXME - can detect user specified orderings here and
		** avoid a sort node */
		OPO_ISORT	sordering;
		bool		sortrequired;
		if (cop->opo_storage == DB_SORT_STORE)
		    sordering = cop->opo_ordeqc;
		else
		    sordering = OPE_NOEQCLS;
		sortrequired = FALSE;	/* no deferred semantics problem for
					** single table lookups */

		if (subquery->ops_msort.opo_mask & OPO_MDESC)
		{
		    /* Single node and descending order by. Assure the key
		    ** structure isn't multi-attr in which the 1st column
		    ** is in a constant BF. Read backwards doesn't work. */
		    OPO_CO	*orig = cop;
		    OPO_EQLIST	*orig_list;
		
		    if ((orig->opo_sjpr == DB_ORIG || (orig = orig->opo_outer)
			&& orig->opo_sjpr == DB_ORIG) &&
			!(orig->opo_ordeqc < subquery->ops_eclass.ope_ev ||
			subquery->ops_bfs.opb_bfeqc == NULL) &&
			(orig->opo_ordeqc > OPE_NOEQCLS ||
			(orig_list = subquery->ops_msort.opo_base->opo_stable
			[orig->opo_ordeqc-subquery->ops_eclass.ope_ev]->opo_eqlist) 
			== NULL || BTtest((i4)orig_list->opo_eqorder[0],
			(char *)subquery->ops_bfs.opb_bfeqc))) sortrequired = TRUE;
		}
		if (opn_checksort(subquery, &subquery->ops_bestco, subquery->ops_cost,
			    dio, cpu, eqsp, &prcost1, sordering, &sortrequired, 
			    cop) )
		{
		    if (op215)
		    {
			global->ops_trace.opt_subquery = subquery;
			global->ops_trace.opt_conode = cop;
			opt_cotree(cop);
			global->ops_trace.opt_conode = NULL; 
		    }
		    return(OPN_SIGOK);	/* return if added cost of sort node, or creating
					** relation is too expensive */
		}
		if (sortrequired)
		    sortcop = opn_cmemory(subquery); /* allocate memory here so that
					** we do  not run out of memory prior to
					** calling opn_dcmemory, which would delete
					** the previous best plan */
	    }
	    opn_dcmemory(subquery, subquery->ops_bestco);
	    if (sortcop)
	    {
		opn_createsort(subquery, sortcop, cop, eqsp);
		subquery->ops_bestco = sortcop;
	    }
	    else
		subquery->ops_bestco	= cop;
	    subquery->ops_besthisto = rlsp->opn_histogram;

	    if (global->ops_gmask & OPS_FPEXCEPTION)
		global->ops_gmask |= OPS_BFPEXCEPTION; /* a plan was found which
					** did not have a floating point exception
					** so skip over subsequent plans with
					** floating point exceptions */
	    else
		global->ops_gmask &= (~OPS_BFPEXCEPTION); /* reset exception
					** flag if plan was found to be free
					** of float exceptions */
            subquery->ops_tcurrent++;	/* increment plan number */
	    subquery->ops_tplan = subquery->ops_tcurrent; /* since this is
					** a single leaf tree, opn_ceval
					** will detect a timeout */
	    /* save the best fragment so far for trace point op188/op215/op216 */
	    subquery->ops_bestfragment = subquery->ops_currfragment;
	    subquery->ops_cost	= prcost1;
	    global->ops_estate.opn_search = FALSE; /* new best CO
					** found so memory garbage
					** collection routines may be
					** useful */
	}

	if (!root_flag)
	{
	    op216 = FALSE; /* op216 traces only the best plans */
	    opn_coinsert (jsbtp, cop);	/* put in co list, do not place into CO
					** if at the root since it may be deallocated
					** when a new best plan is found, and the PR
					** is worthless if it was once the best plan
					** so it does not need to be in the linked list
					** of CO nodes */
	}
    }

    if (op188 || op215 || op216)
    {
	global->ops_trace.opt_subquery = subquery;
	if (!sortcop)
	{
	    global->ops_trace.opt_conode = cop;
	    opt_cotree(cop);
	}
	else
	{
	    global->ops_trace.opt_conode = sortcop;
	    opt_cotree(sortcop);
	}
	global->ops_trace.opt_conode = NULL; 
    }

    if (global->ops_cb->ops_check)
    {
	i4	    first;
	i4	    second;

	/* If we have a usable plan, check for trace point op255 timeout setting */
	if( subquery->ops_bestco &&
	    opt_svtrace( global->ops_cb, OPT_F127_TIMEOUT, &first, &second) &&
	    !(subquery->ops_mask & OPS_LAENUM) &&
	    (first <= subquery->ops_tcurrent) &&
	    /* check if all subqueries should be timed out OR a particular subquery is being searched for */
	    ( (second == 0) || (second == subquery->ops_tsubquery) ) )
	{
	    opx_verror(E_DB_WARN, E_OP0006_TIMEOUT, (OPX_FACILITY)0);
	    opn_exit(subquery, FALSE);
	    /*
	    ** At this point we return the subquery->opn_bestco tree.
	    ** This could also happen in freeco and enumerate 
	    */
	    return(OPN_SIGEXIT);
	}
    }
    return(OPN_SIGOK);
}
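/*
** Illustrative sketch (not part of the original source): the root-level
** bookkeeping above keeps the cheapest complete plan seen so far, replacing
** subquery->ops_bestco and subquery->ops_cost only when the candidate
** survives the opn_checksort() cost test.  The PLAN type, COST_PER_DIO
** weighting and keep_if_cheaper() below are hypothetical simplifications
** of that idea, not the actual OPF structures or cost formula.
*/
#include <stdbool.h>

typedef struct PLAN { double dio; double cpu; } PLAN;

#define COST_PER_DIO 4.0	/* assumed relative weight of one disk i/o */

static double
plan_cost(const PLAN *p)
{
    return p->dio * COST_PER_DIO + p->cpu;
}

/* Replace *best when cand is strictly cheaper; report whether it was kept. */
static bool
keep_if_cheaper(const PLAN *cand, PLAN *best, double *bestcost)
{
    double c = plan_cost(cand);

    if (c >= *bestcost)
	return false;		/* previous best plan is retained */
    *best = *cand;		/* candidate becomes the new best plan */
    *bestcost = c;
    return true;
}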
Example #12
0
/*{
** Name: opz_addatts	- add joinop attributes to the array
**
** Description:
**      This routine will add or find the appropriate joinop attribute 
**      to the joinop attributes array given the 
**      (range variable, range variable attribute number) pair.
**
**	The exception to this rule occurs with function attributes which
**      always result in a new joinop attribute being allocated.  A
**      function attribute uses "OPZ_FANUM" as the input attribute number.
**
** Inputs:
**      subquery                        ptr to subquery being analyzed
**      joinopvar                       index into joinop range table
**      dmfattr			        dmf attribute number
**      datatype                        ptr to ADT datatype
**
** Outputs:
**	Returns:
**	    joinop attribute number
**	Exceptions:
**	    none
**
** Side Effects:
**	    An entry may be allocated in the joinop attributes array 
**	    (OPS_SUBQUERY->ops_attrs)
**
** History:
**	20-apr-86 (seputis)
**          initial creation addatts
**      06-nov-89 (fred)
**          Added support for non-histogrammable datatypes.  This support
**	    entails providing a simple, coordinated replacement for this
**	    histogram.  In our case, the datatype & length will be varchar,
**	    minimum & maximum being those for varchar, and adc_helem()
**	    replacement will be the histogram for "nohistos".
**           
**          This code is done in OPF as opposed to ADF because I think it
**	    desirable that OPF manage the histogram replacement.  ADF will not
**	    know the appropriate values.  Furthermore, it is more desirable to
**	    teach OPF to manage w/out histograms;  however, that change is
**	    beyond the scope of this project.
**	26-dec-90 (seputis)
**	    init mask of booleans associated with attribute descriptor
**	18-jan-93 (rganski)
**	    Character Histogram Enhancements project:
**	    Initialize attr_ptr->opz_histogram.oph_dataval.db_length to 8
**	    before call to adc_hg_dtln(), which has been changed; this gives
**	    old behavior (limit for char types was 8). This length may be
**	    changed later when RDF gets info from iistatistics.
**	24-may-93 (rganski)
**	    Character Histograms Enhancements project:
**	    Initialize attr_ptr->opz_histogram.oph_dataval.db_length to 0
**	    before call to adc_hg_dtln(), which causes histogram value to be
**	    same length as attribute. This is necessary because otherwise the
**	    histogram value, which is used to determine selectivity of boolean
**	    factors, is truncated; this removes the benefits of having long
**	    histogram values. The histogram value length is adjusted in
**	    oph_catalog(), which reads the actual length from iistatistics (or
**	    truncates to 8, for old histograms).
**	6-dec-93 (ed)
**	    - bug 56139 - project union view before it is instantiated
**	09-oct-98 (matbe01)
**	    Added NO_OPTIM for NCR to eliminate runtime error that produces the
**	    message:  E_OP0889 Eqc is not available at a CO node.
**	 6-sep-04 (hayke02)
**	    Add OPZ_COLLATT attribute if OPS_COLLATION is on (resdom list,
**	    collation sequence). This change fixes problem INGSRV 2940, bug
**	    112873.
**	 8-nov-05 (hayke02)
**	    Add OPZ_VAREQVAR if OPS_VAREQVAR is on. Return if OPZ_VAREQVAR is
**	    not set and the attribute is found. This ensures that the fix for
**	    110675 is limited to OPZ_VAREQVAR PST_BOP nodes. This change
**	    fixes bug 115420 problem INGSRV 3465.
**       15-aug-2007 (huazh01)
**          ensure the datatype/length info of a created attribute matches 
**          the corresponding column type/length info on the user table. 
**          bug 117316.
**
[@history_line@]...
*/
OPZ_IATTS
opz_addatts(
	OPS_SUBQUERY       *subquery,
	OPV_IVARS	   joinopvar,
	OPZ_DMFATTR        dmfattr,
	DB_DATA_VALUE      *datatype)
{
	OPZ_IATTS              attribute;   /* used to save joinop attribute
					    ** number if found by opz_findatt
                                            */
        RDR_INFO		*rel;
   
        /* FIXME - need to deal with unsigned compare problem here */
	if ( (dmfattr != OPZ_FANUM) 
	     &&
             (dmfattr != OPZ_SNUM)
	     && 
             (dmfattr != OPZ_CORRELATED)
	     && 
	     (attribute = opz_findatt(subquery, joinopvar, dmfattr)) >= 0
	     &&
	     !(subquery->ops_attrs.opz_base->opz_attnums[attribute]->opz_mask &
								OPZ_VAREQVAR
	     &&
	     subquery->ops_mask2 & OPS_COLLATION
	     &&
	     (abs(datatype->db_datatype) == DB_CHA_TYPE
	     ||
	     abs(datatype->db_datatype) == DB_VCH_TYPE
	     ||
	     abs(datatype->db_datatype) == DB_CHR_TYPE
	     ||
	     abs(datatype->db_datatype) == DB_TXT_TYPE)) )
	    return( attribute );	    /* if this (joinopvar,dmfattr) pair 
					    ** exists and is not a function 
					    ** attribute, or subselect attribute
                                            ** then return
					    */

    {	/* create new joinop attribute */

	OPZ_IATTS		    nextattr; /* next available joinop attribute
                                            ** number
                                            */
	OPS_STATE                   *global; /* ptr to global state variable */

        global = subquery->ops_global;	    /* get ptr to global state variable 
                                            */
	if ( (nextattr = subquery->ops_attrs.opz_av++) >= OPZ_MAXATT)
	    opx_error(E_OP0300_ATTRIBUTE_OVERFLOW); /* exit with error
					    ** if no more room in attributes
                                            ** array
                                            */
	{
	    OPZ_ATTS               *attr_ptr;   /* ptr to newly created joinop 
					    ** attribute element */
	    OPV_VARS               *var_ptr; /* ptr to respective joinop
                                            ** variable containing attribute */

            var_ptr = subquery->ops_vars.opv_base->opv_rt[joinopvar];

	    /* allocate new joinop attribute structure and initialize */
	    subquery->ops_attrs.opz_base->opz_attnums[nextattr] = attr_ptr = 
		(OPZ_ATTS *) opu_memory( global, (i4) sizeof(OPZ_ATTS));
	    MEfill(sizeof(*attr_ptr), (u_char)0, (PTR)attr_ptr);


            /* b117316:
            **
            ** the fix to b109879 set the PST_VAR node under the RESDOM
            ** to nullable and increased the db_length by one, even though
            ** the column corresponding to the PST_VAR is defined as not
            ** null. We need to reset them to not null in order to prevent
            ** the wrong db_type and db_length being used during query
            ** execution. e.g., a wrong db_type and db_length could cause
            ** OPC to generate ADF code which materializes tuples using
            ** an incorrect offset and produces wrong results.
            **
            */
            rel = var_ptr->opv_grv->opv_relation;
            if (datatype->db_datatype < 0 && 
                dmfattr > 0 && 
                rel && 
                rel->rdr_attr[dmfattr]->att_type * -1 == datatype->db_datatype &&
                rel->rdr_attr[dmfattr]->att_width + 1 == datatype->db_length)
            {
                datatype->db_datatype = rel->rdr_attr[dmfattr]->att_type; 
                datatype->db_length = rel->rdr_attr[dmfattr]->att_width;
            }

	    attr_ptr->opz_varnm = joinopvar; /* index into local range table */
            BTset( (i4)nextattr, (char *)&var_ptr->opv_attrmap); /* indicate 
					    ** that this joinop
                                            ** attribute belongs to this range
                                            ** variable */
	    attr_ptr->opz_gvar = var_ptr->opv_gvar; /* move global range
                                            ** variable number to attribute */
	    attr_ptr->opz_attnm.db_att_id = dmfattr;/* dmf attribute of 
                                            ** range variable  */
	    STRUCT_ASSIGN_MACRO((* datatype), attr_ptr->opz_dataval);
	    attr_ptr->opz_equcls = OPE_NOEQCLS; /* this attribute has not been
                                            ** assigned an equivalence class yet
                                            */
	    attr_ptr->opz_func_att = OPZ_NOFUNCATT; /* this indicates that a
                                            ** function attribute is not
                                            ** associated with this joinop
                                            ** attribute element
                                            */
	    if ((dmfattr != OPZ_FANUM) 
		&&
		(dmfattr != OPZ_SNUM)
		&& 
		(dmfattr != OPZ_CORRELATED)
		&&
		(attribute >= 0))
		attr_ptr->opz_mask |= OPZ_COLLATT;
	    if (subquery->ops_mask2 & OPS_VAREQVAR)
		attr_ptr->opz_mask |= OPZ_VAREQVAR;
	    if ((var_ptr->opv_grv->opv_gmask & OPV_UVPROJECT)
		&&
		(dmfattr >= 0))
	    {	/* if union view exists in which a projection is possible 
		** (i.e. a UNION ALL view) then
		** mark the attribute which are referenced in the union view*/
		BTset((i4)dmfattr, (char *)var_ptr->opv_grv->opv_attrmap);
	    }
	    {
		/* initialize the histogram information associated with this
		** attribute
		*/
		DB_STATUS	hgstatus;	/* ADT return status */
		i4		dt_bits;

		hgstatus = adi_dtinfo(global->ops_adfcb,
					    datatype->db_datatype, &dt_bits);

		if ((hgstatus == E_AD0000_OK) && (dt_bits & AD_NOHISTOGRAM))
		{
		    attr_ptr->opz_histogram.oph_dataval.db_datatype =
							    OPH_NH_TYPE;
		    attr_ptr->opz_histogram.oph_dataval.db_prec =
							    OPH_NH_PREC;
		    attr_ptr->opz_histogram.oph_dataval.db_length =
							    OPH_NH_LENGTH;
		}
		else
		{
		    attr_ptr->opz_histogram.oph_dataval.db_length = 0;
		    hgstatus = adc_hg_dtln(global->ops_adfcb, datatype, 
			&attr_ptr->opz_histogram.oph_dataval );
		}
# ifdef E_OP0780_ADF_HISTOGRAM
		if ((hgstatus != E_AD0000_OK)
		    ||
		    (attr_ptr->opz_histogram.oph_dataval.db_datatype < 0)
					    /* do not expect a nullable
                                            ** histogram datatype */
		   )
		    opx_verror( hgstatus, E_OP0780_ADF_HISTOGRAM, 
			global->ops_adfcb->adf_errcb.ad_errcode); /*
                                            ** abort if unexpected error occurs
                                            */
# endif
		attr_ptr->opz_histogram.oph_dataval.db_data = NULL; /* init the
                                            ** data value ptr */
		attr_ptr->opz_histogram.oph_numcells = OPH_NOCELLS; /* histogram
                                            ** has not been fetched so no cells
                                            ** exist at this point
                                            */
		attr_ptr->opz_histogram.oph_histmm = NULL; /* no histogram
                                            ** exists at this point
					    */
                attr_ptr->opz_histogram.oph_mindomain = NULL; /* no minimum
                                            ** value for this histogram yet */
                attr_ptr->opz_histogram.oph_maxdomain = NULL; /* no maximum
					    ** value for this histogram yet */
	    }
	    if (dmfattr == DB_IMTID)
	    {
		/* implicit TID found so an equivalence class needs to be
                ** created ... so we know the equivalence class of the
                ** implicit TID for this (since the implicit TID is referenced
                ** by an index or explicitly in the qualification)
		*/
		var_ptr->opv_primary.opv_tid 
		    = ope_neweqcls( subquery, nextattr );
	    }
	}
    return( nextattr );			/* return joinop attribute which has
                                        ** been assigned
                                        */
    }
}
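/*
** Illustrative sketch (not part of the original source): opz_addatts() above
** is a find-or-create routine - an existing joinop attribute is reused when
** the (range variable, attribute) pair is already present, otherwise the next
** free slot is claimed and initialized, with an overflow check.  The TABLE
** and ATT types and find_att()/add_att() below are hypothetical stand-ins
** for the OPZ_AT array and opz_findatt().
*/
#define MAX_ATT 64

typedef struct ATT { int varno; int attno; } ATT;
typedef struct TABLE { ATT att[MAX_ATT]; int count; } TABLE;

static int
find_att(const TABLE *t, int varno, int attno)
{
    int i;

    for (i = 0; i < t->count; i++)
	if (t->att[i].varno == varno && t->att[i].attno == attno)
	    return i;		/* already present: reuse this entry */
    return -1;
}

static int
add_att(TABLE *t, int varno, int attno)
{
    int i = find_att(t, varno, attno);

    if (i >= 0)
	return i;
    if (t->count >= MAX_ATT)
	return -1;		/* analogous to E_OP0300_ATTRIBUTE_OVERFLOW */
    i = t->count++;		/* claim the next free slot */
    t->att[i].varno = varno;
    t->att[i].attno = attno;
    return i;
}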
Example #13
0
/*{
** Name: ops_init - initialize structures needed for optimization
**
** Description:
**	This routine will initialize the "global state" variable which contains
**      all information for the optimization of this query for this session.
**
** Inputs:
**      global				ptr to global state variable
**          .ops_cb                     ptr to session control block
**          .ops_caller_cb              ptr to same object as opf_cb
**      opf_cb                          caller's control block
**
** Outputs:
**      global                          all components initialized
**	Returns:
**	    E_DB_OK, E_DB_ERROR
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	24-feb-86 (seputis)
**          initial creation
**	26-nov-90 (stec)
**	    Added initialization of ops_prbuf in OPS_STATE struct.
**	28-dec-90 (stec)
**	    Changed initialization of print buffer; ops_prbuf has been
**	    removed, opc_prbuf in OPC_STATE struct will be initialized
**	    instead.
**      28-jan-91 (seputis)
**          added support for OPF ACTIVE FLAG
**	11-jun-91 (seputis)
**	    ifdef distributed code until header files merge
**      04-sep-92 (fpang)
**          Fixed initialization of rdr_r1_distrib.
**      27-apr-95 (inkdo01)
**          Init ops_osubquery to NULL 
**	26-nov-96 (inkdo01)
**	    Allow users OPF memory equal to 1.5 times the config.dat definition. That way
**	    the boundary users don't dictate the size of the mem pool (it being
**	    highly unlikely that all users will want big memory simultaneously).
**	12-dec-96 (inkdo01)
**	    Shamed into doing the above change more rigorously. Now a new
**	    config parm (opf_maxmemf) defines the proportion of the OPF pool
**	    available to any session. This gets computed in opsstartup, so the
**	    code added for the previous change is now removed.
**	20-jun-1997 (nanpr01)
**	    Initialize the rdf_info_blk to NULL. This is especially required
**	    for queries without a base table.
**	23-oct-98 (inkdo01)
**	    Init opc_retrowno, opc_retrowoff for row producing procs.
**	27-oct-98 (inkdo01)
**	    Quicky afterthought to init opc_retrow_rsd.
**	11-oct-2006 (hayke02)
**	    Send E_OP0002_NOMEMORY to errlog.log. This change fixes bug 116309.
**      25-Nov-2008 (hanal04) Bug 121248
**          Initialise new caller_ref field in the opv_rdfcb to avoid SEGVs
**          later on.
**	25-feb-10 (smeke01) b123333
**	    As the NULL-ing of ops_trace.opt_conode has been removed from
**	    opt_printCostTree() we need to make sure it is initialised here  
**	    prior to any call of opt_cotree() by trace point op145 (set qep).
[@history_line@]...
[@history_template@]...
*/
VOID
ops_init(
	OPF_CB             *opf_cb,
	OPS_STATE          *global)
{
    /* initialize some variables so that error recovery can determine which
    ** resources to free (i.e. which resources have been successfully
    ** allocated)
    ** - this must be done prior to the allocation of a memory stream since
    ** the streamid is used to indicate whether any resources at all have
    ** been allocated
    */

    global->ops_cb = (OPS_CB *)opf_cb->opf_scb; /* save session control block */
    /* global->ops_caller_cb initialized before exception handler established */
    global->ops_adfcb = global->ops_cb->ops_adfcb; /* get current ADF control
                                        ** block for session */
    if (global->ops_adfcb)
    {	/* init adf_constants since this may be uninitialized after the
	** previous query executed by QEF, and may cause ADF to write
	** to deallocated memory */
	global->ops_adfcb->adf_constants = (ADK_CONST_BLK *)NULL;
    }
    global->ops_qheader = NULL;		
    global->ops_statement = NULL;		
    global->ops_procedure = NULL;	/* - NULL indicates that QSF query
                                        ** tree has not been fixed 
                                        ** - used by error handling to determine
                                        ** if this resource needs to be 
                                        ** deallocated
                                        */
    /* global->ops_lk_id initialized below */
    /* global->ops_parmtotal initialized below */

    global->ops_mstate.ops_streamid = NULL;/* init so deallocate routines do not
                                        ** access unless it is necessary */
    global->ops_mstate.ops_sstreamid = NULL; /* init so deallocate routines do
                                        ** not access unless it is necessary */
    global->ops_mstate.ops_tstreamid = NULL; /* the temporary buffer stream is
                                        ** allocated only when needed */
    global->ops_subquery = NULL;        /* initialize the subquery list */
    global->ops_osubquery = NULL;

    /* initialise pointer used in opt_printCostTree() for tracing CO node */
    global->ops_trace.opt_conode = NULL;  

    /* global->ops_astate initialized by aggregate processing phase */
    /* global->ops_estate initialized by joinop processing phase */
    global->ops_qpinit = FALSE;		/* query plan object not allocated yet*/
    global->ops_cstate.opc_prbuf = NULL;/* trace print buffer ptr. */
    global->ops_cstate.opc_relation = NULL;  /* init relation descriptor
                                            ** so deallocation routine will
                                            ** only be done if OPC allocates
                                            ** an RDF descriptor */
    global->ops_cstate.opc_retrowno = -1;
    global->ops_cstate.opc_retrowoff = 0;  /* result row buffer init */
    global->ops_cstate.opc_retrow_rsd = (PST_QNODE *) NULL;
    ops_qinit(global, (PST_STATEMENT *)NULL); /* init range table only for resource
					** deallocation */
    {
	/* allocate a memory stream to be used by the optimizer
        ** - the streamid PTR was initialized to NULL earlier - prior to the
        ** establishment of the exception handler so that it can be used
        ** to indicate to the cleanup routines that no resources have been
        ** allocated
        */
        DB_STATUS       ulmstatus;

        global->ops_mstate.ops_ulmrcb.ulm_facility = DB_OPF_ID; /* identifies optimizer
					** so that ULM can make SCF calls on
                                        ** behalf of the optimizer */
        global->ops_mstate.ops_ulmrcb.ulm_poolid = global->ops_cb->ops_server->opg_memory;
                                        /* poolid of OPF
                                        ** obtained at server startup time */
        global->ops_mstate.ops_ulmrcb.ulm_blocksize = 0; /* use default for 
					** now */
        global->ops_mstate.ops_memleft =global->ops_cb->ops_alter.ops_maxmemory;
                                        /* save amount of memory which can be 
                                        ** used by this session */
        global->ops_mstate.ops_mlimit = global->ops_mstate.ops_memleft / 10;
					/* if 10% of memory is left trigger 
                                        ** garbage collection routines */
        global->ops_mstate.ops_ulmrcb.ulm_memleft = &global->ops_mstate.ops_memleft; /* 
					** and point to it for ULM */
	global->ops_mstate.ops_ulmrcb.ulm_streamid_p = &global->ops_mstate.ops_streamid; /*
					** Where ULM will return streamid */
	global->ops_mstate.ops_ulmrcb.ulm_flags = ULM_PRIVATE_STREAM;
					/* Allocate private, thread-safe streams */
        ulmstatus = ulm_openstream(&global->ops_mstate.ops_ulmrcb); /* get memory for the
                                        ** optimizer */
        if (DB_FAILURE_MACRO(ulmstatus))
	{
	    opx_lerror(E_OP0002_NOMEMORY, 0, 0, 0, 0, 0);
	    opx_verror( ulmstatus, E_OP0002_NOMEMORY, 
		global->ops_mstate.ops_ulmrcb.ulm_error.err_code);
	}
        global->ops_mstate.ops_tptr = NULL; /* init temp buffer ptr from 
                                        ** ops_tstreamid*/
        global->ops_mstate.ops_tsize = 0;          /* init temp buffer size */
					/* ULM has set ops_streamid to the
                                        ** streamid for "global optimizer"
                                        ** memory, note the enumeration will
                                        ** create other streamids for "local"
                                        ** memory but still use the 
                                        ** same control ops_ulmrcb 
                                        ** in order to decrement and
                                        ** increment the same "memleft" counter
                                        */
	/* initialize ptrs to full size array of ptrs, allocate once for DB procedure
	** or 4K will be wasted for each assignment statement and query */
        global->ops_mstate.ops_trt = NULL;
        global->ops_mstate.ops_tft = NULL;
        global->ops_mstate.ops_tat = NULL;
        global->ops_mstate.ops_tet = NULL;
        global->ops_mstate.ops_tbft = NULL;
	global->ops_mstate.ops_usemain = FALSE; /* disable redirection of
					** memory allocation */
	global->ops_mstate.ops_totalloc = 0;	/* init stats fields */
	global->ops_mstate.ops_countalloc = 0;
	global->ops_mstate.ops_count2kalloc = 0;
	global->ops_mstate.ops_recover = 0;
    }

    
    {
	/* get the procedure from QSF */
        DB_STATUS	       qsfstatus;  /* QSF return status */

	qsfstatus = ops_gqtree(global); /* get procedure from QSF */
        if (DB_FAILURE_MACRO(qsfstatus))
	    opx_verror( qsfstatus, E_OP0085_QSO_LOCK, 
		global->ops_qsfcb.qsf_error.err_code); /* report error */

    }

    {
	/* initialize the RDF control block used to fetch information for
        ** the global range table */
	RDR_RB                 *rdfrb;      /* ptr to RDF request block */

	rdfrb = &global->ops_rangetab.opv_rdfcb.rdf_rb;
	global->ops_rangetab.opv_rdfcb.rdf_info_blk = NULL;
        global->ops_rangetab.opv_rdfcb.caller_ref = (RDR_INFO **)NULL;
	rdfrb->rdr_db_id = global->ops_cb->ops_dbid; /* save the data base id
                                            ** for this session only */
        rdfrb->rdr_unique_dbid = global->ops_cb->ops_udbid; /* save unique
                                            ** dbid for all sessions */
        rdfrb->rdr_session_id = global->ops_cb->ops_sid; /* save the session id
                                            ** for this session */
        rdfrb->rdr_fcb = global->ops_cb->ops_server->opg_rdfhandle; /* save the
					    ** poolid for the RDF info */
        if (global->ops_cb->ops_smask & OPS_MDISTRIBUTED)
            rdfrb->rdr_r1_distrib = DB_3_DDB_SESS;
	else
            rdfrb->rdr_r1_distrib = 0;
	if (global->ops_cb->ops_smask & OPS_MCONDITION)
	    rdfrb->rdr_2types_mask = RDR2_TIMEOUT; /* indicate that timeout should
					    ** occur on all RDF accesses */
	else
	    rdfrb->rdr_2types_mask = 0;
	rdfrb->rdr_instr = RDF_NO_INSTR;
    }
}
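/*
** Illustrative sketch (not part of the original source): the memory setup in
** ops_init() above gives the session a byte quota (ops_memleft) and a
** low-water mark of one tenth of that quota (ops_mlimit) at which garbage
** collection is triggered.  MEM_STATE, mem_init() and mem_take() below are
** hypothetical simplifications; in the real code ULM decrements the counter
** through ulm_memleft.
*/
typedef struct MEM_STATE {
    long	memleft;	/* bytes the session may still allocate */
    long	mlimit;		/* threshold that triggers collection */
} MEM_STATE;

static void
mem_init(MEM_STATE *m, long quota)
{
    m->memleft = quota;
    m->mlimit = quota / 10;	/* collect when 10% of the quota remains */
}

/* Account for an allocation; returns 1 when collection should be attempted. */
static int
mem_take(MEM_STATE *m, long size)
{
    m->memleft -= size;
    return (m->memleft <= m->mlimit);
}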
Example #14
0
VOID
opc_querycomp(
		OPS_STATE          *global)
{
    DB_STATUS	    	ret;

    global->ops_gmask |= OPS_OPCEXCEPTION;  /* mark facility as being in OPC */
#ifdef OPT_F033_OPF_TO_OPC
    if (opt_strace(global->ops_cb, OPT_F033_OPF_TO_OPC) == TRUE)
    {
	char	temp[OPT_PBLEN + 1];
	bool	init = 0;

	if (global->ops_cstate.opc_prbuf == NULL)
	{
	    global->ops_cstate.opc_prbuf = temp;
	    init++;
	}

	/* Trace all of 'global' */
        if (global->ops_statement != NULL)
        {
	    opt_state(global);
	}
	    
	if (init)
	{
	    global->ops_cstate.opc_prbuf = NULL;
	}
    }
#endif

    if ( opt_strace(global->ops_cb, OPT_F071_QEP_WITHOUT_COST ) == TRUE && global->ops_subquery)
    {
	opt_cotree_without_stats( global );
    }

    /* If this is CREATE TABLE, check for primary, unique, foreign key
    ** constraints to use for default base table structure. */
    if (global->ops_statement &&
	global->ops_statement->pst_type == PST_CREATE_TABLE_TYPE &&
	global->ops_statement->pst_specific.pst_createTable.
			pst_createTableFlags == PST_CRT_TABLE)
    {
	QEU_CB *qeucb = global->ops_statement->pst_specific.pst_createTable.pst_createTableQEUCB;
	DMU_CB *dmucb = (DMU_CB *) qeucb->qeu_d_cb;
	bool checkit = FALSE;

	if (BTtest(DMU_AUTOSTRUCT, dmucb->dmu_chars.dmu_indicators))
	    checkit = (dmucb->dmu_chars.dmu_flags & DMU_FLAG_AUTOSTRUCT) != 0;
	else
	    checkit = opt_strace(global->ops_cb, OPT_F084_TBLAUTOSTRUCT ) ||
			global->ops_cb->ops_alter.ops_autostruct != 0;
	if (checkit)
	    opc_checkcons(global->ops_statement, dmucb);
    }

    /* On entry for rule processing, assume ops_qpinit == TRUE. There	    */
    /* is no need to allocate a memory stream since we are continuing	    */
    /* processing on the QP that was started by the triggering statement. */
    if (global->ops_qpinit == FALSE)
    {
	/* First, lets open the stack ULM memory stream that OPC uses */
	opu_Osmemory_open(global);

	/* Tell QSF that we want to store an object; */
	global->ops_qsfcb.qsf_obj_id.qso_type = QSO_QP_OBJ;
	if (global->ops_procedure->pst_flags & PST_REPEAT_DYNAMIC)
	{
	    char	*p;

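	    /* Hand-packed QSF object name for a cached dynamic plan: the
	    ** cursor id value is copied to the start of qso_name, a "qp"
	    ** (or "ql" when the caller asked for a locator) tag is placed
	    ** at offset 2*sizeof(i4), and the unique database id follows
	    ** at offset sizeof(DB_CURSOR_ID). */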
	    global->ops_qsfcb.qsf_obj_id.qso_lname = sizeof(DB_CURSOR_ID) + sizeof(i4);
	    MEfill(sizeof(global->ops_qsfcb.qsf_obj_id.qso_name), 0,
		   global->ops_qsfcb.qsf_obj_id.qso_name);
	    MEcopy((PTR)&global->ops_procedure->pst_dbpid.db_cursor_id[0],
		   sizeof (global->ops_procedure->pst_dbpid.db_cursor_id[0]),
		   (PTR)global->ops_qsfcb.qsf_obj_id.qso_name);
	    p = (char *) global->ops_qsfcb.qsf_obj_id.qso_name + 2*sizeof(i4);
	    if (global->ops_caller_cb->opf_locator)
		MEcopy((PTR)"ql", sizeof("ql"), p);
	    else MEcopy((PTR)"qp", sizeof("qp"), p);
	    p = (char *) global->ops_qsfcb.qsf_obj_id.qso_name + sizeof(DB_CURSOR_ID);
	    I4ASSIGN_MACRO(global->ops_caller_cb->opf_udbid, *(i4 *) p); 


	}
	else if (   global->ops_procedure->pst_isdbp == TRUE
	    || (   global->ops_qheader != NULL
		&& (global->ops_qheader->pst_mask1 & PST_RPTQRY)
	       )
	   )
	{
	    global->ops_qsfcb.qsf_obj_id.qso_lname = 
		sizeof (global->ops_procedure->pst_dbpid);
	    MEcopy((PTR)&global->ops_procedure->pst_dbpid, 
		   sizeof (global->ops_procedure->pst_dbpid),
		   (PTR)global->ops_qsfcb.qsf_obj_id.qso_name);
	}
	else
	{
	    global->ops_qsfcb.qsf_obj_id.qso_lname = 0;
	}

	/* Also allow for the case where a concurrent clash causes a new
	** object at an awkward point */
	if ((ret = qsf_call(QSO_CREATE, &global->ops_qsfcb)) != E_DB_OK &&
	    !(ret == E_DB_ERROR &&
		global->ops_qsfcb.qsf_error.err_code == E_QS001C_EXTRA_OBJECT) &&
	    !((global->ops_procedure->pst_flags & PST_SET_INPUT_PARAM)  &&
	     global->ops_qsfcb.qsf_error.err_code == E_QS001C_EXTRA_OBJECT))
	{
	    /* if object exists and we have a named query plan. */
	    if (global->ops_qsfcb.qsf_error.err_code
		    == E_QS000A_OBJ_ALREADY_EXISTS
	       )
	    {
		/* Log query info */
		QSO_OBID    *obj = &global->ops_qsfcb.qsf_obj_id;
		char	    *qrytype;
		char	    *objtype;
		char	    *objname;
		char	    *qrytext;
		char	    tmp[(DB_OWN_MAXNAME + DB_CURSOR_MAXNAME)  + 3 + 1];
		DB_STATUS   status;
		QSF_RCB	    qsf_rb;
		PSQ_QDESC   *qdesc;

		if (global->ops_procedure->pst_isdbp == TRUE)
		    qrytype = "database procedure";
		else if (global->ops_qheader != NULL
			 && (global->ops_qheader->pst_mask1 & PST_RPTQRY)
			)
		    qrytype = "repeat query";
		else
		    qrytype = "non-repeat query";

		objtype = "QSO_QP_OBJ";

		if (obj->qso_lname == 0)
		{
		    objname = "QSF object has no name";
		}
		else
		{
		    char	    fmt[30];
		    DB_CURSOR_ID    *curid;
		    char	    *user;
		    i4		    *dbid;

		    curid = (DB_CURSOR_ID *)obj->qso_name;
		    user = curid->db_cur_name + DB_CURSOR_MAXNAME;
		    dbid = (i4 *)(user + DB_OWN_MAXNAME);

		    STprintf(fmt, ":%%lx:%%lx:%%.%ds:%%.%ds:%%lx:",
			DB_CURSOR_MAXNAME, DB_OWN_MAXNAME);

		    STprintf(tmp, fmt, (i4)curid->db_cursor_id[0], 
			(i4)curid->db_cursor_id[1],
			curid->db_cur_name, user, (i4)(*dbid));

		    objname = tmp;
		}
 
		qsf_rb.qsf_type = QSFRB_CB;
		qsf_rb.qsf_ascii_id = QSFRB_ASCII_ID;
		qsf_rb.qsf_length = sizeof(qsf_rb);
		qsf_rb.qsf_owner = (PTR)DB_OPF_ID;
		qsf_rb.qsf_obj_id.qso_handle = global->ops_caller_cb->opf_thandle;

		qrytext = "Query text was not available.";

		if (qsf_rb.qsf_obj_id.qso_handle != NULL)
		{
		    status = qsf_call(QSO_INFO, &qsf_rb);

		    if (DB_SUCCESS_MACRO(status))
		    {
			qdesc = (PSQ_QDESC*) qsf_rb.qsf_root;

			qrytext = qdesc->psq_qrytext;
		    }
		}

		/* log an error */
		opx_lerror((OPX_ERROR)E_OP089F_QSF_FAILCREATE, (i4)4,
		    (PTR)qrytype, (PTR)objtype, (PTR)objname, (PTR)qrytext);
	    }

	    opx_verror(ret, E_OP0882_QSF_CREATE, 
		global->ops_qsfcb.qsf_error.err_code);
	}

	/* Put the handle for the QEP into the callers CB; 
	** - will be used for deallocation in case of an error
	** - both the object id and the lock id are needed in order to destroy
	** the object
	*/
	STRUCT_ASSIGN_MACRO(global->ops_qsfcb.qsf_obj_id,
	    global->ops_caller_cb->opf_qep);
	global->ops_qplk_id = global->ops_qsfcb.qsf_lk_id;
	global->ops_qpinit = TRUE;

	/* Allocate and initialize the QP. */
	opc_iqp_init(global);
    }

    /* Continue the QP compilation by adding the current statement */
    if (global->ops_statement != NULL)
    {
	opc_cqp_continue(global);
    }

    /* if it's time to stop compiling the query, then lets close stuff. */
    /* The caller is responsible for making one additional call to OPC	    */
    /* with ops_statement == NULL after all statements in the QP we are	    */
    /* currently building have been compiled. Note that this is a change    */
    /* from the previous version of this routine which required the extra   */
    /* call only if a db procedure was being compiled. Such a call must	    */
    /* also be made after the last statement in each rule list. This allows */
    /* OPC to link all conditional statements in the rule list together     */
    /* before continuing with the next user statement to be compiled. */
    if (global->ops_statement == NULL)
    {
	/* We're finished compiling all of the statements, so lets finish
	** the QP
	*/
	opc_fqp_finish(global);

	/* The QP is only associated with the outer query, not a rule list  */
	if (!global->ops_inAfterRules && !global->ops_inBeforeRules)
	{
	    /* Tell QSF what the root of the QEP is; */
	    global->ops_qsfcb.qsf_root = (PTR) global->ops_cstate.opc_qp;
	    if ((ret = qsf_call(QSO_SETROOT, &global->ops_qsfcb)) != E_DB_OK)
	    {
		opx_verror(ret, E_OP0883_QSF_SETROOT, 
					global->ops_qsfcb.qsf_error.err_code);
	    }

	    if ((ret = qsf_call(QSO_UNLOCK, &global->ops_qsfcb)) != E_DB_OK)
	    {
		opx_verror(ret, E_OP089E_QSF_UNLOCK, 
		    global->ops_qsfcb.qsf_error.err_code);
	    }

	    /* Now lets close the stack ULM memory stream that OPC used */
	    opu_Csmemory_close(global);
	}
    }
    global->ops_gmask &= (~OPS_OPCEXCEPTION);  /* mark facility as leaving OPC */
}
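/*
** Illustrative sketch (not part of the original source): as the comments in
** opc_querycomp() above describe, the caller invokes the compiler once per
** statement and then once more with ops_statement == NULL so the query plan
** can be finished and handed back to QSF.  STMT, compile_stmt() and
** finish_qp() below are hypothetical stand-ins for that calling convention.
*/
typedef struct STMT { struct STMT *next; } STMT;

static void
compile_stmt(const STMT *s)
{
    (void)s;			/* stand-in for opc_cqp_continue() */
}

static void
finish_qp(void)
{
    /* stand-in for opc_fqp_finish() plus the QSO_SETROOT/QSO_UNLOCK calls */
}

static void
compile_all(const STMT *list)
{
    const STMT	*s;

    for (s = list; s != NULL; s = s->next)
	compile_stmt(s);	/* one call per statement in the plan */
    finish_qp();		/* final call with no statement */
}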
Example #15
0
/*{
** Name: opv_parser	- init global range table element given parser varno
**
** Description:
**      This routine will initialize the global range table element in OPF
**      corresponding to the PSF range table element.
**
** Inputs:
**      global                          ptr to global range table
**      gvar                            element in parser range table
**                                      which is referenced in query
**
** Outputs:
**      global->ops_rangetab.opv_base[gvar] initialize corresponding element
**                                      in optimizer range table
**	Returns:
**	    VOID
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	2-jul-86 (seputis)
**          initial creation
**	6-nov-88 (seputis)
**          change RDF invalidation to include all indexes on base relation
**          since this is the only notification that OPF gets that its time
**          stamp is out of date
**      25-sep-89 (seputis)
**          - made addition to timestamp check, to refresh if this is a
**          multi-variable query and the tuple count is zero
**	9-nov-89 (seputis)
**          - added OPA_NOVID initialization for b8160, and corrected
**	    sep 25 fix for timestamps
**	12-jan-90 (seputis)
**	    - detect table ID's which are the same for different range vars
**	26-feb-91 (seputis)
**	    - add improved diagnostics for b35862
**      31-dec-91 (seputis)
**          - flush cache entry if tuple count is zero and more than one
**          range table entry, since aggregate queries and flattened queries
**          are not getting handled.
**	12-feb-93 (jhahn)
**	    Added support for statement level rules. (FIPS)
**	12-apr-93 (ed)
**	    - fix bug 50673, relax range variable check for larger OPF table
**      7-dec-93 (ed)
**          b56139 - add OPZ_TIDHISTO to mark tid attributes which
**          need histograms,... needed since target list is traversed
**          earlier than before
**      16-feb-95 (inkdo01)
**          b66907 - check for explicit refs to inconsistent tabs/idxes
**      23-feb-95 (wolf) 
**          Recent addition of MEcopy call should have been accompanied by
**	    an include of me.h
**      10-aug-98 (stial01)
**          opv_parser() Remove code to invalidate indexes. The invalidate
**          table that follows will do the job. Invalidate indexes by table id
**          can get E_OP008E_RDF_INVALIDATE, E_RD0045_ULH_ACCESS
**	31-oct-1998 (nanpr01)
**	    Reset the rdfcb->infoblk ptr before checking the error code.
**	18-june-99 (inkdo01)
**	    Init opv_ttmodel for temp table model histogram feature.
**	19-Jan-2000 (thaju02)
**	    relation descriptor may be out of date and contain a stale 
**	    secondary index count, use rdfcb->rdf_info_block->rdr_no_index 
**	    which reflects the number of index entries in the rdr_indx 
**	    array. (b100060)
**	17-Jan-2004 (schka24)
**	    Rename RDR_BLD_KEY to RDR_BLD_PHYS.
**	3-Sep-2005 (schka24)
**	    Remove if-0'ed out section that included a ref to a member
**	    that is going away (opv_ghist).
**	17-Nov-2005 (schka24)
**	    Don't propagate RDF invalidates that we cause.  Our RDF cache
**	    is out of date but that's not other servers' problem.
**	14-Mar-2007 (kschendel) SIR 122513
**	    Light "partitioned" flag if partitioned table seen.
**	18-april-2008 (dougi)
**	    Add support for table procedures.
**	8-Jul-2010 (wanfr01) b123949
**	    If rdf_gdesc failed with an error, don't use the column 
**	    information - it may not be fully initialized.
*/
bool
opv_parser(
	OPS_STATE          *global,
	OPV_IGVARS	   gvar,
	OPS_SQTYPE         sqtype,
	bool		   rdfinfo,
	bool               psf_table,
	bool               abort)
{
    OPV_GRT             *gbase;	    /* ptr to base of global range table */

# ifdef E_OP0387_VARNO
    if ((gvar < 0) || ((gvar >= PST_NUMVARS) && (gvar >= OPV_MAXVAR)))
	opx_error(E_OP0387_VARNO ); /* range var out of range - 
                                    ** consistency check */
# endif

    gbase = global->ops_rangetab.opv_base; /* get base of global range table
                                    ** ptr to array of ptrs */
    if ( !gbase->opv_grv[gvar] )
    {
	/* if global range variable element has not been allocated */
	OPV_GRV             *grvp;	    /* ptr to new range var element */
	
        if (global->ops_rangetab.opv_gv <= gvar)
            global->ops_rangetab.opv_gv = gvar+1; /* update the largest range
                                            ** table element assigned so far
                                            */
        grvp = (OPV_GRV *) opu_memory(global, sizeof( OPV_GRV ) ); /* save
                                            ** and allocate ptr to global
                                            ** var */
	/* Explicitly zero out the grv entry */
	MEfill(sizeof(*grvp), (u_char)0, (PTR)grvp);
	grvp->opv_qrt = gvar;		    /* save index to
                                            ** parser range table element */
        grvp->opv_created = sqtype;         /* save global range table type */
	grvp->opv_gvid = OPA_NOVID;	    /* if this base table was implicitly
					    ** referenced then the view id of the
					    ** explicitly reference view will be
					    ** saved */
        grvp->opv_siteid = OPD_NOSITE;      /* initialize distributed site location */
	grvp->opv_same = OPV_NOGVAR;	    /* used to map tables with similar
					    ** IDs */
	grvp->opv_compare = OPV_NOGVAR;	    /* used to map tables with similar
					    ** IDs */
	grvp->opv_ttmodel = NULL;	    /* RDR_INFO ptr for temp table model
					    ** histograms */
	gbase->opv_grv[gvar] = grvp;	    /* place into table */

        /* get RDF information about the table */
        if (rdfinfo)
        {
	    RDF_CB             *rdfcb;	    /* ptr to RDF control block which
                                            ** has proper db_id and sessionid
                                            ** info */
	    PST_RNGENTRY       *rngentry;   /* ptr to parse tree range entry */
	    DB_STATUS          status;      /* RDF return status */

	    i4	       ituple;

            rdfcb = &global->ops_rangetab.opv_rdfcb;
	    if (psf_table)
	    {
		/* Snag table procedures and handle them elsewhere. */
		if ((rngentry = global->ops_qheader->pst_rangetab[gvar])
						->pst_rgtype == PST_TPROC)
		{
		    if (opv_tproc(global, gvar, grvp, rngentry) == E_DB_OK)
			return(FALSE);
		    else return(TRUE);
		}

		STRUCT_ASSIGN_MACRO(global->ops_qheader->pst_rangetab[gvar]->
		    pst_rngvar, rdfcb->rdf_rb.rdr_tabid); /* need 
						** table id from parser's table */
#if 0
                if ((BTnext((i4)-1, (char *)&rangep->pst_outer_rel,
                    (i4)BITS_IN(rangep->pst_outer_rel)) >= 0)
                    ||
                    (BTnext((i4)-1, (char *)&rangep->pst_inner_rel,
                    (i4)BITS_IN(rangep->pst_outer_rel)) >= 0)
                    )
                    grvp->opv_gmask |= OPV_GOJVAR; /* mark whether this
                                            ** variable is within the scope of an
                                            ** outer join */
/* OPV_GOJVAR is not reliably set since the subquery bitmap should be tested 
** also for an aggregate temp, multi-to-one mappings may exist for global range
** variables
*/
#endif
		if (global->ops_qheader->pst_rangetab[gvar]->
		    pst_rgtype == PST_SETINPUT)
		    grvp->opv_gmask |= OPV_SETINPUT; /* is this the set input
						    ** parameter for a procedure
						    */
		rdfcb->rdf_rb.rdr_types_mask = RDR_RELATION | RDR_INDEXES |
		    RDR_ATTRIBUTES | RDR_BLD_PHYS; /* get relation info 
						** - The optimizer uses attribute
						** info in the query tree directly
						** but it needs to be requested
						** since RDF uses attr info to
						** build RDR_BLK_KEY info.  The
						** attribute info would not need
						** to be requested if RDF changed.
						** - ask for indexes so that
						** invalidating the cache can also
						** invalidate any indexes in the
						** cache
						*/
/* When we're ready to enable column comparison stats, uncomment the
   following statement. **
		rdfcb->rdf_rb.rdr_2types_mask = RDR2_COLCOMPARE; /* column 
						** comparison stats, too */
		
	    }
	    rdfcb->rdf_info_blk = NULL;     /* get new ptr to info
                                            ** associated with global var */
            status = rdf_call( RDF_GETDESC, (PTR)rdfcb);
	    grvp->opv_relation = rdfcb->rdf_info_blk; /* save ptr to
                                            ** new info block */
	    if ((status != E_RD0000_OK)
		&&
		(rdfcb->rdf_error.err_code != E_RD026A_OBJ_INDEXCOUNT) /* this
					    ** is a check for distributed
					    ** index information, which could
					    ** be inconsistent but should not
					    ** cause the query to be aborted
					    ** it will cause indexes to be
					    ** avoided */
		)
	    {
		gbase->opv_grv[gvar] = NULL;
		if (abort)
		    opx_verror( status, E_OP0004_RDF_GETDESC,
			rdfcb->rdf_error.err_code);
		else
		{
		    return (TRUE);	/* indicate failure to get RDF
				    ** descriptor */
		}
	    }
            BTset( (i4)gvar, (char *)&global->ops_rangetab.opv_mrdf); /* indicate
                                            ** that RDF information is fixed */

	    /* Check table and all associated indexes to see if there is a
	    ** statement level index associated with this variable
	    */
	    if (grvp->opv_relation->rdr_rel->tbl_2_status_mask
		& DMT_STATEMENT_LEVEL_UNIQUE)
		grvp->opv_gmask |= OPV_STATEMENT_LEVEL_UNIQUE;
	    for( ituple = grvp->opv_relation->rdr_no_index;
		ituple-- ;)
	    {
		if (grvp->opv_relation->rdr_indx[ituple]->idx_status
		    & DMT_I_STATEMENT_LEVEL_UNIQUE)
		    grvp->opv_gmask |= OPV_STATEMENT_LEVEL_UNIQUE;
	    }
	    if (psf_table)
	    {	/* check if timestamp matches and invalidate RDF cache
                ** if dates of last modify do not match; this is only done
		** for tables passed by PSF, the other tables should
		** be dependent on the PSF time stamp */
		DB_TAB_TIMESTAMP	*timeptr; /* ptr to last modify
                                            ** date that RDF has cached */
		DB_TAB_TIMESTAMP	*psftimeptr; /* ptr to last modify
					    ** date which parser used for the
                                            ** table */
		psftimeptr = &global->ops_qheader->pst_rangetab[gvar]->
		    pst_timestamp;
		timeptr = &grvp->opv_relation->rdr_rel->tbl_date_modified;
                if (timeptr->db_tab_high_time != psftimeptr->db_tab_high_time
                    ||
                    timeptr->db_tab_low_time != psftimeptr->db_tab_low_time
                    ||
                    (
                        !grvp->opv_relation->rdr_rel->tbl_record_count
                        &&
                        (global->ops_qheader->pst_rngvar_count
                            > 1)
		     )			    /* special zero tuple count
                                            ** case check to see if tuple count
                                            ** is way off, i.e. a table create
                                            ** followed by a number of appends
                                            ** will cause a 0 tuple count, check
					    ** for more than one variable since
					    ** refresh is only useful when joins occur
					    */
                    )
		{
		    PTR save_fcb = rdfcb->rdf_rb.rdr_fcb;

		    /* Don't propagate this invalidate to other DBMS servers.
		    ** There's no reason to think that they are as out of
		    ** date as we are.  (plus this might be a session temp
		    ** which is strictly local!)
		    */

		    rdfcb->rdf_rb.rdr_fcb = NULL;
		    status = rdf_call( RDF_INVALIDATE, (PTR)rdfcb);
		    rdfcb->rdf_rb.rdr_fcb = save_fcb;
# ifdef E_OP008E_RDF_INVALIDATE
		    if (status != E_RD0000_OK)
			opx_verror( E_DB_ERROR, E_OP008E_RDF_INVALIDATE,
			    rdfcb->rdf_error.err_code);
# endif
		    status = rdf_call( RDF_GETDESC, (PTR)rdfcb);
		    grvp->opv_relation = rdfcb->rdf_info_blk; /* save ptr to
					    ** new info block */
		    if (status != E_RD0000_OK)
		    {
			gbase->opv_grv[gvar] = NULL;
			opx_verror( E_DB_ERROR, E_OP0004_RDF_GETDESC,
			    rdfcb->rdf_error.err_code);
		    }
		    timeptr = &grvp->opv_relation->rdr_rel->tbl_date_modified;
		    if (timeptr->db_tab_high_time != psftimeptr->db_tab_high_time
			||
			timeptr->db_tab_low_time != psftimeptr->db_tab_low_time
			)
			opx_vrecover( E_DB_ERROR, E_OP008F_RDF_MISMATCH,
			    rdfcb->rdf_error.err_code); /* PSF timestamp is
					    ** still out of date, so tell
                                            ** SCF to reparse the query */
		}
		{   /* search thru existing tables to discover if any
		    ** tables have the same table ID */
		    OPV_IGVARS	    gvno;
		    DB_TAB_ID	    *tabidp;
		    OPV_IGVARS	    target_vno;

		    tabidp = &global->ops_qheader->pst_rangetab[gvar]->pst_rngvar;
		    target_vno = OPV_NOGVAR;

		    for (gvno = 0; gvno < OPV_MAXVAR; gvno++)
		    {
			OPV_GRV	    *grv1p;
			grv1p = gbase->opv_grv[gvno];
			if (grv1p && grv1p->opv_relation)
			{
			    DB_TAB_ID   *gtabidp;

			    if (gvno == gvar)
				continue;
			    gtabidp = &grv1p->opv_relation->rdr_rel->tbl_id;
			    if ((tabidp->db_tab_base == gtabidp->db_tab_base)
				&&
				(tabidp->db_tab_index == gtabidp->db_tab_index)
				)
			    {	/* found 2 table ID's which are identical */
				global->ops_gmask |= OPS_IDSAME;
				if (target_vno == OPV_NOGVAR)
				{   /* map all table id's vars to the lowest
				    ** global range number of the group */
				    if (gvno > gvar)
					target_vno = gvar;
				    else
					target_vno = gvno;
				}
				grv1p->opv_same = target_vno;
				grvp->opv_same = target_vno;
				if (target_vno != gvar)
				    break;
			    }
			}
		    }
		}
	    }
	    if (global->ops_cb->ops_smask & OPS_MDISTRIBUTED)
		opd_addsite(global, grvp); /* add site information if a
					** distributed thread */
	    /* Check for partitioned table, turns on additional
	    ** analysis after enumeration.
	    */
	    if (grvp->opv_relation != NULL
	      && grvp->opv_relation->rdr_parts != NULL)
		global->ops_gmask |= OPS_PARTITION;
            if (grvp->opv_relation && grvp->opv_relation->rdr_rel->
                   tbl_2_status_mask & DMT_INCONSIST)
                                        /* check for inconsistent table
                                        ** (due to partial back/recov) */
            {
               OPT_NAME     tabname;
               i2           i;
               
               MEcopy(&grvp->opv_relation->rdr_rel->tbl_name, 
                         sizeof(grvp->opv_relation->rdr_rel->tbl_name),
                         &tabname);           /* table name is msg token */
               for (i = sizeof(grvp->opv_relation->rdr_rel->tbl_name);
                      i > 1 && tabname.buf[i-1] == ' '; i--);
               tabname.buf[i] = 0;            /* lop blanks off name */
	       opx_1perror(E_OP009A_INCONSISTENT_TAB,(PTR)&tabname);    
            }
        }
	else
	    grvp->opv_relation = NULL;          /* set to NULL if not used */
    }
    return(FALSE);
}
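/*
** Illustrative sketch (not part of the original source): the timestamp check
** in opv_parser() above follows a three-step pattern - if the cached
** descriptor's last-modify time differs from the time the parser recorded,
** the cache entry is invalidated and refetched, and only if the mismatch
** persists is the query sent back to be reparsed.  The DESC type and
** pick_descriptor() below are hypothetical simplifications of that decision;
** the real code compares DB_TAB_TIMESTAMP values and calls RDF_INVALIDATE,
** RDF_GETDESC and opx_vrecover().
*/
typedef struct DESC { long hi_time; long lo_time; } DESC;

static int
timestamps_match(const DESC *d, long psf_hi, long psf_lo)
{
    return (d->hi_time == psf_hi && d->lo_time == psf_lo);
}

/*
** Returns 0 when the cached copy is current, 1 when the refetched copy must
** be used instead, -1 when even the refetched copy disagrees with the parser
** and the caller should request a reparse.
*/
static int
pick_descriptor(const DESC *cached, const DESC *refetched,
	long psf_hi, long psf_lo)
{
    if (timestamps_match(cached, psf_hi, psf_lo))
	return 0;
    if (timestamps_match(refetched, psf_hi, psf_lo))
	return 1;
    return -1;
}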
Example #16
0
/*{
** Name: opv_tproc	- Load RDF defs for table procedure range table entry
**
** Description:
**      This function allocates and formats simulated RDF structures for
**	a table procedure, including result column descriptors that are
**	used to resolve "column" references to the procedure.
**
** Inputs:
**	global				ptr to global state variable
**	gvar				index into the global range table for
**					the table procedure entry
**	grvp				ptr to the global range variable
**					element being initialized
**	rngentry			ptr to the parse tree range table
**					entry for the table procedure
**
** Outputs:
**	grvp->opv_relation		set to the simulated RDF descriptor
**					built for the procedure
**	Returns:
**	    E_DB_OK			Success
**	    E_DB_ERROR			Non-catastrophic failure
**	    E_DB_FATAL			Catastrophic failure
**	Exceptions:
**	    none
**
** Side Effects:
**	    Can allocate memory
**
** History:
**	17-april-2008 (dougi)
**	    Written for table procedures(semi-cloned from pst_stproc).
**	15-dec-2008 (dougi) BUG 121381
**	    Fix computation of result column offsets.
*/
DB_STATUS
opv_tproc(
	OPS_STATE	*global,
	OPV_IGVARS	gvar,
	OPV_GRV		*grvp,
	PST_RNGENTRY	*rngentry)

{
    DMT_ATT_ENTRY	**attarray, *attrp;
    DMT_ATT_ENTRY	**parmarray, **rescarray;
    DMT_TBL_ENTRY	*tblptr;
    RDR_INFO		*rdrinfop;
    QEF_DATA		*qp;
    RDF_CB		*rdf_cb;
    RDR_RB		*rdf_rb;
    DB_PROCEDURE	*dbp;
    DB_DBP_NAME		proc_name;
    DB_OWN_NAME		proc_owner;
    i4			parameterCount, rescolCount, resrowWidth;
    u_i4		created;

    i4			i, j, totlen, offset;
    DB_STATUS           status;
    i4		err_code;


    /* First retrieve iiprocedure row using id from range table entry. */
    rdf_cb = &global->ops_rangetab.opv_rdfcb;
    rdf_rb = &rdf_cb->rdf_rb;

    STRUCT_ASSIGN_MACRO(rngentry->pst_rngvar, rdf_rb->rdr_tabid);
    rdf_rb->rdr_types_mask  = RDR_PROCEDURE;
    rdf_rb->rdr_2types_mask = 0;
    rdf_rb->rdr_instr       = RDF_NO_INSTR;

    /*
    ** need to set rdf_info_blk to NULL since otherwise RDF assumes that we
    ** already have the info_block
    */
    rdf_cb->rdf_info_blk = (RDR_INFO *) NULL;

    status = rdf_call(RDF_GETINFO, rdf_cb);
    if (DB_FAILURE_MACRO(status))
    {
	(VOID) opx_verror(status, E_OP0004_RDF_GETDESC,
                                rdf_cb->rdf_error.err_code);
    }
    dbp = rdf_cb->rdf_info_blk->rdr_dbp;

    /* Before proceeding - assure this is the same proc that we think it is. */
    if (rngentry->pst_timestamp.db_tab_high_time != dbp->db_created)
    {
	/* If not, bounce back to SCF to re-parse query. */
	opx_vrecover(E_DB_ERROR, E_OP008F_RDF_MISMATCH, 
					rdf_cb->rdf_error.err_code);
    }

    /* Save procedure stuff for later. */
    parameterCount = dbp->db_parameterCount;
    rescolCount = dbp->db_rescolCount;
    resrowWidth = dbp->db_resrowWidth;
    created = dbp->db_created;
    STRUCT_ASSIGN_MACRO(dbp->db_dbpname, proc_name);
    STRUCT_ASSIGN_MACRO(dbp->db_owner, proc_owner);

    /* Allocate attr descriptors and the ptr arrays that address them. */
    i = dbp->db_parameterCount + dbp->db_rescolCount;

    attarray = (DMT_ATT_ENTRY **) opu_memory(global, (sizeof(PTR) +
		sizeof(DMT_ATT_ENTRY)) * i + sizeof(PTR));
				/* 1 extra ptr because array is 1-origin */

    /* Set up attr pointer arrays for both parms and result columns. */
    for (j = 1, attrp = (DMT_ATT_ENTRY *)&attarray[i+1],
	attarray[0] = (DMT_ATT_ENTRY *)NULL; j <= i; j++, attrp = &attrp[1])
    {
	attarray[j] = attrp;
	MEfill(sizeof(DMT_ATT_ENTRY), (u_char)0, (char *)attrp);
    }

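    /* Layout of the single allocation above: (i + 1) pointers followed by
    ** i DMT_ATT_ENTRY structures.  Slot 0 is left NULL so both views of the
    ** pointer array can be indexed 1-origin: result columns occupy slots
    ** 1..rescolCount and the parameters follow from slot rescolCount+1,
    ** which is where parmarray points.
    */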
    rescarray = attarray;
    parmarray = &attarray[rescolCount+1];

    /* Load iiprocedure_parameter rows for both parms and result cols. */
    rdf_rb->rdr_types_mask     = 0;
    rdf_rb->rdr_2types_mask    = RDR2_PROCEDURE_PARAMETERS;
    rdf_rb->rdr_instr          = RDF_NO_INSTR;

    rdf_rb->rdr_update_op      = RDR_OPEN;
    rdf_rb->rdr_qrymod_id      = 0;	/* get all tuples */
    rdf_rb->rdr_qtuple_count   = 20;	/* get 20 at a time */
    rdf_cb->rdf_error.err_code = 0;

    /*
    ** must set rdr_rec_access_id since otherwise RDF will barf when we
    ** try to RDR_OPEN
    */
    rdf_rb->rdr_rec_access_id  = NULL;

    while (rdf_cb->rdf_error.err_code == 0)
    {
	status = rdf_call(RDF_READTUPLES, rdf_cb);
	rdf_rb->rdr_update_op = RDR_GETNEXT;

	/* Must not use DB_FAILURE_MACRO because E_RD0011 returns
	** E_DB_WARN that would be missed.
	*/
	if (status != E_DB_OK)
	{
	    switch(rdf_cb->rdf_error.err_code)
	    {
		case E_RD0011_NO_MORE_ROWS:
		    status = E_DB_OK;
		    break;

		case E_RD0013_NO_TUPLE_FOUND:
		    status = E_DB_OK;
		    continue;

		default:
		    opx_error(E_OP0013_READ_TUPLES);
		    break;
	    }	    /* switch */
	}	/* if status != E_DB_OK */

	/* For each dbproc parameter tuple */
	for (qp = rdf_cb->rdf_info_blk->rdr_pptuples->qrym_data, j = 0;
	    j < rdf_cb->rdf_info_blk->rdr_pptuples->qrym_cnt;
	    qp = qp->dt_next, j++)
	{
	    DB_PROCEDURE_PARAMETER *param_tup =
		(DB_PROCEDURE_PARAMETER *) qp->dt_data;
	    if (i-- == 0)
	    {
		opx_error(E_OP0013_READ_TUPLES);
	    }
	    if (param_tup->dbpp_flags & DBPP_RESULT_COL)
	    {
		attrp = rescarray[param_tup->dbpp_number];
		attrp->att_number = param_tup->dbpp_number;
	    }
	    else 
	    {
		attrp = parmarray[param_tup->dbpp_number-1];
		attrp->att_flags = DMT_F_TPROCPARM;
		attrp->att_number = param_tup->dbpp_number +
					dbp->db_rescolCount;
	    }

	    STRUCT_ASSIGN_MACRO(param_tup->dbpp_name, attrp->att_name);
	    attrp->att_type = param_tup->dbpp_datatype;
	    attrp->att_width = param_tup->dbpp_length;
	    attrp->att_prec = param_tup->dbpp_precision;
	    attrp->att_offset = param_tup->dbpp_offset;
	}
    }

    /* Reset result column offsets to remove effect of parms. */
    offset = rescarray[1]->att_offset;
    for (j = 1; j <= rescolCount; j++)
	rescarray[j]->att_offset -= offset;

    if (rdf_rb->rdr_rec_access_id != NULL)
    {
	rdf_rb->rdr_update_op = RDR_CLOSE;
	status = rdf_call(RDF_READTUPLES, rdf_cb);
	if (DB_FAILURE_MACRO(status))
	{
	    opx_error(E_OP0013_READ_TUPLES);
	}
    }

    /* now unfix the dbproc description */
    rdf_rb->rdr_types_mask  = RDR_PROCEDURE | RDR_BY_NAME;
    rdf_rb->rdr_2types_mask = 0;
    rdf_rb->rdr_instr       = RDF_NO_INSTR;

    status = rdf_call(RDF_UNFIX, rdf_cb);
    if (DB_FAILURE_MACRO(status))
    {
	opx_error(E_OP008D_RDF_UNFIX);
    }

    /* Allocate table descriptor. */
    tblptr = (DMT_TBL_ENTRY *) opu_memory(global, sizeof(DMT_TBL_ENTRY));

    /* Allocate RDR_INFO block. */
    rdrinfop = (RDR_INFO *) opu_memory(global, sizeof(RDR_INFO)+sizeof(PTR));

    /* Now format them all. */

    MEfill(sizeof(DMT_TBL_ENTRY), (u_char) 0, tblptr);
    MEcopy((char *)&proc_name.db_dbp_name, sizeof(DB_DBP_NAME), 
				(char *)&tblptr->tbl_name.db_tab_name);
    STRUCT_ASSIGN_MACRO(proc_owner, tblptr->tbl_owner);
    MEfill(sizeof(DB_LOC_NAME), (u_char)' ', (char *)&tblptr->tbl_location);
    tblptr->tbl_attr_count = rescolCount + parameterCount;
    tblptr->tbl_width = resrowWidth;
    tblptr->tbl_date_modified.db_tab_high_time = created;
    tblptr->tbl_storage_type = DB_TPROC_STORE;
    /* Load cost parameters (if there). */
    tblptr->tbl_record_count = (dbp->db_estRows) ? dbp->db_estRows : DBP_ROWEST;
    tblptr->tbl_page_count = (dbp->db_estCost) ? dbp->db_estCost : DBP_DIOEST;
    tblptr->tbl_pgsize = 2048;

    /* All the other DMT_TBL_ENTRY fields are being left 0 until 
    ** something happens that suggests other values. */

    /* Finally fill in the RDR_INFO structure. */
    MEfill(sizeof(RDR_INFO), (u_char) 0, rdrinfop);
    rdrinfop->rdr_rel = tblptr;
    rdrinfop->rdr_attr = rescarray;
    rdrinfop->rdr_no_attr = tblptr->tbl_attr_count;
    rdrinfop->rdr_dbp = dbp;
    grvp->opv_relation = rdrinfop;

    BTset((i4)gvar, (char *)&global->ops_rangetab.opv_mrdf); /* indicate
						** that RDF info is fixed */
    grvp->opv_gmask |= OPV_TPROC;
    global->ops_gmask |= OPS_TPROCS;		/* show query has table procs */

    return (E_DB_OK);
}
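
The tuple-fetch loop above follows RDF's open/getnext/close protocol: rdr_rec_access_id is cleared so the first RDF_READTUPLES call opens the record stream, every later call runs with rdr_update_op set to RDR_GETNEXT, E_RD0011_NO_MORE_ROWS (returned as E_DB_WARN) marks the normal end of data, and a final call with RDR_CLOSE releases the stream before the procedure description is unfixed. The skeleton below is a condensed, hedged sketch of that protocol only; the function name is invented, the per-tuple work and the secondary error cases are elided, and the control block is assumed to be set up as in the code above.

/* Sketch only - mirrors the RDF calls used above, not a drop-in routine. */
static DB_STATUS
read_pptuples_skeleton(
	RDF_CB	    *rdf_cb)
{
    DB_STATUS	status = E_DB_OK;

    rdf_cb->rdf_rb.rdr_rec_access_id = NULL;	/* first call opens the stream */

    while (rdf_cb->rdf_error.err_code == 0)
    {
	status = rdf_call(RDF_READTUPLES, rdf_cb);
	rdf_cb->rdf_rb.rdr_update_op = RDR_GETNEXT; /* later calls just fetch */
	if (status != E_DB_OK)
	{
	    if (rdf_cb->rdf_error.err_code == E_RD0011_NO_MORE_ROWS)
		status = E_DB_OK;		/* normal end of data */
	    else
		break;				/* real error - caller reports it */
	}
	/* ... walk rdf_cb->rdf_info_blk->rdr_pptuples->qrym_data here ... */
    }

    if (rdf_cb->rdf_rb.rdr_rec_access_id != NULL)
    {						/* a stream was actually opened */
	DB_STATUS	closestatus;

	rdf_cb->rdf_rb.rdr_update_op = RDR_CLOSE;
	closestatus = rdf_call(RDF_READTUPLES, rdf_cb);
	if (status == E_DB_OK)
	    status = closestatus;
    }
    return (status);
}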
Example #17
0
/*{
** Name: ops_deallocate	- deallocate resources for an optimization
**
** Description:
**      This routine will deallocate the resources used for an optimization.
**      Resources include any memory requested from the optimizer memory pool
**      and any RDF cache objects which were locked in the global range
**      table
**
** Inputs:
**      global                          ptr to global state variable
**      report                          TRUE if errors should be reported
**                                      via the user's control block.
**      partial_dbp                     partial deallocation required for
**                                      statement within a procedure
**
** Outputs:
**	Returns:
**	    VOID
**	Exceptions:
**	    none
**
** Side Effects:
**	    memory resources released, RDF unfixed, 
**          QSF query tree memory released
**
** History:
**	29-jun-86 (seputis)
**          initial creation
**	8-nov-88 (seputis)
**          if no query run trace point is set then destroy the QP since
**	    SCF assumes optimizer cleans up after error
**	8-nov-88 (seputis)
**          turn off CPU accounting if it was off originally
**	28-jan-91 (seputis)
**	    added support for OPF ACTIVE flag
**      20-jul-93 (ed)
**	    changed name ops_lock for solaris, due to OS conflict
**	29-jul-93 (andre)
**	    rdr_types_mask must be initialized (to RDR_RELATION) before
**	    calling RDF_UNFIX.  Otherwise RDF may end up complaining because we
**	    ask it to destroy a relation cache entry while RDR_PROCEDURE bit is
**	    set.
**	12-aug-93 (swm)
**	    Cast first parameter of CSaltr_session() to CS_SID to match
**	    revised CL interface specification.
**	02-Jun-1997 (shero03)
**	    Update the saved rdf_info_block after calling RDF.
**      02-Aug-2001 (hanal04) Bug 105360 INGSRV 1505
**          Plug the RDF memory leak introduced by inkdo01's new
**          function oph_temphist().
**	17-Dec-2003 (jenjo02)
**	    Added (CS_SID)NULL to CScnd_signal prototype.
**	6-Feb-2006 (kschendel)
**	    Fix some squirrely looking code that purported to avoid dangling
**	    references, but didn't really.  (No symptoms known.)
**	14-nov-2007 (dougi)
**	    Add support for cached dynamic query plans.
**	20-may-2008 (dougi)
**	    Add support for table procedures.
**	29-may-2009 (wanfr01)    Bug 122125
**	    Need to add dbid to cache_dynamic queries for db uniqueness
*/
VOID
ops_deallocate(
	OPS_STATE          *global,
	bool               report,
	bool               partial_dbp)
{
    DB_STATUS           finalstatus;	/* this status is returned to the user
                                        ** - it will contain the first error
                                        ** during resource deallocation
                                        */
    DB_ERROR            error;          /* error code from offending facility
                                        */

    finalstatus = E_DB_OK;
    error.err_code = 0;

    {    /* close any fixed RDF objects - deallocate prior to closing the
         ** global memory stream 
         */

	OPV_IGVARS             gvar;	    /* index into global range variable
                                            ** table */
	OPV_GRT                *gbase;	    /* ptr to base of array of ptrs
                                            ** to global range table elements
                                            */
	OPV_IGVARS	       maxgvar;	    /* number of global range table
                                            ** elements allocated
                                            */
	RDF_CB                 *rdfcb;	    /* ptr to rdf control block used
                                            ** to unfix the relation info */
	OPV_GBMVARS            *rdfmap;     /* ptr to map of global range
                                            ** variables which have RDF info
                                            ** fixed */

	gbase = global->ops_rangetab.opv_base;
        maxgvar = global->ops_rangetab.opv_gv;
	rdfcb = &global->ops_rangetab.opv_rdfcb;
        rdfmap = &global->ops_rangetab.opv_mrdf;

	/*
	** rdr_types_mask needs to be initialized - since we will be unfixing
	** relation entries, RDR_RELATION seems like a good choice, although 0
	** would suffice as well
	*/
	rdfcb->rdf_rb.rdr_types_mask = RDR_RELATION;
	
	if (global->ops_cstate.opc_relation)
	{   /* OPC allocates a RDF descriptor for cursors so deallocate
            ** if this is the case */
	    DB_STATUS	   opcrdfstatus; /* RDF return status */

	    rdfcb->rdf_info_blk = global->ops_cstate.opc_relation;
	    opcrdfstatus = rdf_call( RDF_UNFIX, (PTR)rdfcb );
	    if ( (DB_FAILURE_MACRO(opcrdfstatus)) && (DB_SUCCESS_MACRO(finalstatus)) )
	    {
		finalstatus = opcrdfstatus;
		error.err_code = rdfcb->rdf_error.err_code;
	    }
	    global->ops_cstate.opc_relation = NULL;
	}
	if (maxgvar)
	{
	    for ( gvar = -1; 
		 (gvar = BTnext((i4)gvar, (char *)rdfmap, (i4)maxgvar)) >=0;)
	    {
		OPV_GRV             *gvarp;	    /* ptr to global range variable to
						** be deallocated */

		if	((gvarp = gbase->opv_grv[gvar]) /* NULL if not allocated */
		     &&
		     (gvarp->opv_relation)	    /* not NULL if RDF has been
						** called for this range variable */
		     &&
		     !(gvarp->opv_gmask & OPV_TPROC)	/* not table procedure */
		    )
		{
		    /* if this element has been allocated and if it has an RDF
		    ** cache element associated with it */
		    DB_STATUS	   rdfstatus; /* RDF return status */

		    gbase->opv_grv[gvar] = NULL; /* so we do not try to deallocate
						** twice in case of an error */
		    rdfcb->rdf_info_blk = gvarp->opv_relation;
		    rdfstatus = rdf_call( RDF_UNFIX, (PTR)rdfcb );
		    if ( (DB_FAILURE_MACRO(rdfstatus)) && (DB_SUCCESS_MACRO(finalstatus)) )
		    {
			finalstatus = rdfstatus;
			error.err_code = rdfcb->rdf_error.err_code;
		    }
		    gvarp->opv_relation = NULL;
		}

		if ((gvarp) && (gvarp->opv_ttmodel))
		{
		    /* if this element has been allocated and if it has an RDF
		    ** cache element associated with a persistent table
                    ** which provides histogram models.
		    */
		    DB_STATUS      rdfstatus; /* RDF return status */

		    rdfcb->rdf_info_blk = gvarp->opv_ttmodel;
		    gvarp->opv_ttmodel = NULL;
		    rdfstatus = rdf_call( RDF_UNFIX, (PTR)rdfcb );
		    if ( (DB_FAILURE_MACRO(rdfstatus)) && (DB_SUCCESS_MACRO(finalstatus)) )
		    {
		        finalstatus = rdfstatus;
	                error.err_code = rdfcb->rdf_error.err_code;
		    }
		    
		}

	    }
	    global->ops_rangetab.opv_gv = 0;
	}
    }
    if (partial_dbp)
	return;					/* only deallocate the global range table
						** for DBP, and keep the memory streams
						** until the end */
    if (global->ops_estate.opn_statistics
	&&
	global->ops_estate.opn_reset_statistics)
    {	/* statistics CPU accounting was turned on, and needs to be reset */
	STATUS		cs_status;
	i4		turn_off;

	turn_off = FALSE;			/* turn off accounting */
	global->ops_estate.opn_statistics = FALSE;
	cs_status = CSaltr_session((CS_SID)0, CS_AS_CPUSTATS, (PTR)&turn_off);
	if (cs_status != OK)
	{
	    finalstatus = E_DB_ERROR;
	    error.err_code = cs_status;
	}
    }

    /* deallocate ULM memory stream */
    if (global->ops_mstate.ops_streamid == NULL)	/* non-zero if allocated */
    {
	/* if the ULM stream does not exist then this deallocation has
        ** already occurred, so just return */
        return;
    }
    else
    {
	DB_STATUS            ulm1status;/* ULM return status */
	global->ops_mstate.ops_ulmrcb.ulm_streamid_p = 
	    &global->ops_mstate.ops_streamid; /* ulm will NULL ops_streamid */
	ulm1status = ulm_closestream( &global->ops_mstate.ops_ulmrcb );
	if ( (DB_FAILURE_MACRO(ulm1status)) && (DB_SUCCESS_MACRO(finalstatus)) )
	{
	    finalstatus = ulm1status;
	    error.err_code = global->ops_mstate.ops_ulmrcb.ulm_error.err_code;
	}
    }

    /* deallocate ULM temp buffer memory stream */
    if ( global->ops_mstate.ops_tstreamid )	/* non-zero if allocated */
    {
	DB_STATUS            ulm2status; /* ULM return status */
	global->ops_mstate.ops_ulmrcb.ulm_streamid_p
	    = &global->ops_mstate.ops_tstreamid; /* ulm will NULL ops_tstreamid */
	ulm2status = ulm_closestream( &global->ops_mstate.ops_ulmrcb );
	if ( (DB_FAILURE_MACRO(ulm2status)) && (DB_SUCCESS_MACRO(finalstatus)) )
	{
	    finalstatus = ulm2status;
	    error.err_code = global->ops_mstate.ops_ulmrcb.ulm_error.err_code;
	}
    }

    /* deallocate OPC ULM buffer memory stream */
    if ( global->ops_mstate.ops_sstreamid )	/* non-zero if allocated */
    {
	DB_STATUS            ulm3status; /* ULM return status */
	global->ops_mstate.ops_ulmrcb.ulm_streamid_p = &global->ops_mstate.ops_sstreamid;
	/* ulm will NULL ops_sstreamid */
	ulm3status = ulm_closestream( &global->ops_mstate.ops_ulmrcb );
	if ( (DB_FAILURE_MACRO(ulm3status)) && (DB_SUCCESS_MACRO(finalstatus)) )
	{
	    finalstatus = ulm3status;
	    error.err_code = global->ops_mstate.ops_ulmrcb.ulm_error.err_code;
	}
    }

    if (!report
#ifdef    OPT_F032_NOEXECUTE
	||
	/* if trace flag is set then clean up QSF memory since the optimizer will
	** generate an error to SCF and SCF assumes the optimizer will clean up
        */
	    (global->ops_cb->ops_check 
	    && 
	    (opt_strace( global->ops_cb, OPT_F032_NOEXECUTE)
	    || 
	    opt_strace( global->ops_cb, OPT_F023_NOCOMP)  )
	    )
#endif
       )
    {	/* an error or an asynchronous abort has occurred so destroy the plan
        ** or shared plan; FIXME destroy the shared plan in the earlier
        ** exception handler */
	DB_STATUS	       qsfqpstatus;   /* QSF return status */
	if(global->ops_qpinit)
	{   /* deallocate QSF object for query plan if another error has occurred 
            ** - in this case OPC has already created a new QP handle and has
            ** gotten a lock on it */

	    STRUCT_ASSIGN_MACRO(global->ops_caller_cb->opf_qep, global->ops_qsfcb.qsf_obj_id); /* get
						** query plan id */
	    global->ops_qsfcb.qsf_lk_id = global->ops_qplk_id; /* get lock id for 
						** QSF */
	    qsfqpstatus = ops_qsfdestroy(global); /* destroy the query plan */
	    if ( (DB_FAILURE_MACRO(qsfqpstatus)) && (DB_SUCCESS_MACRO(finalstatus)) )
	    {
		finalstatus = qsfqpstatus;
		error.err_code = global->ops_qsfcb.qsf_error.err_code;
	    }
	}
	else 
	{   /* OPC has not been reached so need to check for shared query plan
            */
	    if (!global->ops_procedure)
	    {	/* get query tree if it has not already been retrieved */
		qsfqpstatus = ops_gqtree(global);
		if ( (DB_FAILURE_MACRO(qsfqpstatus)) && (DB_SUCCESS_MACRO(finalstatus)) )
		{
		    finalstatus = qsfqpstatus;
		    error.err_code = global->ops_qsfcb.qsf_error.err_code;
		}
	    }
	    if (global->ops_qheader && 
			(global->ops_qheader->pst_mask1 & PST_RPTQRY))
	    {	/* shared query plan possible */
		if (global->ops_procedure->pst_flags & PST_REPEAT_DYNAMIC)
		{
		    char	*p;

		    global->ops_qsfcb.qsf_obj_id.qso_lname = 
						sizeof(DB_CURSOR_ID) + sizeof(i4);
		    MEfill(sizeof(global->ops_qsfcb.qsf_obj_id.qso_name), 0,
			global->ops_qsfcb.qsf_obj_id.qso_name);
		    MEcopy((PTR)&global->ops_procedure->
						pst_dbpid.db_cursor_id[0],
			sizeof (global->ops_procedure->
						pst_dbpid.db_cursor_id[0]),
			(PTR)global->ops_qsfcb.qsf_obj_id.qso_name);
		    p = (char *) global->ops_qsfcb.qsf_obj_id.qso_name + 
								2*sizeof(i4);
		    MEcopy((PTR)"qp", sizeof("qp"), p);
		    p = (char *) global->ops_qsfcb.qsf_obj_id.qso_name + 
						sizeof(DB_CURSOR_ID);
		    I4ASSIGN_MACRO(global->ops_caller_cb->opf_udbid, *(i4 *) p);
		    
		}
		else	/* must be proc or regular repeat query */
		{
		    global->ops_qsfcb.qsf_obj_id.qso_lname = 
				sizeof (global->ops_procedure->pst_dbpid);
		    MEcopy((PTR)&global->ops_procedure->pst_dbpid, 
			    sizeof (global->ops_procedure->pst_dbpid),
			    (PTR)&global->ops_qsfcb.qsf_obj_id.qso_name[0]);
		}
		global->ops_qsfcb.qsf_obj_id.qso_type = QSO_QP_OBJ;
		global->ops_qsfcb.qsf_lk_state = QSO_SHLOCK;
		qsfqpstatus = qsf_call(QSO_GETHANDLE, &global->ops_qsfcb);
		if (DB_SUCCESS_MACRO(qsfqpstatus))
		{
		    qsfqpstatus = ops_qsfdestroy( global );
		    if ( (DB_FAILURE_MACRO(qsfqpstatus)) && (DB_SUCCESS_MACRO(finalstatus)) )
		    {
			finalstatus = qsfqpstatus;
			error.err_code = global->ops_qsfcb.qsf_error.err_code;
		    }
		}
		else if (global->ops_qsfcb.qsf_error.err_code != E_QS0019_UNKNOWN_OBJ)
		{   /* if object is not found then this is not a shared query */
		    finalstatus = qsfqpstatus;
		    error.err_code = global->ops_qsfcb.qsf_error.err_code;
		}
	    }
	}	
    }

    /* release QSF memory allocated to query tree, make sure that this
    ** is done after the QP has been processed since pst_rptqry is still
    ** needed for above block */
    {
	DB_STATUS	       qsfstatus;   /* QSF return status */

	STRUCT_ASSIGN_MACRO(global->ops_caller_cb->opf_query_tree, global->ops_qsfcb.qsf_obj_id); /* get
					    ** query tree id */
        global->ops_qsfcb.qsf_lk_id = global->ops_lk_id; /* get lock id for 
                                            ** QSF */
	qsfstatus = ops_qsfdestroy( global );
	if ( (DB_FAILURE_MACRO(qsfstatus)) && (DB_SUCCESS_MACRO(finalstatus)) )
	{
	    finalstatus = qsfstatus;
	    error.err_code = global->ops_qsfcb.qsf_error.err_code;
	}
    }

    /* signal that the session is exiting OPF and that another thread may enter */
    if (global->ops_cb->ops_smask & OPS_MCONDITION)
    {
	DB_STATUS   lockstatus;
	OPG_CB	    *servercb;

	servercb = global->ops_cb->ops_server;
	global->ops_cb->ops_smask &= (~OPS_MCONDITION);
	lockstatus = ops_exlock(global->ops_caller_cb, &servercb->opg_semaphore); 
					    /* check if server
					    ** thread is available, obtain
					    ** semaphore lock on critical variable */
	servercb->opg_activeuser--;	    /* since exit is about to occur, and memory
					    ** has already been deallocated, allow another
					    ** user to enter OPF */
	servercb->opg_waitinguser--;	    /* since exit is about to occur, and memory
					    ** has already been deallocated, allow another
					    ** user to enter OPF */
	if (DB_FAILURE_MACRO(lockstatus) && (DB_SUCCESS_MACRO(finalstatus)))
	{
	    finalstatus = lockstatus;
	    error.err_code = global->ops_caller_cb->opf_errorblock.err_data;
	}
	else
	{
	    if (servercb->opg_waitinguser > servercb->opg_activeuser)
	    {
		STATUS	    csstatus;
		csstatus = CScnd_signal(&servercb->opg_condition, (CS_SID)NULL);
						/* signal only if some users are waiting */
		if ((csstatus != OK) && (DB_SUCCESS_MACRO(finalstatus)))
		{
		    finalstatus = E_DB_ERROR;
		    error.err_code = csstatus;
		}
	    }
	    lockstatus = ops_unlock(global->ops_caller_cb, 
		&servercb->opg_semaphore);	/* check if server
						** thread is available */
	    if (DB_FAILURE_MACRO(lockstatus) && (DB_SUCCESS_MACRO(finalstatus)))
	    {
		finalstatus = lockstatus;
		error.err_code = global->ops_caller_cb->opf_errorblock.err_data;
	    }
	}
    }

    if (DB_FAILURE_MACRO(finalstatus))
    {
	if (report)
	    opx_verror( finalstatus, E_OP0084_DEALLOCATION, error.err_code); /* report
					    ** error and generate an exception */
	else
	    opx_rverror(global->ops_cb->ops_callercb, finalstatus,
		E_OP0084_DEALLOCATION, error.err_code);
					    /* report error only but do not generate an
					    ** exception */
    }
}
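
ops_deallocate applies the same idiom to each cleanup step: attempt the call regardless of what has already failed, and remember only the first failing status and error code for the final report, so an early failure never prevents later resources from being released. Below is a hedged sketch of that idiom as it applies to the three ULM stream closes; the helper name is invented for illustration, and the real routine simply inlines the pattern for the RDF, ULM and QSF calls alike.

/* Illustrative only: close_one_stream() is hypothetical; ops_deallocate
** inlines this pattern for ops_streamid, ops_tstreamid and ops_sstreamid.
*/
static VOID
close_one_stream(
	OPS_STATE	*global,
	PTR		*streamid_p,	/* e.g. &global->ops_mstate.ops_tstreamid */
	DB_STATUS	*finalstatus,
	DB_ERROR	*error)
{
    DB_STATUS		ulmstatus;	/* ULM return status */

    if (*streamid_p == NULL)
	return;				/* stream was never allocated */
    global->ops_mstate.ops_ulmrcb.ulm_streamid_p = streamid_p;
					/* ulm will NULL the stream id */
    ulmstatus = ulm_closestream( &global->ops_mstate.ops_ulmrcb );
    if ( (DB_FAILURE_MACRO(ulmstatus)) && (DB_SUCCESS_MACRO(*finalstatus)) )
    {					/* remember only the first failure */
	*finalstatus = ulmstatus;
	error->err_code = global->ops_mstate.ops_ulmrcb.ulm_error.err_code;
    }
}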
Example #18
0
/*{
** Name: opn_timeout	- check for timeout condition
**
** Description:
**      Check the various indications that a timeout has occurred.
**
** Inputs:
**      subquery                        ptr to subquery state
**
** Outputs:
**
**	Returns:
**	    bool - TRUE if a timeout condition has been detected
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**      14-may-90 (seputis)
**          - b21582, do not allow large CPU gaps before checking
**	    for a timeout condition
**	26-nov-90 (seputis)
**	    - added support for new server startup flags
**      31-jan-91 (seputis)
**          - use more time to optimize queries in DB procedures
**      20-mar-91 (seputis)
**          - b 36600 added calls to CSswitch to allow context 
**	switching if no query plan is found or notimeout is used 
**      3-dec-92 (ed)
**          OP255 was doing a notimeout on all subqueries if the second
**	    parameter was set
**	31-jan-97 (inkdo01)
**	    Simplify CSswitch interface so that all calls to timeout 
**	    result in a CSswitch call. This fixes a CPU lockout problem 
**	    in (at least) Solaris, because the switch depended largely
**	   on faulty quantum testing logic in CSstatistics.
**	15-sep-98 (sarjo01)
**	    Redo of function, adding support for ops_timeoutabort
**	14-jul-99 (inkdo01)
**	    Add code to do quick exit for where-less idiot queries.
**	13-Feb-2007 (kschendel)
**	    Replace CSswitch with better CScancelCheck.
[@history_template@]...
*/
bool
opn_timeout(
	OPS_SUBQUERY       *subquery)
{
    OPS_CB          *scb;               /* ptr to session control block for
                                        ** currently active user */
    OPS_STATE       *global;            /* ptr to global state variable */

    if (subquery->ops_mask & OPS_IDIOT_NOWHERE && subquery->ops_bestco) return(TRUE);
					    /* if idiot query (join, but no where)
					    ** and we have valid plan, quit now */

    global = subquery->ops_global;	    /* get ptr to global state variable
                                            */
    scb = global->ops_cb;		    /* get ptr to active session control
                                            ** block */
    if (scb->ops_alter.ops_timeout)	    /* TRUE if optimizer should timeout
                                            */
    {
        i4		    joinop_timeout;
        i4		    joinop_abort;
        joinop_timeout = scb->ops_alter.ops_sestime;
        joinop_abort   = scb->ops_alter.ops_timeoutabort;

        if (joinop_abort == 0)
	{
	    if (subquery->ops_bestco)
	    {
	        TIMERSTAT	    timer_block;
	        STATUS		    cs_status;
	        i4		    nowcpu;    /* cpu time used by process */
	        OPO_COST	    miladjust; /* adjustment to cost to obtain
                                               ** millisec equivalent */
	        i4		    first;     /* plan to timeout */
	        i4		    second;    /* subquery plan to timeout */
	        bool		    planid_timeout; /* check if tracing flag is
					            ** set and override timeout
					            ** check with new one */

	        planid_timeout = FALSE;
	        if ((global->ops_qheader->pst_numparm > 0)
			||
		    global->ops_procedure->pst_isdbp)
	            miladjust = scb->ops_alter.ops_tout *
                                         scb->ops_alter.ops_repeat; 
					    /* use more time to optimize
					    ** a repeat query */
	        else
		    miladjust = scb->ops_alter.ops_tout;

	        if (scb->ops_check)
	        {
		    if (opt_svtrace( scb, OPT_F125_TIMEFACTOR, &first, &second)
		        &&
		        first>0)    /* see if there is a miladjust factor
				    ** from the user */
		        miladjust = (miladjust*first)/100.0;
				    /* this will increase/decrease
				    ** the amount of time required to timeout */
		    planid_timeout = 
		        opt_svtrace( scb, OPT_F127_TIMEOUT, &first, &second);
				    /* this will time out based on subquery
				    ** plan id and number of plans being
				    ** evaluated */
	        }


		if (cs_status = CSstatistics(&timer_block, (i4)0))
		{   /* get cpu time used by session from SCF to calculate
                    ** difference from when optimization began FIXME - this
                    ** is an expensive routine to call, perhaps some other
                    ** way of timing out could be used such as number of CO
                    ** nodes processed */
		    opx_verror(E_DB_ERROR, E_OP0490_CSSTATISTICS, 
			(OPX_FACILITY)cs_status);
		}
		nowcpu = timer_block.stat_cpu - global->ops_estate.opn_startime;

	        if (
		    (((nowcpu > (miladjust * subquery->ops_cost)) ||
					    /* if the amount of time spent so 
					    ** far on optimization is roughly 
                                            ** about how much time the best 
                                            ** solution found so far
					    ** will take to execute, then stop
                                            */
		      ( (joinop_timeout > 0) && (nowcpu > joinop_timeout)))
		    && 
		    (   !planid_timeout	    /* if the plan timeout tracing is
					    ** set then this should override
					    ** the normal timeout mechanism */
			||
			(   (second != 0)
			    &&
			    (second != subquery->ops_tsubquery) /* apply timeout
					    ** to all query plans except the
					    ** one specified */
			)
		    ))
		    ||
		    ( planid_timeout
		        &&
		      (first <= subquery->ops_tcurrent)
		        &&
		      (	(second == 0)	    /* check if all plans should be
					    ** timed out */
			||
			(second == subquery->ops_tsubquery) /* check if a
					    ** particular subquery is being
					    ** search for */
		      )
		    ))
	        {
		    opx_verror(E_DB_WARN, E_OP0006_TIMEOUT, (OPX_FACILITY)0);
		    return(TRUE);
	        }
	    }
	}
	else
	{
            TIMERSTAT           timer_block;
            STATUS              cs_status;
            i4             nowcpu;    /* cpu time used by process */
            OPO_COST            miladjust; /* adjustment to cost to obtain
                                           ** millisec equivalent */
            i4             first;     /* plan to timeout */
            i4             second;    /* subquery plan to timeout */
            bool                planid_timeout; /* check if tracing flag is
                                                ** set and override timeout
                                                ** check with new one */
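	    /* cap the normal timeout at the abort limit (default to it if unset) */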
	    if (joinop_timeout == 0 || joinop_timeout >= joinop_abort)
		joinop_timeout = joinop_abort; 
            planid_timeout = FALSE;
            if ((global->ops_qheader->pst_numparm > 0)
                    ||
                global->ops_procedure->pst_isdbp)
                miladjust = scb->ops_alter.ops_tout *
                                     scb->ops_alter.ops_repeat;
                                        /* use more time to optimize
                                        ** a repeat query */
            else
                miladjust = scb->ops_alter.ops_tout;

            if (scb->ops_check)
            {
                if (opt_svtrace( scb, OPT_F125_TIMEFACTOR, &first, &second)
                    &&
                    first>0)    /* see if there is a miladjust factor
                                ** from the user */
                    miladjust = (miladjust*first)/100.0;
                                /* this will increase/decrease
                                ** the amount of time required to timeout */
                planid_timeout =
                    opt_svtrace( scb, OPT_F127_TIMEOUT, &first, &second);
                                /* this will time out based on subquery
                                ** plan id and number of plans being
                                ** evaluated */
            }
            if (cs_status = CSstatistics(&timer_block, (i4)0))
            {
                opx_verror(E_DB_ERROR, E_OP0490_CSSTATISTICS,
                    (OPX_FACILITY)cs_status);
            }
            nowcpu = timer_block.stat_cpu - global->ops_estate.opn_startime;

            if ( subquery->ops_bestco
		 &&
                ( (
                 ((nowcpu > (miladjust * subquery->ops_cost)) ||
                  ((joinop_timeout > 0) && (nowcpu > joinop_timeout)))
                     &&
                 ( !planid_timeout ||
                    ( (second != 0)
                        &&
                        (second != subquery->ops_tsubquery)
                    )
                  )
                 )
                ||
                ( planid_timeout
                    &&
                  (first <= subquery->ops_tcurrent)
                    &&
                  ( (second == 0)
                    ||
                    (second == subquery->ops_tsubquery)
                  ))

                ) )
            {
                opx_verror(E_DB_WARN, E_OP0006_TIMEOUT, (OPX_FACILITY)0);
                return(TRUE);
            }
	    else if (nowcpu > joinop_abort)
	    {
		if (subquery->ops_bestco)
		{
    		    opx_verror(E_DB_WARN, E_OP0006_TIMEOUT, (OPX_FACILITY)0);
		    return(TRUE);
		}
		else
		{
		    scb->ops_interrupt = TRUE;
	            opx_rerror(subquery->ops_global->ops_caller_cb,
                               E_OP0018_TIMEOUTABORT);
		}
	    }
	} /* else */
    }
    /* Check for cancels if OS-threaded, time-slice if internal threaded */
    CScancelCheck(global->ops_cb->ops_sid);
    return(FALSE);
}
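
Stripped of the trace-point overrides, the timeout test itself is a two-part comparison: stop searching once the CPU already spent exceeds the scaled cost estimate of the best plan found so far, or once the session's joinop timeout limit (when one is set) has elapsed. The predicate below is a hedged restatement of just that core test; the free-standing form and the function name are illustrative, since the real check is inlined in opn_timeout above.

/* Condensed restatement of the core test above, not a replacement for it. */
static bool
best_plan_good_enough(
	i4		nowcpu,		/* cpu used since optimization began */
	OPO_COST	miladjust,	/* cost-to-millisec adjustment factor */
	OPO_COST	best_cost,	/* cost of the best plan found so far */
	i4		joinop_timeout)	/* session timeout limit, 0 if none */
{
    if (nowcpu > (miladjust * best_cost))
	return (TRUE);			/* further search would cost more than
					** the plan is expected to take */
    if ((joinop_timeout > 0) && (nowcpu > joinop_timeout))
	return (TRUE);			/* absolute session limit reached */
    return (FALSE);
}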