Example #1
DB_STATUS
qen_tsort(
QEN_NODE           *node,
QEF_RCB            *qef_rcb,
QEE_DSH		   *dsh,
i4		    function )
{
    QEF_CB	*qef_cb = dsh->dsh_qefcb;
    PTR		*cbs = dsh->dsh_cbs;
    ADE_EXCB	*ade_excb;
    DMR_CB	*dmr_load = (DMR_CB*)cbs[node->node_qen.qen_tsort.tsort_load];
    DMR_CB	*dmr_get = (DMR_CB*) cbs[node->node_qen.qen_tsort.tsort_get];
    QEN_NODE	*out_node = node->node_qen.qen_tsort.tsort_out;
    QEE_XADDRS	*node_xaddrs = dsh->dsh_xaddrs[node->qen_num];
    QEN_STATUS	*qen_status = node_xaddrs->qex_status;
    QEN_SHD	*shd = dsh->dsh_shd[node->node_qen.qen_tsort.tsort_shd];
    DB_STATUS	status;
    bool	reset = FALSE;
    bool	heap_sort = TRUE, no_qef = FALSE;
    i4		rowno;
    i4		out_func = NO_FUNC;
    DM_MDATA	dm_mdata;
    i4	val1;
    i4	val2;
    TIMERSTAT	timerstat;
    i4          loop_cntr = 0;

    if (function != 0)
    {
	if (function & FUNC_RESET)
	{
	    reset = TRUE;
	    out_func = FUNC_RESET;
	}

	/* Do open processing, if required. Only if this is the root node
	** of the query tree do we continue executing the function. */
	if ((function & TOP_OPEN || function & MID_OPEN) && 
	    !(qen_status->node_status_flags & QEN1_NODE_OPEN))
	{
	    status = (*out_node->qen_func)(out_node, qef_rcb, dsh, MID_OPEN);
	    qen_status->node_status_flags |= QEN1_NODE_OPEN;
	    if (function & MID_OPEN)
		return(E_DB_OK);
	    function &= ~TOP_OPEN;
	}
	/* Do close processing, if required. */
	if (function & FUNC_CLOSE)
	{
	    if (!(qen_status->node_status_flags & QEN8_NODE_CLOSED))
	    {
		status = (*out_node->qen_func)(out_node, qef_rcb, dsh, FUNC_CLOSE);
		qen_status->node_status_flags = 
		    (qen_status->node_status_flags & ~QEN1_NODE_OPEN) | QEN8_NODE_CLOSED;
	    }
	    return(E_DB_OK);
	}
	if (function & FUNC_EOGROUP)
	{
	    /* End of partition group request ends our sort if we're returning
	    ** rows.  If we aren't in the middle of returning rows, pass the
	    ** EOG request on down so that the child skips the upcoming group
	    ** and moves on to the next one.
	    */
	    if (qen_status->node_status == QEN3_GET_NEXT_INNER)
	    {
		status = qen_ts_reset(dsh, node, qen_status);
		/* FIXME we should do better at remembering whether this
		** sort load got EOF or EOG, and pass it on up now.  At
		** present (Apr '07), tsorts aren't very common except under
		** merge join plans, where early eof detection doesn't
		** matter much.  (the tsort is on the outer side.)
		** For now, pass EOG back up, if we're really at EOF caller
		** will find out soon enough.
		*/
		if (status == E_DB_OK)
		{
		    status = E_DB_WARN;
		    dsh->dsh_error.err_code = E_QE00A5_END_OF_PARTITION;
		}
	    }
	    else
	    {
		status = (*out_node->qen_func)(out_node, qef_rcb, dsh, FUNC_EOGROUP);
		/* Pass resulting EOF or EOG indication in dsh up to caller */
	    }
	    return(status);
	}
    } /* if function != 0 */


    /* Check for cancel, context switch if not MT */
    CScancelCheck(dsh->dsh_sid);
    if (QEF_CHECK_FOR_INTERRUPT(qef_cb, dsh) == E_DB_ERROR)
	return (E_DB_ERROR);

    /* If the trace point qe90 is turned on then gather cpu and dio stats */
    if (dsh->dsh_qp_stats)
    {
	qen_bcost_begin(dsh, &timerstat, qen_status);
    }

    if (node->node_qen.qen_tsort.tsort_dups == DMR_NODUPLICATES &&
        ult_check_macro(&qef_cb->qef_trace, 92, &val1, &val2))
            heap_sort = FALSE;

    if (ult_check_macro(&qef_cb->qef_trace, 94, &val1, &val2))
    {
	no_qef = TRUE;
	qen_status->node_u.node_sort.node_sort_status = QEN9_DMF_SORT;
    }

#ifdef xDEBUG
    (VOID) qe2_chk_qp(dsh);
#endif

    for (;;)	    /* to break off in case of error */
    {
	if (reset && qen_status->node_status != QEN0_INITIAL)
	{
	    status = qen_ts_reset(dsh, node, qen_status);
	    if (status != E_DB_OK)
		break;
	}

	/* If NO MORE ROWS from this node, just return */
	if (qen_status->node_status == QEN4_NO_MORE_ROWS)
	{
	    dsh->dsh_error.err_code = E_QE0015_NO_MORE_ROWS;
	    status = E_DB_WARN;
	    break;
	}

	rowno = node->node_qen.qen_tsort.tsort_mat->qen_output;
	ade_excb = node_xaddrs->qex_otmat;

	/* If this is the first execution, or if the node has been reset, 
	** initialize the sorter. Otherwise, just go get a tuple.
	*/
	if (qen_status->node_status == QEN0_INITIAL || 
	    qen_status->node_status == QEN1_EXECUTED)
	{
	    if (qen_status->node_status == QEN0_INITIAL)
	    {
		if (qen_status->node_u.node_sort.node_sort_status == QEN0_QEF_SORT)
		{
		    status = qes_init(dsh, shd, node, rowno,
				node->node_qen.qen_tsort.tsort_dups);
		    if (status != E_DB_OK)
		    {
			if (dsh->dsh_error.err_code == E_QE000D_NO_MEMORY_LEFT)
			{
			    /* out of space, convert it to DMF sort */
			    qen_status->node_u.node_sort.node_sort_status = QEN9_DMF_SORT;
			    status = E_DB_OK;
			}
			else
			{
			    break;
			}
		    }
		}

		if (qen_status->node_u.node_sort.node_sort_status == QEN9_DMF_SORT)
		{
		    status = qen_ts_dump(shd, dsh, node, 
					rowno, heap_sort, no_qef);
		    if (status != E_DB_OK)
			break;
		}

		if (node->node_qen.qen_tsort.tsort_pqual != NULL)
		    qeq_join_pqreset(dsh, node->node_qen.qen_tsort.tsort_pqual);

		qen_status->node_status = QEN1_EXECUTED;
	    }

	    /* Common code for QEN0_INITIAL and QEN1_EXECUTED */

	    /* Get dm_mdata ready for DMF loading  */
	    dm_mdata.data_address = dsh->dsh_row[rowno];
	    dm_mdata.data_size = dsh->dsh_qp_ptr->qp_row_len[rowno];
	    dmr_load->dmr_mdata = &dm_mdata;

            dsh->dsh_qp_status |= DSH_SORT;

	    /* Get rows from the node underneath and append them to the 
	    ** sorter */
	    for (;;)
	    {
		/* fetch rows */
		status = (*out_node->qen_func)(out_node, qef_rcb, dsh, out_func);
		out_func = NO_FUNC;
		if (status != E_DB_OK)
		{
		    /* the err_code field in dsh_error should already be
		    ** filled in.
		    */
		    if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS)
		    {
			status = E_DB_OK;
		    }
		    else if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION &&
			(node->qen_flags & QEN_PART_SEPARATE))
		    {
			/* End of rows from partitioning group. Flag the
			** fact and make it look like regular "eof". */
			qen_status->node_status_flags |= QEN2_OPART_END;
			dsh->dsh_error.err_code = E_QE0015_NO_MORE_ROWS;
			status = E_DB_OK;
		    }

		    break;
		}
		
		/* project the attributes into sort tuple row */
		status = qen_execute_cx(dsh, ade_excb);
		if (status != E_DB_OK)
		    break;

		/* If we're generating partition qualifications on behalf
		** of a parent FSM join, eval against this row.
		*/
		if (node->node_qen.qen_tsort.tsort_pqual != NULL)
		{
		    status = qeq_join_pquals(dsh, node->node_qen.qen_tsort.tsort_pqual);
		    if (status != E_DB_OK)
			break;
		}

                /* Append the sort tuple to the sorter - note the choice
                ** between the heap sort and the trace-point-mandated
                ** insertion sort. */
		if (qen_status->node_u.node_sort.node_sort_status == QEN0_QEF_SORT)
		{
		    if (heap_sort)
			status = qes_putheap(dsh, shd,
				node->node_qen.qen_tsort.tsort_cmplist,
				node->node_qen.qen_tsort.tsort_scount);
		    else
			status = qes_insert(dsh, shd,
				node->node_qen.qen_tsort.tsort_cmplist,
				node->node_qen.qen_tsort.tsort_scount);

		    if (status != E_DB_OK && 
			dsh->dsh_error.err_code == E_QE000D_NO_MEMORY_LEFT)
		    {
			/* out of space, convert it to DMF sort */
			qen_status->node_u.node_sort.node_sort_status = QEN9_DMF_SORT;
			status = qen_ts_dump(shd, dsh, 
					node, rowno, heap_sort, no_qef);
		    }
		    if (status != E_DB_OK)
			break;
		}

		if(qen_status->node_u.node_sort.node_sort_status == QEN9_DMF_SORT)  
		{
		    status = dmf_call(DMR_LOAD, dmr_load);
		    if (status != E_DB_OK)
		    {
			dsh->dsh_error.err_code = dmr_load->error.err_code; 
			break;
		    }
		}

                if (!(Qef_s_cb->qef_state & QEF_S_IS_MT) && (loop_cntr++ > 1000))
                {
                   /* On an internally threaded system, after processing 1000
                   ** rows give another thread a chance.
                   */

                   loop_cntr = 0;

                   CSswitch();
                }
	    }
	    if (status != E_DB_OK)
		break;

	    /* End of loading loop */
	    /* Finish up join-time part qual if we're doing it */
	    if (node->node_qen.qen_tsort.tsort_pqual != NULL)
	    {
		qeq_join_pqeof(dsh, node->node_qen.qen_tsort.tsort_pqual);
	    }
	    if (qen_status->node_u.node_sort.node_sort_status == QEN0_QEF_SORT)
	    {
		if (!heap_sort)
		    qes_endsort(dsh, shd);	/* prepare to return tuples */
	    }
	    else   /* DMF */
	    {
		/* Tell DMF that there are no more rows */
		dmr_load->dmr_flags_mask = (DMR_ENDLOAD | DMR_SORT_NOCOPY);
		status = dmf_call(DMR_LOAD, dmr_load);
		if (status != E_DB_OK)
		{
		    dsh->dsh_error.err_code = dmr_load->error.err_code;
		    break;
		}

		/* position the table for reading sorted tuples */
		dmr_get->dmr_flags_mask = DMR_SORTGET;
		dmr_get->dmr_position_type = DMR_ALL;
		status = dmf_call(DMR_POSITION, dmr_get);
		if (status != E_DB_OK)
		{
		    dsh->dsh_error.err_code = dmr_get->error.err_code;
		    break;
		}
	    }

	    /* Mark that the node is ready to return tuples */
	    qen_status->node_status = QEN3_GET_NEXT_INNER;
	}


	/* 
	** Return a tuple from the sorter to the caller.
	*/
	if (qen_status->node_u.node_sort.node_sort_status == QEN0_QEF_SORT)
	{
	    if (heap_sort) 
	    {
		status = qes_getheap(dsh, shd, 
                         node->node_qen.qen_tsort.tsort_cmplist,
                         node->node_qen.qen_tsort.tsort_scount); 
		if (status != E_DB_OK) 
		{
		    if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS)
		    {
			qen_status->node_status = QEN4_NO_MORE_ROWS;
			status = E_DB_WARN;
		    }
		    break;
		}
	    }
	    else
	    {		/* rows come straight from pointer array */
	 	/* Check for the end of buffer */
		if (shd->shd_next_tup == shd->shd_tup_cnt)
		{
		    qen_status->node_status = QEN4_NO_MORE_ROWS;
		    dsh->dsh_error.err_code = E_QE0015_NO_MORE_ROWS;
		    status = E_DB_WARN;
		    break;
		}

		/* Copy tuple to the row buffer */
		MEcopy((PTR)(shd->shd_vector[shd->shd_next_tup]), 
		    shd->shd_width, (PTR)shd->shd_row);
		++shd->shd_next_tup;
		status = E_DB_OK;
	    }
	}
	else
	{
	    dmr_get->dmr_flags_mask = (DMR_NEXT | DMR_SORTGET);
	    status = dmf_call(DMR_GET, dmr_get);
	    if (status != E_DB_OK)
	    {
		if (dmr_get->error.err_code == E_DM0055_NONEXT)
		{
		    qen_status->node_status = QEN4_NO_MORE_ROWS;
		    dsh->dsh_error.err_code = E_QE0015_NO_MORE_ROWS;
		    status = E_DB_WARN;
		}
		else
		{
		    dsh->dsh_error.err_code = dmr_get->error.err_code;
		}
		break;
	    }
	}
	/* status has to be OK here */

	/* Increment the count of rows that this node has returned */
	qen_status->node_rcount++;
	dsh->dsh_error.err_code = 0;

	/* print tracing information DO NOT xDEBUG THIS */
	if (node->qen_prow &&
		(ult_check_macro(&qef_cb->qef_trace, 100+node->qen_num,
				    &val1, &val2) ||
		    ult_check_macro(&qef_cb->qef_trace, 99,
				    &val1, &val2)
		)
	    )
	{
	    (void) qen_print_row(node, qef_rcb, dsh);
	}

	break;
    }	    /* end of error-break loop */

#ifdef xDEBUG
    (VOID) qe2_chk_qp(dsh);
#endif

    if (dsh->dsh_qp_stats)
    {
	qen_ecost_end(dsh, &timerstat, qen_status);
    }

    dsh->dsh_qp_status &= ~DSH_SORT;

    if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS &&
	(qen_status->node_status_flags & QEN2_OPART_END))
    {
	/* If this was just the end of a partition, reset error code
	** to notify the caller. */
	qen_status->node_status_flags &= ~QEN2_OPART_END;
	dsh->dsh_error.err_code = E_QE00A5_END_OF_PARTITION;
	status = E_DB_WARN;
    }
    return (status);
}
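
The pattern above is worth seeing in isolation: qen_tsort first loads every row from its child into a sorter, preferring an in-memory QEF sort and converting to a DMF sort work file when memory runs out (the E_QE000D_NO_MEMORY_LEFT handling), and only then hands rows back, one per call. Below is a minimal, self-contained sketch of that load/spill/drain shape in plain C; all names (sorter, sorter_put, MEM_CAPACITY, and so on) are invented for illustration and none of it is Ingres code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MEM_CAPACITY	4		/* tiny on purpose, to force a spill */
#define SPILL_CAPACITY	64		/* stands in for the DMF work file */

typedef struct {
    int   mem[MEM_CAPACITY];		/* in-memory sorter (QEF heap sort) */
    int   spill[SPILL_CAPACITY];	/* spill sorter (DMF sort) */
    int   n;				/* rows loaded so far */
    int   spilled;			/* nonzero once converted to spill */
    int   next;				/* next row to return when draining */
} sorter;

static int cmp_int(const void *a, const void *b)
{
    return (*(const int *)a > *(const int *)b) -
	   (*(const int *)a < *(const int *)b);
}

/* Load one row; convert to the spill sorter when memory is exhausted,
** the way qen_tsort converts QEN0_QEF_SORT to QEN9_DMF_SORT above. */
static void sorter_put(sorter *s, int row)
{
    if (!s->spilled && s->n == MEM_CAPACITY)
    {
	memcpy(s->spill, s->mem, sizeof(s->mem));   /* dump rows loaded so far */
	s->spilled = 1;
    }
    if (s->spilled)
	s->spill[s->n++] = row;
    else
	s->mem[s->n++] = row;
}

/* End of load: sort whichever store ended up holding the rows. */
static void sorter_endload(sorter *s)
{
    qsort(s->spilled ? s->spill : s->mem, s->n, sizeof(int), cmp_int);
    s->next = 0;
}

/* Drain phase: one row per call, like one call to qen_tsort per tuple.
** Returns 0 at "no more rows". */
static int sorter_get(sorter *s, int *row)
{
    if (s->next == s->n)
	return 0;			/* E_QE0015_NO_MORE_ROWS analogue */
    *row = s->spilled ? s->spill[s->next++] : s->mem[s->next++];
    return 1;
}

int main(void)
{
    static const int input[] = { 42, 7, 19, 3, 88, 1 };
    sorter s = { {0} };
    int row;
    size_t i;

    for (i = 0; i < sizeof(input) / sizeof(input[0]); i++)
	sorter_put(&s, input[i]);
    sorter_endload(&s);
    while (sorter_get(&s, &row))
	printf("%d\n", row);		/* prints 1 3 7 19 42 88 */
    return 0;
}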
Example #2
DB_STATUS
qen_fsmjoin(
QEN_NODE	*node,
QEF_RCB		*qef_rcb,
QEE_DSH		*dsh,
i4		function )
{
    QEF_CB	    *qef_cb = dsh->dsh_qefcb;
    DMR_CB	    *dmrcb;
    QEN_NODE	    *out_node = node->node_qen.qen_sjoin.sjn_out;
    QEN_NODE	    *in_node = node->node_qen.qen_sjoin.sjn_inner;
    QEE_XADDRS	    *node_xaddrs = dsh->dsh_xaddrs[node->qen_num];
    QEN_STATUS	    *qen_status = node_xaddrs->qex_status;
    ADE_EXCB	    *ade_excb;
    ADE_EXCB	    *jqual_excb = node_xaddrs->qex_jqual;
    QEN_HOLD	    *qen_hold;
    QEN_HOLD	    *ijFlagsHold = (QEN_HOLD *)NULL;
    QEN_SHD         *qen_shd;
    QEN_SHD         *ijFlagsShd;
    DB_STATUS	    status = E_DB_OK;
    bool	    reset = FALSE;
    bool	    out_reset = FALSE;
    bool	    in_reset = FALSE;
    bool            ojoin = (node->node_qen.qen_sjoin.sjn_oj != NULL); 
    bool            ljoin = FALSE;
    bool            rjoin = FALSE;
    bool	    innerTupleJoined;
    bool	    rematerializeInnerTuple = TRUE;
			/* During full joins, the last driving tuple may left
			** join.  This zeroes all special eqcs from the
			** re-scannable stream.  The current re-scannable
			** tuple will right join.  To recover the state of
			** its special eqcs, simply re-materialize the inner
			** tuple.  That's what this variable is for.
			*/
    i4		    new_to_old;
    i4		    join_result;
    i4	    val1;
    i4	    val2;
    TIMERSTAT	    timerstat;
    bool potential_card_violation = FALSE;

#ifdef xDEBUG
    (VOID) qe2_chk_qp(dsh);
#endif

    if (function != 0)
    {
	if (function & FUNC_RESET)
	{
	    reset = in_reset = out_reset = TRUE;
	}

	/* Do open processing, if required. Only if this is the root node
	** of the query tree do we continue executing the function. */
	if ((function & TOP_OPEN || function & MID_OPEN) 
	    && !(qen_status->node_status_flags & QEN1_NODE_OPEN))
	{
	    status = (*out_node->qen_func)(out_node, qef_rcb, dsh, MID_OPEN);
	    status = (*in_node->qen_func)(in_node, qef_rcb, dsh, MID_OPEN);
	    qen_status->node_status_flags |= QEN1_NODE_OPEN;
	    if (function & MID_OPEN)
		return(E_DB_OK);
	    function &= ~TOP_OPEN;
	}
	/* Do close processing, if required. */
	if (function & FUNC_CLOSE)
	{
	    if (!(qen_status->node_status_flags & QEN8_NODE_CLOSED))
	    {
		/* Ideally we would clean up all of our own shd crap here
		** instead of making qee do it...
		*/
		status = (*out_node->qen_func)(out_node, qef_rcb, dsh, FUNC_CLOSE);
		status = (*in_node->qen_func)(in_node, qef_rcb, dsh, FUNC_CLOSE);
		qen_status->node_status_flags = 
		    (qen_status->node_status_flags & ~QEN1_NODE_OPEN) | QEN8_NODE_CLOSED;
	    }
	    return(E_DB_OK);
	}

	/* End of partition group call just gets passed down. */
	if (function & FUNC_EOGROUP)
	{
	    status = (*out_node->qen_func)(out_node, qef_rcb, dsh, FUNC_EOGROUP);
	    status = (*in_node->qen_func)(in_node, qef_rcb, dsh, FUNC_EOGROUP);
	    return(E_DB_OK);
	}
    } /* if function */

    /* If the trace point qe90 is turned on then gather cpu and dio stats */
    if (dsh->dsh_qp_stats)
    {
	qen_bcost_begin(dsh, &timerstat, qen_status);
    }
     

    /* Check for cancel, context switch if not MT */
    CScancelCheck(dsh->dsh_sid);
    if (QEF_CHECK_FOR_INTERRUPT(qef_cb, dsh) == E_DB_ERROR)
	return (E_DB_ERROR);

    dsh->dsh_error.err_code = E_QE0000_OK;

    qen_hold = dsh->dsh_hold[node->node_qen.qen_sjoin.sjn_hfile];
    qen_shd = dsh->dsh_shd[dsh->dsh_qp_ptr->qp_sort_cnt +
                            node->node_qen.qen_sjoin.sjn_hfile];

    if( ojoin && node->node_qen.qen_sjoin.sjn_oj->oj_ijFlagsFile >= 0 )
    {
        ijFlagsHold =
	  dsh->dsh_hold[node->node_qen.qen_sjoin.sjn_oj->oj_ijFlagsFile];
        ijFlagsShd = dsh->dsh_shd[dsh->dsh_qp_ptr->qp_sort_cnt +
                            node->node_qen.qen_sjoin.sjn_oj->oj_ijFlagsFile];
    }

    if ( ojoin ) switch(node->node_qen.qen_sjoin.sjn_oj->oj_jntype)
    {
       case DB_LEFT_JOIN:
         ljoin = TRUE;
         break;
       case DB_RIGHT_JOIN:
         rjoin = TRUE;
         break;
       case DB_FULL_JOIN:
         ljoin = TRUE;
         rjoin = TRUE;
         break;
       default: break;
    }

    /* If the node is to be reset, dump the hold file and reset the
    ** inner/outer nodes */

loop_reset:
    if (reset)
    {
	if (qen_status->node_status != QEN0_INITIAL && 
	    in_node->qen_type != QE_SORT)
	{
	    /* reset in memory or dump dmf hold if it has been created */
	    status = qen_u9_dump_hold(qen_hold, dsh, qen_shd);
	    if(status) goto errexit;
	    qen_hold->hold_medium = HMED_IN_MEMORY;  /* set back to mem */
	}
	qen_hold->hold_buffer_status = HFILE6_BUF_EMPTY;

	if ( qen_status->node_status != QEN0_INITIAL && ijFlagsHold )
	{
	    /* dump tid hold file if it has been created */

	    status = qen_u9_dump_hold( ijFlagsHold, dsh, ijFlagsShd );
	    if(status) goto errexit;  
	    ijFlagsHold->hold_medium = HMED_IN_MEMORY;  /* set back to mem */
	}

	qen_status->node_status = QEN0_INITIAL;  /* reset = reinitialize */
	qen_status->node_u.node_join.node_inner_status = QEN0_INITIAL;
	qen_status->node_u.node_join.node_outer_status = QEN0_INITIAL;
	qen_status->node_u.node_join.node_outer_count = 0;

	qen_status->node_access = (	QEN_READ_NEXT_OUTER |
					QEN_READ_NEXT_INNER |
					QEN_OUTER_HAS_JOINED	);  
    }

    if (qen_status->node_status == QEN0_INITIAL)  
    {
	qen_status->node_u.node_join.node_outer_status = QEN0_INITIAL;

	/* Set the number of entries in mem_hold in case we build one;
	** this may not be a hard number in the future.
	**	qen_shd->shd_tup_cnt = 20;
	**
	** By setting it to -1, the required memory will be configured to
	** suit the conditions; if it is < 20, the DMF hold memory is used.
	** ramra01 19-oct-94
	*/
	qen_shd->shd_tup_cnt = -1;

	if( ijFlagsHold )
	{
	    ijFlagsHold->hold_status = HFILE0_NOFILE;	/* default */
	    ijFlagsHold->hold_status2 = 0;		/* default */
	    ijFlagsHold->hold_medium = HMED_IN_MEMORY;  /* default */
	    /* in case we build a hold file
	    ** tell qen_u1_append to calculate its size in memory
	    ** or go to DMF hold */
	    ijFlagsShd->shd_tup_cnt = -1;
	}

	if(rjoin)
	{
	    /* consistency check */
	    if( !ijFlagsHold )
	    {
		/* rjoin and no hold file for inner join flags */
		dsh->dsh_error.err_code = E_QE0002_INTERNAL_ERROR;
		status = E_DB_ERROR;
		goto errexit;           
	    }
	}

	qen_status->node_access = (	QEN_READ_NEXT_OUTER |
					QEN_READ_NEXT_INNER |
					QEN_OUTER_HAS_JOINED	);  
    }

    for (;;)   /* The loop */
    {
	status = E_DB_OK;

	/*********************************************************
	**	
	**	LOGIC TO READ FROM THE OUTER TUPLE STREAM
	**
	**
	**
	*********************************************************/

	if( qen_status->node_access & QEN_READ_NEXT_OUTER )
	{

	    /*
	    ** If the previous outer tuple did not inner join with
	    ** any inner tuples, then it's an outer join.  Return
	    ** it along with nulls for the right side if it passes
	    ** the WHERE clause.
	    */

	    if ( ljoin && !( qen_status->node_access & QEN_OUTER_HAS_JOINED ) )
	    {
		/*
		** Set the "outer has joined" flag so that if we emit
		** a left join, we won't come back into this conditional
		** the next time through this fsmjoin node.
		*/
		qen_status->node_access |= QEN_OUTER_HAS_JOINED;

		/* now execute oj_lnull */
		status = qen_execute_cx(dsh, node_xaddrs->qex_lnull);
		if (status != E_DB_OK)
		    goto errexit;   /* if ade error, return error */

		/* Execute jqual restriction, if any */
		if ( jqual_excb == NULL)
		    break;	/* emit a left join */
		else
		{
		    status = qen_execute_cx(dsh, jqual_excb);
		    if (status != E_DB_OK)
			goto errexit;   /* if ade error, return error */
		    if (jqual_excb->excb_value == ADE_TRUE)
			break;	/* emit a left join */
		}
	    }	/* endif previous outer did not join */

	    qen_status->node_access &= ~(	QEN_READ_NEXT_OUTER |
						QEN_OUTER_HAS_JOINED	);
	    /* get a new outer */

	newouter:
	    if ( qen_status->node_u.node_join.node_outer_status == QEN8_OUTER_EOF )
	    {
	        status = E_DB_WARN;
		dsh->dsh_error.err_code = E_QE0015_NO_MORE_ROWS;
	    }
	    else	/* this is where we actually read the outer stream! */
	    {
	        status = (*out_node->qen_func)(out_node, qef_rcb, dsh,
				(out_reset) ? FUNC_RESET : NO_FUNC);
		if (status == E_DB_OK)
		    qen_status->node_u.node_join.node_outer_count++;
	    }
	    out_reset = FALSE;

	    /* a little error handling.  check for end of outer stream */
	    if (status != E_DB_OK)
	    {
		if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS ||
		    (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION &&
		     (node->qen_flags & QEN_PART_SEPARATE)))
		{
		    /* If no outer rows were read and we're doing partition
		    ** grouping, skip the next inner partition to re-sync. */
		    if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION &&
			qen_status->node_u.node_join.node_outer_count <= 0)
		    {
			if (!rjoin)
			{
			    if (node->qen_flags & QEN_PART_SEPARATE)
			    {
				status = (*in_node->qen_func)(in_node, qef_rcb, 
						dsh, FUNC_EOGROUP);
				if (dsh->dsh_error.err_code == 
						E_QE00A5_END_OF_PARTITION)
				    qen_status->node_status_flags |=
						QEN4_IPART_END;
					/* if just EOPartition, flag it */
			    }
			    goto errexit;
			}
		    }

		    qen_status->node_access |= QEN_OUTER_HAS_JOINED;
                    qen_status->node_u.node_join.node_outer_status = QEN8_OUTER_EOF;

		    if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION)
			qen_status->node_status_flags |= QEN2_OPART_END;
					/* if just EOPartition, flag it */

		    /*
		    ** If ( the inner stream is exhausted and there's nothing
		    ** to rescan ) or we're not right joining,
		    ** then there are no more tuples to return.  This should
		    ** be the only way to end this fsmjoin node.
		    */
		    if ( INNER_STREAM_EXHAUSTED || rjoin == FALSE  )
                    {
		       qen_status->node_status = QEN4_NO_MORE_ROWS;
                       break;    /* done */
                    }    
                    
                    else	/* we must check some more inner tuples */
		    {
                       dsh->dsh_error.err_code = 0;   /*reset*/

		       if(qen_status->node_status == QEN0_INITIAL)  /* empty */
		       {
			    qen_status->node_status = QEN1_EXECUTED;
			    qen_hold->hold_status = HFILE0_NOFILE;
			    qen_hold->hold_status2 = 0;
			    qen_hold->hold_medium = HMED_IN_MEMORY;
			    if(in_node->qen_type == QE_SORT)
			    {
				status = qen_u32_dosort(in_node, 
							qef_rcb, 
							dsh, 
							qen_status, 
							qen_hold,
							qen_shd, 
					(in_reset) ? FUNC_RESET : NO_FUNC);
				if(status) goto errexit;
			    }
			    in_reset = FALSE;
		       }	/* endif first time through */
		    }	/* endif no more inner tuples to check */
		}
		else if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION)
		{
		    /* No more rows in partitioning group - read from 
		    ** next group. */
		    out_reset = TRUE;
		    goto newouter;
		}
		else  /* return error from reading outer stream */
		{
		    break;
		}
	    }	/* end of error handling for outer stream EOF */


            if(qen_status->node_status == QEN0_INITIAL)
            {
		qen_status->node_status = QEN1_EXECUTED;  /* init done */
		qen_hold->hold_status = HFILE0_NOFILE;  /* default */
		qen_hold->hold_status2 = 0;		/* default */
		qen_hold->hold_medium = HMED_IN_MEMORY;  /* default */
		if(in_node->qen_type  == QE_SORT)
		{

		    status = qen_u32_dosort(in_node, 
					    qef_rcb, 
					    dsh, 
					    qen_status, 
					    qen_hold,
					    qen_shd, 
					(in_reset) ? FUNC_RESET : NO_FUNC);
		    if(status) goto errexit;
		    in_reset = FALSE;
		}

		/* now materialize the first join key */
		status = qen_execute_cx(dsh, node_xaddrs->qex_okmat);
		if (status != E_DB_OK)
		    goto errexit;   /* if ade error, return error */
	    }

	    /* If not the first time */
	    else
	    {
		if ( qen_status->node_u.node_join.node_outer_status == QEN8_OUTER_EOF )
		{
		    new_to_old = NEW_GT_OLD;
		}
		else	/* outer not at EOF */
		{
		    /* compare the old outer key to the new one. */
		    new_to_old = ADE_1EQ2;
		    if ((ade_excb = node_xaddrs->qex_kcompare) != NULL)
		    {
			status = qen_execute_cx(dsh, ade_excb);
			if (status != E_DB_OK)
			    goto errexit;   /* if ade error, return error */
			new_to_old = ade_excb->excb_cmp;
		    }

		    /* Materialize the new outer key if the old and the 
		    ** new outer keys are not equal */
		    if (new_to_old != ADE_1EQ2)
		    {
			status = qen_execute_cx(dsh, node_xaddrs->qex_okmat);
			if (status != E_DB_OK)
			    goto errexit;   /* if ade error, return error */
		    }
		    else if ((node->qen_flags & QEN_CARD_MASK) == QEN_CARD_01L)
		    {
			/* Right outer - note cardinality */
			potential_card_violation = (new_to_old == ADE_1EQ2);
		    }
		}	/* endif outer not at EOF */

		/*
		** If there are inner tuples to rescan, decide whether
		** to thumb through them again or dump them.
		*/


		if ( qen_status->node_access & QEN_RESCAN_MARKED )
		{
		    if ( new_to_old == ADE_1EQ2 )
		    {
		        status = repositionInnerStream( node, dsh );
		        if(status != E_DB_OK) break;   /* to error */
		        continue;
		    }

		    else	/* key has changed */
		    {
		        if ( rjoin )
		        {
		            status = repositionInnerStream( node, dsh );
		            if(status != E_DB_OK) break;   /* to error */
		            qen_status->node_access |= QEN_LOOKING_FOR_RIGHT_JOINS;
		            continue; /* to get a new inner */
		        }
		        else	/* don't have to return right joins */
		        {
		            status = clearHoldFiles( node, dsh );
		            if(status != E_DB_OK) break;   /* to error */
		        }
		    }	/* endif comparison of new and old keys */
		}	/* endif there are inner tuples to rescan */
	    }	/* end first or subsequent times */
	}  /* end if read_outer  */

	/*********************************************************
	**	
	**	LOGIC TO READ FROM THE INNER TUPLE STREAM
	**
	**
	*********************************************************/

	if( qen_status->node_access & QEN_READ_NEXT_INNER )
	{
            qen_status->node_access &= ~QEN_READ_NEXT_INNER;

	    if ( !INNER_STREAM_EXHAUSTED )
	    {
		/*
		** If we're rescanning the hold files and will eventually
		** have to look for right joins, read from the hold file
		** of inner join flags.
		*/
		if ( rjoin &&
		     ( ijFlagsHold->hold_status2 & HFILE_REPOSITIONED )  )
		{
		    if (qen_u40_readInnerJoinFlag( ijFlagsHold, dsh,
	    				ijFlagsShd,
					&innerTupleJoined ) != E_DB_OK)
		    {
			/*
			** If innerJoinFlags is exhausted and we were
			** looking for right joins, then we've found
			** all the right joins for this key.  Dump the
			** hold files.
			*/
		        if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS)
		        {
			    /* Hold file ends, mark this. Continue reading. */
			    ijFlagsHold->hold_buffer_status = HFILE6_BUF_EMPTY;
			    ijFlagsHold->hold_status = HFILE2_AT_EOF;
			    if ( qen_status->node_access &
				 QEN_LOOKING_FOR_RIGHT_JOINS )
			    {
				qen_status->node_access &= 
				    ~( QEN_LOOKING_FOR_RIGHT_JOINS |
				       QEN_READ_NEXT_OUTER );
				qen_status->node_access |= 
				    QEN_READ_NEXT_INNER;
				status = clearHoldFiles( node, dsh );
				if(status != E_DB_OK) break;   /* to error */
				continue;	/* get next inner */
			    }
			}
			else	/* other errors are fatal */
			{
			    break;
			}
		    }	/* endif innerJoinFlags read wasn't OK */
		}	/* endif rjoin and rescanning hold files */

	        /* Read from hold file if it is positioned */
	        if (qen_hold->hold_status == HFILE3_POSITIONED)
	        {
		    if (qen_u4_read_positioned(qen_hold, dsh,
	    			       qen_shd) != E_DB_OK)
		    {
		        if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS)
		        {
			    /* Hold file ends, must read from inner node */
			    qen_hold->hold_buffer_status = HFILE6_BUF_EMPTY;
			    qen_hold->hold_status = HFILE2_AT_EOF;
			    dsh->dsh_error.err_code = 0;
			    qen_status->node_access |= QEN_READ_NEXT_INNER;
			    if (node->qen_flags & QEN_PART_SEPARATE &&
				!(qen_hold->hold_status2 &
						HFILE_LAST_PARTITION))
			    {
				qen_status->node_status_flags |=
							QEN4_IPART_END;
				dsh->dsh_error.err_code = 
						E_QE00A5_END_OF_PARTITION;
			    }
			    continue;	/* to read a new inner */
		        }
			else	/* other, presumably fatal error */
			{
			    break;
			}
		    }   /* end if hold end */

		    if(in_node->qen_type == QE_SORT)  /* if hold from sort */
		    {
		        /* Materialize the inner tuple from sort's row buffer
		           into my row buffer. */
			status = qen_execute_cx(dsh, node_xaddrs->qex_itmat);
			if (status != E_DB_OK)
			    goto errexit;   /* if ade error, return error */
			rematerializeInnerTuple = FALSE;
		    } /* end if hold from sort */

		    qen_hold->hold_buffer_status = HFILE7_FROM_HOLD;
	        }	/* end if positioned */
	        /* if not EOF on stream */
	        else if (qen_status->node_u.node_join.node_inner_status != QEN11_INNER_ENDS)
	        {
		    if(qen_hold->unget_status)   /* if occupied */
                    {
                        /* put unget in row buffer */
                        MEcopy((PTR)qen_hold->unget_buffer, 
			   qen_shd->shd_width,
                           (PTR)qen_shd->shd_row);
                        qen_hold->unget_status = 0;   /* set no unget */
		        qen_hold->hold_buffer_status = HFILE8_FROM_INNER;
                    }
                    else   /* get new from stream */
                    {
		newinner:
		        status = (*in_node->qen_func)(in_node, qef_rcb, dsh,
					(in_reset) ? FUNC_RESET : NO_FUNC);
		        in_reset = FALSE;
		        if (status != E_DB_OK)
		        {
			    if (dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS ||
				dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION
				&& (node->qen_flags & QEN_PART_SEPARATE))
			    {
			        qen_hold->hold_buffer_status = HFILE6_BUF_EMPTY;

				if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION)
				    qen_status->node_status_flags |= QEN4_IPART_END;
					/* if just EOPartition, flag it */

			        /* mark EOF on stream */
			        qen_status->node_u.node_join.node_inner_status = QEN11_INNER_ENDS;
			        if(qen_hold->hold_status == HFILE2_AT_EOF ||
			           qen_hold->hold_status == HFILE0_NOFILE ||
			           qen_hold->hold_status == HFILE1_EMPTY )
			        {
			           qen_status->node_u.node_join.node_hold_stream = 
			        	    QEN5_HOLD_STREAM_EOF;
			        }
			    }
			    else if (dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION)
			    {
				/* No more rows in partitioning group - read 
				** from next group. */
				in_reset = TRUE;
				goto newinner;
			    }
			    else
			    {
			        break;	/* other, fatal error */
			    }
		        }
			else	/* inner tuple successfully read */
			{
		            /* Materialize the inner tuple into row buffer. */
			    status = qen_execute_cx(dsh, node_xaddrs->qex_itmat);
			    if (status != E_DB_OK)
				goto errexit;
		            qen_hold->hold_buffer_status = HFILE8_FROM_INNER;
			    rematerializeInnerTuple = FALSE;
			}
                    }  /* end if unget occupied */
	        }   /* end of read from hold/inner */
	    }	/* endif inner stream not exhausted */
	}    /* end if read_inner */

	/***************************************************************
	**
	**	LOOK FOR RIGHT JOINS
	**
	**
	***************************************************************/

	if ( qen_status->node_access & QEN_LOOKING_FOR_RIGHT_JOINS )
	{
	    qen_status->node_access &= ~QEN_READ_NEXT_OUTER;
	    qen_status->node_access |= QEN_READ_NEXT_INNER;

	    if ( innerTupleJoined == FALSE )
	    {
		status = qen_execute_cx(dsh, node_xaddrs->qex_rnull);
		if (status != E_DB_OK)
		    goto errexit;   /* if ade error, return error */
		if (jqual_excb == NULL)
		    break;	/* return right join */
		else
		{
		    status = qen_execute_cx(dsh, jqual_excb);
		    if (status != E_DB_OK)
			goto errexit;   /* if ade error, return error */
		    if (jqual_excb->excb_value == ADE_TRUE)
			break;  /* to return right join */
		}
	    }	/* endif inner tuple joined with some outer */

	    continue;	/* evaluate next inner tuple for right joins */
	}	/* endif looking for right joins */

	/***************************************************************
	**
	**	COMPARE THE INNER AND OUTER JOIN KEYS
	**
	**
	***************************************************************/


	if ( INNER_STREAM_EXHAUSTED ||
	     qen_hold->hold_buffer_status == HFILE6_BUF_EMPTY )
	{
	    join_result = OUTER_LT_INNER;
	}

	else if(qen_status->node_u.node_join.node_outer_status == QEN8_OUTER_EOF)
	{
	    join_result = OUTER_GT_INNER;
	}

	else	/* we have an inner and outer.  join them on the join key. */
	{
	    join_result = ADE_1EQ2;
	    if ((ade_excb = node_xaddrs->qex_joinkey) != NULL)
	    {
		status = qen_execute_cx(dsh, ade_excb);
		if (status != E_DB_OK)
		    goto errexit;   /* if ade error, return error */
		join_result = ade_excb->excb_cmp;
	    }

	    if (join_result == ADE_BOTHNULL)
	    {  
	        join_result = OUTER_GT_INNER;
	    }

	    else if (join_result == ADE_1ISNULL)
	    {
	        join_result = OUTER_GT_INNER;
	    }

	    else if (join_result == ADE_2ISNULL)
	    {
	        join_result = OUTER_LT_INNER;
	    }
	}	/* endif we have inner and outer */

	/***************************************************************
	**
	**	OUTER AND INNER KEYS NOW JOINED.  PERFORM OTHER
	**	QUALIFICATIONS NOW.  EMIT JOINS WHERE APPROPRIATE.
	**
	***************************************************************/


	if (join_result == OUTER_LT_INNER)
	{                           
	    qen_status->node_access |= QEN_READ_NEXT_OUTER;
	    qen_status->node_access &= ~QEN_READ_NEXT_INNER;
	    continue;	/* get next outer */
	}

	if ( join_result == OUTER_GT_INNER )
	{
	    qen_status->node_access &= ~QEN_READ_NEXT_OUTER;
	    qen_status->node_access |= QEN_READ_NEXT_INNER;

	    if ( rjoin )
	    {
		/* rematerialize inner tuple if the ultimate outer tuple
		** just left joined.  rematerialization will reset the
		** special equivalence classes from the inner stream.
		*/

		if ( rematerializeInnerTuple == TRUE )
		{
		    status = qen_execute_cx(dsh, node_xaddrs->qex_itmat);
		    if (status != E_DB_OK)
			goto errexit;   /* if ade error, return error */
		    rematerializeInnerTuple = FALSE;
		}

		/* execute oj_rnull */
		status = qen_execute_cx(dsh, node_xaddrs->qex_rnull);
		if (status != E_DB_OK)
		    goto errexit;   /* if ade error, return error */

		if (jqual_excb == NULL)
		    break;	/* return right join */
		else
		{
		    status = qen_execute_cx(dsh, jqual_excb);
		    if (status != E_DB_OK)
			goto errexit;   /* if ade error, return error */
		    if (jqual_excb->excb_value == ADE_TRUE)
			break;  /* to return right join */
		}
	    }
	    continue;	/* get next inner */
	}    /* endif outer greater than inner */
		
	/* We come to this point when joinkey returns OUTER_EQ_INNER */ 

	if ( join_result != OUTER_EQ_INNER )
	{
	    /* consistency check */
	    dsh->dsh_error.err_code = E_QE0002_INTERNAL_ERROR;
	    status = E_DB_ERROR;
	    goto errexit;           
	}	/* end consistency check */

	if (qen_hold->hold_buffer_status == HFILE8_FROM_INNER)
	{
	    /* append to hold */
            status = qen_u1_append(qen_hold, qen_shd, dsh); 
	    if(status) break;  /* to return error */
	}	

	/* If this is the first inner that joins with the current
	** outer, save the hold file TID so we can reposition it later.
	*/
	if ( !( qen_status->node_access & QEN_RESCAN_MARKED ) )
	{
	    if ( qen_u5_save_position(qen_hold, qen_shd) )
		goto errexit;
	    qen_status->node_access |= QEN_RESCAN_MARKED;
	}
	else if ((node->qen_flags & QEN_CARD_MASK) == QEN_CARD_01R &&
	    (qen_status->node_access & QEN_OUTER_HAS_JOINED) != 0)
	{
	    /* Left outer - note cardinality */
	    potential_card_violation = TRUE;
	}

	qen_status->node_access &= ~QEN_READ_NEXT_OUTER;
	qen_status->node_access |= QEN_READ_NEXT_INNER;

	/* execute OQUAL */
	ade_excb = node_xaddrs->qex_onqual;
	status = qen_execute_cx(dsh, ade_excb);
	if (status != E_DB_OK)
	    goto errexit;   /* if ade error, return error */
	if (ade_excb == NULL || ade_excb->excb_value == ADE_TRUE)
	{
	    /* not OJ, or OQUAL succeeds.  Remember that a join occurred. */
	    qen_status->node_access |= QEN_OUTER_HAS_JOINED;  

	    if ( rjoin )
	    {
		if ((status = qen_u41_storeInnerJoinFlag( ijFlagsHold,
			ijFlagsShd, dsh, innerTupleJoined,
			(i4) TRUE )))
		    goto errexit;   /* error */
	    }

	    /* set the special eqcs to "inner join" state */
	    status = qen_execute_cx(dsh, node_xaddrs->qex_eqmat);
	    if (status != E_DB_OK)
		goto errexit;   /* if ade error, return error */

	    if (jqual_excb != NULL)
	    {
		status = qen_execute_cx(dsh, jqual_excb);
		if (status != E_DB_OK)
		    goto errexit;   /* if ade error, return error */
	    }
	    if( jqual_excb == NULL || jqual_excb->excb_value == ADE_TRUE)
	    {
		/* JQUAL succeeds */
		if(node->node_qen.qen_sjoin.sjn_kuniq)  /* if kuniq */
		{
		    /* make next entry read new outer but not new inner */
		    qen_status->node_access |= QEN_READ_NEXT_OUTER;
		    qen_status->node_access &= ~QEN_READ_NEXT_INNER;
		}	/* endif key unique */
		if (potential_card_violation)
		{
		    /* We only want to act on seltype violation after
		    ** qualification and that is now. */
		    qen_status->node_status = QEN7_FAILED;
		    dsh->dsh_error.err_code = E_QE004C_NOT_ZEROONE_ROWS;
		    status = E_DB_ERROR;
		    goto errexit;
		}

		break;  /* emit inner join */
	    }
	}
	else	/* OQUAL failed */
	{
	    if ( rjoin )
	    {
	        if ((status = qen_u41_storeInnerJoinFlag( ijFlagsHold,
	                ijFlagsShd, dsh, innerTupleJoined,
	                (i4) FALSE )))
	            goto errexit;   /* error */
	    }

	}	/* end check of OQUAL status */

	/* OQUAL or JQUAL failed.  Get next inner. */

	continue;

    }	/* end of get loop */

    /********************************************************************
    **
    **	CLEANUP.  MATERIALIZE FUNCTION ATTRIBUTES.  ERROR EXIT WHEN
    **	APPROPRIATE.
    **
    ********************************************************************/

    if (status == E_DB_OK)
    {
	status = qen_execute_cx(dsh, node_xaddrs->qex_fatts);
	if (status != E_DB_OK)
	    goto errexit;   

	/* Increment the count of rows that this node has returned */
	qen_status->node_rcount++;

	/* print tracing information DO NOT xDEBUG THIS */
	if (node->qen_prow != NULL &&
		(ult_check_macro(&qef_cb->qef_trace, 100+node->qen_num,
				    &val1, &val2) ||
		    ult_check_macro(&qef_cb->qef_trace, 99,
				    &val1, &val2)
		)
	    )
	{
	    if (status == E_DB_OK)
	    {
		status = qen_print_row(node, qef_rcb, dsh);
		if (status != E_DB_OK)
		{
		    goto errexit;
		}
	    }
	}

#ifdef xDEBUG
	(VOID) qe2_chk_qp(dsh);
#endif 
    }
    else
    {
	if (in_node->qen_type == QE_SORT)	/* if sort child */
	{
	    /* release the memory now if in memory */
	    qen_u31_release_mem(qen_hold, dsh,
	    	  dsh->dsh_shd[in_node->node_qen.qen_sort.sort_shd] );
	}
	else if (qen_hold)	/* if hold file */
	{
	    /* release our hold file, if in memory */
	    qen_u31_release_mem(qen_hold, dsh, qen_shd );
	}
    }

errexit:

    if ((dsh->dsh_error.err_code == E_QE0015_NO_MORE_ROWS ||
	dsh->dsh_error.err_code == E_QE00A5_END_OF_PARTITION) &&
	(qen_status->node_status_flags & 
			(QEN2_OPART_END | QEN4_IPART_END)))
    {
	/* Restart using next partitioning group. */
	out_reset = in_reset = reset = TRUE;
	qen_status->node_status_flags &=
			~(QEN2_OPART_END | QEN4_IPART_END);
	goto loop_reset;
    }

    if (dsh->dsh_qp_stats)
    {
	qen_ecost_end(dsh, &timerstat, qen_status);
    }

    return (status);	    

}
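
Stripped of hold files, outer-join NULL fabrication and partition-group handling, the heart of qen_fsmjoin is the comparison that decides which sorted stream to advance: outer key less than inner key means read the next outer, greater means read the next inner, equal means emit joins and remember where the matching inner run begins so it can be rescanned for duplicate outer keys. Here is a minimal, self-contained sketch of that skeleton over two sorted integer arrays; the names are invented for illustration, and the saved index plays the role of the QEN_RESCAN_MARKED hold-file position above.

#include <stdio.h>

static void merge_join(const int *outer, int n_outer,
		       const int *inner, int n_inner)
{
    int o = 0;				/* next outer tuple */
    int i = 0;				/* next inner tuple */
    int mark;				/* start of the matching inner run */

    while (o < n_outer && i < n_inner)
    {
	if (outer[o] < inner[i])
	    o++;			/* OUTER_LT_INNER: get next outer */
	else if (outer[o] > inner[i])
	    i++;			/* OUTER_GT_INNER: get next inner */
	else
	{
	    /* OUTER_EQ_INNER: emit every inner match for this outer,
	    ** then reposition the inner stream (back to mark) for the
	    ** next outer carrying the same key. */
	    mark = i;
	    while (o < n_outer && outer[o] == inner[mark])
	    {
		for (i = mark; i < n_inner && inner[i] == outer[o]; i++)
		    printf("join: outer=%d inner=%d\n", outer[o], inner[i]);
		o++;
	    }
	}
    }
}

int main(void)
{
    static const int outer[] = { 1, 3, 3, 5, 9 };
    static const int inner[] = { 3, 3, 5, 7 };

    merge_join(outer, 5, inner, 4);
    return 0;
}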
Example #3
/*{
** Name: opn_timeout	- check for timeout condition
**
** Description:
**      Check the various indications that timeout has occurred. 
**
** Inputs:
**      subquery                        ptr to subquery state
**
** Outputs:
**
**	Returns:
**	    bool - TRUE if a timeout condition has been detected 
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**      14-may-90 (seputis)
**          - b21582, do not allow large CPU gaps before checking
**	    for a timeout condition
**	26-nov-90 (seputis)
**	    - added support for new server startup flags
**      31-jan-91 (seputis)
**          - use more time to optimize queries in DB procedures
**      20-mar-91 (seputis)
**          - b 36600 added calls to CSswitch to allow context 
**	switching if no query plan is found or notimeout is used 
**      3-dec-92 (ed)
**          OP255 was doing a notimeout on all subqueries if the second
**	    parameter was set
**	31-jan-97 (inkdo01)
**	    Simplify CSswitch interface so that all calls to timeout 
**	    result in a CSswitch call. This fixes a CPU lockout problem 
**	    in (at least) Solaris, because the switch depended largely
**	   on faulty quantum testing logic in CSstatistics.
**	15-sep-98 (sarjo01)
**	    Redo of function, adding support for ops_timeoutabort
**	14-jul-99 (inkdo01)
**	    Add code to do quick exit for where-less idiot queries.
**	13-Feb-2007 (kschendel)
**	    Replace CSswitch with better CScancelCheck.
[@history_template@]...
*/
bool
opn_timeout(
	OPS_SUBQUERY       *subquery)
{
    OPS_CB          *scb;               /* ptr to session control block for
                                        ** currently active user */
    OPS_STATE       *global;            /* ptr to global state variable */

    if (subquery->ops_mask & OPS_IDIOT_NOWHERE && subquery->ops_bestco)
	return(TRUE);			    /* if idiot query (join, but no where)
					    ** and we have a valid plan, quit now */

    global = subquery->ops_global;	    /* get ptr to global state variable
                                            */
    scb = global->ops_cb;		    /* get ptr to active session control
                                            ** block */
    if (scb->ops_alter.ops_timeout)	    /* TRUE if optimizer should timeout
                                            */
    {
        i4		    joinop_timeout;
        i4		    joinop_abort;
        joinop_timeout = scb->ops_alter.ops_sestime;
        joinop_abort   = scb->ops_alter.ops_timeoutabort;

        if (joinop_abort == 0)
	{
	    if (subquery->ops_bestco)
	    {
	        TIMERSTAT	    timer_block;
	        STATUS		    cs_status;
	        i4		    nowcpu;    /* cpu time used by process */
	        OPO_COST	    miladjust; /* adjustment to cost to obtain
                                               ** millisec equivalent */
	        i4		    first;     /* plan to timeout */
	        i4		    second;    /* subquery plan to timeout */
	        bool		    planid_timeout; /* check if tracing flag is
					            ** set and override timeout
					            ** check with new one */

	        planid_timeout = FALSE;
	        if ((global->ops_qheader->pst_numparm > 0)
			||
		    global->ops_procedure->pst_isdbp)
	            miladjust = scb->ops_alter.ops_tout *
                                         scb->ops_alter.ops_repeat; 
					    /* use more time to optimize
					    ** a repeat query */
	        else
		    miladjust = scb->ops_alter.ops_tout;

	        if (scb->ops_check)
	        {
		    if (opt_svtrace( scb, OPT_F125_TIMEFACTOR, &first, &second)
		        &&
		        first>0)    /* see if there is a miladjust factor
				    ** from the user */
		        miladjust = (miladjust*first)/100.0;
				    /* this will increase/decrease
				    ** the amount of time required to timeout */
		    planid_timeout = 
		        opt_svtrace( scb, OPT_F127_TIMEOUT, &first, &second);
				    /* this will time out based on subquery
				    ** plan id and number of plans being
				    ** evaluated */
	        }


		if (cs_status = CSstatistics(&timer_block, (i4)0))
		{   /* get cpu time used by session from SCF to calculate
                    ** difference from when optimization began FIXME - this
                    ** is an expensive routine to call, perhaps some other
                    ** way of timing out could be used such as number of CO
                    ** nodes processed */
		    opx_verror(E_DB_ERROR, E_OP0490_CSSTATISTICS, 
			(OPX_FACILITY)cs_status);
		}
		nowcpu = timer_block.stat_cpu - global->ops_estate.opn_startime;

	        if (
		    (((nowcpu > (miladjust * subquery->ops_cost)) ||
					    /* if the amount of time spent so 
					    ** far on optimization is roughly 
                                            ** about how much time the best 
                                            ** solution found so far
					    ** will take to execute, then stop
                                            */
		      ( (joinop_timeout > 0) && (nowcpu > joinop_timeout)))
		    && 
		    (   !planid_timeout	    /* if the plan timeout tracing is
					    ** set then this should override
					    ** the normal timeout mechanism */
			||
			(   (second != 0)
			    &&
			    (second != subquery->ops_tsubquery) /* apply timeout
					    ** to all query plans except the
					    ** one specified */
			)
		    ))
		    ||
		    ( planid_timeout
		        &&
		      (first <= subquery->ops_tcurrent)
		        &&
		      (	(second == 0)	    /* check if all plans should be
					    ** timed out */
			||
			(second == subquery->ops_tsubquery) /* check if a
					    ** particular subquery is being
					    ** search for */
		      )
		    ))
	        {
		    opx_verror(E_DB_WARN, E_OP0006_TIMEOUT, (OPX_FACILITY)0);
		    return(TRUE);
	        }
	    }
	}
	else
	{
            TIMERSTAT           timer_block;
            STATUS              cs_status;
            i4             nowcpu;    /* cpu time used by process */
            OPO_COST            miladjust; /* adjustment to cost to obtain
                                           ** millisec equivalent */
            i4             first;     /* plan to timeout */
            i4             second;    /* subquery plan to timeout */
            bool                planid_timeout; /* check if tracing flag is
                                                ** set and override timeout
                                                ** check with new one */
	    if (joinop_timeout == 0 || joinop_timeout >= joinop_abort)
		joinop_timeout = joinop_abort; 
            planid_timeout = FALSE;
            if ((global->ops_qheader->pst_numparm > 0)
                    ||
                global->ops_procedure->pst_isdbp)
                miladjust = scb->ops_alter.ops_tout *
                                     scb->ops_alter.ops_repeat;
                                        /* use more time to optimize
                                        ** a repeat query */
            else
                miladjust = scb->ops_alter.ops_tout;

            if (scb->ops_check)
            {
                if (opt_svtrace( scb, OPT_F125_TIMEFACTOR, &first, &second)
                    &&
                    first>0)    /* see if there is a miladjust factor
                                ** from the user */
                    miladjust = (miladjust*first)/100.0;
                                /* this will increase/decrease
                                ** the amount of time required to timeout */
                planid_timeout =
                    opt_svtrace( scb, OPT_F127_TIMEOUT, &first, &second);
                                /* this will time out based on subquery
                                ** plan id and number of plans being
                                ** evaluated */
            }
            if (cs_status = CSstatistics(&timer_block, (i4)0))
            {
                opx_verror(E_DB_ERROR, E_OP0490_CSSTATISTICS,
                    (OPX_FACILITY)cs_status);
            }
            nowcpu = timer_block.stat_cpu - global->ops_estate.opn_startime;

            if ( subquery->ops_bestco
		 &&
                ( (
                 ((nowcpu > (miladjust * subquery->ops_cost)) ||
                  ((joinop_timeout > 0) && (nowcpu > joinop_timeout)))
                     &&
                 ( !planid_timeout ||
                    ( (second != 0)
                        &&
                        (second != subquery->ops_tsubquery)
                    )
                  )
                 )
                ||
                ( planid_timeout
                    &&
                  (first <= subquery->ops_tcurrent)
                    &&
                  ( (second == 0)
                    ||
                    (second == subquery->ops_tsubquery)
                  ))

                ) )
            {
                opx_verror(E_DB_WARN, E_OP0006_TIMEOUT, (OPX_FACILITY)0);
                return(TRUE);
            }
	    else if (nowcpu > joinop_abort)
	    {
		if (subquery->ops_bestco)
		{
    		    opx_verror(E_DB_WARN, E_OP0006_TIMEOUT, (OPX_FACILITY)0);
		    return(TRUE);
		}
		else
		{
		    scb->ops_interrupt = TRUE;
	            opx_rerror(subquery->ops_global->ops_caller_cb,
                               E_OP0018_TIMEOUTABORT);
		}
	    }
	} /* else */
    }
    /* Check for cancels if OS-threaded, time-slice if internal threaded */
    CScancelCheck(global->ops_cb->ops_sid);
    return(FALSE);
}
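
Once the bookkeeping is stripped away, the decision above is simple arithmetic: scale the best plan's cost estimate into a CPU-time budget (ops_tout, multiplied by ops_repeat for repeat queries and database procedures) and stop enumerating plans once the CPU already spent exceeds that budget or an absolute session limit. The following is a minimal sketch of just that predicate; the trace-point overrides (op125/op127) and the timeoutabort path are omitted, and the structure and field names are invented for illustration rather than being the OPF control blocks.

#include <stdio.h>

typedef struct {
    double tout;		/* milliseconds-per-cost-unit factor */
    double repeat;		/* extra factor for repeat queries/DB procs */
    int    is_repeat_query;
    long   session_limit_ms;	/* 0 means no absolute limit */
} opt_limits;

static int should_timeout(const opt_limits *lim, double best_cost,
			  long cpu_spent_ms, int have_best_plan)
{
    double miladjust;

    if (!have_best_plan)
	return 0;		/* nothing to fall back on yet */

    miladjust = lim->tout;
    if (lim->is_repeat_query)
	miladjust *= lim->repeat;	/* spend more time on repeat queries */

    if (cpu_spent_ms > miladjust * best_cost)
	return 1;		/* further optimization now costs more than
				** simply running the best plan found so far */
    if (lim->session_limit_ms > 0 && cpu_spent_ms > lim->session_limit_ms)
	return 1;		/* absolute per-session cap */
    return 0;
}

int main(void)
{
    opt_limits lim = { 2.0, 10.0, 0, 5000 };	/* budget = 2 ms per cost unit */

    printf("%d\n", should_timeout(&lim, 100.0, 150, 1));	/* 0: keep going */
    printf("%d\n", should_timeout(&lim, 100.0, 300, 1));	/* 1: time out */
    return 0;
}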
Example #4
File: dmrget.c Project: akdh/Ingres
/*{
** Name:  dmr_get - Get a record.
**
**  INTERNAL DMF call format:      status = dmr_get(&dmr_cb);
**
**  EXTERNAL call format:          status = dmf_call(DMR_GET,&dmr_cb);
**
** Description:
**      This function gets a record from a table.  It can either get a record
**      by tuple identifier, re-get the last record returned, or get the
**      next record that passes the qualification specified by the dmr_position
**      operation.  If no more records meet the qualification, the caller is
**      returned an error indicating no next record.
**
**	As a special case for aggregate optimization, this function may
**	be called to obtain the count of records in the table, which prior
**	to this change required getting all records with multiple calls
**	to this function.
** 
**      Note:  When a B1 secure server is running, this interface only 
**      returns records that pass the B1 MAC assurances.  Namely, only
**      records with a security label that is dominated by the security 
**      label of the requester are returned.
**
** Inputs:
**      dmr_cb
**          .type                           Must be set to DMR_RECORD_CB.
**          .length                         Must be at least
**                                          sizeof(DMR_RECORD_CB) bytes.
**          .dmr_flags_mask                 Must be DMR_NEXT, DMR_PREV,        
**					    DMR_CURRENT_POS or DMR_BY_TID.
**          .dmr_access_id                  Record access identifier returned
**                                          from DMT_OPEN that identifies a
**                                          table.
**          .dmr_tid                        If dmr_flags_mask = DMR_BY_TID, this
**                                          field is used as a tuple identifier.
**          .dmr_data.data_address          Pointer to area to return the
**                                          requested record.
**          .dmr_data.data_in_size          Size of area for record.
**
** Outputs:
**      dmr_cb
**          .dmr_tid                        The tuple identifier of the record
**                                          being returned.
**          .dmr_data.data_address          The record is stored here.
**          .dmr_data.data_out_size         The size of the returned record.
**          .error.err_code                 One of the following error numbers.
**                                          E_DM0000_OK                
**                                          E_DM000B_BAD_CB_LENGTH
**                                          E_DM000C_BAD_CB_TYPE
**                                          E_DM000F_BAD_DB_ACCESS_MODE
**                                          E_DM0011_BAD_DB_NAME
**                                          E_DM001A_BAD_FLAG
**                                          E_DM001D_BAD_LOCATION_NAME
**                                          E_DM002B_BAD_RECORD_ID
**                                          E_DM003C_BAD_TID
**                                          E_DM0042_DEADLOCK
**                                          E_DM0044_DELETED_TID
**					    E_DM0047_UPDATED_TUPLE
**                                          E_DM004A_INTERNAL_ERROR
**                                          E_DM004B_LOCK_QUOTA_EXCEEDED
**					    E_DM004D_LOCK_TIMER_EXPIRED
**                                          E_DM0055_NONEXT
**                                          E_DM0065_USER_INTR
**                                          E_DM0064_USER_ABORT
**                                          E_DM0073_RECORD_ACCESS_CONFLICT
**                                          E_DM0074_NOT_POSITIONED
**					    E_DM008A_ERROR_GETTING_RECORD
**                                          E_DM0100_DB_INCONSISTENT
**                                          E_DM010C_TRAN_ABORTED
**                                          E_DM0112_RESOURCE_QUOTA_EXCEEDED
**					    E_DM006E_NON_BTREE_GETPREV
**
**      Returns:
**         E_DB_OK                          Function completed normally.
**         E_DB_WARN                        Function completed normally with a 
**                                          termination status which is in
**                                          dmr_cb.err_code.
**         E_DB_ERROR                       Function completed abnormally 
**                                          with a 
**                                          termination status which is in
**                                          dmr_cb.err_code.
**         E_DB_FATAL                       Function completed with a fatal
**                                          error which must be handled
**                                          immediately.  The fatal status is in
**                                          dmr_cb.err_code.
** History:
**      01-sep-85 (jennifer)
**	    Created new for jupiter.
**	17-dec-1985 (derek)
**	    Completed code.
**	28-jul-1989 (mikem)
**	    Added logging of database, table, and owner when we get an internal
**	    error.
**	15-aug-1989 (rogerk)
**	    Added support for Non-SQL Gateway.  If getting record from a 
**	    gateway secondary index, then make sure that record buffer is
**	    large enough to hold a record from the base table, as that is
**	    what the gateway returns.  This is somewhat hokey, and would be
**	    better if the secondary index could actually describe the records
**	    being returned back, but...
**	15-oct-90 (linda)
**	    Integrate bug fix for gateway secondary index support:  perform
**	    sanity check on table width *after* switching tcb's.
**	11-feb-1991 (linda)
**	    Check for dmr_char->char_id == DMR_TIDJOIN, if it does then set
**	    rcb->rcb_tidjoin = RCB_TIDJOIN.  Part of gateway secondary index
**	    support.
**      22-apr-92 (schang)
**          GW merge
**	    30-apr-1991 (rickh)
**	        Removed the 11-feb-1991 tidjoin logic.  Let stand the change in
**	        where table width calculation occurs.
**	    22-jul-1991 (rickh)
**	        And now remove the table width calculation change that went in
**	        with the 11-feb-1991 tidjoin logic.
**	28-may-1993 (robf)
**	   Secure 2.0: Reworked old ORANGE code.
**	23-aug-1993 (bryanp)
**	    Fix a few cut-and-paste errors in some error message parameters.
**	31-jan-1994 (bryanp) B58487
**	    Handle failures in both dm2r_get and dm2r_unfix_pages.
**      30-aug-1994 (cohmi01)
**          Add DMR_PREV support for FASTPATH rel. Error if not btree.
**	22-may-1995 (cohmi01)
**	    Add support for count-only, for aggregate optimisation.
**	21-aug-1995 (cohmi01)
**	    count-only aggregate code moved to dml!dmragg.c
**      22-nov-96 (stial01,dilma04)
**          Row Locking Project:
**          Unfix all pages before leaving DMF if row locking.
**      14-may-97 (dilma04)
**          Cursor Stability Project:
**          - upgrade isolation level from CS to RR, if DMR_SORT flag is set;
**          - if isolation level is CS or RR, set RCB_CSRR_LOCK locking mode 
**          for the time of dm2r_get() call.
**      21-may-97 (stial01)
**          Row locking: No more LK_PH_PAGE locks, so page(s) can stay fixed.
**	19-dec-97 (inkdo01)
**	    Changes for sorts which do NOT materialize results in temp tables.
**	    get is now directed straight to DMF sorter.
**      08-oct-98 (stial01)
**          Deallocate load context after all records read from DMF sorter.
**      09-dec-98 (stial01)
**          DMR_PKEY_PROJECTION: check for relspec BTREE not table_type which
**          may be GATEWAY
**      11-aug-2003 (chash01)
**          For RMS Gateway index table, add specific test to make sure
**          dmr->dmr_data.data_in_size + sizeof(DM_TID)
**          is no more than the value in table_width
**	12-Feb-2004 (schka24)
**	    Defend against someone doing a get on a partitioned master.
**	03-Nov-2004 (jenjo02)
**	    Relocated CSswitch from dmf_call to here; don't waste the
**	    call if Factotum thread.
**	11-Nov-2005 (jenjo02)
**	    Replaced dmx_show() with the more robust 
**	    dmxCheckForInterrupt() to standardize external
**	    interrupt handling.
**	11-Sep-2006 (jonj)
**	    Don't dmxCheckForInterrupt if extended table as txn is
**	    likely in a recursive call and not at an atomic
**	    point in execution as required for LOGFULL_COMMIT.
**	13-Feb-2007 (kschendel)
**	    Replace CSswitch with cancel check.
**	11-Apr-2008 (kschendel)
**	    Roll arithmetic exceptions into caller specified ADFCB.
**	    This is part of getting DMF qual context out of QEF.
**	15-Jan-2010 (jonj)
**	    SIR 121619 MVCC: Don't change isolation level if crow_locking()
**	03-Mar-2010 (jonj)
**	    SIR 121619 MVCC, blob support:
**	    Set rcb_dmr_opcode here; dmpe bypasses dmf_call,
**	    which used to set it.
*/
DB_STATUS
dmr_get(
DMR_CB  *dmr_cb)
{
    DMR_CB		*dmr = dmr_cb;
    DMP_RCB		*rcb;
    DMP_TCB		*tcb;
    DML_XCB		*xcb;
    i4		flag;
    i4		table_width;
    DB_STATUS		status, local_status;
    i4		error, local_error;
    DB_ERROR		local_dberr;

    CLRDBERR(&dmr->error);

    for (status = E_DB_ERROR;;)
    {
	rcb = (DMP_RCB *)dmr->dmr_access_id;
	if (dm0m_check((DM_OBJECT *)rcb, (i4)RCB_CB) == E_DB_OK)
	{
	    if (rcb == NULL)
	    {
		uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
		    NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1,
		    sizeof("record")-1, "record");
		SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		break;
	    }

	    rcb->rcb_dmr_opcode = DMR_GET;

	    tcb = rcb->rcb_tcb_ptr;
	    if (tcb == NULL)
	    {
		uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
		    NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1,
		    sizeof("table")-1, "table");
		SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		break;
	    }
	    if (tcb->tcb_rel.relstat & TCB_IS_PARTITIONED)
	    {
		uleFormat(&dmr->error, E_DM0022_BAD_MASTER_OP, NULL, ULE_LOG, NULL,
			NULL, 0, NULL, &error, 3,
			0, "dmrget",
			sizeof(DB_OWN_NAME),tcb->tcb_rel.relowner.db_own_name,
			sizeof(DB_TAB_NAME),tcb->tcb_rel.relid.db_tab_name);
		break;
	    }
	    xcb = rcb->rcb_xcb_ptr;
	    if (xcb == NULL)
	    {
		uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
		    NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1,
		    sizeof("transaction")-1, "transaction");
		SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		break;
	    }
	    if (dmr->dmr_flags_mask & DMR_NEXT)
	    {
		/* SORTGET rows come from the DMF sorter, not dm2r_get. */
		if ((dmr->dmr_flags_mask & DMR_SORTGET) == 0)
		    flag = DM2R_GETNEXT;
	    }
	    else if (dmr->dmr_flags_mask & DMR_BY_TID)
		flag = DM2R_BYTID;
	    else if (dmr->dmr_flags_mask & DMR_CURRENT_POS)
	    {
	/* 	flag = DM2R_BYPOSITION; */
		flag = DM2R_BYTID;
		dmr->dmr_tid = rcb->rcb_currenttid.tid_i4;
	    }
            else if (dmr->dmr_flags_mask & DMR_PREV)
	    {
		flag = DM2R_GETPREV;
		if (dmr->dmr_flags_mask & DMR_RAAT)
		    flag |= DM2R_RAAT;
		if (tcb->tcb_table_type != TCB_BTREE)
		{
		    SETDBERR(&dmr->error, 0, E_DM006E_NON_BTREE_GETPREV);
	    	    break;
		}
	    }
	    else
	    {
		SETDBERR(&dmr->error, 0, E_DM001A_BAD_FLAG);
		break;
	    }

	    /* Check for btree primary key projection */
	    if (dmr->dmr_flags_mask & DMR_PKEY_PROJECTION)
	    {
		if ((tcb->tcb_rel.relspec == TCB_BTREE) &&
		    ((tcb->tcb_rel.relstat & TCB_INDEX) == 0) &&
		    (flag == DM2R_GETNEXT || flag == DM2R_GETPREV))
		    flag |= DM2R_PKEY_PROJ;
		else
		{
		    SETDBERR(&dmr->error, 0, E_DM001A_BAD_FLAG);
		    break;
		}
	    }

	    if (xcb->xcb_scb_ptr == NULL )
	    {
		uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
		    NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1,
		    sizeof("session")-1, "session");
		SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		break;
	    }
	    
	    /* Check for external interrupts */
	    if ( xcb->xcb_scb_ptr->scb_ui_state && !tcb->tcb_extended )
		dmxCheckForInterrupt(xcb, &error);

	    if (xcb->xcb_state == 0)
	    {
		/*
		** Make sure the caller's buffer is large enough to hold
		** a record from this table.
		**
		** Note that for Gateway secondary indexes, retrieves will
		** actually return records from the base table!  So if this
		** is a get on a gateway secondary index, make sure the
		** buffer is large enough for a base table record.
		**
		** Aug-4-2003 (chash01)  For an RMS gateway index, relwid is
		** sizeof(DM_TID) plus the size of the index columns, but
		** data_in_size is the size of the base table columns.  This
		** leads to a serious problem (looping) in DMFCALL() if the
		** base table columns are smaller than relwid, so the RMS
		** gateway case is tested separately below.
		*/
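		/* Worked example (hypothetical sizes, added for illustration):
		** an RMS gateway index over a 4-byte key has relwid =
		** sizeof(DM_TID) + 4, while the caller's data_in_size covers
		** only the base table columns, so the RMS branch of the test
		** below compares data_in_size + sizeof(DM_TID) against relwid
		** rather than data_in_size alone.
		*/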
		table_width = tcb->tcb_rel.relwid;

		if ( (dmr->dmr_data.data_address) &&
                       (
                         (tcb->tcb_rel.relgwid != GW_RMS &&
                          dmr->dmr_data.data_in_size >= table_width) ||
                           ( tcb->tcb_rel.relgwid == GW_RMS &&
		             dmr->dmr_data.data_in_size + sizeof(DM_TID)
                                >= table_width)
                       )
                   )
		{
		    dmr->dmr_data.data_out_size = table_width;

                    /* Upgrade isolation level to repeatable read if a 
                    ** cursor stability transaction is getting tuples 
                    ** to sort them for further update of this table,
		    ** but not if MVCC crow_locking().
                    */
		    if ( !crow_locking(rcb) )
		    {
			if (dmr->dmr_flags_mask & DMR_SORT &&
			    rcb->rcb_access_mode == RCB_A_WRITE &&
			    rcb->rcb_iso_level == RCB_CURSOR_STABILITY)
			{
			    rcb->rcb_iso_level = RCB_REPEATABLE_READ;
			}
     
			if (rcb->rcb_iso_level == RCB_CURSOR_STABILITY ||
			    rcb->rcb_iso_level == RCB_REPEATABLE_READ)
			{
			     rcb->rcb_state |= RCB_CSRR_LOCK;
			}
		    }

		    /*
		    ** Quick troll for external interrupts.
		    */
		    CScancelCheck(rcb->rcb_sid);

		    /* If this is a SORTGET, call DMF sorter to retrieve
		    ** next row. 
		    */

		    if (dmr->dmr_flags_mask & DMR_SORTGET)
		    {
			DM2R_L_CONTEXT	*lct;
			lct = (DM2R_L_CONTEXT *)tcb->tcb_lct_ptr;
			status = dmse_get_record(lct->lct_srt,
					&lct->lct_record, &dmr->error);

			if (status == E_DB_OK)
			{
			    MEcopy((PTR)lct->lct_record, 
				dmr->dmr_data.data_in_size, 
				(PTR)dmr->dmr_data.data_address);
			}
			else
			{
			    /* eof or error, call dmse to finish up. */
			    local_status = dmse_end(lct->lct_srt, &local_dberr);
			    if (local_status != E_DB_OK)
			    {
				dmr->error = local_dberr;
				status = local_status;
			    }

			    /* Deallocate load context */
			    if (lct->lct_mct.mct_buffer != (PTR)0)
			    {
				dm0m_deallocate((DM_OBJECT **)&lct->lct_mct.mct_buffer);
			    }

			    dm0m_deallocate((DM_OBJECT **)&tcb->tcb_lct_ptr);
			    tcb->tcb_lct_ptr = 0;
			    rcb->rcb_state &= ~RCB_LSTART;
			}

			return(status);
		    }

		    /* Regular ol' gets are handled here. */
 
		    xcb->xcb_scb_ptr->scb_qfun_errptr = &dmr->error;
		    status = dm2r_get(rcb, (DM_TID*)&dmr->dmr_tid, flag, 
			dmr->dmr_data.data_address, &dmr->error);

		    xcb->xcb_scb_ptr->scb_qfun_errptr = NULL;

		    /* If any arithmetic warnings were posted to the RCB ADFCB
		    ** during the get, roll them up into the caller's ADFCB.
		    */
		    if (dmr->dmr_q_fcn != NULL && dmr->dmr_qef_adf_cb != NULL
		      && rcb->rcb_adf_cb->adf_warncb.ad_anywarn_cnt > 0)
			dmr_adfwarn_rollup((ADF_CB *)dmr->dmr_qef_adf_cb, rcb->rcb_adf_cb);

		    if ((tcb->tcb_rel.relstat & TCB_CONCUR))
		    {
			local_status = dm2r_unfix_pages(rcb, &local_dberr);
			if (local_status != E_DB_OK)
			{
			    if (status == E_DB_OK)
			    {
				status = local_status;
				dmr->error = local_dberr;
			    }
			    else
			    {
				uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL,
					ULE_LOG, NULL, (char * )NULL, (i4)0,
					(i4 *)NULL, &local_error, 0);
			    }
			}
		    }
            
                    rcb->rcb_state &= ~RCB_CSRR_LOCK;

		    if (status == E_DB_OK)
			return (status);
		}
		else
		    SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
	    }
	    else
	    {
		if (xcb->xcb_state & XCB_USER_INTR)
		    SETDBERR(&dmr->error, 0, E_DM0065_USER_INTR);
		else if (xcb->xcb_state & XCB_FORCE_ABORT)
		    SETDBERR(&dmr->error, 0, E_DM010C_TRAN_ABORTED);
		else if (xcb->xcb_state & XCB_ABORT)
		    SETDBERR(&dmr->error, 0, E_DM0064_USER_ABORT);
		else if (xcb->xcb_state & XCB_WILLING_COMMIT)
		    SETDBERR(&dmr->error, 0, E_DM0132_ILLEGAL_STMT);
	    }
	}	    
	else
	    SETDBERR(&dmr->error, 0, E_DM002B_BAD_RECORD_ID);
	break;
    }	    

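    /* Map the failure onto the transaction state:  master-op and quota
    ** errors abort the statement; deadlock, internal error and inconsistent
    ** database abort the transaction; a forced abort or user interrupt sets
    ** the matching XCB flag; and anything above E_DM_INTERNAL is logged and
    ** converted to E_DM008A_ERROR_GETTING_RECORD.
    */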
    if (dmr->error.err_code == E_DM0022_BAD_MASTER_OP ||
	dmr->error.err_code == E_DM004B_LOCK_QUOTA_EXCEEDED ||
	dmr->error.err_code == E_DM0112_RESOURCE_QUOTA_EXCEED)
    {
	rcb->rcb_xcb_ptr->xcb_state |= XCB_STMTABORT;
    }
    else if (dmr->error.err_code == E_DM0042_DEADLOCK ||
	dmr->error.err_code == E_DM004A_INTERNAL_ERROR ||
	dmr->error.err_code == E_DM0100_DB_INCONSISTENT)
    {
	rcb->rcb_xcb_ptr->xcb_state |= XCB_TRANABORT;
    }
    else if (dmr->error.err_code == E_DM010C_TRAN_ABORTED)
    {
	rcb->rcb_xcb_ptr->xcb_state |= XCB_FORCE_ABORT;
    }
    else if (dmr->error.err_code == E_DM0065_USER_INTR)
    {
	rcb->rcb_xcb_ptr->xcb_state |= XCB_USER_INTR;
	rcb->rcb_state &= ~RCB_POSITIONED;
	*(rcb->rcb_uiptr) &= ~SCB_USER_INTR;
    }
    else if (dmr->error.err_code > E_DM_INTERNAL)
    {
	uleFormat( &dmr->error, 0, NULL, ULE_LOG , NULL,
	    (char * )NULL, 0L, (i4 *)NULL, 
	    &error, 0);
	uleFormat(NULL, E_DM904C_ERROR_GETTING_RECORD, NULL, ULE_LOG, NULL,
	    (char * )NULL, 0L, (i4 *)NULL, &error, 3,
	    sizeof(DB_DB_NAME), &rcb->rcb_tcb_ptr->tcb_dcb_ptr->dcb_name,
	    sizeof(DB_OWN_NAME), &rcb->rcb_tcb_ptr->tcb_rel.relowner,
	    sizeof(DB_TAB_NAME), &rcb->rcb_tcb_ptr->tcb_rel.relid
	    );
	SETDBERR(&dmr->error, 0, E_DM008A_ERROR_GETTING_RECORD);
    }

    return (status);
}
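
/*
** Illustrative caller sketch -- not part of the original DMF source.
** It shows how the return statuses documented above for dmr_get() are
** typically consumed by a scan loop: E_DB_OK means one row was copied into
** dmr_data.data_address, anything else leaves the termination status in the
** DMR_CB error block.  The helper name scan_table_sketch is hypothetical,
** and treating E_DM0055_NONEXT as the normal end-of-scan indication is an
** assumption, not something this file establishes.
*/
static DB_STATUS
scan_table_sketch(DMR_CB *dmr)
{
    DB_STATUS	status;

    /* Assume dmr_access_id and the dmr_data buffer were set up by the
    ** caller and the table has been positioned; scan forward one row at
    ** a time.
    */
    dmr->dmr_flags_mask = DMR_NEXT;

    for (;;)
    {
	status = dmr_get(dmr);
	if (status == E_DB_OK)
	{
	    /* A row is now in dmr->dmr_data.data_address; process it here. */
	    continue;
	}

	/* Assumed convention: E_DM0055_NONEXT signals a normal end of scan;
	** any other code is a genuine error left for the caller to report.
	*/
	if (dmr->error.err_code == E_DM0055_NONEXT)
	    status = E_DB_OK;
	break;
    }
    return (status);
}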