Beispiel #1
0
/*{
** Name:  dmr_unfix - Unfix pages.
**
**  INTERNAL DMF call format:      status = dmr_unfix(&dmr_cb);
**
**  EXTERNAL call format:          status = dmf_call(DMR_UNFIX,&dmr_cb);
**
** Description:
**	This function unfixes any still-fixed pages in the
**	caller's DMR_CB->DMP_RCB.
**
**	It is typically used, for example, when crossing a
**	partition boundary when doing a table scan.
**
**	It lets the caller leave the table open but without any
**	of its pages fixed in cache.
**
**	Unless the partition's table is either closed or
**	unfixed, the pages remain fixed in cache and when
**	"lots" of partitions are involved in a query, the  
**	cache can quickly become saturated.
**
** Inputs:
**      dmr_cb
**          .type                           Must be set to DMR_RECORD_CB.
**          .length                         Must be at least
**                                          sizeof(DMR_RECORD_CB) bytes.
**          .dmr_access_id                  Record access identifier returned
**                                          from DMT_OPEN that identifies a
**                                          table.
**
** Outputs:
**      dmr_cb
**          .error.err_code                 One of the following error numbers.
**                                          E_DM0000_OK                
**                                          E_DM002A_BAD_PARAMETER
**	    				    E_DM019A_ERROR_UNFIXING_PAGES
**
**      Returns:
**         E_DB_OK                          Function completed normally.
**         E_DB_WARN                        Function completed normally with a 
**                                          termination status which is in
**                                          dmr_cb.err_code.
**         E_DB_ERROR                       Function completed abnormally 
**                                          with a 
**                                          termination status which is in
**                                          dmr_cb.err_code.
**         E_DB_FATAL                       Function completed with a fatal
**                                          error which must be handled
**                                          immediately.  The fatal status is in
**                                          dmr_cb.err_code.
** History:
**	09-Apr-2004 (jenjo02)
**	    Invented for partitioning.
**      04-jan-2005 (stial01)
**          Set RCB_CSRR_LOCK so that locks can get released (b113231)
**	15-Jan-2010 (jonj)
**	    SIR 121619 MVCC: No CSRR_LOCK when crow_locking().
**	03-Mar-2010 (jonj)
**	    SIR 121619 MVCC, blob support:
**	    Set rcb_dmr_opcode here; dmpe bypasses dmf_call,
**	    which used to set it.
*/
DB_STATUS
dmr_unfix(
DMR_CB  *dmr_cb)
{
    DMR_CB	*dmr = dmr_cb;
    DMP_RCB	*rcb;
    DB_STATUS	status;
    i4		error;

    CLRDBERR(&dmr->error);

    /*
    ** Validate the caller-supplied RCB before touching it;
    ** dmr_access_id comes straight from the caller and may be
    ** NULL or point at something that is not an RCB.
    */
    if ( (rcb = (DMP_RCB *)dmr->dmr_access_id) &&
	 (dm0m_check((DM_OBJECT *)rcb, (i4)RCB_CB) == E_DB_OK) )
    {
	/* dmpe bypasses dmf_call, which would otherwise set this */
	rcb->rcb_dmr_opcode = DMR_UNFIX;

	/*
	** If at cursor-stability or repeatable-read isolation (and not
	** MVCC crow-locking), flag the RCB so locks can be released
	** along with the pages (b113231).
	*/
	if ( !crow_locking(rcb) &&
	       (rcb->rcb_iso_level == RCB_CURSOR_STABILITY ||
		rcb->rcb_iso_level == RCB_REPEATABLE_READ) )
	{
	    rcb->rcb_state |= RCB_CSRR_LOCK;
	}

	/* Unfix any pages, if any */
	status = dm2r_unfix_pages(rcb, &dmr->error);

	if ( status )
	{
	    /*
	    ** This is pretty bad; abort the transaction.
	    **
	    ** NB: this is done only on this path, where "rcb" is known
	    ** to be valid.  Previously this block also ran on the
	    ** bad-parameter path below, dereferencing a NULL or invalid
	    ** rcb pointer.
	    */
	    rcb->rcb_xcb_ptr->xcb_state |= XCB_TRANABORT;

	    if (dmr->error.err_code > E_DM_INTERNAL)
	    {
		uleFormat( &dmr->error, 0, NULL, ULE_LOG , NULL,
		    (char * )NULL, 0L, (i4 *)NULL, &error, 0);
		uleFormat(NULL, E_DM904F_ERROR_UNFIXING_PAGES, NULL, ULE_LOG, NULL,
		    (char * )NULL, 0L, (i4 *)NULL, &error, 3,
		    sizeof(DB_DB_NAME), &rcb->rcb_tcb_ptr->tcb_dcb_ptr->dcb_name,
		    sizeof(DB_OWN_NAME), &rcb->rcb_tcb_ptr->tcb_rel.relowner,
		    sizeof(DB_TAB_NAME), &rcb->rcb_tcb_ptr->tcb_rel.relid
		    );
		SETDBERR(&dmr->error, 0, E_DM019A_ERROR_UNFIXING_PAGES);
	    }
	}
    }
    else
    {
	/* Missing or invalid RCB: log it and return a parameter error */
	uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
	    (char *)0, (i4)0, (i4 *)0, &error, 1,
	    sizeof("record")-1, "record");
	SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
	status = E_DB_ERROR;
    }

    return (status);
}
Beispiel #2
0
/*{
** Name: dmve_undo - Apply undo recovery for given log record.
**
** Description:
**
** Inputs:
**	dmve_cb
**	    .dmve_log_rec	    The log record of the btree delete operation
**	    .dmve_action	    Should be DMVE_DO, DMVE_REDO, or DMVE_UNDO.
**	    .dmve_lg_addr	    The log address of the log record.
**	    .dmve_dcb_ptr	    Pointer to DCB.
**	    .dmve_tran_id	    The physical transaction id.
**	    .dmve_lk_id		    The transaction lock list id.
**	    .dmve_log_id	    The logging system database id.
**	    .dmve_db_lockmode	    The lockmode of the database. Should be 
**				    DM2T_X or DM2T_S.
**
** Outputs:
**	dmve_cb
**	    .dmve_error.err_code    The reason for error status.
**
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	14-dec-1992 (rogerk)
**	    Written for 6.5 recovery.
**	23-aug-93 (jnash)
**	    Add E_DM967A_DMVE_UNDO error msg to provide LSN after failure.
**	18-oct-93 (jnash)
**	    Fix typo in E_DM967A_DMVE_UNDO error msg.
**	18-oct-93 (jrb)
**	    Added DM0LEXTALTER handing for MLSorts.
**	14-Nov-2008 (jonj)
**	    SIR 120874: Properly initialize dmve_error, use new form uleFormat.
**	17-Nov-2008 (jonj)
**	    SIR 120874: Use dmve_logfull, not dmve_error.err_code, to preserve
**	    and return E_DM9070_LOG_LOGFULL for Bug 56702.
**      23-Feb-2009 (hanal04) Bug 121652
**          Added DM0LUFMAP case to deal with FMAP updates needed for an
**          extend operation where a new FMAP did not reside in the last
**          FMAP or new FMAP itself.
*/
DB_STATUS
dmve_undo(
DMVE_CB		*dmve)
{
    DM0L_HEADER		*record = (DM0L_HEADER *) dmve->dmve_log_rec;
    DB_STATUS		status = E_DB_OK;
    i4		error;

    /* Reset per-call error state before dispatching the log record. */
    CLRDBERR(&dmve->dmve_error);
    DMVE_CLEAR_TABINFO_MACRO(dmve);
    dmve->dmve_logfull = 0;

    /*
    ** Skip records not needed for UNDO processing:
    **
    **     - Before Images logged for Online Backup.
    **     - CLR records.
    */

    if (record->flags & (DM0L_CLR | DM0L_DUMP))
	return (E_DB_OK);

    /*
    ** Process record by type: dispatch to the type-specific recovery
    ** routine.  Each routine extracts its own parameters from dmve.
    */
    switch (record->type)
    {
    case DM0LBI:
	status = dmve_bipage(dmve);
	break;

    case DM0LPUT:
	status = dmve_put(dmve);
	break;
    
    case DM0LDEL:
	status = dmve_del(dmve);
	break;
    
    case DM0LREP:
	status = dmve_rep(dmve);
	break;

    case DM0LASSOC:
	status = dmve_assoc(dmve);
	break;

    case DM0LALLOC:
	status = dmve_alloc(dmve);
	break;

    case DM0LDEALLOC:
	status = dmve_dealloc(dmve);
	break;

    case DM0LEXTEND:
	status = dmve_extend(dmve);
	break;

    case DM0LFRENAME:
	status = dmve_frename(dmve);
	break;

    case DM0LMODIFY:
	status = dmve_modify(dmve);
	break;

    case DM0LFCREATE:
	status = dmve_fcreate(dmve);
	break;

    case DM0LSM0CLOSEPURGE:
	status = dmve_sm0_closepurge(dmve);
	break;

    case DM0LSM1RENAME:
	status = dmve_sm1_rename(dmve);
	break;

    case DM0LSM2CONFIG:
	status = dmve_sm2_config(dmve);
	break;

    case DM0LLOCATION:
	status = dmve_location(dmve);
	break;

    case DM0LEXTALTER:
	status = dmve_ext_alter(dmve);
	break;

    case DM0LCREATE:
	status = dmve_create(dmve);
	break;

    case DM0LINDEX:
	status = dmve_index(dmve);
	break;

    case DM0LRELOCATE:
	status = dmve_relocate(dmve);
	break;

    case DM0LDESTROY:
	status = dmve_destroy(dmve);
	break;

    case DM0LALTER:
	status = dmve_alter(dmve);
	break;

    case DM0LLOAD:
	status = dmve_load(dmve);
	break;

    case DM0LCRDB:
	status = dmve_crdb(dmve);
	break;

    case DM0LDMU:
	status = dmve_dmu(dmve);
	break;

    case DM0LOVFL:
	status = dmve_ovfl(dmve);
	break;

    case DM0LNOFULL:
	status = dmve_nofull(dmve);
	break;

    case DM0LFMAP:
	status = dmve_fmap(dmve);
	break;

    /* FMAP update needed for an extend where the new FMAP did not
    ** reside in the last FMAP or the new FMAP itself (b121652). */
    case DM0LUFMAP:
	status = dmve_ufmap(dmve);
	break;

    case DM0LBTPUT:
	status = dmve_btput(dmve);
	break;

    case DM0LBTDEL:
	status = dmve_btdel(dmve);
	break;

    case DM0LBTSPLIT:
	status = dmve_btsplit(dmve);
	break;

    case DM0LBTOVFL:
	status = dmve_btovfl(dmve);
	break;

    case DM0LBTFREE:
	status = dmve_btfree(dmve);
	break;

    case DM0LBTUPDOVFL:
	status = dmve_btupdovfl(dmve);
	break;

    case DM0LRTPUT:
	status = dmve_rtput(dmve);
	break;

    case DM0LRTDEL:
	status = dmve_rtdel(dmve);
	break;

    case DM0LRTREP:
	status = dmve_rtrep(dmve);
	break;

    case DM0LDISASSOC:
	status = dmve_disassoc(dmve);
	break;

    case DM0LDELLOCATION:
	status = dmve_del_location(dmve);
	break;

    default:
	/*
	** Log record type that has no associated UNDO action.
	*/
	break;
    }

    if (status != E_DB_OK)
    {
	/*
	** Log the recovery routine's own error, then E_DM967A with the
	** LSN of the failing record, and return the generic undo error.
	*/
	uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
	    (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	uleFormat(NULL, E_DM967A_DMVE_UNDO, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
	    (char *)NULL, (i4)0, (i4 *)NULL, &error, 2, 
	    0, record->lsn.lsn_high, 0,  record->lsn.lsn_low);
	SETDBERR(&dmve->dmve_error, 0, E_DM9639_DMVE_UNDO);
    }

    return(status);
}
Beispiel #3
0
/*{
** Name: dmve_rtrep - The recovery of a rtree replace key operation.
**
** Description:
**	This function is used to do, redo and undo a replace key
**	operation to a rtree index/leaf page. This function replaces
**	the old or new value of the key in the index depending on the recovery mode.
**
** Inputs:
**	dmve_cb
**	    .dmve_log_rec	    The log record of the system catalogs put 
**				    operation.
**	    .dmve_action	    Should be DMVE_DO, DMVE_REDO, or DMVE_UNDO.
**	    .dmve_dcb_ptr	    Pointer to DCB.
**	    .dmve_tran_id	    The physical transaction id.
**	    .dmve_lk_id		    The transaction lock list id.
**	    .dmve_log_id	    The logging system database id.
**	    .dmve_db_lockmode	    The lockmode of the database. Should be 
**				    DM2T_X or DM2T_S.
**
** Outputs:
**	dmve_cb
**	    .dmve_error.err_code    The reason for error status.
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**	    E_DB_FATAL			Operation was partially completed,
**					the transaction must be aborted.
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	19-sep-1996 (shero03)
**	    Created from dmvebtpt.c
**	22-nov-96 (stial01,dilma04)
**	    Row Locking Project
**	    Add lock_type argument to dm0p_lock_page and dm0p_unlock_page
**      14-may-97 (dilma04)
**          Cursor Stability Project:
**          Add lock_id argument to dm0p_lock_page and dm0p_unlock_page.
**      19-Jun-2002 (horda03) Bug 108074
**          If the table is locked exclusively, then indicate that SCONCUR
**          pages don't need to be flushed immediately.
**      23-feb-2004 (thaju02) Bug 111470 INGSRV2635
**          For rollforwarddb -b option, do not compare the LSN's on the
**          page to that of the log record.
**	01-Dec-2004 (jenjo02)
**	    Pass fix_action to dmve_rtadjust_mbrs for bug 108074.
**	01-Dec-2006 (kiria01) b117225
**	    Initialise the lockid parameters that will be passed to LKrequest
**	    to avoid random implicit lock conversions.
**	17-Apr-2008 (kibro01) b120276
**	    Initialise ADF_CB structure
*/
DB_STATUS
dmve_rtrep(
DMVE_CB		*dmve)
{
    DM0L_RTREP		*log_rec = (DM0L_RTREP *)dmve->dmve_log_rec;
    LG_LSN		*log_lsn = &log_rec->rtr_header.lsn; 
    DMP_DCB		*dcb = dmve->dmve_dcb_ptr;
    ADF_CB		adf_scb;
    DMP_TABLE_IO	*tbio = NULL;
    DMPP_PAGE		*page = NULL;
    DB_STATUS		status = E_DB_OK;
    DB_STATUS		tmp_status;
    DMPP_ACC_PLV	*loc_plv;
    DMPP_ACC_KLV	loc_klv;
    LK_LKID		lockid;
    LK_LKID		page_lockid;
    DM_TID		leaf_bid;
    DM_TID		*stack;
    i4			stack_level;
    i4		lock_action;
    i4		grant_mode;
    i4		recovery_action;
    i4		error;
    i4		loc_error;
    i4		page_type = log_rec->rtr_pg_type;
    bool		physical_page_lock = FALSE;
    bool		undo_check_done = FALSE;
    i4                  fix_action = 0;
    DB_ERROR		local_dberr;
    DMP_PINFO		*pinfo = NULL;

    CLRDBERR(&dmve->dmve_error);
    DMVE_CLEAR_TABINFO_MACRO(dmve);

    /* Zero lock ids to avoid random implicit lock conversions (b117225);
    ** zero the ADF_CB before partially initializing it below (b120276). */
    MEfill(sizeof(LK_LKID), 0, &lockid);
    MEfill(sizeof(LK_LKID), 0, &page_lockid);
    MEfill(sizeof(ADF_CB),0,(PTR)&adf_scb);

    /*
    ** Store BID of insert into a local variable.  The insert BID may
    ** be modified in undo recovery by the dmve_btunto_check routine.
    */
    leaf_bid = log_rec->rtr_bid;

    /* One-trip loop: "break" exits to the common cleanup code below. */
    for (;;)
    {
	/* Consistency Check:  check for illegal log records */
	if (log_rec->rtr_header.type != DM0LRTREP)
	{
	    SETDBERR(&dmve->dmve_error, 0, E_DM9601_DMVE_BAD_PARAMETER);
	    break;
	}

	/*
	** If recovery is being bypassed on the entire table then no recovery
	** needs to be done.
	*/
	if ((dmve->dmve_action == DMVE_UNDO) &&
	    (dmve_location_check(dmve, (i4)log_rec->rtr_cnf_loc_id) == FALSE))
	{
            uleFormat(NULL, E_DM9668_TABLE_NOT_RECOVERED, (CL_ERR_DESC *)NULL,
                ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL,
                &loc_error, 2, 0, log_rec->rtr_tbl_id.db_tab_base,
                0, log_rec->rtr_tbl_id.db_tab_index);
	    SETDBERR(&dmve->dmve_error, 0, E_DM9667_NOPARTIAL_RECOVERY);
	    break;
	}

	/*
	** Get handle to a tableio control block with which to read
	** and write pages needed during recovery.
	**
	** Warning return indicates that no tableio control block was
	** built because no recovery is needed on any of the locations 
	** described in this log record.  Note that check above prevents
	** this case from occurring during UNDO recovery.
	*/
	status = dmve_fix_tabio(dmve, &log_rec->rtr_tbl_id, &tbio);
	if (status == E_DB_WARN && 
		dmve->dmve_error.err_code == W_DM9660_DMVE_TABLE_OFFLINE)
	{
	    CLRDBERR(&dmve->dmve_error);
	    return (E_DB_OK);
	}

	if (status != E_DB_OK)
	    break;


	/*
	** Get page accessors for page recovery actions.
	*/
	dm1c_get_plv(page_type, &loc_plv);

	/* Minimal ADF session setup needed for key-level accessors. */
	adf_scb.adf_errcb.ad_ebuflen = 0;
	adf_scb.adf_errcb.ad_errmsgp = 0;
	adf_scb.adf_maxstring = DB_MAXSTRING;
	dm1c_getklv(&adf_scb,
		log_rec->rtr_obj_dt_id,
		&loc_klv);

	/*
	** Get required Table/Page locks before we can write the page.
	**
	** Some Ingres pages are locked only temporarily while they are
	** updated and then released immediately after the update.  The
	** instances of such page types that are recovered through this 
	** routine are system catalog pages.
	**
	** Except for these system catalog pages, we expect that any page
	** which requires recovery here has already been locked by the
	** original transaction and that the following lock requests will
	** not block.
	**
	** Note that if the database is locked exclusively, or if an X table
	** lock is granted then no page lock is required.
	*/
	if ((dcb->dcb_status & DCB_S_EXCLUSIVE) == 0)
	{
	    /*
	    ** Request IX lock in preparation of requesting an X page lock
	    ** below.  If the transaction already holds an exclusive table
	    ** lock, then an X lock will be granted.  In this case we can
	    ** bypass the page lock request.
	    */
	    status = dm2t_lock_table(dcb, &log_rec->rtr_tbl_id, DM2T_IX, 
		dmve->dmve_lk_id, (i4)0, &grant_mode, &lockid, &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;

	    if (grant_mode != DM2T_X)
	    {         
		/*
		** Page lock required.  If this is a system catalog page
		** or a non-leaf index page then we need to request a 
		** physical lock (and release it later).
		*/
		lock_action = LK_LOGICAL;
		if (log_rec->rtr_header.flags & DM0L_PHYS_LOCK)
		    lock_action = LK_PHYSICAL;
	
		status = dm0p_lock_page(dmve->dmve_lk_id, dcb, 
		    &log_rec->rtr_tbl_id, leaf_bid.tid_tid.tid_page, 
		    LK_PAGE, LK_X, lock_action, (i4)0, tbio->tbio_relid, 
		    tbio->tbio_relowner, &dmve->dmve_tran_id, &page_lockid,
		    (i4 *)NULL, (LK_VALUE *)NULL, &dmve->dmve_error);
		if (status != E_DB_OK)
		    break;

		/* Remember to release a physical lock in cleanup below. */
		if (lock_action == LK_PHYSICAL)
		    physical_page_lock = TRUE;
	    }
            else
               fix_action |= DM0P_TABLE_LOCKED_X;
	}

	/*
	** Fix the page we need to recover in cache for write.
	*/
	status = dmve_cachefix_page(dmve, log_lsn,
				    tbio, leaf_bid.tid_tid.tid_page,
				    fix_action, loc_plv,
				    &pinfo);
	if (status != E_DB_OK)
	    break;
	page = pinfo->page;

	/*
	** Dump debug trace info about pages if such tracing is configured.
	*/
	if (DMZ_ASY_MACRO(15))
	    dmve_trace_page_info(log_rec->rtr_pg_type, log_rec->rtr_page_size,
		page, loc_plv, "Page");

	/*
	** Compare the LSN on the page with that of the log record
	** to determine what recovery will be needed.
	**
	**   - During Forward processing, if the page's LSN is greater than
	**     the log record then no recovery is needed.
	**
	**   - During Backward processing, it is an error for a page's LSN
	**     to be less than the log record LSN.
	**
	**   - Currently, during rollforward processing it is unexpected
	**     to find that a recovery operation need not be applied because
	**     of the page's LSN.  This is because rollforward must always
	**     begin from a checkpoint that is previous to any journal record
	**     begin applied.  In the future this requirement may change and
	**     Rollforward will use the same expectations as Redo.
	*/
	switch (dmve->dmve_action)
	{
	case DMVE_DO:
	case DMVE_REDO:
	    /* For rollforwarddb -b, skip the LSN comparison (b111470). */
          if (LSN_GTE(
		DM1B_VPT_ADDR_PAGE_LOG_ADDR_MACRO(page_type, page), 
		log_lsn) && ((dmve->dmve_flags & DMVE_ROLLDB_BOPT) == 0))
	    {
		if (dmve->dmve_action == DMVE_DO)
		{
		    uleFormat(NULL, E_DM9665_PAGE_OUT_OF_DATE, (CL_ERR_DESC *)NULL,
			ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL,
			&loc_error, 8,
			sizeof(DB_TAB_NAME), tbio->tbio_relid->db_tab_name,
			sizeof(DB_OWN_NAME), tbio->tbio_relowner->db_own_name,
			0, DM1B_VPT_GET_PAGE_PAGE_MACRO(page_type, page),
			0, DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, page),
			0, DM1B_VPT_GET_LOG_ADDR_HIGH_MACRO(page_type, page),
			0, DM1B_VPT_GET_LOG_ADDR_LOW_MACRO(page_type, page),
			0, log_lsn->lsn_high, 0, log_lsn->lsn_low);
		}
		/* NULL page means "no recovery action needed" below. */
		page = NULL;
	    }
	    break;

	case DMVE_UNDO:
          if (LSN_LT(
		         DM1B_VPT_ADDR_PAGE_LOG_ADDR_MACRO(page_type, page), 
		         log_lsn))
	    {
		uleFormat(NULL, E_DM9665_PAGE_OUT_OF_DATE, (CL_ERR_DESC *)NULL,
		    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL,
		    &loc_error, 8,
		    sizeof(DB_TAB_NAME), tbio->tbio_relid->db_tab_name,
		    sizeof(DB_OWN_NAME), tbio->tbio_relowner->db_own_name,
		    0, DM1B_VPT_GET_PAGE_PAGE_MACRO(page_type, page),
		    0, DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, page),
		    0, DM1B_VPT_GET_LOG_ADDR_HIGH_MACRO(page_type, page),
		    0, DM1B_VPT_GET_LOG_ADDR_LOW_MACRO(page_type, page),
		    0, log_lsn->lsn_high, 0, log_lsn->lsn_low);
		SETDBERR(&dmve->dmve_error, 0, E_DM9666_PAGE_LSN_MISMATCH);
		status = E_DB_ERROR;
	    }
	    break;
	}
	if (status != E_DB_OK)
	    break;


	/*
	** Call appropriate recovery action depending on the recovery type
	** and record flags.  CLR actions are always executed as an UNDO
	** operation.
	*/
	if (page)
	{
	    recovery_action = dmve->dmve_action;
	    if (log_rec->rtr_header.flags & DM0L_CLR)
	        recovery_action = DMVE_UNDO;

            status = dmv_rertree_rep(dmve, tbio, pinfo, &leaf_bid, log_rec, 
					loc_plv, recovery_action);

	    if (status != E_DB_OK)
	        break;
	/*
	** Adjust the tree from the leaf to the root with the new MBR and
	** LHV.  As long as the parent's MBR or LHV changes, keep going up 
	** the tree until you reach the root.  Beware that a root split may
	** have occurred.
	*/
	    stack = (DM_TID*) (((char *)log_rec) + sizeof(*log_rec));
	    stack_level = log_rec->rtr_stack_size / sizeof(DM_TID);
	    status = dmve_rtadjust_mbrs(dmve, &adf_scb, tbio,
			log_rec->rtr_tbl_id,
			stack, stack_level,
			page_type,
			log_rec->rtr_page_size,
			log_rec->rtr_hilbertsize,
			log_rec->rtr_nkey_size,
			log_rec->rtr_cmp_type,
			pinfo, loc_plv, &loc_klv,
			fix_action);
	    if (status != E_DB_OK)
	        break;
	}  /* good page */

	break;
    }

    /* Common cleanup: log any error, then unwind page fix, page lock,
    ** and tableio handle in reverse order of acquisition. */
    if ((status != E_DB_OK) && (dmve->dmve_error.err_code))
    {
	uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
	    (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
    }

    /*
    ** Unfix the updated page.  No need to force it to disk - it
    ** will be tossed out through normal cache protocols if Fast
    ** Commit or at the end of the abort if non Fast Commit.
    */
    if (pinfo && pinfo->page)
    {
	tmp_status = dm0p_uncache_fix(tbio, DM0P_UNFIX, dmve->dmve_lk_id, 
		dmve->dmve_log_id, &dmve->dmve_tran_id, pinfo, &local_dberr);
	if (tmp_status != E_DB_OK)
	{
	    uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &loc_error, 0);
	    if (tmp_status > status)
		status = tmp_status;
	}
    }

    /*
    ** If a short term physical lock was acquired on the page
    ** then release it.
    */
    if (physical_page_lock)
    {
	tmp_status = dm0p_unlock_page(dmve->dmve_lk_id, dcb, 
	    &log_rec->rtr_tbl_id, leaf_bid.tid_tid.tid_page, LK_PAGE,
	    tbio->tbio_relid, &dmve->dmve_tran_id, &page_lockid, 
	    (LK_VALUE *)NULL, &local_dberr);
	if (tmp_status != E_DB_OK)
	{
	    uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &loc_error, 0);
	    if (tmp_status > status)
		status = tmp_status;
	}
    }

    /* 
    ** Release our handle to the table control block.
    */
    if (tbio)
    {
	tmp_status = dmve_unfix_tabio(dmve, &tbio, 0);
	if (tmp_status > status)
	    status = tmp_status;
    }

    /* NOTE(review): error code is the btree PUT code, presumably
    ** inherited when this was cloned from dmvebtpt.c -- confirm whether
    ** an rtree-specific code was intended. */
    if (status != E_DB_OK)
	SETDBERR(&dmve->dmve_error, 0, E_DM964E_DMVE_BTREE_PUT);

    return(status);
}
Beispiel #4
0
/*{
** Name: scd_add_datatypes	- Add new datatypes, functions, and operators
**
** Description:
**      This routine adds new datatypes, functions, and/or operators to the DBMS
**	server.  It does this by
**	    1) Getting the ADD_DEFINITION block from IIudadt_register
**	    2) Ask ADF for the amount of space required to add these new options
**	    3) Allocate that space, and
**	    4) Add the new options to the system.
**
**	Failures are logged but not returned.  The system currently ignores
**	failures in finding new objects.
**
** Inputs:
**	scb				Session control block to use.  0 if not
**					available.
**	adf_svcb			Addr of old server control block.  NOTE:
**					IT IS ASSUMED THAT THIS MEMORY IS
**					SCU_MALLOC'D MEMORY.  IT WILL BE
**					DEALLOCATED IF THE ROUTINE IS
**					SUCCESSFUL AND DEALLOCATION IS
**					INDICATED.
**	adf_size			Size of old server control block.
**	deallocate_flag			Indicates whether the old memory should
**					be deallocated.  The deallocate flag, if
**					set, contains the facility id
**					(DB_??F_ID) of the facility to which the
**					memory is and was charged (which
**					allocated it...)  See note below.
**	error				Addr of error block to fill in.
**	new_svcb			Addr**2 of new server block.  This
**					routine, if successful will place a
**					pointer to the new adf server control
**					block here.
**	new_size			Pointer to i4  to be filled with number
**					of pages used by new_svcb.  Can be zero
**					if caller is not interested.
**
** Outputs:
**	*error				Filled with error if appropos
**	*new_svcb			Filled with pointer to new adf server
**					control block.  This field is always
**					set.  If no change is necessary, then
**					this field is set to the value of
**					adf_svcb.
**	*new_size			Filled with page count if non-zero.
**
**	Returns:
**	    DB_STATUS
**	Exceptions:
**	    none
**
** Side Effects:
**	Sc_main_cb->sc_{major,minor}_adf are set to reflect the user adt major
**	and minor id's.  These are used later to check (sca_compatible())
**	whether the user defined ADT image is compatible with the remainder of
**	the running INGRES installation.
**
**	ADF is updated (assuming correctness) to run with new user defined
**	datatypes.
**
**	If so indicated (by deallocate_flag being non-zero), the old pages are
**	deallocated.
**
** History:
**      31-Jan-1989 (fred)
**          Created.
**      22-Mar-1989 (fred)
**          Added capability to scan IIDBDB for datatypes.
**      19-Apr-1989 (fred)
**	    Removed IIDBDB stuff.  All information now determined by function
**	    calls to the user.  Added support for returning major/minor id for
**	    user defined structure.
**	22-jun-89 (jrb)
**	    Added check to ensure site is licensed to add dts/ops/funcs.
**	09-oct-90 (ralph)
**	    6.3->6.5 merge:
**	    29-mar-90 (alan)
**		Don't require CI_USER_ADT authorization if RMS Gateway.
**	2-Jul-1993 (daveb)
**	    remove unused variable adf_cb.
**	2-Jul-1993 (daveb)
**	    prototyped.
**       6-Jul-1993 (fred)
**          Added more user callable functions to the callback
**          structure. 
**      26-Jul-1993 (fred)
**          Fixed up ADD_CALLBACKS initialization to be type correct.
**          Problem in mechanism for some prototypes due to differences
**          header inclusions...
**      28-aug-1993 (stevet)
**          Added support for INGRES class objects.  The adg_augment()
**          routine is now called twice, once for class objects and once
**          for UDTs.
**      10-nov-1993 (stevet)
**          deallocate memory the second time not working when loading class
**          library and udt together.
**	03-Nov-2010 (jonj) SIR 124685 Prototype Cleanup
**	    Prototyped, SCD_SCB *scb now void *scbp - called from DMF.
[@history_template@]...
*/
DB_STATUS
sca_add_datatypes(void *scbp,
		  PTR adf_svcb,
		  i4  adf_size,
		  i4  deallocate_flag,
		  DB_ERROR *error,
		  PTR *new_svcb,
		  i4  *new_size )
{
    SCD_SCB		*scb = (SCD_SCB*)scbp;
    ADD_DEFINITION	*add_block;
    PTR                 new_adf_block = (PTR) 0;
    PTR			old_adf_block = (PTR) 0;
    DB_STATUS		status;
    DB_STATUS		int_status;
    i4                  old_size;	/* pages of the block to free after a successful swap */
    i4                  cur_size;	/* pages of the block just allocated */
    i4		size;
    SCF_CB		scf_cb;
    i4			l_ustring;
    i4                  i;
    char		*ustring;
        /*
        **  This structure is made static to protect against
        **  users who keep the pointer to it.  They are told not
        **  to, but...
        */
    static  ADD_CALLBACKS   callbacks = { ADD_T_V2,
					      sca_trace,
					      adu_ome_error,
					      (ADD_LO_HANDLER *)
					     		adu_peripheral,
					      (ADD_INIT_FILTER *)
							adu_0lo_setup_workspace,
					      (ADD_LO_FILTER *) 
							adu_lo_filter};

    status = E_DB_OK;
    CLRDBERR(error);

    *new_svcb = adf_svcb;   /* Start out with no change */
    old_size = adf_size;
    old_adf_block = adf_svcb;
    Sc_main_cb->sc_major_adf = ADD_INGRES_ORIGINAL;
    /* Loop twice: first pass registers class objects (IIclsadt_register),
    ** second pass registers UDTs (IIudadt_register).  Each successful pass
    ** swaps in a newly augmented ADF block and frees the previous one. */
    for ( i = 0; i < 2; i++)
    {
	add_block = (ADD_DEFINITION *) 0;
	if( i)
	{
	    status = IIudadt_register( &add_block, &callbacks);
	}
	else
	{
	    status = IIclsadt_register( &add_block, &callbacks); 
	}

	if (status)
	{
	    SETDBERR(error, 0, E_SC026A_SCA_REGISTER_ERROR);
	    status = E_DB_ERROR;
	    break;
	}


	if (!add_block)
	{
	    /* No new datatypes for this register, try next */
    	    continue;
	}

	/* Warn (server classes only) when the add block says its use can
	** risk database consistency. */
	if ((add_block->add_risk_consistency == ADD_INCONSISTENT) &&
    	    (Sc_main_cb->sc_capabilities & (SC_INGRES_SERVER | SC_STAR_SERVER)))
	{
	    sc0ePut(NULL, E_SC0263_SCA_RISK_CONSISTENCY, NULL, 0);
	}

	/* NOTE(review): this path logs the bad major id and breaks with
	** status still E_DB_OK -- per the header, failures here are
	** logged but not returned. */
	if (add_block->add_major_id <= 0)
	{
	    sc0ePut(error, E_SC0264_SCA_ILLEGAL_MAJOR, NULL, 1,
			    sizeof(add_block->add_major_id),
			    &add_block->add_major_id);
	    break;
	}
	    
	/* Now, figure out the size necessary for the new ADF block */
	
	size = adg_sz_augment(add_block, error);
	if (error->err_code)
	    break;
	
	/* Allocate the new block; charge it to the same facility that
	** owns the old block when deallocation was requested. */
	scf_cb.scf_type = SCF_CB_TYPE;
	scf_cb.scf_length = sizeof(SCF_CB);
	scf_cb.scf_session = DB_NOSESSION;
	scf_cb.scf_facility = (deallocate_flag ? deallocate_flag : DB_SCF_ID);
	scf_cb.scf_scm.scm_functions = 0;
	scf_cb.scf_scm.scm_in_pages = ((size + SCU_MPAGESIZE - 1)
	    & ~(SCU_MPAGESIZE - 1)) / SCU_MPAGESIZE;

	status = scf_call(SCU_MALLOC, &scf_cb);

	if (status != OK)
	{
	    *error = scf_cb.scf_error;
	    break;
	}
	new_adf_block = (PTR) scf_cb.scf_scm.scm_addr;

	/* A length without a string (or vice versa) is inconsistent;
	** substitute a marker string rather than failing the add. */
	if (	(add_block->add_l_ustring && !add_block->add_ustring)
	   ||	(!add_block->add_l_ustring && add_block->add_ustring))
	{
	    /* This is an error -- note error in block */
	    l_ustring = STlength(ustring = "*** INVALID USER STRING or LENGTH ***");
	}
	else
	{
	    l_ustring = add_block->add_l_ustring;
	    ustring = add_block->add_ustring;
	}
    	if (Sc_main_cb->sc_capabilities & (SC_INGRES_SERVER | SC_STAR_SERVER))
	{
	    sc0ePut(NULL, E_SC0265_SCA_STATE, 0, 3,
				    sizeof(add_block->add_major_id),
				    &add_block->add_major_id,
				    sizeof(add_block->add_minor_id),
				    &add_block->add_minor_id,
				    l_ustring,
				    ustring);

	    sc0ePut(NULL, E_SC024D_SCA_ADDING, 0, 4,
		    sizeof(add_block->add_count),
			&add_block->add_count,
		    sizeof(add_block->add_dt_cnt),
			&add_block->add_dt_cnt,
		    sizeof(add_block->add_fo_cnt),
			&add_block->add_fo_cnt,
		    sizeof(add_block->add_fi_cnt),
			&add_block->add_fi_cnt);
	} /* if ... SC_INGRES_SERVER | SC_STAR_SERVER ... */

	/* Populate the new block from the old one plus the additions. */
	status = adg_augment(add_block, size, new_adf_block, error);
	if (status && (error->err_code))
	    break;

	/* Record major/minor ids for later sca_compatible() checks. */
	Sc_main_cb->sc_major_adf = add_block->add_major_id;
	Sc_main_cb->sc_minor_adf = add_block->add_minor_id;
	Sc_main_cb->sc_risk_inconsistency = add_block->add_risk_consistency;
	
	*new_svcb = new_adf_block;
	cur_size = scf_cb.scf_scm.scm_in_pages;
	if (new_size)
	    *new_size = scf_cb.scf_scm.scm_in_pages;

	if (old_adf_block && deallocate_flag)
	{
	    /* Remainder of control block setup above */
	
	    scf_cb.scf_scm.scm_in_pages = old_size;
	    scf_cb.scf_scm.scm_addr = old_adf_block;
	    int_status = scf_call(SCU_MFREE, &scf_cb);
	    if (int_status)
	    {
		sc0ePut(&scf_cb.scf_error, 0, NULL, 0);
		sc0ePut(&scf_cb.scf_error, E_SC024C_SCA_DEALLOCATE, NULL, 0);
	    }
	}
	/* The new block becomes the "old" block for the next pass. */
	old_size = cur_size;
	old_adf_block = new_adf_block;
	new_adf_block = 0;
    }

    if (status && error->err_code)
    {
	sc0ePut(error, 0, NULL, 0);
	sc0ePut(error, E_SC024E_SCA_NOT_ADDED, NULL, 0);
    }

    /* A non-zero new_adf_block here means we broke out after allocating
    ** but before the swap; scf_cb still describes that allocation
    ** (presumably only the adg_augment failure path -- TODO confirm). */
    if (new_adf_block)
    {
	int_status = scf_call(SCU_MFREE, &scf_cb);
	if (int_status)
	{
	    sc0ePut(&scf_cb.scf_error, 0, NULL, 0);
	    sc0ePut(NULL, E_SC024C_SCA_DEALLOCATE, NULL, 0);
	}
    }

    /* The user add block may request server shutdown on failure; otherwise
    ** failures are logged but the function reports success (see header). */
    if (status && add_block && (add_block->add_trace & ADD_T_FAIL_MASK))
    {
	sc0ePut(NULL, E_SC0266_SCA_USER_SHUTDOWN, NULL, 0);
	status = E_DB_FATAL;
    }
    else
    {
	status = E_DB_OK;
    }
    return(status);
}
Beispiel #5
0
/*{
** Name: dm1ibend - Finishes building a ISAM file for modify.
**
** Description:
**    This routine finishes building an ISAM file for modify.
**
** Inputs:
**      mct                             Pointer to modify context.
**
**
** Outputs:
**      err_code                        A pointer to an area to return error
**                                      codes if return status not E_DB_OK.
**      
**	Returns:
**
**	    E_DB_OK
**          E_DB_ERROR
**          E_DB_FATAL
**
**	Exceptions:
**	    none
**
** Side Effects:
**          none.
**
** History:
**	07-feb-86 (jennifer)
**          Created for Jupiter.
**	29-may-89 (rogerk)
**	    Check status from dm1c_uncomp_rec calls.
**	08-jul-1992 (mikem)
**          Reorganized logic from for (;;) loops to a do/while to get around
**          sun4/acc compiler error "end-of-loop code not reached".
**	08-Jun-1992 (kwatts)
**	    6.5 MPF project. Replaced dm1c_add, dmpp_get_offset_macros,
**	    and dm1c_get calls with accessor calls. 
**    	29-August-1992 (rmuth)
**          Add call to dm1x_build_SMS to add the FHDR/FMAP(s).
**	14-oct-1992 (jnash)
**	    Reduced logging project.
**	      - Remove unused param's on dmpp_get calls.  
**	      - Move compression out of dm1c layer, call tlv's here.
**		dmpp_get now always returns a pointer to the buffer, never 
**		filling it in.
**	30-October-1992 (rmuth)
**	    Change for new DI file extend paradigm, just call dm1xfinish
**	    as it will build FHDR/FMAP and guarantee_space.
**	06-oct-1995 (nick)
**	    Whilst we ensured any pages unused in the last main data page
**	    allocation were formatted, they were formatted as DMPP_DATA.  This
**	    causes verifydb to barf as a) they are marked as free in the FMAP
**	    and b) they are orphaned.  Format as DMPP_FREE instead.
**	06-mar-1996 (stial01 for bryanp)
**	    Pass mct_page_size as the page_size argument to dmpp_format.
**	06-mar-1996 (stial01 for bryanp)
**	    Don't allocate tuple buffers on the stack.
**	06-may-1996 (thaju02 & nanpr01)
**	    New Page Format Support:
**		Change page header references to use macros.
**	    Fix typo error.
** 	20-may-1996 (ramra01)
**	    Added argument DMPP_TUPLE_INFO to get load accessor routines
**      03-june-1996 (stial01)
**          Use DMPP_INIT_TUPLE_INFO_MACRO to init DMPP_TUPLE_INFO
**      18-jul-1996 (ramra01 for bryanp)
**          When re-reading rows from data pages to build index, pass 0 as the
**          row version number to dmpp_uncompress. The rows have just been
**          loaded, so they are guaranteed to be in version 0 format.
**          Pass 0 as the current table version to dmpp_load.
**	13-sep-1996 (canor01)
**	    Add NULL buffer to dmpp_uncompress call.
**      22-nov-96 (stial01,dilma04)
**          Row Locking Project:
**          load accessor: changed DMPP_TUPLE_INFO param to table_version
**      22-nov-1996 (nanpr01)
**          init the version with the current table version to dmpp_load.
**      10-mar-1997 (stial01)
**          dm1ibend: Use mct_crecord to compress a record
**	18-aug-1997 (nanpr01)
**	    b80775 - Higher level index corruption. The problem is that
**	    dm1xreadpage is returning a pointer to mct_ovfl pages if the
**	    page is in private cache. Then it is finding the low key on 
**	    the page and try to load it. If load  returns an out of space
**	    condition, it gets new page in the mct_ovfl, which might cause
**	    the key to be lost. So we just allocate a key area in this routine
**	    and copy the key into this buffer for load. mainsol code fix
**	    for this also will work, but we thought this will be a more
**	    general solution.
**	19-apr-2004 (gupsh01)
**	    Pass in adf control block in the call to dmpp_uncompress.
**	13-Feb-2008 (kschendel) SIR 122739
**	    Revise uncompress call, remove record-type and tid.
**	15-Jan-2010 (jonj)
**	    SIR 121619 MVCC: parameter changes to page accessors
*/
dm1ibend(
DM2U_M_CONTEXT	*mct,
DB_ERROR        *dberr)
{
    DB_STATUS	    dbstatus;
    STATUS	    status;
    DM_TID	    tid;
    DB_ATTS	    *att;
    DMPP_PAGE	    *ip, *dp, *op;
    i4	    level, start, stop, newstop;
    i4	    pageno, mainpageno, next_page;
    i4	    offset, free;
    i4	    record_size;
    i4	    uncompressed_length;
    i4	    start_free, end_free;
    i4		    k;
    char	    *key;
    char	    *rec;
    char	    *record;
    char	    keytuple[DB_MAXTUP/2];
    DMPP_SEG_HDR seg_hdr;
    DMPP_SEG_HDR *seg_hdr_ptr;
    i4		    local_err;
    DB_ERROR	    local_dberr;

    CLRDBERR(dberr);

    /* A segment header is only needed when rows are stored in multiple
    ** physical segments (mct_seg_rows); otherwise pass NULL to dmpp_get.
    */
    if (mct->mct_seg_rows)
	seg_hdr_ptr = &seg_hdr;
    else
	seg_hdr_ptr = NULL;
 
    /* mct_crecord is the scratch buffer used to hold an uncompressed row. */
    record = mct->mct_crecord;

    do  /* loop is executed once, just there to break on errors */
    {
	/*
	** Mark current data page as end of main pages and update overflow
	** page for this chain.
	*/

	DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, mct->mct_curdata, 0);
	/* start_free/end_free bound the window of reserved-but-unused pages
	** that may need to be freed at the end of the build.
	*/
	start_free = DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, 
	    mct->mct_curdata) + 1;
	end_free = mct->mct_startmain + mct->mct_kperpage - 1;
	status = E_DB_OK;
	/* Walk the overflow chain of the last main page, terminating each
	** overflow page's page_main pointer.
	*/
	for (next_page = 
	        DMPP_VPT_GET_PAGE_OVFL_MACRO(mct->mct_page_type, mct->mct_curdata);
	    next_page;
	    next_page = 
		DMPP_VPT_GET_PAGE_OVFL_MACRO(mct->mct_page_type, mct->mct_curovfl))
	{
	    status = dm1xreadpage(mct, DM1X_FORUPDATE, next_page,
		&mct->mct_curovfl, dberr);
	    if (status != E_DB_OK)
		break;
	    DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, mct->mct_curovfl, 0);
	}
	if (status != E_DB_OK)
	    break;

	mct->mct_main = 
	    DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, mct->mct_curdata) + 1;

	if ((mct->mct_curovfl == 0 ||
	    DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, mct->mct_curovfl) < 
	        DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, mct->mct_curdata))
		&&
	    (mct->mct_curseg == 0 ||
	    DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, mct->mct_curseg) <
		DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, mct->mct_curdata)))
	{
	    /*
	    **	Truncate reserve window if no overflow pages have been
	    **	allocated.
	    */

	    end_free = start_free;
	    status = dm1xreserve(mct, 
		DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, 
		mct->mct_curdata) + 1, dberr);
	    if (status != E_DB_OK)
		break;
	}
	else
	{
	    /*
	    **  Allocate, initialize and write empty pages to end of segment so
	    **  that the page will be formatted if read, because the high
	    **  water mark  will be placed after the index.
	    */

	    do
	    {
		status = dm1xnewpage(mct, 
		    DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type,
		    mct->mct_curdata) + 1,
		    &mct->mct_curdata, dberr);
		/*
		** dm1xnewpage() returns with the page zero-filled
		** and formatted with status DMPP_MODIFY | DMPP_DATA.
		** All we need to do is reset the status to
		** DMPP_FREE | DMPP_MODIFY.
		*/
		if (status == E_DB_OK)
		    DMPP_VPT_INIT_PAGE_STAT_MACRO(mct->mct_page_type,
					     mct->mct_curdata,
					     DMPP_FREE | DMPP_MODIFY);
	    } while (status == E_DB_OK);
	    /* E_DB_INFO is the expected "end of reserved window" indication;
	    ** anything else is a real error.
	    */
	    if (status != E_DB_INFO)
		break;
	}

	/*
	** Build the ISAM index on top of the data pages.  Do this by reading
	** through the just-written data pages and placing one key entry for 
	** each data page into an index page (if index page gets full, go to 
	** a new one).
	**
	** After reading through all data pages, start over reading through the
	** just-written level of index pages, placing one key entry for each 
	** page into an index page on a new level.  Continue this until all  
	** of one level's keys fit into one index page.  That one index page 
	** is the ISAM root page.
	*/

	/*
	**  Read the primary pages by following the main page pointers.
	**	From the first key on every primary page construct an index key.
	**	This builds the first level of the index.
	*/

	status = dm1xnewpage(mct, 0, &mct->mct_curovfl, dberr);
	if (status != E_DB_OK)
	    break;

	/* Reformat as an INDEX page */

	ip = mct->mct_curovfl;
	/*
	** dm1xnewpage() returns with the page zero-filled
	** and formatted with status DMPP_MODIFY | DMPP_DATA.
	** All we need to do is reset the status to
	** DMPP_DIRECT instead of calling dmpp_format again
	** to reformat the entire page.
	*/
	DMPP_VPT_INIT_PAGE_STAT_MACRO(mct->mct_page_type,
				  ip,
				  DMPP_DIRECT);

	DMPP_VPT_SET_PAGE_OVFL_MACRO(mct->mct_page_type, ip, 0);
	DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, ip, 0);
	level = 0;
	start = DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, mct->mct_curovfl);

	pageno = 0;
	do
	{
	    status = dm1xreadpage(mct, DM1X_FORREAD, pageno, &mct->mct_curdata,
		dberr);
	    if (status != E_DB_OK)
		break;

	    /*
	    ** If the relation is empty, then page 0 will
	    ** contain no records. Form a blank tuple
	    ** and use it to create a one tuple directory
	    */

	    dp = mct->mct_curdata;
	    key = keytuple;
	    if (DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, dp) == 0 && 
		DMPP_VPT_GET_PAGE_NEXT_LINE_MACRO(mct->mct_page_type, dp) == 0)
	    {
		MEfill(mct->mct_klen, 0, key);
	    }
	    else
	    {
		DM_TID	    tid;

		/* Fetch the first row (line 0) on this data page. */
		tid.tid_tid.tid_line = 0;
		record_size = mct->mct_relwid;
		status = (*mct->mct_acc_plv->dmpp_get)(mct->mct_page_type,
			mct->mct_page_size, dp, &tid, &record_size,
			&rec, NULL, NULL, NULL, seg_hdr_ptr);

		/* Row spans segments: reassemble it into a contiguous buffer
		** before key extraction / uncompression.
		*/
		if (seg_hdr_ptr && seg_hdr_ptr->seg_next)
		{
		    if ( mct->mct_data_rac.compression_type != TCB_C_NONE )
		    {
			status = dm1x_get_segs(mct, seg_hdr_ptr, rec,
			    mct->mct_segbuf, dberr);
			rec = mct->mct_segbuf;
		    }
		    else
		    {
			status = dm1x_get_segs(mct, seg_hdr_ptr, rec, record, dberr);
			rec = record;
		    }
		}

		if (status == E_DB_OK)
		{
		    if (mct->mct_data_rac.compression_type != TCB_C_NONE)
		    {
		    /* Note that the following accessor comes from the MCT
		    ** and not the TCB.
		    ** and not the TCB.  In the case of a compression change
		    ** (eg data to hidata), the TCB rac is the old way,
		    ** while the MCT rac is the new way.
		    */
			status = (*mct->mct_data_rac.dmpp_uncompress)(
			    &mct->mct_data_rac,
			    rec, record, record_size, &uncompressed_length,
			    NULL, (i4)0 , (mct->mct_oldrcb)->rcb_adf_cb);
			if ( (status != E_DB_OK) || 
			   (record_size != uncompressed_length) )
			{
			    if (status != E_DB_OK)
			    {
				uncompressed_length = 0;
				record_size = 0;
			    }
			    /* Log the row-uncompress failure with enough
			    ** context (db/table/owner/tid) for diagnosis.
			    */
			    uleFormat(NULL, E_DM942C_DMPP_ROW_UNCOMP, 
			     (CL_ERR_DESC *)NULL, ULE_LOG, NULL, (char *)NULL, 
			     (i4)0, (i4 *)NULL, &local_err, 7, 
			     sizeof(DB_DB_NAME), &mct->mct_oldrcb->
			     rcb_tcb_ptr->tcb_dcb_ptr->dcb_name,
			     sizeof(DB_TAB_NAME), 
			     &mct->mct_oldrcb->rcb_tcb_ptr->tcb_rel.relid,
			     sizeof(DB_OWN_NAME), 
			     &mct->mct_oldrcb->rcb_tcb_ptr->tcb_rel.relowner,
			     0, tid.tid_tid.tid_page, 0, tid.tid_i4,
			     0, record_size, 0, uncompressed_length);

			    status = E_DB_ERROR;
			}
			rec = record;
		    }
		}

		if (status != E_DB_OK)
		{
		    SETDBERR(dberr, 0, E_DM9252_DM1I_BEND);
		    break;
		}

		/* Make key from record. */

		for (k = 0; k < mct->mct_keys; k++) 
		{
		    att = mct->mct_key_atts[k]; 
		    MEcopy(rec + att->offset, att->length,
			&key[att->key_offset]); 
		}
	    }

	    /*
	    ** Now put key on the current index page.  If there is no room,
	    ** go to next index page.
	    */
	    while (dm1xbput(mct, ip, key, mct->mct_klen,
		    DM1C_LOAD_ISAMINDEX, 0, 0,
		    mct->mct_ver_number, dberr) == E_DB_WARN)
	    {
		status = dm1xnewpage(mct, 0, &mct->mct_curovfl, dberr);
		if (status != E_DB_OK)
		    break;

		/* Reformat as an INDEX page */
		
		ip = mct->mct_curovfl;
		/*
		** dm1xnewpage() returns with the page zero-filled
		** and formatted with status DMPP_MODIFY | DMPP_DATA.
		** All we need to do is reset the status to
		** DMPP_DIRECT instead of calling dmpp_format again
		** to reformat the entire page.
		*/
		DMPP_VPT_INIT_PAGE_STAT_MACRO(mct->mct_page_type,
					  ip,
					  DMPP_DIRECT);

		DMPP_VPT_SET_PAGE_OVFL_MACRO(mct->mct_page_type, ip, pageno);
		DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, ip, level);
	    }
	}   while (status == E_DB_OK &&
		(pageno = DMPP_VPT_GET_PAGE_MAIN_MACRO(mct->mct_page_type, 
		mct->mct_curdata)));
	if (status != E_DB_OK)
	    break;

	/*
	**  Build the second and successive levels of the index.
	**  [start, stop] spans the index pages written for the previous
	**  level; the loop ends when a level fits on a single page (the
	**  ISAM root).
	*/

	for (level++, stop = DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, 
		mct->mct_curovfl);
	     start < stop;
	     stop = DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, 
		mct->mct_curovfl), level++)
	{
	    /* 
	    ** Format the first page of the overflow page buffer as an ISAM
	    ** directory page.  Level is indicated in page_main, and the page
	    ** containing the first key is indicated by page_ovfl.
	    */

	    status = dm1xnewpage(mct, 0, &mct->mct_curovfl, dberr);
	    if (status != E_DB_OK)
		break;
	    pageno = start;
	    start = DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, 
		mct->mct_curovfl);

	    /* Reformat as an INDEX page */

	    ip = mct->mct_curovfl;
	    /*
	    ** dm1xnewpage() returns with the page zero-filled
	    ** and formatted with status DMPP_MODIFY | DMPP_DATA.
	    ** All we need to do is reset the status to
	    ** DMPP_DIRECT instead of calling dmpp_format again
	    ** to reformat the entire page.
	    */
	    DMPP_VPT_INIT_PAGE_STAT_MACRO(mct->mct_page_type,
				      ip,
				      DMPP_DIRECT);

	    DMPP_VPT_SET_PAGE_OVFL_MACRO(mct->mct_page_type, ip, pageno);
	    DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, ip, level);

	    for (; pageno <= stop; pageno++)
	    {
		status = dm1xreadpage(mct, DM1X_FORREAD, pageno,
		    &mct->mct_curdata, dberr);
		if (status != E_DB_OK)
		    break;

		/*
		** Now put key on the current index page.  If there is no room,
		** go to next index page.
		** Even if mct_seg_rows
		** There should not be segments on index pages
		*/

		tid.tid_tid.tid_page = 
		    DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type,
		    mct->mct_curdata);
		tid.tid_tid.tid_line = 0;
		(*mct->mct_acc_plv->dmpp_get)(mct->mct_page_type,
		    mct->mct_page_size, mct->mct_curdata, &tid,
		    &record_size, (char **)&key, 
		    NULL, NULL, NULL, NULL);

		/* copy the key from the page to the local buffer
		** (b80775: the page buffer can be recycled by dm1xbput's
		** out-of-space handling, so the key must not point into it)
		*/
		MEcopy(key, mct->mct_klen, keytuple);
		while (dm1xbput(mct, ip, keytuple, mct->mct_klen,
			    DM1C_LOAD_ISAMINDEX, 0, 0,
			    (u_i2)0, dberr) == E_DB_WARN)
		{
		    status = dm1xnewpage(mct, 0, &mct->mct_curovfl, dberr);
		    if (status != E_DB_OK)
			break;

		    /* Reformat as an INDEX page */
		    ip = mct->mct_curovfl;
		    /*
		    ** dm1xnewpage() returns with the page zero-filled
		    ** and formatted with status DMPP_MODIFY | DMPP_DATA.
		    ** All we need to do is reset the status to
		    ** DMPP_DIRECT instead of calling dmpp_format again
		    ** to reformat the entire page.
		    */
		    DMPP_VPT_INIT_PAGE_STAT_MACRO(mct->mct_page_type,
					      ip,
					      DMPP_DIRECT);

		    DMPP_VPT_SET_PAGE_OVFL_MACRO(mct->mct_page_type, ip, pageno);
		    DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, ip, level);
		}
		if (status != E_DB_OK)
		    break;
	    }
	    if (status != E_DB_OK)
		break;
	}
	if (status != E_DB_OK)
	    break;

	/* The last index page written is the ISAM root. */
	mct->mct_prim = DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, 
	    mct->mct_curovfl);

        /* 
	** Write build buffer to disk, deallocate build memory.
	** build the fhdr/fmap and guarantee the space
	*/
        status = dm1xfinish(mct, DM1X_COMPLETE, dberr);
        if (status != E_DB_OK)
            break;


	/* Free any pages of the reserve window that were never used. */
	if (start_free < end_free)
	{
	    status = dm1xfree(mct, start_free,
		mct->mct_startmain + mct->mct_kperpage - 1, dberr);
	    if (status != E_DB_OK)
		break;
	}

    } while (FALSE);

    if (status == E_DB_OK)
	return(status);

    /*	Deallocate build memory and return error. */

    (VOID) dm1xfinish(mct, DM1X_CLEANUP, &local_dberr);
    log_error(E_DM9252_DM1I_BEND, dberr);
    return (status);
}
Beispiel #6
0
DB_STATUS
dmc_add_db(
DMC_CB    *dmc_cb)
{
    DMC_CB		*dmc = dmc_cb;
    DM_SVCB		*svcb = dmf_svcb;
    DMC_LOC_ENTRY	*location;
    i4		loc_count;
    i4		flags;
    i4		mode;
    i4		dm2mode;
    i4		error,local_error;
    DMM_LOC_LIST	*loc_ptr[4];
    DB_STATUS		status;
    DMP_DCB		*dcb;

    CLRDBERR(&dmc->error);

    for (status = E_DB_ERROR;;)
    {
	/*	Verify control block parameters. */

	if (dmc->dmc_op_type != DMC_DATABASE_OP)
	{
	    SETDBERR(&dmc->error, 0, E_DM000C_BAD_CB_TYPE);
	    break;
	}

	if (dmc->dmc_id != svcb->svcb_id)
	{
	    SETDBERR(&dmc->error, 0, E_DM002D_BAD_SERVER_ID);
	    break;
	}

	/* Map the DMC flag masks to the DM2D equivalents. */
	flags = 0;
	if (dmc->dmc_flags_mask &
	    ~(DMC_NOJOURNAL | DMC_JOURNAL | DMC_FSTCOMMIT | DMC_SOLECACHE |
              DMC_CNF_LOCKED | DMC_CVCFG | DMC_ADMIN_DB | DMC_DMCM))
	{
	    SETDBERR(&dmc->error, 0, E_DM001A_BAD_FLAG);
	    break;
	}
	if (dmc->dmc_flags_mask & DMC_NOJOURNAL)
	    flags |= DM2D_NOJOURNAL;
	if (dmc->dmc_flags_mask & DMC_JOURNAL)
	    flags |= DM2D_JOURNAL;
	if (dmc->dmc_flags_mask & DMC_FSTCOMMIT)
	    flags |= DM2D_FASTCOMMIT;
	if (dmc->dmc_flags_mask & DMC_SOLECACHE)
	    flags |= DM2D_BMSINGLE;
	if (dmc->dmc_flags_mask & DMC_CVCFG)
	    flags |= DM2D_CVCFG;
        
        /* b97083 - Is the CNF file already locked by caller? */
        if (dmc->dmc_flags_mask & DMC_CNF_LOCKED)
            flags |= DM2D_CNF_LOCKED;

	if (dmc->dmc_s_type & DMC_S_SINGLE)
	    flags |= DM2D_SINGLE;
	if (dmc->dmc_s_type & DMC_S_MULTIPLE)
	    flags |= DM2D_MULTIPLE;

        /*
        ** (ICL phil.p)
        */
        if (dmc->dmc_flags_mask & DMC_DMCM)
            flags |= DM2D_DMCM;

        if (dmc->dmc_flags_mask2 & DMC2_READONLYDB)
            flags |= DM2D_READONLYDB;

	/*
	** Validate the access mode BEFORE its first use.  (Previously
	** "mode" was read by the DM2D_NODBMO test below while still
	** uninitialized and only assigned afterwards - an uninitialized
	** read, i.e. undefined behavior.)
	*/
	mode = dmc->dmc_db_access_mode;
	if (mode != DMC_A_READ &&
	    mode != DMC_A_WRITE &&
	    mode != DMC_A_CREATE &&
	    mode != DMC_A_DESTROY)
	{
	    SETDBERR(&dmc->error, 0, E_DM000F_BAD_DB_ACCESS_MODE);
	    break;
	}
	dm2mode = (mode == DMC_A_READ) ? DM2D_A_READ : DM2D_A_WRITE;

	/* No MO objects if so requested */
	if ( dmc->dmc_flags_mask2 & DMC2_NODBMO ||
	     mode == DMC_A_CREATE || mode == DMC_A_DESTROY )
	{
	    flags |= DM2D_NODBMO;
	}

	/*
	** It is an error to specify Fast Commit without specifying to
	** use a single buffer manager.
        ** (ICL phil.p) UNLESS running DMCM, which effectively means
        ** running FastCommit in a Multi-Cache environment.
	*/

        if (!(flags & DM2D_DMCM))
        {
            if ((flags & (DM2D_FASTCOMMIT | DM2D_BMSINGLE)) == DM2D_FASTCOMMIT)
	    {
		SETDBERR(&dmc->error, 0, E_DM0115_FCMULTIPLE);
                break;
	    }
        }

	/*  Check that at least one location was passed in. */

	location = (DMC_LOC_ENTRY *)dmc->dmc_db_location.data_address;
	loc_count = dmc->dmc_db_location.data_in_size / sizeof(DMC_LOC_ENTRY);
	if (loc_count == 0)
	{
	    SETDBERR(&dmc->error, 0, E_DM002A_BAD_PARAMETER);
	    break;
	}
	    
	/*  Check if database should be created. */

	if (mode == DMC_A_CREATE)
	{
	    SCF_CB              scf_cb;
	    SCF_SCI             sci_list[2]; 
	    DB_NAME		collation;
	    DB_NAME		ucollation;
	    char		*p;
	    char		ucolname[] = "udefault";
	    i4		dbservice;

	    /* Ask SCF for the session's dbservice settings. */
	    scf_cb.scf_length = sizeof(SCF_CB);
	    scf_cb.scf_type = SCF_CB_TYPE;
	    scf_cb.scf_facility = DB_DMF_ID;
	    scf_cb.scf_session = (SCF_SESSION)dmc->dmc_session_id;
	    scf_cb.scf_ptr_union.scf_sci = (SCI_LIST *)sci_list;
	    sci_list[0].sci_length = sizeof(dbservice);
	    sci_list[0].sci_code = SCI_DBSERVICE;
	    sci_list[0].sci_aresult = (char *)&dbservice;
	    sci_list[0].sci_rlength = 0;
	    scf_cb.scf_len_union.scf_ilength = 1;

	    status = scf_call(SCU_INFORMATION, &scf_cb);
	    if (status != E_DB_OK)
	    {
		uleFormat(&scf_cb.scf_error, 0, (CL_ERR_DESC *)NULL, 
		    ULE_LOG, NULL, (char *)0, (i4)0, (i4 *)0, 
		    &error, 0);
		SETDBERR(&dmc->error, 0, E_DM002F_BAD_SESSION_ID);
		break;
	    }

	    /*	Collation for iidbdb can only be the default. */

	    MEfill(sizeof(collation.db_name), ' ', collation.db_name);
	    NMgtAt("II_COLLATION", &p);
	    if (p && *p)
		MEmove(STlength(p), p, ' ', sizeof(collation.db_name), 
				collation.db_name);
	    MEmove(STlength(ucolname), ucolname, ' ', 
		   sizeof(ucollation.db_name), ucollation.db_name);

	    loc_ptr[0] = (DMM_LOC_LIST *) &loc_list[0];
	    loc_ptr[1] = (DMM_LOC_LIST *) &loc_list[1];
	    loc_ptr[2] = (DMM_LOC_LIST *) &loc_list[2];
	    loc_ptr[3] = (DMM_LOC_LIST *) &loc_list[3];

	    /* Even though the iidbdb is not "operative" at this stage, we
	    ** will mark it as operative in the config file now (it will not
	    ** be marked operative in the iidatabase catalog until after it
	    ** is fully created).  Although we would like to mark the iidbdb
	    ** "inoperative" in the config file now and update it to operative
	    ** status when creation is successfully completed (as is done for
	    ** all other DBs) the internal procedure "iiqef_alter_db" which
	    ** updates this bit will not work on the iidbdb; see comments in
	    ** createdb regarding this problem.
	    */ 
	    status = dmm_add_create(0, &dmc->dmc_db_name, &dmc->dmc_db_owner,
		 1, dbservice, DU_OPERATIVE, 
		(DB_LOC_NAME *) &dbdb_location, 11, "II_DATABASE", 4, 
		loc_ptr, collation.db_name, ucollation.db_name, &dmc->error);

	    if (status != E_DB_OK)
	    {
		if (dmc->error.err_code > E_DM_INTERNAL)
		{
		    uleFormat( &dmc->error, 0, NULL, ULE_LOG , NULL,
			    (char * )0, 0L, (i4 *)0, &local_error, 0);
		    SETDBERR(&dmc->error, 0, E_DM0084_ERROR_ADDING_DB);
		}
		break;
	    }
	}
	else if (mode == DMC_A_DESTROY)
	{
	    return (E_DB_OK);
	}

	/*  Call the physical layer to construct a DCB for this database. */

	status = dm2d_add_db(flags, &dm2mode, &dmc->dmc_db_name, 
		&dmc->dmc_db_owner, loc_count, (DM2D_LOC_ENTRY *)location, &dcb, 
		(i4 *)dmc->dmc_lock_list, &dmc->error);
	if (status != E_DB_OK)
	{
	    if (dmc->error.err_code > E_DM_INTERNAL)
	    {
		uleFormat( &dmc->error, 0, NULL, ULE_LOG , NULL,
		    (char * )0, 0L, (i4 *)0, &local_error, 0);
		SETDBERR(&dmc->error, 0, E_DM0084_ERROR_ADDING_DB);
	    }
	    break;
	}

	/*  Use the access mode passed back */
	dmc->dmc_db_access_mode = 
		(dm2mode == DM2D_A_READ) ? DMC_A_READ : DMC_A_WRITE;
	dmc->dmc_db_id = (char *)dcb;
	dmc->dmc_dbservice = dcb->dcb_dbservice;
	dmc->dmc_dbcmptlvl = dcb->dcb_dbcmptlvl;
	dmc->dmc_1dbcmptminor = dcb->dcb_1dbcmptminor;
	return (E_DB_OK);
    }

    return (status);
}
Beispiel #7
0
/*{
** Name: dmv_unufmap - UNDO of an Fmap Update operation.
**
** Description:
**
** Inputs:
**      dmve				Pointer to dmve control block.
**      tabio				Pointer to table io control block
**	fmap				Table's FMAP page.
**	log_rec				Fmap log record.
**	plv				Pointer to page level accessor 
**
** Outputs:
**	error				Pointer to Error return area
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	23-Feb-2009 (hanal04) Bug 121652
**          Created.
*/
static DB_STATUS
dmv_unufmap(
DMVE_CB             *dmve,
DMP_TABLE_IO	    *tabio,
DMP_PINFO	    *fmappinfo,
DM0L_FMAP	    *log_rec,
DMPP_ACC_PLV	    *loc_plv)
{
    LG_LSN		*log_lsn = &log_rec->fmap_header.lsn; 
    LG_LSN		lsn;
    DB_STATUS		status;
    i4		dm0l_flags;
    i4		*err_code = &dmve->dmve_error.err_code;
    i4	page_type = log_rec->fmap_pg_type;
    /* fseg = number of page-bits covered by one FMAP of this page size. */
    i4  fseg = DM1P_FSEG_MACRO(page_type, log_rec->fmap_page_size);
    i4  first_bit   = 0;
    DM1P_FMAP		*fmap = (DM1P_FMAP*)fmappinfo->page;

    CLRDBERR(&dmve->dmve_error);

    /*
    ** If recovery was found to be unneeded for this FMAP page
    ** then we can just return.
    */
    if (fmap == NULL)
	return (E_DB_OK);

    /* If the first page used by the extend falls inside this FMAP,
    ** compute its bit position; the firstbit hint is reset to it below.
    */
    if(log_rec->fmap_first_used / fseg == log_rec->fmap_map_index)
       first_bit = (log_rec->fmap_first_used % fseg) + 1;

    /* Sanity check: the fixed FMAP's sequence number should match the
    ** map index recorded in the log record.  A mismatch is logged (with
    ** a dump of the log record) but recovery proceeds regardless.
    */
    if (DM1P_VPT_GET_FMAP_SEQUENCE_MACRO(page_type, fmap) != 
        log_rec->fmap_map_index)
    {
        uleFormat(NULL, E_DM9677_DMVE_FMAP_FMAP_STATE, (CL_ERR_DESC *)NULL, 
            ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, err_code, 6, 
	    sizeof(DB_DB_NAME), tabio->tbio_dbname->db_db_name,
	    sizeof(DB_TAB_NAME), tabio->tbio_relid->db_tab_name,
	    sizeof(DB_OWN_NAME), tabio->tbio_relowner->db_own_name,
	    0, DM1P_VPT_GET_FMAP_PAGE_PAGE_MACRO(page_type, fmap),
	    0, DM1P_VPT_GET_FMAP_SEQUENCE_MACRO(page_type, fmap),
	    0, log_rec->fmap_map_index);
        dmd_log(1, (PTR) log_rec, 4096);
        uleFormat(NULL, E_DM9642_UNDO_FMAP, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
	    (char *)NULL, (i4)0, (i4 *)NULL, err_code, 0);
    }

    /*
    ** Check direction of recovery operation:
    **
    **     If this is a normal Undo, then we log the CLR for the operation
    **     and write the LSN of this CLR onto the newly updated page (unless
    **     dmve_logging is turned off - in which case the rollback is not
    **     logged and the page lsn is unchanged).
    **
    **     If the record being processed is itself a CLR, then we are REDOing
    **     an update made during rollback processing.  Updates are not relogged
    **     in redo processing and the LSN is moved forward to the LSN value of
    **     of the original update.
    */
    if ((log_rec->fmap_header.flags & DM0L_CLR) == 0)
    {
	if (dmve->dmve_logging)
	{
	    dm0l_flags = DM0L_CLR;
	    if (log_rec->fmap_header.flags & DM0L_JOURNAL)
		dm0l_flags |= DM0L_JOURNAL;

	    status = dm0l_ufmap(dmve->dmve_log_id, dm0l_flags, 
		&log_rec->fmap_tblid,
		tabio->tbio_relid, tabio->tbio_relowner,
		log_rec->fmap_pg_type, log_rec->fmap_page_size,
		log_rec->fmap_loc_cnt, 
		log_rec->fmap_fhdr_pageno, log_rec->fmap_fmap_pageno, 
		log_rec->fmap_map_index,
		log_rec->fmap_hw_mark,
		log_rec->fmap_fhdr_cnf_loc_id, log_rec->fmap_fmap_cnf_loc_id,
		log_rec->fmap_first_used, log_rec->fmap_last_used,
		log_lsn, &lsn, &dmve->dmve_error);
	    if (status != E_DB_OK)
	    {
		/*
		 * Bug56702: return logfull indication.
		 */
		dmve->dmve_logfull = dmve->dmve_error.err_code;
		uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
		    (char *)NULL, (i4)0, (i4 *)NULL, err_code, 0); 
		SETDBERR(&dmve->dmve_error, 0, E_DM9642_UNDO_FMAP);
		return(E_DB_ERROR);
	    }
	}
    }
    else
    {
	/*
	** If we are processing recovery of an FMAP CLR (redo-ing the undo
	** of an extend) then we don't log a CLR but instead save the LSN
	** of the log record we are processing with which to update the 
	** page lsn's.
	*/
	lsn = *log_lsn;
    }

    /*
    ** Undo the Update FMAP operation.
    */

    /*
    ** FHDR updates will be performed in the associated DM0L_FMAP and/or
    ** DM0L_EXTEND processing.
    **
    ** Mark the appropriate ranges of pages as free.
    ** The page mutex is held across the firstbit reset, the free-bit
    ** updates and the page-stat/LSN updates so they appear atomic.
    */
    dmveMutex(dmve, fmappinfo);

    DM1P_VPT_SET_FMAP_FIRSTBIT_MACRO(page_type, fmap, first_bit);
    dm1p_fmfree(fmap, log_rec->fmap_first_used, log_rec->fmap_last_used,
        page_type, log_rec->fmap_page_size);

    DM1P_VPT_SET_FMAP_PAGE_STAT_MACRO(page_type,fmap,DMPP_MODIFY);
    if (dmve->dmve_logging)
        DM1P_VPT_SET_FMAP_PG_LOGADDR_MACRO(page_type, fmap, lsn);

    dmveUnMutex(dmve, fmappinfo);

    /*
    ** Release log file space allocated for logfile forces that may be
    ** required by the buffer manager when unfixing the pages just recovered.
    */
    if (((log_rec->fmap_header.flags & DM0L_CLR) == 0) &&
	((log_rec->fmap_header.flags & DM0L_FASTCOMMIT) == 0) &&
        (dmve->dmve_logging))
    {
        dmve_unreserve_space(dmve, 1);
    }

    return(E_DB_OK);
}
Beispiel #8
0
/*
** History:
**	26-jul-1993 (bryanp)
**	    Fixed error handling in release_cb following dmxe_pass_abort call.
**	01-Oct-2004 (jenjo02)
**	    With Factotum threads, scb_oq_next will be empty
**	    but xcb_odcb_ptr will be correct when calling
**	    dmxe_pass_abort().
**	6-Jul-2006 (kschendel)
**	    Pass the db id to RDF in the right place.
**	16-Nov-2009 (kschendel) SIR 122890
**	    Update destroy-temp call parameters.
**	15-Jan-2010 (jonj)
**	    SIR 121619 MVCC: Deallocate lctx, jctx if allocated.
**	14-Apr-2010 (kschendel) SIR 123485
**	    Force a BLOB query-end during cleanup.
*/
/*
** release_cb - forcibly release all DMF control blocks held by a session.
**
** Walks the session's XCB (transaction) queue: deallocates the blob
** locator context, ends in-flight DMPE (blob) work, closes every open
** RCB (destroying temporary-table TCBs and invalidating them in RDF),
** frees SPCBs and XCCBs, releases any lctx/jctx, then asks the RCP to
** abort the still-open transaction via dmxe_pass_abort() before freeing
** the XCB itself.  Finally closes every database left open on the
** session's ODCB queue.
**
** Inputs:
**	dmc	- caller's DMC_CB; dmc->error receives failure details.
**	scb	- session control block whose queues are drained.
**
** Returns:
**	E_DB_OK		everything released.
**	E_DB_FATAL	any close/abort step failed (dmc->error set,
**			typically to E_DM0106_ERROR_ENDING_SESSION).
*/
static DB_STATUS
release_cb(
DMC_CB		*dmc,
DML_SCB		*scb)
{
    DML_XCB		*xcb;
    DMP_DCB		*dcb;
    DMP_RCB		*rcb;
    i4		error,local_error;
    CL_ERR_DESC		clerror;
    DB_STATUS		status;
    DML_ODCB		*odcb;
    DML_SPCB		*spcb;
    DML_XCCB            *xccb;
    DB_TAB_ID		tbl_id;

    CLRDBERR(&dmc->error);

    /* The XCB queue is circular; empty when the head points at itself. */
    while (scb->scb_x_next != (DML_XCB *) &scb->scb_x_next)
    {
	/*  Get next XCB. */

	xcb = (DML_XCB *) scb->scb_x_next;
	odcb = (DML_ODCB *) xcb->xcb_odcb_ptr;

	/* Remove blob Locator context */
	/* NOTE(review): this sits inside the per-XCB loop; presumably
	** dm0m_deallocate nulls scb_lloc_cxt so later iterations skip it
	** — confirm against dm0m_deallocate's contract.
	*/
	if (scb->scb_lloc_cxt)
	    dm0m_deallocate((DM_OBJECT **)&scb->scb_lloc_cxt);

	/* Close off any in-flight DMPE stuff */
	status = dmpe_query_end(TRUE, TRUE, &dmc->error);
	if ( status != E_DB_OK )
	{
	    SETDBERR(&dmc->error, 0, E_DM0106_ERROR_ENDING_SESSION);
	    return (E_DB_FATAL);
	}

	/* Remove blob PCB's */
	while (xcb->xcb_pcb_list != NULL)
	    dmpe_deallocate(xcb->xcb_pcb_list);

	/*  Close all open tables and destroy all open temporary tables. */

	while (xcb->xcb_rq_next != (DMP_RCB*) &xcb->xcb_rq_next)
	{
	    /*	Get next RCB. */

	    /* Recover the RCB address from the embedded rcb_xq_next
	    ** queue link (queue entries are not at offset 0 of the RCB).
	    */
	    rcb = (DMP_RCB *)((char *)xcb->xcb_rq_next -
		(char *)&(((DMP_RCB*)0)->rcb_xq_next));

	    /*	Remove from the XCB. */

	    rcb->rcb_xq_next->rcb_q_prev = rcb->rcb_xq_prev;
	    rcb->rcb_xq_prev->rcb_q_next = rcb->rcb_xq_next;

	    /*	Remember DCB and table id of a temporary. */

	    /* dcb doubles as the "this was a temp table" flag below. */
	    dcb = 0;
	    if (rcb->rcb_tcb_ptr->tcb_temporary == TCB_TEMPORARY)
	    {
		dcb = rcb->rcb_tcb_ptr->tcb_dcb_ptr;
		tbl_id = rcb->rcb_tcb_ptr->tcb_rel.reltid;
	    }

	    /*	Deallocate the RCB. */

	    status = dm2t_close(rcb, (i4)0, &dmc->error);
	    if (status != E_DB_OK)
	    {
		/* Lock-quota/deadlock/timeout/internal errors have already
		** been reported; anything else is logged here before the
		** generic end-session error is substituted.
		*/
		if ((dmc->error.err_code != E_DM004B_LOCK_QUOTA_EXCEEDED) &&
		    (dmc->error.err_code != E_DM0042_DEADLOCK) &&
		    (dmc->error.err_code != E_DM004D_LOCK_TIMER_EXPIRED) &&
		    (dmc->error.err_code != E_DM004A_INTERNAL_ERROR))
		{
		    uleFormat( &dmc->error, 0, NULL, ULE_LOG , NULL, 
			(char * )NULL, 0L, (i4 *)NULL, 
			&local_error, 0);
		    SETDBERR(&dmc->error, 0, E_DM0106_ERROR_ENDING_SESSION);
		}
		return (E_DB_FATAL);
	    }

	    /*	Now destroy the TCB if it's a temporary. */
	    /*  I can't be bothered to change this code to run off of the
	    **  xccb list, so can't use dmt-destroy-temp. (schka24)
	    **  I may regret this if it turns out that factotum can get
	    **  here -- could end up deleting session temp...
	    */

	    if (dcb)
	    {
		RDF_CB rdfcb;

		/* RDF doesn't use its end-session call, and anyway
		** we know the table ID and it doesn't.   Toss the
		** table out of RDF so that it's not clogging up the
		** RDF-works.  This also ensures that nobody will accidentally
		** find the table by ID in RDF if the ID is reused.
		** (Unlikely, but possible.)
		** Zero in rdr_fcb says don't send invalidate dbevents
		** to other servers.  Zero in the rdfcb rdf_info_blk says
		** we don't have anything fixed.
		*/

		MEfill(sizeof(RDF_CB), 0, &rdfcb);
		STRUCT_ASSIGN_MACRO(tbl_id, rdfcb.rdf_rb.rdr_tabid);
		rdfcb.rdf_rb.rdr_session_id = scb->scb_sid;
		rdfcb.rdf_rb.rdr_unique_dbid = dcb->dcb_id;
		rdfcb.rdf_rb.rdr_db_id = (PTR) odcb;
		/* Ignore error on this call */
		(void) rdf_call(RDF_INVALIDATE, &rdfcb);

		/*
		**  Another RCB, yet to be deleted, could still be referencing
		**  the TCB.  Handle the associated error from destroy as normal
		**  in this case.
		*/

		status = dm2t_destroy_temp_tcb(scb->scb_lock_list, 
			    dcb, &tbl_id, &dmc->error);

		if (status != E_DB_OK &&
		    dmc->error.err_code != E_DM005D_TABLE_ACCESS_CONFLICT)
		{
		    uleFormat( &dmc->error, 0, NULL, ULE_LOG , NULL, 
			(char * )NULL, 0L, (i4 *)NULL, 
			&local_error, 0);
		    SETDBERR(&dmc->error, 0, E_DM0106_ERROR_ENDING_SESSION);
		    return (E_DB_FATAL);
		}
	    }
	}
	/* One more time after table closes, guarantees BQCB's deleted */
	status = dmpe_query_end(TRUE, FALSE, &dmc->error);
	if ( status != E_DB_OK )
	{
	    SETDBERR(&dmc->error, 0, E_DM0106_ERROR_ENDING_SESSION);
	    return (E_DB_FATAL);
	}

	/*  Remove SPCBs (savepoint control blocks). */

	while (xcb->xcb_sq_next != (DML_SPCB*) &xcb->xcb_sq_next)
	{
	    /*	Get next SPCB. */

	    spcb = xcb->xcb_sq_next;

	    /*	Remove SPCB from XCB queue. */

	    spcb->spcb_q_next->spcb_q_prev = spcb->spcb_q_prev;
	    spcb->spcb_q_prev->spcb_q_next = spcb->spcb_q_next;

	    /*	Deallocate the SPCB. */

	    dm0m_deallocate((DM_OBJECT **)&spcb);
	}

	/*  Remove XCCBs (commit/abort pending-operation blocks). */

	while (xcb->xcb_cq_next != (DML_XCCB*)&xcb->xcb_cq_next)
	{
	    /* Get pending XCCB */

	    xccb = xcb->xcb_cq_next;

	    /*	Remove from queue. */

	    xccb->xccb_q_next->xccb_q_prev = xccb->xccb_q_prev;
	    xccb->xccb_q_prev->xccb_q_next = xccb->xccb_q_next;

	    /*	Deallocate. */

	    dm0m_deallocate((DM_OBJECT **)&xccb);
	}

	/* Deallocate lctx, jctx if allocated */
	if ( xcb->xcb_lctx_ptr )
	    dm0m_deallocate(&xcb->xcb_lctx_ptr);
	if ( xcb->xcb_jctx_ptr )
	{
	    /* Close any open jnl file, deallocate jctx */
	    status = dm0j_close(&xcb->xcb_jctx_ptr, &dmc->error);
	    if ( status )
	    {
		SETDBERR(&dmc->error, 0, E_DM0106_ERROR_ENDING_SESSION);
		return (E_DB_FATAL);
	    }
	}

	/*
	** Since we are deallocating a session which has an open transaction,
	** we signal the RCP to abort the transaction for us before deallocating
	** its context.
	**
	** Call dmxe to flush any required pages from the Buffer Manager and
	** to make the abort request to the RCP.
	**
	** The LG context will be freed up by the RCP when the abort is
	** complete.
	**
	** Reference the DB opened on the XCB, not SCB; if this
	** is a Factotum thread, scb_oq_next will be empty,
	** but xcb_odcb_ptr will be valid.
	** Likewise, use xcb_lk_id rather than scb_lock_list.
	*/

	/* XXXX log error message about why pass abort is called */
	status = dmxe_pass_abort(xcb->xcb_log_id, xcb->xcb_lk_id, 
		    &xcb->xcb_tran_id, odcb->odcb_dcb_ptr->dcb_id, &dmc->error);
	if (status)
	{
	    /* XXXX May want to dmd_check here */
	    uleFormat(&dmc->error, 0, NULL, ULE_LOG, NULL, NULL, 0, NULL, &error, 0);
	    SETDBERR(&dmc->error, 0, E_DM0106_ERROR_ENDING_SESSION);
	    return (E_DB_FATAL);
	}

	/*  Remove XCB from SCB queue. */

	xcb->xcb_q_next->xcb_q_prev = xcb->xcb_q_prev;
	xcb->xcb_q_prev->xcb_q_next = xcb->xcb_q_next;
	scb->scb_x_ref_count--;

	dm0m_deallocate((DM_OBJECT **)&xcb);
    }

    /* Close all the opened database of the session. */

    while (scb->scb_oq_next != (DML_ODCB *) &scb->scb_oq_next)
    {
	/*  Get next ODCB. */

	odcb = (DML_ODCB *) scb->scb_oq_next;

	status = dm2d_close_db(odcb->odcb_dcb_ptr, scb->scb_lock_list, 
				DM2D_NLG | DM2D_NLK_SESS, &dmc->error);
	if (status != E_DB_OK)
	{
	    /* Only log codes in the internal range; others pass through. */
	    if (dmc->error.err_code > E_DM_INTERNAL)
	    {
		uleFormat( &dmc->error, 0, NULL, ULE_LOG , NULL, 
		    (char * )NULL, 0L, (i4 *)NULL, &local_error, 0);
		SETDBERR(&dmc->error, 0, E_DM0106_ERROR_ENDING_SESSION);
	    }
	    return (E_DB_FATAL);
	}

	scb->scb_o_ref_count--;
	odcb->odcb_q_next->odcb_q_prev = odcb->odcb_q_prev;
	odcb->odcb_q_prev->odcb_q_next = odcb->odcb_q_next;
	dm0s_mrelease(&odcb->odcb_cq_mutex);
	dm0m_deallocate((DM_OBJECT **)&odcb);
	
    }
    return(E_DB_OK);
}
Beispiel #9
0
/*{
** Name: dmve_load - The recovery of a load table operation.
**
** Description:
**
**	This function performs UNDO recovery of a load operation.
**	Load operations are NON-REDO and are not journalled.
**
**	User tables are loaded in two different ways.  For HEAP 
**	tables, rows are added to the already existing table.  For HASH,
**	BTREE and ISAM, a new file is created and loaded, and then 
**	renamed to the base file.  The method used is given by 
**	log_rec->dul_recreate.
**
**      If dul_recreate is set, recovery will have already destroyed the
**      new file and moved the old one back in place.  All we have to do is
**      close-purge the TCB.
**
**      If dul_recreate is not set, then we have to delete all the rows that
**      were added to the table.  We also have to deal with the possibility
**      that the load was killed off in the middle of a file extend, which
**      may leave some overflow pointers invalid.  To avoid these problems,
**      we bypass normal access methods and just read through the file page
**      by page, formatting each as an empty data page.
**
**      For HEAP files one can also bulkload into a table with data.
**	To support recovery for this type of bulkload, a new variable
**      was added to the log record and indicates that last valid page
**      of the heap table before the load began.  In all other cases this
**      variable is set to zero.  To recover this operation, all pages
**      allocated after the page indicated by the lastpage variable, are
**      freed and the overflow pointer on the lastpage is set to zero.
**     
**
** Inputs:
**	dmve_cb
**	    .dmve_log_rec	    The load table operation log record.
**	    .dmve_action	    Should only be DMVE_UNDO.
**	    .dmve_lg_addr	    The log address of the log record.
**	    .dmve_dcb_ptr	    Pointer to DCB.
**	    .dmve_tran_id	    The physical transaction id.
**	    .dmve_lk_id		    The transaction lock list id.
**	    .dmve_log_id	    The logging system database id.
**	    .dmve_db_lockmode	    The lockmode of the database. Should be 
**				    DM2T_X or DM2T_S.
**
** Outputs:
**	dmve_cb
**	    .dmve_error.err_code    The reason for error status.
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	29-sep-87 (rogerk)
**	    Created new for Jupiter. 
**	22-jan-90 (rogerk)
**	    Make sure the page buffer is aligned on ALIGN_RESTRICT boundary
**	    for byte-align machines.
**      14-jun-90 (jennifer)
**          Change routines to use dm1sbbegin and dm1sbend routines
**          to build empty files.  These routines were used to insure
**          all the new file header, bit map information was set correctly.
**          For bulkload into a heap with data, a new recovery algorithm
**          was used to insure file header and bit map were retained.
**	30-Dec-1991 (rmuth)
**	    When we abort a load into an empty Btree table we just rebuild
**	    an empty btree table ontop of the loaded table. We do this by
**	    calling the build routines dm1bbbegin and dm1bbend, the dm1bbbegin
**	    routine expects certain fields in the mct control block to be set,
**	    these fields are subsequently used during the dm1bbput operations.
**	    As we do not use dm1bbput we were not setting these fields hence
**	    dm1bbbegin was issuing an error. As we already have the values
**	    in the TCB we initialise the values in the mct.
**	8-Jan-1992 (rmuth)
**	    Abort of load was setting reltups and relpages to incorrect values,
**	    because it was not taking into account the fact that the update of
**	    relpages and reltups is logged by a SREP record which is also
**	    rolled back causing relpages and reltups to be set to their 
**	    correct preload values. The fix is to set tcb_page_adds and
**	    tcb_tup_adds to zero so that relpages and reltups are left alone
**	    by the rollback of the LOAD log record.
**	13-feb-1992 (bryanp)
**	    Set mct_allocation and mct_extend. The build routines won't actually
**	    need these, since we're rebuilding over an existing file, but it's
**	    good to set all our parameters before calling the build routines,
**	    just as a matter of practice.
**	28-may-1992 (bryanp)
**	    Set mct_inmemory and mct_rebuild to 0.
**	09-jun-1992 (kwatts)
**	    6.5 MPF Changes. Set mct_compressed from relcomptype.
**	    Set up new mct_data_atts instead of mct_atts_ptr (removed).
**	29-August-1992 (rmuth)
**	   Add parameter to dm1p_lastused.
**	30-October-1992 (rmuth)
**	   Set the mct_guarantee_on_disk flag.
**	8-oct-1992 (bryanp)
**	    Initialize mct_buildrcb.
**	14-dec-1992 (jnash)
**	    Reduced logging project.  Back off the previous change 
**	    (in 6.5, never used in 6.4) where we always created an
**	    empty table, revert back to the old 6.4 file swapping 
**	    behavior.  This is because of recovery problems when someone 
**	    first deletes all rows in a table (including rows on overflow 
**	    pages), and then performs a load, and then aborts the load.  
**	    In this case installing an empty table is not the thing to do.
**	    Upcoming "with emptytable" option will once again introduce
**	    this feature.
**	08-feb-1993 (rmuth)
**	    Use DM2T_A_OPEN_NOACCESS when opening the table so that
**	    we do not check the FHDR/FMAP as these may be ill at the
**	    moment.
**	18-oct-1993 (rogerk)
**	    Changed heap load recovery to rewrite the allocated pages to
**	    be free pages rather than leaving them as formatted data pages.
**	    This allows verifydb and patch table to run without danger
**	    of thinking the freed pages should be restored and the tuples
**	    resurrected.  Also ifdef'd code which does recovery for structured
**	    tables when the EMPTY_TABLE copy option is specified.
**	18-oct-1993 (rogerk)
**	    Added dmve_unreserve_space call to release logfile space
**	    reserved for logforces in the purge tcb call.
**	15-apr-1994 (chiku)
**	    Bug56702: return logfull indication.
**	06-mar-1996 (stial01 for bryanp)
**	    Pass page_size to format accessor.
**	06-may-1996 (thaju02)
**	    New page format support: change page header references to use
**	    macros.
**      21-may-1997 (stial01)
**          Added flags arg to dm0p_unmutex call(s).
**	01-Dec-2004 (jenjo02)
**	    Added DM0P_TABLE_LOCKED_X flag for bug 108074 completeness.
**      21-Sep-2006 (kschendel)
**          Apparently this never worked for partitions!  Fix.
**          Also, it's probably a bad idea to open the load table NOACCESS.
**          And finally, don't use TABLE_LOCKED_X here because we might
**          be processing an fhdr/fmap in the loop that reformats DATA
**          pages as free.  Causes Unfix RELEASE warnings in the BM.
**          We never update fmap/fhdr so don't worry about LOCKED_X.
**	16-Nov-2009 (kschendel) SIR 122890
**	    Don't close-purge the TCB if the load was non-recreate load.
**	    We may not have a control lock in that situation, and there's
**	    no need to close-purge anyway unless it was a recreate load.
**	13-Apr-2010 (kschendel) SIR 123485
**	    Open no-coupon to avoid unnecessary LOB overhead.
*/
DB_STATUS
dmve_load(
DMVE_CB		*dmve_cb)
{
    DMVE_CB		*dmve = dmve_cb;
    DM0L_LOAD		*log_rec = (DM0L_LOAD *)dmve_cb->dmve_log_rec;
    LG_LSN		*log_lsn = &log_rec->dul_header.lsn; 
    DMP_RCB             *rcb;
    DMP_RCB             *master_rcb;
    DMP_TCB		*t;
    DMP_TABLE_IO	*tbio = NULL;
    DMPP_PAGE           *page;			
    DMPP_ACC_PLV	*loc_plv;
    DB_TAB_ID           master_id;
    DB_TAB_TIMESTAMP	timestamp;
    LG_LSN		lsn;
    DM_PAGENO		pageno;
    DM_PAGENO		last_page;
    i4		close_flags;
    i4		dm0l_flags;
    i4		error;
    i4		local_error;
    DB_STATUS		status;
    DB_ATTS             **keyatts = (DB_ATTS **)0;
    bool                is_partition = FALSE;
    DB_ERROR		local_dberr;
    DMP_PINFO		*pinfo = NULL;

    CLRDBERR(&dmve->dmve_error);
    DMVE_CLEAR_TABINFO_MACRO(dmve);

    rcb = master_rcb = NULL;
    /* Single-pass loop: "break" jumps to the error-cleanup code below;
    ** the success path returns from inside the loop.
    */
    for (;;)
    {
	/* Sanity-check the log record; LOAD undo is the only valid action. */
	if (log_rec->dul_header.length != sizeof(DM0L_LOAD) || 
	    log_rec->dul_header.type != DM0LLOAD ||
	    dmve->dmve_action != DMVE_UNDO)
	{
	    SETDBERR(&dmve->dmve_error, 0, E_DM9601_DMVE_BAD_PARAMETER);
	    status = E_DB_ERROR;
	    break;
	}
	/* Finish with close-purge if this was a recreate load (file
	** renaming going on).  Don't purge if a non-recreate load undo;
	** not only is it unnecessary, but we may not have the necessary
	** X control lock in the non-recreate case.
	*/
	close_flags = DM2T_PURGE;
	if (! log_rec->dul_recreate)
	    close_flags = DM2T_NOPURGE;

	/*
	** Open up the table so we can possibly recover the load action
	** (if a non-recreate load was done) and so that we can close-purge
	** the TCB below.  Unfortunately for the non-recreate undo, we
        ** need a real RCB, not just a TBIO.  So if the loaded table was
        ** a partition, open the master as well.
        */
        STRUCT_ASSIGN_MACRO(log_rec->dul_tbl_id, master_id);
        /* A negative index identifies a partition; index 0 is the master. */
        if (master_id.db_tab_index < 0)
        {
            is_partition = TRUE;
            master_id.db_tab_index = 0;
        }
        status = dm2t_open(dmve->dmve_dcb_ptr, &master_id, DM2T_X,
            DM2T_UDIRECT, DM2T_A_WRITE_NOCPN, (i4)0, (i4)20, 0,
	    dmve->dmve_log_id, dmve->dmve_lk_id, 0, 0, dmve->dmve_db_lockmode,
            &dmve->dmve_tran_id, &timestamp,
            &master_rcb, (DML_SCB *)0, &dmve->dmve_error);
	if (status != E_DB_OK)
	    break;
        if (is_partition)
        {
            status = dm2t_open(dmve->dmve_dcb_ptr, &log_rec->dul_tbl_id,
                DM2T_X, DM2T_UDIRECT, DM2T_A_WRITE_NOCPN, (i4)0, (i4)20, 0,
                dmve->dmve_log_id, dmve->dmve_lk_id, 0, 0,
                dmve->dmve_db_lockmode,
                &dmve->dmve_tran_id, &timestamp, &rcb, (DML_SCB *)0, &dmve->dmve_error);
            if (status != E_DB_OK)
		break;
        }
        else
        {
            /* Not a partition: the "master" RCB is the load table itself. */
            rcb = master_rcb;
        }

        /* Turn off logging: the undo work itself must not be journalled. */

	rcb->rcb_logging = 0;
	t = rcb->rcb_tcb_ptr;
	tbio = &t->tcb_table_io;
	dmve->dmve_tbio = tbio;
	loc_plv = t->tcb_acc_plv;
	MEfill(sizeof(dmve->dmve_pages), '\0', &dmve->dmve_pages);

	/* Pick the key attribute list matching the storage structure. */
	if (t->tcb_rel.relspec == TCB_BTREE || 
	    t->tcb_rel.relspec == TCB_RTREE)
	    keyatts = t->tcb_leafkeys;
	else
	    keyatts = t->tcb_key_atts; /* isam, hash */

	/*
	** If dul_recreate is set, then a new file was created for the load
	** and then renamed on top of the old file.  Recovery will have already
	** destroyed the new file and moved the old one back in place.
	** All we have to do now is close-purge the TCB at the bottom.
	**
	** If dul_recreate is not set then the COPY was to an existing heap
	** table and we added the new rows into the current file.  The undo
	** recovery action is then to delete the added rows by freeing the
	** new pages added in the load.
	** Non-recreate mode recovery is only permitted for heap.  There is
	** no good way to undo loads into a non-heap table that would be
	** guaranteed to restore the table to its exact physical state.
	** Thus additional DML undoes would not necessarily work.
	** Therefore Ingres never chooses non-recreate mode for anything
	** but heap tables.
	**
	*/
	if ( ! log_rec->dul_recreate)
	{
	    if (log_rec->dul_lastpage >= 0)
	    {
		/*
		** Copy into an existing heap (possibly non-empty).
		** Free the pages allocated during the copy.
		*/

		status = dm1p_lastused(rcb, 0, &last_page, (DMP_PINFO*)NULL,
					&dmve->dmve_error);	
		if (status != E_DB_OK)
		    break;
		status = dm1p_free(rcb, log_rec->dul_lastpage + 1, 
		    (i4) last_page, &dmve->dmve_error);
		if (status != E_DB_OK)
		    break;

		/*
		** Find the page which was at the end of the HEAP before
		** the load was started and change its ovfl page pointer
		** to unlink the new pages.
		*/
		status = dmve_cachefix_page(dmve, log_lsn,
					    tbio, log_rec->dul_lastpage,
					    DM0P_READAHEAD, 
					    loc_plv,
					    &pinfo);
		if (status != E_DB_OK)
		    break;
		page = pinfo->page;

		/* Page updates happen under the page mutex. */
		dmveMutex(dmve, pinfo);
		DMPP_VPT_SET_PAGE_OVFL_MACRO(t->tcb_rel.relpgtype, page, 0);
		DMPP_VPT_SET_PAGE_STAT_MACRO(t->tcb_rel.relpgtype, page,
		    DMPP_MODIFY);
		dmveUnMutex(dmve, pinfo);

		status = dm0p_uncache_fix(tbio, DM0P_UNFIX, dmve->dmve_lk_id, 
		    dmve->dmve_log_id, &dmve->dmve_tran_id, pinfo, 
		    &dmve->dmve_error);
		if (status != E_DB_OK)
		    break;

		/*
		** Cycle through the allocated pages and re-format them to
		** be free pages.  Be careful only to update actual allocated
		** Data pages; don't muck with any pages that were assigned
		** as FMAP's for the extended table.
		**
		** Specify readahead flag and hope to get group IO actions.
		*/
		for (pageno = log_rec->dul_lastpage + 1; 
		     pageno <= last_page;
		     pageno++)
		{
		    status = dmve_cachefix_page(dmve, log_lsn,
						tbio, pageno,
						DM0P_READAHEAD, 
						loc_plv,
						&pinfo);
		    if (status != E_DB_OK)
			break;
		    page = pinfo->page;

		    /* Only DATA pages are reformatted as free. */
		    if (DMPP_VPT_GET_PAGE_STAT_MACRO(t->tcb_rel.relpgtype, page) & 
			DMPP_DATA)
		    {
			dmveMutex(dmve, pinfo);
			(*loc_plv->dmpp_format)(t->tcb_rel.relpgtype, 
				t->tcb_rel.relpgsize, page, pageno, DMPP_FREE,
				DM1C_ZERO_FILL);
			DMPP_VPT_SET_PAGE_STAT_MACRO(t->tcb_rel.relpgtype, page, 
			    DMPP_MODIFY);
			dmveUnMutex(dmve, pinfo);
		    }

		    status = dm0p_uncache_fix(tbio, DM0P_UNFIX, 
			dmve->dmve_lk_id, dmve->dmve_log_id, 
			&dmve->dmve_tran_id, pinfo, &dmve->dmve_error);
		    if (status != E_DB_OK)
			break;
		}
	    }
	    else
	    {
		/*
		** Copy into non-heap table that did not use recreate mode.
		** This mode is not currently supported by the recovery
		** system.
		*/
		TRdisplay("DMVE_LOAD: Unexpected load undo mode - non-\n");
		TRdisplay("    recreate mode load on a structured table.\n");
		SETDBERR(&dmve->dmve_error, 0, E_DM9601_DMVE_BAD_PARAMETER);
		status = E_DB_ERROR;
		break;
	    }	

	    /* Zero the deltas so rollback of the SREP record alone
	    ** restores relpages/reltups (see 8-Jan-1992 history note).
	    */
	    rcb->rcb_page_adds = 0;
	    rcb->rcb_tup_adds  = 0;
	}

	/*
	** Close and maybe purge the table.
	*/
	status = dm2t_close(rcb, close_flags, &dmve->dmve_error);
	rcb = (DMP_RCB *) 0;
	if (status != E_DB_OK)
	    break;
        if (is_partition)
        {
            status = dm2t_close(master_rcb, close_flags, &dmve->dmve_error);
            master_rcb = NULL;
            if (status != E_DB_OK)
		break;
        }

	/*
	** Write the CLR if necessary.
	*/
	if ((dmve->dmve_logging) &&
	    ((log_rec->dul_header.flags & DM0L_CLR) == 0))
	{
	    dm0l_flags = (log_rec->dul_header.flags | DM0L_CLR);

	    status = dm0l_load(dmve->dmve_log_id, dm0l_flags, 
				&log_rec->dul_tbl_id, &log_rec->dul_name, 
				&log_rec->dul_owner, &log_rec->dul_location,
				log_rec->dul_structure, log_rec->dul_recreate, 
				log_rec->dul_lastpage, log_lsn, &lsn, 
				&dmve->dmve_error);
	    if (status != E_DB_OK) 
	    {
		/*
		 * Bug56702: return logfull indication.
		 */
		dmve->dmve_logfull = dmve->dmve_error.err_code;
		break;
	    }

	    /*
	    ** Release log file space allocated when the LOAD log record
	    ** was written.  (LOAD allows for a FORCE in case of purging.)
	    */
	    dmve_unreserve_space(dmve, 1);
	}

	return (E_DB_OK);
    }

    /*
    ** Error cleanup.
    */
    /* Unfix any page still fixed when we broke out of the loop. */
    if (pinfo && pinfo->page)
    {
	status = dm0p_uncache_fix(&rcb->rcb_tcb_ptr->tcb_table_io, DM0P_UNFIX, 
	    dmve->dmve_lk_id, dmve->dmve_log_id, &dmve->dmve_tran_id, 
	    pinfo, &local_dberr);
	if (status != E_DB_OK)
	{
	    uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &local_error, 0);
	}
    }

    if (rcb)
    {
	status = dm2t_close(rcb, close_flags, &local_dberr);
	if (status != E_DB_OK)
	{
	    uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
		(char *)NULL, (i4)0, (i4 *)NULL, &local_error, 0);
	}
    }
    /* Master is a distinct RCB only in the partition case. */
    if (master_rcb && is_partition)
    {
       status = dm2t_close(master_rcb, close_flags, &local_dberr);
       if (status != E_DB_OK)
       {
	    uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
               (char *)NULL, (i4)0, (i4 *)NULL, &local_error, 0);
       }
    }
        
    uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, (char *)NULL, 
	(i4)0, (i4 *)NULL, &error, 0);
    SETDBERR(&dmve->dmve_error, 0, E_DM961C_DMVE_LOAD);

    return(E_DB_ERROR);
}
Beispiel #10
0
/*{
** Name: scu_xencode - encrypt a character string
**
** Description:
**      This function uses CI routines to encrypt a character string.
**	Since the character string is used to generate the key schedule,
**	the encryption is essentially one-way (you'd need to know the
**	password to decode the password....)  This routine was designed
**	to encrypt application_id passwords.
**
** Inputs:
**      SCU_XENCODE			the opcode to scf_call()
**      scf_cb                          control block in which is specified
**          .scf_ptr_union.scf_xpassword
**			                pointer to buffer to be encrypted
**          .scf_nbr_union.scf_xpasskey
**			                pointer to seed for key schedule
**          .scf_len_union.scf_xpwdlen
**					length of password and key seed
**
** Outputs:
**      scf_cb                          the same control block
**          .error                      the error control area
**              .err_code               E_SC_OK or ...
**					E_SC0261_XENCODE_BAD_PARM
**					E_SC0262_XENCODE_BAD_RESULT
**	Returns:
**	    E_DB_{OK, WARNING, ERROR, FATAL}
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	24-mar-89 (ralph)
**          Written for terminator
**	20-may-89 (ralph)
**	    Changed encryption to use separate key
**	06-jun-89 (ralph)
**	    Fixed unix compile problems
**	06-may-1993 (ralph)
**	    DELIM_IDENT:
**	    Translate key seed to lower case prior to encryption.
**	2-Jul-1993 (daveb)
**	    prototyped.
**	14-jul-93 (ed)
**	    replacing <dbms.h> by <gl.h> <sl.h> <iicommon.h> <dbdbms.h>
**	12-Sep-2007 (drivi01)
**	    Modified scu_xencode function to fix numerous bugs.
**	    The buffers for password manipulation shouldn't exceed
**	    the size of scb_xpassword field in SCF control block,
**	    otherwise the data will be truncated.
*/
DB_STATUS
scu_xencode(SCF_CB *scf_cb, SCD_SCB *scb )
{
    STATUS	status = E_DB_OK;
    CI_KS	key_sched;
    char	work_buf[DB_PASSWORD_LENGTH+1];	/* password / plaintext */
    char	crypt_buf[DB_PASSWORD_LENGTH+1];/* encryption output */
    char	key_buf[DB_PASSWORD_LENGTH];	/* key seed */
    u_i2	out_len;
    i4		enc_len;
    i4		len;
    char	*cp;

#define	PASSINIT    "hjodvwHOJHOJhodh498032&*&*#)$&*jpkshghjlg58925fjkdjkpg"

    CLRDBERR(&scf_cb->scf_error);

    /* Validate caller-supplied length and pointers before touching buffers. */

    if (scf_cb->scf_len_union.scf_xpwdlen <= 0			||
	scf_cb->scf_len_union.scf_xpwdlen >= sizeof(work_buf)	||
	scf_cb->scf_nbr_union.scf_xpasskey == NULL		||
	scf_cb->scf_ptr_union.scf_xpassword == NULL)
    {
	sc0ePut(NULL, E_SC0261_XENCODE_BAD_PARM, NULL, 0);
	SETDBERR(&scf_cb->scf_error, 0, E_SC0261_XENCODE_BAD_PARM);
	return (E_DB_ERROR);
    }

    /* Capture the password into a NUL-padded work buffer. */

    MEmove(scf_cb->scf_len_union.scf_xpwdlen,
	   (PTR)scf_cb->scf_ptr_union.scf_xpassword,
	   (char)'\0',
	   sizeof(work_buf),
	   (PTR)work_buf);

    /* Capture the key seed, padding with '?' characters. */

    MEmove(scf_cb->scf_len_union.scf_xpwdlen,
	   (PTR)scf_cb->scf_nbr_union.scf_xpasskey,
	   (char)'?',
	   sizeof(key_buf),
	   (PTR)key_buf);

    /* Fold the key seed to lower case, stepping by character width. */

    len = sizeof(key_buf);
    cp = key_buf;
    while (len > 0)
    {
	CMtolower(cp, cp);
	len = CMbytedec(len, cp);
	cp = CMnext(cp);
    }

    /* Squeeze all white space out of the password; len is the result size. */

    len = STzapblank(work_buf, crypt_buf);

    if (len <= 0 || len > sizeof(crypt_buf) - 1)
    {
	sc0ePut(NULL, E_SC0261_XENCODE_BAD_PARM, NULL, 0);
	SETDBERR(&scf_cb->scf_error, 0, E_SC0261_XENCODE_BAD_PARM);
	return (E_DB_ERROR);
    }

    /* Prime the work buffer with constant "garbage", then overlay the
    ** squeezed password on top, so trailing bytes are deterministic.
    */

    MEmove(sizeof(PASSINIT), (PTR)PASSINIT, (char)'?',
	   sizeof(work_buf), (PTR)work_buf);

    MEcopy((PTR)crypt_buf, len, (PTR)work_buf);

    /* Blank the output buffer before encrypting into it. */

    MEfill(sizeof(crypt_buf),
	   (u_char)' ',
	   (PTR)crypt_buf);

    /*
    ** First, encrypt the key seed using the string to encode.
    ** Then,  encrypt the string using the encrypted seed.
    ** This is done to prevent two roles with the same password
    ** from having the same encrypted value.
    ** Note that this makes the encryption one-way, since
    ** the password must be provided to decrypt the password!
    */

    /* Key schedule from the password; encrypt the key seed with it. */

    (VOID)CIsetkey((PTR)work_buf, key_sched);

    enc_len = DB_PASSWORD_LENGTH;
    (VOID)CIencode((PTR)key_buf, enc_len, key_sched, (PTR)crypt_buf);

    /* Second key schedule from the (now encrypted) seed; encode the string. */

    (VOID)CIsetkey((PTR)key_buf, key_sched);

    enc_len = DB_PASSWORD_LENGTH;
    (VOID)CIencode((PTR)work_buf, enc_len, key_sched, (PTR)crypt_buf);

    /* A result that is all blanks means the encryption did nothing. */

    if ((char *)STskipblank(crypt_buf, (i4)sizeof(crypt_buf)) != NULL)
    {
	/* Encoded; copy the result back to the caller's password area. */

	out_len = scf_cb->scf_len_union.scf_xpwdlen;
	MEmove(sizeof(crypt_buf), (PTR)crypt_buf, (char)' ',
	       out_len, (PTR)scf_cb->scf_ptr_union.scf_xpassword);
    }
    else
    {
	sc0ePut(NULL, E_SC0262_XENCODE_BAD_RESULT, NULL, 0);
	SETDBERR(&scf_cb->scf_error, 0, E_SC0262_XENCODE_BAD_RESULT);
	status = E_DB_ERROR;
    }

    return (status);
}
Beispiel #11
0
/*
** dmc_end_session - terminate a DMF session.
**
** Frees blob holding temps, optionally force-releases all session
** control blocks (DMC_FORCE_END_SESS), verifies no transactions or
** databases remain open, frees SLCBs, releases the session lock list
** (unless force-ending or a WILLING COMMIT transaction is pending),
** tears down session mutexes and memory, and unlinks the SCB from the
** server-wide active-session queue.
**
** Returns E_DB_OK on success; E_DB_ERROR with dmc->error set otherwise.
*/
DB_STATUS
dmc_end_session(
DMC_CB    *dmc_cb)
{
    DM_SVCB             *svcb;
    DMC_CB		*dmc = dmc_cb;
    DML_SCB		*scb;
    DB_STATUS		status;
    STATUS		cl_status;
    i4		error, local_error;
    CL_ERR_DESC		clerror;
    DML_SLCB            *slcb;
    DMT_CB              *dmtcb;
    DB_ERROR		local_dberr;


    svcb = dmf_svcb;
    svcb->svcb_stat.ses_end++;

    CLRDBERR(&dmc->error);

    /* Single-pass loop; status starts pessimistic and "break" exits. */
    for (status = E_DB_ERROR;;)
    {
	if (dmc->dmc_op_type != DMC_SESSION_OP)
	{
	    SETDBERR(&dmc->error, 0, E_DM000C_BAD_CB_TYPE);
	    break;
	}

	if ( (scb = GET_DML_SCB(dmc->dmc_session_id)) == 0 ||
	      dm0m_check((DM_OBJECT *)scb, (i4)SCB_CB) )
	{
	    SETDBERR(&dmc->error, 0, E_DM002F_BAD_SESSION_ID);
	    break;
	}

	/* Toss any blob holding temps left in this session */

	/* Best-effort: the return status is deliberately ignored. */
	if (scb->scb_oq_next != (DML_ODCB *) &scb->scb_oq_next)
	    (void) dmpe_free_temps(scb->scb_oq_next, &local_dberr);

	if (dmc_cb->dmc_flags_mask & DMC_FORCE_END_SESS)
	{
	    /* Release all the DMF data structures held by the session. */

	    status = release_cb(dmc, scb);
	    if (status)
		break;
	}

	/* After a forced release (or normally) nothing may remain open. */
        if (scb->scb_x_ref_count != 0)
	{
	    SETDBERR(&dmc->error, 0, E_DM0060_TRAN_IN_PROGRESS);
	    status = E_DB_ERROR;
	    break;
	}

	if (scb->scb_o_ref_count != 0)
	{
	    SETDBERR(&dmc->error, 0, E_DM003F_DB_OPEN);
	    status = E_DB_ERROR;
	    break;
	}

	/* Remove SLCBs. */

	while (scb->scb_kq_next != (DML_SLCB *) &scb->scb_kq_next)
	{
	    /*  Get next SLCB. */

	    slcb = (DML_SLCB *) scb->scb_kq_next;

	    /*	Remove from queue. */

	    slcb->slcb_q_next->slcb_q_prev = slcb->slcb_q_prev;
	    slcb->slcb_q_prev->slcb_q_next = slcb->slcb_q_next;

	    /*	Deallocate. */

	    dm0m_deallocate((DM_OBJECT **)&slcb);

	}

	/* 
	** Don't release the session lock list if the session is forced
	** to terminate or if the session has a pending WILLING COMMIT 
	** transaction. 
	** The session lock list will be released by the recovery process,
	** if the session is going to be aborted by the recovery process or
	** in the session re-association time.
	**
	** If Factotum session, there may not be a session lock list.
	*/

	if ( (dmc_cb->dmc_flags_mask & DMC_FORCE_END_SESS) == 0 &&
	     (scb->scb_state & SCB_WILLING_COMMIT) == 0 &&
	     scb->scb_lock_list )
	{
	    /*  Release the session lock list. */
	    i4	lock_list = scb->scb_lock_list;

	    /* attempt this but once */
	    scb->scb_lock_list = 0;

	    cl_status = LKrelease(LK_ALL, lock_list, (LK_LKID *)0,
		(LK_LOCK_KEY *)0, (LK_VALUE *)0, &clerror);
	    if (cl_status != OK)
	    {
		uleFormat(NULL, cl_status, &clerror, 
		    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, 
		    &local_error, 0);
		uleFormat( NULL, E_DM901B_BAD_LOCK_RELEASE, &clerror, ULE_LOG, NULL,
	            (char *)NULL, (i4)0, (i4 *)NULL, &local_error, 1,
		    0, lock_list);
		SETDBERR(&dmc->error, 0, E_DM0106_ERROR_ENDING_SESSION);
		status = E_DB_ERROR;
		break;
	    }
	}
	/* Clean up mutexes. */
	dm0s_mrelease(&scb->scb_bqcb_mutex);

	/* Free session's memory pool(s), if any */
	(VOID) dm0m_destroy(scb, &local_dberr);

	/*  Remove SCB from SVCB queue of active sessions. */
	
	dm0s_mlock(&svcb->svcb_sq_mutex);
	scb->scb_q_next->scb_q_prev = scb->scb_q_prev;
        scb->scb_q_prev->scb_q_next = scb->scb_q_next;
        svcb->svcb_cnt.cur_session--;
	/* Recalc serverwide session-trace bits if we had any turned
	** on.  Don't bother for factota, session bit is probably still
	** in the parent session (and this is an optimization anyway).
	** 96 session tracepoints == 3 i4's.
	** Do this while the mutex is held.
	*/
	if ((scb->scb_s_type & SCB_S_FACTOTUM) == 0
	  && (scb->scb_trace[0] | scb->scb_trace[1] | scb->scb_trace[2]) != 0)
	    dmf_trace_recalc();
	dm0s_munlock(&svcb->svcb_sq_mutex);

	/* The DML_SCB is permanently housed in SCF's SCB */
	status = E_DB_OK;

	break;
    }

    return(status);
}
Beispiel #12
0
/*{
**
** Name: dmc_write_along   -  asynchronous, unobtrusive  write along thread
**
** EXTERNAL call format:	status = dmf_call(DMC_WRITE_ALONG, &dmc_cb);
**
** Description:
**	The dmc_write_along routine provides an unobtrusive version of the  
**	write behind threads found in normal servers.  Its job is similar,
**	writing dirty pages out of the cache to make room for new pages,
**	but the following differences exist:
**        Write Behind threads			Write Along Threads
**	  ---------------------------    -----------------------------------
**	Run as part of same servers	Run in a dedicated IO server so that SMP
**	that process user threads	CPUs are better utilized, and UNIX
**					priorities may be used for balancing
**
**	Runs on any type of UNIX	Will only come up on an SMP machine
**	hardware.			with 2 or more CPUs.
**
**	Contend against user threads 	If there is an available CPU, only
**	from same server, and cause	contention is against other service
**	context switching overhead.	threads in this IO server, eg the
**					new ReadAhead threads.
**
**	Are woken all at once, in a  	Are woken up periodically so that 
**	'panic' when low on buffers, 	I/O is spread out more evenly.
**	causing spikes in processing.
**	
**	All threads in all servers	The I/O master server is given a
**	check all modified buffers, 	list of databases it is to operate on
**	always attempting to 'fix' a   	and keeps them open. When a buffer
**	TBIO and ignoring this buf if	otherwise qualifies to be written, if
**	can't get TBIO. This is a lot	the table is not already open (and its
**	of thrashing, eg if the same	one of the desired databases) the table
**    	tables happen not to be open	is opened. Thus there is less senseless
**	in all servers.			spinning around the cache.
**
**	The modified queues are   	At the cost of some extra cpu time  	
**    	scanned, which is smart, but	(on the surface), the entire buffer
**    	requires holding the buffer	header array is scanned for candidates.
**    	manager mutex, which is bad !.	This kind of scan can be done without
**    	This causes dead waits.		the buffer manager mutex, so the
**    					scan does not get in the way of other
**    					concurrent operations. The mutex is
**    					taken only when needed to alter the
**    					status of a chosen buffer.
**    	
**    	Because WB threads operate in	Because WA can afford to be more
**    	panic mode, and must write out	picky, only buffers that will not
**    	buffers right away, no regard	cause a log force (old lsns) will be
**    	is given to the fact that a	picked to be written. This will
**    	log force may occur due to a	reduce the amount of log records
**    	new lsn on the buffer.		written, LG mutexes etc...
**    	
**    	
**	The dmc_write_along  routine should only be called within a special
**	session that is dedicated for this purpose.  This routine will not
**	return under normal circumstances until server shutdown time.
**
**	This routines wakes up periodically, and calls dm0p_write_along(),
**	which writes pages in a manner that is less intrusive than the
**	dm0p_flush_pages() routines used by normal write behind threads.
**	dm0p_write_along() will not request or hold the buffer manager
**	mutex while looking for victims; instead it does a sequential
**	scan through the main buffer array. Other differences are listed
**	in the above chart, and in the function header.
**
**	This routine will return only if the timer                          
**	is cancelled by an interrupt.  At server shutdown time, the server
**	is expected to interrupt all such service threads.      
**
** Inputs:
**     dmc_cb
** 	.type		    Must be set to DMC_CONTROL_CB.
** 	.length		    Must be at least sizeof(DMC_CB).
**
** Outputs:
**     dmc_cb
** 	.error.err_code	    One of the following error numbers.
**			    E_DB_OK
**			    E_DM004A_INTERNAL_ERROR
**			    E_DM004B_LOCK_QUOTA_EXCEEDED
**			    E_DM0062_TRAN_QUOTA_EXCEEDED
**			    E_DM0163_WRITE_ALONG_FAILURE
**
** Returns:
**     E_DB_OK
**     E_DB_FATAL
**
** History:
**	05-Apr-1995 (cohmi01)
**	    Created, as part of the Manmanx research.
**	10-jan-1996 (dougb)
**	    To get this file to allow links on VMS platform, use CSswitch()
**	    not the internal Unix CL routine CL_swuser().  Also, printf()
**	    should never be called from generic code.
*/
DB_STATUS
dmc_write_along(
DMC_CB	    *dmc_cb)
{
    DMC_CB	    *dmc = dmc_cb;
    DM_SVCB	    *svcb = dmf_svcb;	/* NOTE(review): not referenced below */
    DB_TRAN_ID	    tran_id;		/* dummy txn id for the LG/LK calls */
    LG_LXID	    lx_id;		/* log transaction handle from LGbegin */
    DM0L_ADDDB	    add_info;		/* pseudo-database description for LGadd */
    TIMERSTAT	    stat_block;
    i4	    lock_list;			/* lock list used by dm0p_write_along */
    i4		    len_add_info;
    i4		    event_mask;		/* NOTE(review): unused in this function */
    i4		    events, wakeup_event;  /* NOTE(review): unused in this function */
    i4		    have_locklist = FALSE;	/* cleanup flags: what to undo on exit */
    i4		    have_transaction = FALSE;
    DB_STATUS	    status = E_DB_OK;
    i4	    wbcount = 0;		/* number of write-along passes performed */
    i4	    base_time = 0;
    i4	    flush_time, new_time;
    i4	    length;
    i4	    lgd_status;			/* logging system status word from LGshow */
#define WA_RUNAGAIN     0     /* indicate no sleep desired */
#define WA_SLEEP        1     /* normal sleep interval if buffers empty */
#define WA_STALL        5     /* sleep interval for log full */
#define WA_YIELD       -1     /* yield to another thread */
    i4	    wa_interval = WA_SLEEP;         
    i4	    numforce = 0;		/* set by dm0p_write_along: work done this pass */
#define MAX_CLEAN   50
    i4         numclean = 0;		/* NOTE(review): unused, as is MAX_CLEAN */
    STATUS	    stat;
    i4	    error;
    CL_ERR_DESC	    sys_err;
    DB_OWN_NAME	    user_name;		/* owner name given to LGbegin */
    LG_DBID	    lg_dbid;		/* id of our pseudo-database in LG */
    static i4  nextwa_threadno = 0;	/* serverwide WA thread ordinal */
    i4	    wa_threadno;
    /* Duty roster: successive WA threads take successive entries (mod
    ** NUM_DUTY), spreading single-page vs. group-buffer work across threads.
    */
    i4	    duties[] = {DM0P_WA_SINGLE | DM0P_WA_GROUPS,
				DM0P_WA_SINGLE,
				DM0P_WA_SINGLE,
                                DM0P_WA_GROUPS};
    i4   	    duty;
#define NUM_DUTY (sizeof(duties)/sizeof (i4))

    CLRDBERR(&dmc->error);

    /* NOTE(review): nextwa_threadno is a static incremented without a mutex;
    ** presumably WA threads are started serially at server startup — confirm.
    */
    wa_threadno = nextwa_threadno++;
    duty = duties[wa_threadno % NUM_DUTY];

#ifdef xDEBUG
    TRdisplay(
	"Starting server Write Along Thread for server id 0x%x, duties 0x%x\n",
	      dmc_cb->dmc_id, duty );
#endif

    /*
    ** Add write along  thread to logging system.
    ** Write behind thread does not actually open a database, so use
    ** the LG_NOTDB flag.
    */
    STmove((PTR)DB_BWRITALONG_THREAD, ' ', sizeof(add_info.ad_dbname),
	(PTR) &add_info.ad_dbname);
    MEcopy((PTR)DB_INGRES_NAME, sizeof(add_info.ad_dbowner),
	(PTR) &add_info.ad_dbowner);
    MEcopy((PTR)"None", 4, (PTR) &add_info.ad_root);
    add_info.ad_dbid = 0;
    add_info.ad_l_root = 4;
    len_add_info = sizeof(add_info) - sizeof(add_info.ad_root) + 4;

    stat = LGadd(dmf_svcb->svcb_lctx_ptr->lctx_lgid, LG_NOTDB, (char *)&add_info, 
	len_add_info, &lg_dbid, &sys_err);
    if (stat != OK)
    {
	/* Could not register with LG: log both the CL status and the
	** DMF-level error, map to a caller-visible error code, and bail.
	*/
	uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
	    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	uleFormat(NULL, E_DM900A_BAD_LOG_DBADD, &sys_err, ULE_LOG, NULL,
	    (char *)NULL, 0L, (i4 *)NULL, &error, 4, 0,
	    dmf_svcb->svcb_lctx_ptr->lctx_lgid,
	    sizeof(add_info.ad_dbname), (PTR) &add_info.ad_dbname,
	    sizeof(add_info.ad_dbowner), (PTR) &add_info.ad_dbowner,
	    4, (PTR) &add_info.ad_root);
	if (stat == LG_EXCEED_LIMIT)
	    SETDBERR(&dmc->error, 0, E_DM0062_TRAN_QUOTA_EXCEEDED);
	else
	    SETDBERR(&dmc->error, 0, E_DM0163_WRITE_ALONG_FAILURE);
	return (E_DB_ERROR);
    }


    /* This outer loop runs exactly once; "break" jumps to the common
    ** cleanup code (LGend / LKrelease / LGremove) below.
    */
    for(;;)
    {
	/*
	** Begin transaction in order to do LG and LK calls.
	** Must specify NOPROTECT transaction so that LG won't pick us
	** as a force-abort victim.  Also, the Log File BOF can be advanced
	** past this transaction's position in the log file, which means that
	** the Write Along thread should do no logging nor work that could
	** require backout.
	*/
	STmove((PTR)DB_BWRITALONG_THREAD, ' ', sizeof(DB_OWN_NAME), 
							(PTR) &user_name);
	stat = LGbegin(LG_NOPROTECT, lg_dbid, &tran_id, &lx_id,
	    sizeof(DB_OWN_NAME), user_name.db_own_name,
	    (DB_DIS_TRAN_ID*)NULL, &sys_err);
	if (stat != OK)
	{
	    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    uleFormat(NULL, E_DM900C_BAD_LOG_BEGIN, &sys_err, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 
		0, lg_dbid);
	    if (stat == LG_EXCEED_LIMIT)
		SETDBERR(&dmc->error, 0, E_DM0062_TRAN_QUOTA_EXCEEDED);
	    else
		SETDBERR(&dmc->error, 0, E_DM0163_WRITE_ALONG_FAILURE);
	    status = E_DB_ERROR;
	    break;
	}
	have_transaction = TRUE;

	/*
	** Create locklist to use to wait for Write Behind event.
	*/
	stat = LKcreate_list(LK_NONPROTECT, (i4) 0,
	    (LK_UNIQUE *)&tran_id, (LK_LLID *)&lock_list, (i4)0, 
	    &sys_err);
	if (stat != OK)
	{
	    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    uleFormat(NULL, E_DM901A_BAD_LOCK_CREATE, &sys_err, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    if (stat == LK_NOLOCKS)
		SETDBERR(&dmc->error, 0, E_DM004B_LOCK_QUOTA_EXCEEDED);
	    else
		SETDBERR(&dmc->error, 0, E_DM0163_WRITE_ALONG_FAILURE);
	    status = E_DB_ERROR;
	    break;
	}
	have_locklist = TRUE;

	/*
	** Now begin timer loop for periodically flushing the buffer manager.
	*/
	for (;;)
	{
	    if (DMZ_ASY_MACRO(2))
	    {
		/* Trace point: dump per-pass statistics. */
		new_time = TMsecs();
		flush_time = new_time - base_time;
		base_time = new_time;

		/* Write Write Along  thread statistics. */
		stat = CSstatistics(&stat_block, 0);
		TRdisplay("%22*- DMF Write Along Thread statistics %21*-\n");
		TRdisplay("    Write Along wakeups: %d    Cpu : %d    Dio : %d\n",
		    wbcount, stat_block.stat_cpu, stat_block.stat_dio);
		TRdisplay("    Time to flush pages: %d seconds\n",
		    flush_time);
		TRdisplay("%79*-\n");
	    }

	    /*
	    ** Wait for some interval before the next pass over the buffers.
	    ** This should return with "timed-out"; if it returns with
	    ** "interrupted", then the server is being shut down. If it
	    ** returns with any other return code, something is awry.
	    */
	    if (wa_interval == WA_YIELD)
	    {
	        /*
	        ** Note:  This routine will yield only to other threads at
	        ** the same or higher priority.
		*/
		CS_swuser();
	    }
	    else
	    if (wa_interval != WA_RUNAGAIN)
	    {
		/* WA_SLEEP or WA_STALL: sleep, interruptibly, for
		** wa_interval seconds.  WA_RUNAGAIN skips the wait entirely.
		*/
		stat = CSsuspend(CS_TIMEOUT_MASK | CS_INTERRUPT_MASK, 
			    wa_interval, 0);
		if (stat == E_CS0008_INTERRUPTED)
		{
		    status = E_DB_OK;
		    break;  /* server shut-down */
		}
		if (stat != E_CS0009_TIMEOUT)
		{
		    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
			ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
		    SETDBERR(&dmc->error, 0, E_DM0163_WRITE_ALONG_FAILURE);
		    status = E_DB_ERROR;
		    break;
		}
	    }
	    
	    /*
	    ** Check LOGFULL status.  We don't execute write behind when in
	    ** logfull to avoid background log forces which wreak havoc on
	    ** the recovery logspace reservation algorithms.
	    */
	    stat = LGshow(LG_S_LGSTS, (PTR)&lgd_status, 
		    sizeof(lgd_status), &length, &sys_err);
	    if (stat != OK)
	    {
		uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
		uleFormat(NULL, E_DM9017_BAD_LOG_SHOW, &sys_err, ULE_LOG, NULL, 
		    (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 
		    0, LG_S_LGSTS);
		SETDBERR(&dmc->error, 0, E_DM0163_WRITE_ALONG_FAILURE);
		status = E_DB_ERROR;
		break;
	    }

	    /*
	    ** If logfull continue back to the top of the loop to bypass
	    ** the cache flush and re-wait for timer  interval.
	    ** Use MAX_WA_INTERVAL to pause while log-full is resolved.
	    */
	    if (lgd_status & LGD_LOGFULL)
	    {
		wa_interval = WA_STALL;        
		continue;   /* bypassing flush during logfull */
	    }

	    wbcount++;
	    
	    /*
	    ** Flush some dirty pages out of the Buffer Manager.
	    */
	    status = dm0p_write_along(lock_list, (i4)lx_id, 
		&numforce, duty, &dmc->error);
	    if (status != E_DB_OK)
	    {
		if (dmc->error.err_code > E_DM_INTERNAL)
		{
		    uleFormat(&dmc->error, 0, NULL, ULE_LOG, NULL, 
			(char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
		    SETDBERR(&dmc->error, 0, E_DM0163_WRITE_ALONG_FAILURE);
		}
		break;
	    }

	    /* determine next wait period based on how busy we were */
	    /* if we didnt do much, sleep, else ru thru buffers again */
	    if (numforce == 0)
	    {
		wa_interval = WA_SLEEP;  /* things are calm, good nite*/
	    }
	    else
	    {
		wa_interval = WA_RUNAGAIN; /* hit the road again */
	    }

	}

	break;
    }

    /*
    ** Clean up transaction or lock list left hanging around.
    ** Cleanup failures are logged; they only override "status" if it is
    ** still E_DB_OK, so an earlier error code is preserved.
    */
    if (have_transaction)
    {
	stat = LGend(lx_id, 0, &sys_err);
	if (stat != OK)
	{
	    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    uleFormat(NULL, E_DM900E_BAD_LOG_END, &sys_err, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, lx_id);
	    if ( status == E_DB_OK )
	    {
		SETDBERR(&dmc->error, 0, E_DM0163_WRITE_ALONG_FAILURE);
		status = E_DB_ERROR;
	    }
	}
	have_transaction = FALSE;
    }
    if (have_locklist)
    {
	stat = LKrelease(LK_ALL, lock_list, (LK_LKID *)0, (LK_LOCK_KEY *)0,
	    (LK_VALUE *)0, &sys_err);
	if (stat != OK)
	{
	    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    uleFormat(NULL, E_DM901B_BAD_LOCK_RELEASE, &sys_err, ULE_LOG, NULL,
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, lock_list);
	    if ( status == E_DB_OK )
	    {
		SETDBERR(&dmc->error, 0, E_DM0163_WRITE_ALONG_FAILURE);
		status = E_DB_ERROR;
	    }
	}
	have_locklist = FALSE;
    }

    /* LGadd succeeded if we got this far, so always deregister from LG. */
    stat = LGremove(lg_dbid, &sys_err);
    if (stat != OK)
    {
	uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
	    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	uleFormat(NULL, E_DM9016_BAD_LOG_REMOVE, &sys_err, ULE_LOG, NULL,
	    (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, lg_dbid);
	if ( status == E_DB_OK )
	{
	    SETDBERR(&dmc->error, 0, E_DM0163_WRITE_ALONG_FAILURE);
	    status = E_DB_ERROR;
	}
    }

    /* Write thread statistics. */
    stat = CSstatistics(&stat_block, 0);
    TRdisplay("\n%22*- DMF Write Along Thread statistics %21*-\n");
    TRdisplay("    Write Along wakeup: %d    Cpu : %d    Dio : %d\n",
	wbcount, stat_block.stat_cpu, stat_block.stat_dio);
    TRdisplay("%79*-\n");

    return (status);
}
Beispiel #13
0
/*{
**
** Name: dmc_write_behind_common  -  the guts of a write behind thread
**
** Description:
**
**	The dmc_write_behind routine is used for implementing an asynchronous
**	write behind thread.  It wakes up whenever signaled by an LK event
**	and writes dirty pages out of the cache to make room for new pages
**	to be read in.
**
**	The dmc_write_behind routine should only be called within a special
**	session that is dedicated for this purpose.  This routine will not
**	return under normal circumstances until server shutdown time.
**
**	This routine uses two routines in DM0P to drive the write behind
**	thread:
**	    DM0P_BMFLUSH_WAIT waits for a session in the buffer manager
**	    to signal the event to wake up the write behind threads.  This
**	    is signalled when some specified percent of the buffer manager
**	    is filled with dirty pages.
**
**	    DM0P_FLUSH_PAGES goes through the buffer manager modified queue
**	    in reverse priority order writing pages until some specified
**	    percentage of the buffer manager is free.
**
**	This routine will return only if the event wait in DM0P_BMFLUSH_WAIT
**	is cancelled by an interrupt.  At server shutdown time, the server
**	is expected to interrupt all the write behind threads.
**
**	This common code is executed by both Primary and Cloned
**	WriteBehind agents.
**
** Inputs:
**	i_am_a_clone		FALSE if this is the Primary WB Agent,
**				TRUE if a Clone.
**	cfa			Agent's data.
**
** Outputs:
**     dmf_err
** 	.error.err_code	    One of the following error numbers.
**			    E_DB_OK
**			    E_DM004A_INTERNAL_ERROR
**			    E_DM004B_LOCK_QUOTA_EXCEEDED
**			    E_DM0062_TRAN_QUOTA_EXCEEDED
**			    E_DM0117_WRITE_BEHIND
**
** Returns:
**     E_DB_OK
**     E_DB_FATAL
**
** History:
**      30-jun-1988 (rogerk)
**          Created for Jupiter.      
**      30-Jan-1989 (ac)
**          Added arguments to LGbegin().      
**	15-may-1989 (rogerk)
**	    Return resource errors if resource limit is exceeded.
**      2-oct-1992 (ed)
**          Use DB_MAXNAME to replace hard coded numbers
**          - also created defines to replace hard coded character strings
**          dependent on DB_MAXNAME
**	18-oct-1993 (rogerk)
**	    Add check for LOGFULL status.  We don't execute write behind when
**	    in logfull to avoid background log forces which wreak havoc on
**	    the recovery logspace reservation algorithms.
**	10-oct-93 (swm)
**	    Bug #56438
**	    Put LG_DBID into automatic variable lg_dbid rather than overloading
**	    dmc_cb->dmc_db_id.
**	31-jan-1994 (bryanp) B58380, B58381
**	    Log LG/LK status code if LG or LK call fails.
**	    Check return code from CSsuspend.
**	10-Mar-1998 (jenjo02)
**	    Support for demand-driven WriteBehind threads. Changed prototype
**	    to pass a boolean indicating whether this is the primary or
**	    cloned WB thread and a pointer to DB_ERROR instead of a pointer
**	    to DMC_CB.
**	    Made this a common function called by Primary and Cloned threads.
*/
static DB_STATUS
dmc_write_behind_common(
i4	    i_am_a_clone,
char	    *cfa,
DB_ERROR    *dmf_err)
{
    DM_SVCB	    *svcb = dmf_svcb;
    DB_TRAN_ID	    tran_id;
    LG_LXID	    lx_id;
    DM0L_ADDDB	    add_info;
    TIMERSTAT	    stat_block;
    i4	    lock_list;
    i4		    len_add_info;
    i4		    event_mask;
    i4		    events, wakeup_event;
    i4		    have_locklist = FALSE;
    i4		    have_transaction = FALSE;
    i4		    lg_added = FALSE;
    DB_STATUS	    status = E_DB_OK;
    i4	    wbcount = 0;
    i4	    wait_time = 0;
    i4	    base_time = 0;
    i4	    flush_time, new_time;
    i4	    length;
    i4	    lgd_status;
    STATUS	    stat;
    i4	    error;
    CL_ERR_DESC	    sys_err;
    DB_OWN_NAME	    user_name;
    LG_DBID	    lg_dbid;

#ifdef xDEBUG
    CS_SID	sid;
    i4	pid;

    PCpid(&pid);
    CSget_sid(&sid);

    TRdisplay("Starting Write Behind Thread %x in server process %d\n",
	sid, pid);
#endif

    CLRDBERR(dmf_err);

    if (status == E_DB_OK)
    {
	/*
	** Add write behind thread to logging system.
	** Write behind thread does not actually open a database, so use
	** the LG_NOTDB flag.
	*/
	STmove((PTR)DB_WRITEBEHIND_THREAD, ' ', sizeof(add_info.ad_dbname),
	    (PTR) &add_info.ad_dbname);
	MEcopy((PTR)DB_INGRES_NAME, sizeof(add_info.ad_dbowner),
	    (PTR) &add_info.ad_dbowner);
	MEcopy((PTR)"None", 4, (PTR) &add_info.ad_root);
	add_info.ad_dbid = 0;
	add_info.ad_l_root = 4;
	len_add_info = sizeof(add_info) - sizeof(add_info.ad_root) + 4;

	stat = LGadd(dmf_svcb->svcb_lctx_ptr->lctx_lgid, LG_NOTDB,
	    (char *)&add_info, 
	    len_add_info, &lg_dbid, &sys_err);
	if (stat != OK)
	{
	    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    uleFormat(NULL, E_DM900A_BAD_LOG_DBADD, &sys_err, ULE_LOG, NULL,
		(char *)NULL, 0L, (i4 *)NULL, &error, 4, 0,
		dmf_svcb->svcb_lctx_ptr->lctx_lgid,
		sizeof(add_info.ad_dbname), (PTR) &add_info.ad_dbname,
		sizeof(add_info.ad_dbowner), (PTR) &add_info.ad_dbowner,
		4, (PTR) &add_info.ad_root);
	    if (stat == LG_EXCEED_LIMIT)
		SETDBERR(dmf_err, 0, E_DM0062_TRAN_QUOTA_EXCEEDED);
	    else
		SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
	    status = E_DB_ERROR;
	}
	else
	    lg_added = TRUE;
    }

    if (status == E_DB_OK)
    {
	/*
	** Begin transaction in order to do LG and LK calls.
	** Must specify NOPROTECT transaction so that LG won't pick us
	** as a force-abort victim.  Also, the Log File BOF can be advanced
	** past this transaction's position in the log file, which means that
	** the Write Behind thread should do no logging nor work that could
	** require backout.
	*/
	STmove((PTR)DB_WRITEBEHIND_THROWN, ' ', sizeof(DB_OWN_NAME), 
							(PTR) &user_name);
	stat = LGbegin(LG_NOPROTECT, lg_dbid, &tran_id, &lx_id,
	    sizeof(DB_OWN_NAME), user_name.db_own_name, 
	    (DB_DIS_TRAN_ID*)NULL, &sys_err);
	if (stat != OK)
	{
	    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    uleFormat(NULL, E_DM900C_BAD_LOG_BEGIN, &sys_err, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 
		0, lg_dbid);
	    if (stat == LG_EXCEED_LIMIT)
		SETDBERR(dmf_err, 0, E_DM0062_TRAN_QUOTA_EXCEEDED);
	    else
		SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
	    status = E_DB_ERROR;
	}
	else
	    have_transaction = TRUE;
    }

    if (status == E_DB_OK)
    {
	/*
	** Create locklist to use to wait for Write Behind event.
	*/
	stat = LKcreate_list(LK_NONPROTECT, (i4) 0,
	    (LK_UNIQUE *)&tran_id, (LK_LLID *)&lock_list, (i4)0, 
	    &sys_err);
	if (stat != OK)
	{
	    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    uleFormat(NULL, E_DM901A_BAD_LOCK_CREATE, &sys_err, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    if (stat == LK_NOLOCKS)
		SETDBERR(dmf_err, 0, E_DM004B_LOCK_QUOTA_EXCEEDED);
	    else
		SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
	    status = E_DB_ERROR;
	}
	else
	    have_locklist = TRUE;
    }

    if (status == E_DB_OK)
    {
	/*
	** Now begin loop of waiting for Write Behind event and flushing
	** the buffer manager.
	*/
	do
	{
	    if (DMZ_ASY_MACRO(2))
	    {
		new_time = TMsecs();
		flush_time = new_time - base_time - wait_time;
		base_time = new_time;

		/* Write Write Behind thread statistics. */
		stat = CSstatistics(&stat_block, 0);
		TRdisplay("%22*- DMF Write Behind Thread statistics %21*-\n");
		TRdisplay("    Write Behind wakeups: %d    Cpu : %d    Dio : %d\n",
		    wbcount, stat_block.stat_cpu, stat_block.stat_dio);
		TRdisplay("    Time waiting for event: %d seconds\n",
		    wait_time);
		TRdisplay("    Time to flush pages: %d seconds\n",
		    flush_time);
		TRdisplay("%79*-\n");
	    }

	    /*
	    ** Cloned threads don't wait for a signal, they just
	    ** help flush the cache, then go away.
	    */
	    if (i_am_a_clone == FALSE)
	    {
		/*
		** Wait for the next signal that the buffer manager needs to have
		** pages flushed.
		**
		** This routine will also clear the event from the previous 
		** signal.
		*/
		status = dm0p_wbflush_wait(cfa, lock_list, dmf_err);
		if (status != E_DB_OK)
		{
		    /*
		    ** If warning is returned, that's a signal that
		    ** this thread is to terminate.
		    */
		    if (status == E_DB_WARN)
		    {
			status = E_DB_OK;
			break;
		    }
		    else
		    {
			if (dmf_err->err_code > E_DM_INTERNAL)
			{
			    uleFormat(dmf_err, 0, NULL, ULE_LOG, NULL, 
			    	(char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
			    SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
			}
			break;
		    }
		}
	    }

	    /*
	    ** Check LOGFULL status.  We don't execute write behind when in
	    ** logfull to avoid background log forces which wreak havoc on
	    ** the recovery logspace reservation algorithms.
	    */
	    stat = LGshow(LG_S_LGSTS, (PTR)&lgd_status, 
			    sizeof(lgd_status), &length, &sys_err);
	    if (stat != OK)
	    {
		uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
		uleFormat(NULL, E_DM9017_BAD_LOG_SHOW, &sys_err, ULE_LOG, NULL, 
		    (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 
		    0, LG_S_LGSTS);
		SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
		status = E_DB_ERROR;
		break;
	    }

	    /*
	    ** If logfull, skip the cache flush.
	    */
	    if (lgd_status & LGD_LOGFULL)
	    {
		/*
		** Pause for a moment since the write-behind event will likely
		** be immediately resignaled. We expect that this 5-second
		** wait will return with "timed-out"; if it returns with
		** "interrupted", then the server is being shut down. If it
		** returns with any other return code, something is awry.
		*/
		stat = CSsuspend(CS_TIMEOUT_MASK | CS_INTERRUPT_MASK, 5, 0);
		if (stat == E_CS0008_INTERRUPTED)
		{
		    status = E_DB_OK;
		    break;
		}
		if (stat != E_CS0009_TIMEOUT)
		{
		    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
			ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
		    SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
		    status = E_DB_ERROR;
		    break;
		}
	    }
	    else
	    {
		/*
		** Flush some dirty pages out of the Buffer Manager.
		*/

		if (dmf_svcb->svcb_status & SVCB_IOMASTER) 
		{
		    /* in IOMASTER server use same func as write-along thread */
		    i4 numforce;
		    u_i4 duty = 0xffffffff;
		    status = dm0p_write_along(lock_list, (i4)lx_id, 
			    &numforce, duty, dmf_err);
		}
		else
		    status = dm0p_flush_pages(lock_list, (i4)lx_id, 
				    cfa,
				    dmf_err);

		if (status != E_DB_OK)
		{
		    if (dmf_err->err_code > E_DM_INTERNAL)
		    {
			uleFormat(dmf_err, 0, NULL, ULE_LOG, NULL, 
			    (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
			SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
		    }
		    break;
		}
	    }

	    /*
	    ** If dumping statistics, save time for event to be signaled.
	    */
	    if (DMZ_ASY_MACRO(2))
		wait_time = TMsecs() - base_time;
	    wbcount++;

	} while (i_am_a_clone == FALSE);
    }

    if (i_am_a_clone == FALSE)
    {
	/* Write Fast Commit thread statistics. */
	stat = CSstatistics(&stat_block, 0);
	TRdisplay("\n%22*- DMF Write Behind Thread statistics %21*-\n");
	TRdisplay("    Write Behind wakeup: %d    Cpu : %d    Dio : %d\n",
	    wbcount, stat_block.stat_cpu, stat_block.stat_dio);
	TRdisplay("%79*-\n");
    }

    /*
    ** Clean up transaction and/or lock list left hanging around.
    */
    if (have_transaction)
    {
	stat = LGend(lx_id, 0, &sys_err);
	if (stat != OK)
	{
	    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    uleFormat(NULL, E_DM900E_BAD_LOG_END, &sys_err, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, lx_id);
	    if ( status == E_DB_OK )
	    {
		SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
		status = E_DB_ERROR;
	    }
	}
	have_transaction = FALSE;
    }

    if (have_locklist)
    {
	stat = LKrelease(LK_ALL, lock_list, (LK_LKID *)0, (LK_LOCK_KEY *)0,
	    (LK_VALUE *)0, &sys_err);
	if (stat != OK)
	{
	    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    uleFormat(NULL, E_DM901B_BAD_LOCK_RELEASE, &sys_err, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, lock_list);
	    if ( status == E_DB_OK )
	    {
		SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
		status = E_DB_ERROR;
	    }
	}
	have_locklist = FALSE;
    }

    if (lg_added)
    {
	stat = LGremove(lg_dbid, &sys_err);
	if (stat != OK)
	{
	    uleFormat(NULL, stat, (CL_ERR_DESC *)&sys_err, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    uleFormat(NULL, E_DM9016_BAD_LOG_REMOVE, &sys_err, ULE_LOG, NULL, 
		    (char *)NULL, (i4)0, (i4 *)NULL, &error, 1, 0, lg_dbid);
	    if ( status == E_DB_OK )
	    {
		SETDBERR(dmf_err, 0, E_DM0117_WRITE_BEHIND);
		status = E_DB_ERROR;
	    }
	}
    }

    return (status);
}
Beispiel #14
0
/*{
** Name:  dmt_close - Close a table.
**
**  INTERNAL DMF call format:  status = dmt_close(&dmt_cb);
**
**  EXTERNAL call format:      status = dmf_call(DMT_CLOSE,&dmt_cb);
**
** Description:
**      This function closes a table that was previously opened.
**      All internal control information is released including the
**      record access identifier needed to access the table.
**
** Inputs:
**      dmt_cb
**          .type                           Must be set to DMT_TABLE_CB.
**          .length                         Must be at least
**                                          sizeof(DMT_TABLE_CB) bytes.
**          .dmt_flags_mask                 Of no consequence, ignored.
**          .dmt_record_access_id           Identifies open table to close.
**
** Outputs:
**      dmt_cb
**          .error.err_code                 One of the following error numbers.
**                                          E_DM0000_OK                
**                                          E_DM000B_BAD_CB_LENGTH
**                                          E_DM000C_BAD_CB_TYPE
**                                          E_DM001A_BAD_FLAG
**                                          E_DM002B_BAD_RECORD_ID
**                                          E_DM0042_DEADLOCK
**                                          E_DM004A_INTERNAL_ERROR
**                                          E_DM004B_LOCK_QUOTA_EXCEEDED
**					    E_DM004D_LOCK_TIMER_EXPIRED
**                                          E_DM0055_NONEXT
**                                          E_DM0064_USER_ABORT
**					    E_DM0090_ERROR_CLOSING_TABLE
**                                          E_DM0100_DB_INCONSISTENT
**                                          E_DM010C_TRAN_ABORTED
**                                          E_DM0112_RESOURCE_QUOTA_EXCEEDED
**
**      Returns:
**         E_DB_OK                          Function completed normally.
**         E_DB_WARN                        Function completed normally with a 
**                                          termination status which is in
**                                          dmt_cb.err_code.
**         E_DB_ERROR                       Function completed abnormally 
**                                          with a 
**                                          termination status which is in
**                                          dmt_cb.err_code.
**         E_DB_FATAL                       Function completed with a fatal
**                                          error which must be handled
**                                          immediately.  The fatal status is in
**                                          dmt_cb.err_code.
** History:
**      01-sep-1985 (jennifer)
**	    Created new for jupiter.
**	17-dec-1985 (derek)
**	    Completed code.
**	10-oct-1991 (jnash) merged 14-jun-1991 (Derek)
**	    Added performance profiling support.
**      13-jun-97 (dilma04)
**         Add check for the new DMT_CB flag - DMT_CONSTRAINT.
**	10-nov-1998 (somsa01)
**	    If the table we are closing contains blob columns, make sure
**	    we close any extension tables which are open as well.
**	    (Bug #94114)
**	14-oct-99 (stephenb)
**	    re-set the SCB_BLOB_OPTIM flag here.
**	10-May-2004 (schka24)
**	    Remove blob-optim flags from scb and dmtcb.
**	    Name-decode trick for closing blob etabs didn't work for
**	    session temp etabs, use new parent-ID in etab TCB.
**	8-Aug-2005 (schka24)
**	    Ignore session-temp flag now left over from open.
**	15-Jan-2010 (jonj)
**	    SIR 121619 MVCC: Removed silly validation of dmt_flags_mask,
**	    which aren't used to close a table and are of no import.
**	03-Mar-2010 (jonj)
**	    SIR 121619 MVCC: Blob support: Consistently reference tcb_extended
**	    instead of relstat2 & TCB2_EXTENSION.
**      25-Oct-2010 (stial01) (b124640)
	    dmt_close() get next rcb for this tran BEFORE releasing next_rcb
*/
DB_STATUS
dmt_close(
DMT_CB  *dmt_cb)
{
    DMT_CB	    *dmt = dmt_cb;
    DMP_RCB	    *rcb;
    DB_STATUS	    status;
    bool            internal_error = FALSE;

    CLRDBERR(&dmt->error);
   
    status = E_DB_ERROR;

    /* The record access id returned by DMT_OPEN is really an RCB pointer;
    ** validate the control block before dereferencing any of its fields.
    */
    rcb = (DMP_RCB *)dmt->dmt_record_access_id;
    if (dm0m_check((DM_OBJECT *)rcb, (i4)RCB_CB) == E_DB_OK)
    {
	if (rcb->rcb_state & RCB_OPEN)
	{
	    /*  Dequeue the RCB from the XCB. */

	    rcb->rcb_xq_next->rcb_q_prev = rcb->rcb_xq_prev;
	    rcb->rcb_xq_prev->rcb_q_next = rcb->rcb_xq_next;

#ifdef xDEBUG
	    if (DMZ_SES_MACRO(21) || DMZ_SES_MACRO(20))
		dmd_rstat(rcb);
#endif

	    /*
	    ** First, see if this table has open extension (blob etab)
	    ** tables.  If it does, then close them first.
	    */
	    if (rcb->rcb_tcb_ptr->tcb_rel.relstat2 & TCB2_HAS_EXTENSIONS)
	    {
		DML_XCB	*next;

		/*
		** Scan the transaction's queue of open RCB's.
		*/
		for (next = (DML_XCB*) rcb->rcb_xcb_ptr->xcb_rq_next;
		    next != (DML_XCB*) &rcb->rcb_xcb_ptr->xcb_rq_next; )
		{
		    DMP_RCB	*next_rcb;

		    /*
		    ** Calculate the RCB starting address: "next" points
		    ** at the rcb_xq_next member embedded inside an RCB,
		    ** so back up by that member's offset (hand-rolled
		    ** offsetof).
		    */
		    next_rcb = (DMP_RCB *)(
		    (char *)next - ((char *)&((DMP_RCB*)0)->rcb_xq_next));

		    /*
		    ** next == &next_rcb->rcb_xq_next
		    ** (it's pointing inside next_rcb memory)
		    ** get next rcb for this tran BEFORE releasing next_rcb
		    ** (b124640)
		    */
		    next = next->xcb_q_next;

		    /*
		    ** If this table is an extension table, see if its
		    ** base table is the table we are closing. If it is,
		    ** close the extension table as well.
		    */
		    if ( next_rcb->rcb_tcb_ptr->tcb_extended &&
		         next_rcb->rcb_et_parent_id ==
			  rcb->rcb_tcb_ptr->tcb_rel.reltid.db_tab_base)
		    {
			/* Dequeue the RCB from the XCB. */
			next_rcb->rcb_xq_next->rcb_q_prev =
				    next_rcb->rcb_xq_prev;
			next_rcb->rcb_xq_prev->rcb_q_next =
				    next_rcb->rcb_xq_next;

			status = dm2t_close(next_rcb, (i4)0, &dmt->error);
			if (status != E_DB_OK)
			    internal_error = TRUE;
		    }
		    if (internal_error)
			break;
		}
	    }

	    /*  Call level 2 to close the RCB (skip if an etab close
	    **  already failed; its error is reported instead).
	    */

	    if (!internal_error)
	    {
		status = dm2t_close(rcb, (i4)0, &dmt->error);
		if (status == E_DB_OK)
		    return (status);
	    }
	}
	else
	    SETDBERR(&dmt->error, 0, E_DM002B_BAD_RECORD_ID);
    }
    else
	SETDBERR(&dmt->error, 0, E_DM002B_BAD_RECORD_ID);

    /* Map any internal-range error to the generic "error closing table"
    ** status after logging the underlying cause.
    */
    if (dmt->error.err_code > E_DM_INTERNAL)
    {
	STATUS	    local_error;

	uleFormat(&dmt->error, 0, NULL, ULE_LOG, NULL, 
		(char * )NULL, 0L, (i4 *)NULL, &local_error, 0);
	SETDBERR(&dmt->error, 0, E_DM0090_ERROR_CLOSING_TABLE);
    }
    return (status);
}
Beispiel #15
0
/*{
** Name: dmve_dmu - The recovery of a DMU operation.
**
** Description:
**	This routine is used for ROLLBACK recovery of a DMU operation.
**
**	DMU operations are not IDEMPOTENT during backout.  That means that we
**	cannot re-execute abort recovery on operations that occured after a
**	DMU operation, if the DMU operation has already been aborted.
**
**	This is because the recovery of many operations makes the assumption
**	that the basic state of the table (storage structure, number of pages,
**	file layout) is similar to the state it was in when the operation
**	was logged.
**
**	If we abort the following transaction:
**
**	    CREATE newtab (a = i4)
**	    APPEND newtab (a = 1)
**
**	And the server doing the abort crashes after aborting the CREATE,
**	but before completing the abort, then the RCP will start over with
**	recovery on that transaction.  But the RCP cannot attempt to re-execute
**	the abort of the APPEND statement because the table 'newtab' no longer
**	exists.  (Worse cases are when the server died while in the middle of
**	aborting the CREATE - and left the catalogs in an inconsistent state.)
**
**	To solve this, we write a DM0L_DMU log record whenever we issue an
**	operation for which recovery is not idempotent.  This log record must
**	be written AFTER the operation is complete so that recovery is done
**	before the operation is recovered (while recovery can still be
**	re-executed).
**
**	When we encounter a DM0L_DMU log record during abort processing, we
**	lay down a CLR record which points to the record before the DMU 
**	record.  If we re-execute abort processing on this transaction,
**	we will encounter the CLR and jump over the already backed-out 
**	operations and begin with recovery on the DMU operation
**	itself (which by itself must be idempotent).
**
**	Since we must write a CLR record during recovery, LG must make
**	sure there is room in the log file to write one CLR (and
**	force it to disk) for all DMU operations active in the system.
**
** Inputs:
**	dmve_cb
**	    .dmve_log_rec	    The dmu operation log record.
**	    .dmve_action	    Should only be DMVE_UNDO.
**	    .dmve_dcb_ptr	    Pointer to DCB.
**	    .dmve_tran_id	    The physical transaction id.
**	    .dmve_lk_id		    The transaction lock list id.
**	    .dmve_log_id	    The logging system database id.
**	    .dmve_db_lockmode	    The lockmode of the database. Should be 
**				    DM2T_X or DM2T_S.
**
** Outputs:
**	dmve_cb
**	    .dmve_error.err_code    The reason for error status.
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	11-sep-89 (rogerk)
**          Created for Terminator.
**	25-sep-1991 (mikem) integrated following change: 20-sep-89 (rogerk)
**	    When write ABORTSAVE record, use address in dmu_sp_addr.
**	25-sep-1991 (mikem) integrated following change: 10-jan-90 (rogerk)
**	    If DMVE says the abort is being done by the CSP, then pass
**	    the DM0L_CLUSTER_REQ flag to dm0l_abortsave.
**	20-oct-1992 (jnash)
**	    Reduced logging project.  Write CLR's instead of abort-save
**	    log records.  
**	18-jan-1993 (rogerk)
**	    Reduced Logging Project: Removed OFFLINE_CSP flag.
**	15-mar-1993 (jnash)
**	    Check dmve->dmve_logging to determine if logging required.
**      26-apr-1993 (bryanp)
**          6.5 Cluster support:
**              Replace all uses of DM_LOG_ADDR with LG_LA or LG_LSN.
**		Remove special checks for DCB_S_CSP, since DCB's built by the
**		    CSP process no longer get special handling.
**	23-aug-1993 (rogerk)
**	    Added support for rollforward of CLR records.
**	    Took out old dm0p_force_pages call (which wrote out records
**	    updated by the current transaction) and replaced it with a
**	    dm0p_toss_pages call - which tosses out all pages belonging
**	    to the dmu table.
**	15-apr-1994 (chiku)
**	    Bug56702: return logfull indication.
*/
DB_STATUS
dmve_dmu(
DMVE_CB		*dmve_cb)
{
    DMVE_CB		*dmve = dmve_cb;
    DM0L_DMU		*log_rec = (DM0L_DMU *)dmve_cb->dmve_log_rec;
    LG_LSN		*log_lsn = &log_rec->dmu_header.lsn;
    i4			error;
    DB_STATUS		status = E_DB_OK;

    CLRDBERR(&dmve->dmve_error);
    DMVE_CLEAR_TABINFO_MACRO(dmve);
	
    for (;;)
    {
	/* Sanity-check the log record before trusting its contents. */
	if (log_rec->dmu_header.length > sizeof(DM0L_DMU) || 
	    log_rec->dmu_header.type != DM0LDMU)
	{
	    SETDBERR(&dmve->dmve_error, 0, E_DM9601_DMVE_BAD_PARAMETER);
	    status = E_DB_ERROR;
	    break;
	}

	/*
	** Only UNDO modes require any DMU processing.
	*/
	if ((dmve->dmve_action != DMVE_UNDO) && 
	    ((log_rec->dmu_header.flags & DM0L_CLR) == 0))
	{
	    return (E_DB_OK);
	}

	/*
	** Toss any modified pages for this table from the buffer manager
	** as we are about to rename or possibly remove the files associated
	** with it.  (DMU records are usually logged in conjunction with
	** file rename/swap operations).
	*/
	dm0p_toss_pages(dmve->dmve_dcb_ptr->dcb_id, 
			log_rec->dmu_tabid.db_tab_base, 
			dmve->dmve_lk_id, dmve->dmve_log_id, (i4) TRUE);

	/*
	** Write CLR record that specifies that we have aborted
	** the transaction up to this log record.  Skipped when the
	** record being processed is itself a CLR (re-executed abort)
	** or when logging is disabled (e.g. rollforward).
	**
	** DMU log records are written as NONREDO operations to ensure
	** that redo cannot be attempted using the old (about to be renamed)
	** file structures.  (The nonredo status is appended in the dm0l_dmu
	** routine).
	*/
	if ((dmve->dmve_logging) &&
	    ((log_rec->dmu_header.flags & DM0L_CLR) == 0))
	{
	    status = dm0l_dmu(dmve->dmve_log_id, 
			    (log_rec->dmu_header.flags | DM0L_CLR), 
			    &log_rec->dmu_tabid, &log_rec->dmu_tblname, 
			    &log_rec->dmu_tblowner, log_rec->dmu_operation,
			    log_lsn, &dmve->dmve_error);
	    if (status != E_DB_OK) 
	    {
		/*
		 * Bug56702: return logfull indication to the caller.
		 */
		dmve->dmve_logfull = dmve->dmve_error.err_code;
		break;
	    }
	}

	return(E_DB_OK);	
    }

    /* Error exit: log the underlying cause and substitute the generic
    ** DMVE DMU failure status.
    */
    if (dmve->dmve_error.err_code > E_DM_INTERNAL)
    {
	uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
	    (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	SETDBERR(&dmve->dmve_error, 0, E_DM9622_DMVE_DMU);
    }

    return(E_DB_ERROR);
}
Beispiel #16
0
DB_STATUS
dmu_modify(DMU_CB    *dmu_cb)
{
    DM_SVCB		*svcb = dmf_svcb;
    DMU_CB		*dmu = dmu_cb;
    DML_XCB		*xcb;
    DML_ODCB		*odcb;
    DM2U_MOD_CB		local_mcb, *mcb = &local_mcb;

    i4			recovery;
    i4			truncate;
    i4			duplicates;
    i4			i,j;
    i4			indicator;
    i4			error, local_error;
    DB_STATUS		status;
    bool                bad_loc;
    i4			blob_add_extend = 0;
    bool                used_default_page_size = TRUE;
    i4			page_size;
    i4			verify_options;
    i4			mask;
    i4			has_extensions = 0;
    DB_OWN_NAME		table_owner;
    DB_TAB_NAME		table_name;
    bool		got_action;
    bool		is_table_debug;
    bool		reorg;

    CLRDBERR(&dmu->error);

    /* Any modify should make table recovery disallowed except for the
    ** alter_status options which change logical, physical consistency
    ** and table recovery bit itself
    */
    mcb->mcb_mod_options2 = DM2U_2_TBL_RECOVERY_DEFAULT;

    do
    {
	/*  Check for bad flags. */

	mask = ~(DMU_VGRANT_OK | DMU_INTERNAL_REQ | DMU_RETINTO
		| DMU_PARTITION | DMU_MASTER_OP | DMU_ONLINE_START | DMU_ONLINE_END
		| DMU_NO_PAR_INDEX | DMU_PIND_CHAINED | DMU_NODEPENDENCY_CHECK);
        if ( (dmu->dmu_flags_mask & mask) != 0)
	{
	    SETDBERR(&dmu->error, 0, E_DM001A_BAD_FLAG);
	    break;
	}

	/*  Validate the transaction id. */

	xcb = (DML_XCB *)dmu->dmu_tran_id;
	if (dm0m_check((DM_OBJECT *)xcb, (i4)XCB_CB) != E_DB_OK)
	{
	    SETDBERR(&dmu->error, 0, E_DM003B_BAD_TRAN_ID);
	    break;
	}

	/* Check for external interrupts */
	if ( xcb->xcb_scb_ptr->scb_ui_state )
	    dmxCheckForInterrupt(xcb, &error);

	if ( xcb->xcb_state )
	{
	    if (xcb->xcb_state & XCB_USER_INTR)
	    {
		SETDBERR(&dmu->error, 0, E_DM0065_USER_INTR);
		break;
	    }
	    if (xcb->xcb_state & XCB_FORCE_ABORT)
	    {
		SETDBERR(&dmu->error, 0, E_DM010C_TRAN_ABORTED);
		break;
	    }
	    if (xcb->xcb_state & XCB_ABORT)
	    {
		SETDBERR(&dmu->error, 0, E_DM0064_USER_ABORT);
		break;
	    }	    
	}

	/*  Check the database identifier. */

	odcb = (DML_ODCB *)dmu->dmu_db_id;
	if (dm0m_check((DM_OBJECT *)odcb, (i4)ODCB_CB) != E_DB_OK)
	{
	    SETDBERR(&dmu->error, 0, E_DM0010_BAD_DB_ID);
	    break;
	}

	mcb->mcb_db_lockmode = DM2T_X;

	/*  Check that this is a update transaction on the database 
        **  that can be updated. */
	if (odcb != xcb->xcb_odcb_ptr)
	{
	    SETDBERR(&dmu->error, 0, E_DM005D_TABLE_ACCESS_CONFLICT);
	    break;
	}

	/* Prime the MCB */
	mcb->mcb_dcb = odcb->odcb_dcb_ptr;
	mcb->mcb_xcb = xcb;
	mcb->mcb_tbl_id = &dmu->dmu_tbl_id;
	mcb->mcb_omcb = (DM2U_OMCB*)NULL;
	mcb->mcb_dmu = dmu;
	mcb->mcb_structure = 0;
	mcb->mcb_i_fill = 0;
	mcb->mcb_l_fill = 0;
	mcb->mcb_d_fill = 0;
	mcb->mcb_unique = FALSE;
	mcb->mcb_compressed = TCB_C_NONE;
        mcb->mcb_index_compressed = FALSE;
	mcb->mcb_temporary = FALSE;
	mcb->mcb_merge = FALSE;
	mcb->mcb_clustered = FALSE;
	mcb->mcb_modoptions = 0;
	mcb->mcb_min_pages = 0;
	mcb->mcb_max_pages = 0;
	mcb->mcb_allocation = 0;
	mcb->mcb_extend = 0;
	mcb->mcb_page_type = TCB_PG_INVALID;
	mcb->mcb_page_size = svcb->svcb_page_size;
	mcb->mcb_tup_info = &dmu->dmu_tup_cnt;
	mcb->mcb_reltups = 0;
	mcb->mcb_tab_name = &table_name;
	mcb->mcb_tab_owner = &table_owner;
	mcb->mcb_has_extensions = &has_extensions;
	mcb->mcb_relstat2 = 0;
	mcb->mcb_flagsmask = dmu->dmu_flags_mask;
	mcb->mcb_tbl_pri = 0;
	mcb->mcb_rfp_entry = (DM2U_RFP_ENTRY*)NULL;
	mcb->mcb_new_part_def = (DB_PART_DEF*)dmu->dmu_part_def;
	mcb->mcb_new_partdef_size = dmu->dmu_partdef_size;
	mcb->mcb_verify = 0;

	dmu->dmu_tup_cnt = 0;
        truncate = 0;
	reorg = FALSE;
	duplicates = -1;
	verify_options = 0;
	got_action = FALSE;

	/* FIXME better messages (in general) */
	/* If there's a partdef it has to be one-piece, else bad param */
	if (dmu->dmu_part_def != NULL
	  && dmu->dmu_part_def->ndims > 0
	  && (dmu->dmu_part_def->part_flags & DB_PARTF_ONEPIECE) == 0)
	{
	    SETDBERR(&dmu->error, 0, E_DM002A_BAD_PARAMETER);
	    break;
	}

	/* Disassemble the modify action.
	** FIXME this used to be buried in the characteristics array.
	** It would make much more sense to just carry the action
	** code through, but that will have to wait for another day.
	*/
	got_action = FALSE;
	switch (dmu->dmu_action)
	{
	case DMU_ACT_STORAGE:
	    if (BTtest(DMU_STRUCTURE, dmu->dmu_chars.dmu_indicators))
	    {
		got_action = TRUE;
		mcb->mcb_structure = dmu->dmu_chars.dmu_struct;
	    }
	    break;

	case DMU_ACT_ADDEXTEND:
	    got_action = TRUE;
	    mcb->mcb_mod_options2 |= DM2U_2_ADD_EXTEND;
	    break;

	case DMU_ACT_ENCRYPT:
	    got_action = TRUE;
	    mcb->mcb_mod_options2 |= DM2U_2_ENCRYPT;
	    break;

	case DMU_ACT_LOG_CONSISTENT:
	    if (BTtest(DMU_ACTION_ONOFF, dmu->dmu_chars.dmu_indicators))
	    {
		got_action = TRUE;
		mcb->mcb_mod_options2 &= ~DM2U_2_TBL_RECOVERY_DEFAULT;
		if ( dmu->dmu_chars.dmu_flags & DMU_FLAG_ACTON )
		    mcb->mcb_mod_options2 |= DM2U_2_LOG_CONSISTENT;
		else
		    mcb->mcb_mod_options2 |= DM2U_2_LOG_INCONSISTENT;
	    }
	    break;

	case DMU_ACT_MERGE:
	    got_action = TRUE;
	    mcb->mcb_merge = TRUE;
	    break;

	case DMU_ACT_PERSISTENCE:
	    if (BTtest(DMU_PERSISTS_OVER_MODIFIES, dmu->dmu_chars.dmu_indicators))
	    {
		got_action = TRUE;
		mcb->mcb_mod_options2 |= (dmu->dmu_chars.dmu_flags & DMU_FLAG_PERSISTENCE) ?
			DM2U_2_PERSISTS_OVER_MODIFIES :
			DM2U_2_NOPERSIST_OVER_MODIFIES;
	    }
	    break;

	case DMU_ACT_PHYS_CONSISTENT:
	    if (BTtest(DMU_ACTION_ONOFF, dmu->dmu_chars.dmu_indicators))
	    {
		got_action = TRUE;
		mcb->mcb_mod_options2 &= ~DM2U_2_TBL_RECOVERY_DEFAULT;
		if ( dmu->dmu_chars.dmu_flags & DMU_FLAG_ACTON )
		    mcb->mcb_mod_options2 |= DM2U_2_PHYS_CONSISTENT;
		else
		    mcb->mcb_mod_options2 |= DM2U_2_PHYS_INCONSISTENT;
	    }
	    break;

	case DMU_ACT_PRIORITY:
	    if (BTtest(DMU_TABLE_PRIORITY, dmu->dmu_chars.dmu_indicators))
		got_action = TRUE;
	    /* flag setting when we hit the priority char */
	    break;

	case DMU_ACT_READONLY:
	    if (BTtest(DMU_ACTION_ONOFF, dmu->dmu_chars.dmu_indicators))
	    {
		got_action = TRUE;
		if ( dmu->dmu_chars.dmu_flags & DMU_FLAG_ACTON )
		    mcb->mcb_mod_options2 |= DM2U_2_READONLY;
		else
		    mcb->mcb_mod_options2 |= DM2U_2_NOREADONLY;
	    }
	    break;

	case DMU_ACT_REORG:
	    got_action = TRUE;
	    reorg = TRUE;
	    break;

	case DMU_ACT_TABLE_RECOVERY:
	    if (BTtest(DMU_ACTION_ONOFF, dmu->dmu_chars.dmu_indicators))
	    {
		got_action = TRUE;
		mcb->mcb_mod_options2 &= ~DM2U_2_TBL_RECOVERY_DEFAULT;
		if ( dmu->dmu_chars.dmu_flags & DMU_FLAG_ACTON )
		    mcb->mcb_mod_options2 |= DM2U_2_TBL_RECOVERY_ALLOWED;
		else
		    mcb->mcb_mod_options2 |= DM2U_2_TBL_RECOVERY_DISALLOWED;
	    }
	    break;

	case DMU_ACT_TRUNC:
	    got_action = TRUE;
	    truncate++;
	    break;

	case DMU_ACT_USCOPE:
	    if (BTtest(DMU_STATEMENT_LEVEL_UNIQUE, dmu->dmu_chars.dmu_indicators))
	    {
		got_action = TRUE;
		mcb->mcb_mod_options2 |= DM2U_2_STATEMENT_LEVEL_UNIQUE;
	    }
	    break;

	case DMU_ACT_VERIFY:
	    if (BTtest(DMU_VACTION, dmu->dmu_chars.dmu_indicators))
	    {
		got_action = TRUE;
		mcb->mcb_verify = dmu->dmu_chars.dmu_vaction;
	    }
	    break;
	} /* switch */

	if (! got_action)
	{
	    SETDBERR(&dmu->error, 0, E_DM000E_BAD_CHAR_VALUE);
	    break;
	}

	/* Disassemble the characteristics.
	** FIXME probably better to just carry it through, but one step
	** at a time!
	*/
	indicator = -1;
	while ((indicator = BTnext(indicator, dmu->dmu_chars.dmu_indicators, DMU_CHARIND_LAST)) != -1)
	{
	    switch (indicator)
	    {
	    case DMU_ACTION_ONOFF:
	    case DMU_STRUCTURE:
		/* Already picked it up, just skip on */
		continue;

	    case DMU_IFILL:
		mcb->mcb_i_fill = dmu->dmu_chars.dmu_nonleaff;
		if (mcb->mcb_i_fill > 100)
		    mcb->mcb_i_fill = 100;
		continue;

	    case DMU_LEAFFILL:
		mcb->mcb_l_fill = dmu->dmu_chars.dmu_leaff;
		if (mcb->mcb_l_fill > 100)
		    mcb->mcb_l_fill = 100;
		continue;

	    case DMU_DATAFILL:
		mcb->mcb_d_fill = dmu->dmu_chars.dmu_fillfac;
		if (mcb->mcb_d_fill > 100)
		    mcb->mcb_d_fill = 100;
		continue;

	    case DMU_PAGE_SIZE:
		used_default_page_size = FALSE;
		mcb->mcb_page_size = dmu->dmu_chars.dmu_page_size;
		if (mcb->mcb_page_size != 2048   && mcb->mcb_page_size != 4096  &&
		    mcb->mcb_page_size != 8192   && mcb->mcb_page_size != 16384 &&
		    mcb->mcb_page_size != 32768  && mcb->mcb_page_size != 65536)
		{
		    SETDBERR(&dmu->error, indicator, E_DM000E_BAD_CHAR_VALUE);
		    break;
		}
		else if (!dm0p_has_buffers(mcb->mcb_page_size))
		{
		    SETDBERR(&dmu->error, 0, E_DM0157_NO_BMCACHE_BUFFERS);
		    break;
		}		    
		else
		{
		    continue;
		}

	    case DMU_MINPAGES:
		mcb->mcb_min_pages = dmu->dmu_chars.dmu_minpgs;
		continue;

	    case DMU_MAXPAGES:
		mcb->mcb_max_pages = dmu->dmu_chars.dmu_maxpgs;
		continue;

	    case DMU_UNIQUE:
		mcb->mcb_unique = TRUE;
		continue;

	    case DMU_DCOMPRESSION:
		/* Translate DMU_xxx to TCB compression types */
		if (dmu->dmu_chars.dmu_dcompress == DMU_COMP_ON)
		    mcb->mcb_compressed = TCB_C_DEFAULT;
		else if (dmu->dmu_chars.dmu_dcompress == DMU_COMP_HI)
		    mcb->mcb_compressed = TCB_C_HICOMPRESS;
		continue;

            case DMU_KCOMPRESSION:
                mcb->mcb_index_compressed =
			(dmu->dmu_chars.dmu_kcompress != DMU_COMP_OFF);
                continue;

	    case DMU_TEMP_TABLE:
		mcb->mcb_temporary = TRUE;
		continue;

	    case DMU_RECOVERY:
		recovery = (dmu->dmu_chars.dmu_flags & DMU_FLAG_RECOVERY) != 0;
		if (recovery)
		{
		    /* recovery isn't currently supported */
		    SETDBERR(&dmu->error, indicator, E_DM000D_BAD_CHAR_ID);
		    break;
		}
		continue;

	    case DMU_DUPLICATES:
		duplicates = 0;
		if (dmu->dmu_chars.dmu_flags & DMU_FLAG_DUPS)
		    duplicates = 1;
		continue;

	    case DMU_ALLOCATION:
		mcb->mcb_allocation = dmu->dmu_chars.dmu_alloc;
		continue;

	    case DMU_EXTEND:
		mcb->mcb_extend = dmu->dmu_chars.dmu_extend;
		continue;

	    case DMU_VACTION:
		/* Already got it, just skip on */
		continue;

	    case DMU_VOPTION:
		verify_options = dmu->dmu_chars.dmu_voption;
		continue;

	    case DMU_STATEMENT_LEVEL_UNIQUE:
		if (dmu->dmu_chars.dmu_flags & DMU_FLAG_UNIQUE_STMT)
		    mcb->mcb_relstat2 |= TCB_STATEMENT_LEVEL_UNIQUE;
		continue;

	    case DMU_PERSISTS_OVER_MODIFIES:
		if (dmu->dmu_chars.dmu_flags & DMU_FLAG_PERSISTENCE)
		    mcb->mcb_relstat2 |= TCB_PERSISTS_OVER_MODIFIES;
		continue;

	    case DMU_SYSTEM_GENERATED:
		mcb->mcb_relstat2 |= TCB_SYSTEM_GENERATED;
		continue;

	    case DMU_SUPPORTS_CONSTRAINT:
		mcb->mcb_relstat2 |= TCB_SUPPORTS_CONSTRAINT;
		continue;

	    case DMU_NOT_UNIQUE:
		mcb->mcb_relstat2 |= TCB_NOT_UNIQUE;
		continue;

	    case DMU_NOT_DROPPABLE:
		mcb->mcb_relstat2 |= TCB_NOT_DROPPABLE;
		continue;

	    case DMU_ROW_SEC_AUDIT:
		mcb->mcb_relstat2 |= TCB_ROW_AUDIT;
		continue;

	    case DMU_TABLE_PRIORITY:
		mcb->mcb_tbl_pri = dmu->dmu_chars.dmu_cache_priority;
		if (mcb->mcb_tbl_pri < 0 || mcb->mcb_tbl_pri > DB_MAX_TABLEPRI)
		{
		    SETDBERR(&dmu->error, indicator, E_DM000E_BAD_CHAR_VALUE);
		    break;
		}
		/*
		** DMU_TABLE_PRIORITY    is set if priority came from WITH clause.
		** DMU_TO_TABLE_PRIORITY is set if priority came from MODIFY TO clause.
		*/
		if (dmu->dmu_action != DMU_ACT_PRIORITY)
		    mcb->mcb_mod_options2 |= DM2U_2_TABLE_PRIORITY;
		else
		    mcb->mcb_mod_options2 |= DM2U_2_TO_TABLE_PRIORITY;
		continue;

	    case DMU_BLOBEXTEND:
		blob_add_extend = dmu->dmu_chars.dmu_blobextend;
		continue;

	    case DMU_CLUSTERED:
		mcb->mcb_clustered = (dmu->dmu_chars.dmu_flags & DMU_FLAG_CLUSTERED) != 0;
		continue;

	    case DMU_CONCURRENT_UPDATES:
		/* Translate from PSF flag to DMU internal flag */
		if (dmu->dmu_chars.dmu_flags & DMU_FLAG_CONCUR_U)
		    mcb->mcb_flagsmask |= DMU_ONLINE_START;
		continue;

	    default:
		/* Ignore anything else, might be for CREATE, who knows */
		continue;
	    }
	    break;
	}

	/*
	** If no page size specified, set page_size to zero
	** In this case the current page size will be used
	*/
	if (used_default_page_size)
	    mcb->mcb_page_size = 0;

	/* Save a local copy for dmpe_modify, since dm2u_modify can alter mcb */
	page_size = mcb->mcb_page_size;

	if (mcb->mcb_structure == TCB_HEAP)
	{
	    if (mcb->mcb_d_fill == 0)
		mcb->mcb_d_fill = DM_F_HEAP;
	}
	else if (mcb->mcb_structure == TCB_ISAM)
	{
	    if (mcb->mcb_i_fill == 0)
		mcb->mcb_i_fill = DM_FI_ISAM;
	    if (mcb->mcb_d_fill == 0)
	    {
		if (mcb->mcb_compressed != TCB_C_NONE)
		    mcb->mcb_d_fill = DM_F_CISAM;
		else
		    mcb->mcb_d_fill = DM_F_ISAM;
	    }
	}
	else if (mcb->mcb_structure == TCB_HASH)
	{
	    if (mcb->mcb_d_fill == 0)
	    {
		if (mcb->mcb_compressed != TCB_C_NONE)
		    mcb->mcb_d_fill = DM_F_CHASH;
		else
		    mcb->mcb_d_fill = DM_F_HASH;
	    }
	    if (mcb->mcb_min_pages == 0)
	    {
		if (mcb->mcb_compressed != TCB_C_NONE)
		    mcb->mcb_min_pages = 1;
		else
		    mcb->mcb_min_pages = 10;

		/* If user specified max pages, don't set minpages higher */
		if (mcb->mcb_min_pages > mcb->mcb_max_pages && mcb->mcb_max_pages != 0)
		    mcb->mcb_min_pages = mcb->mcb_max_pages;
	    }
	    if (mcb->mcb_max_pages == 0)
		mcb->mcb_max_pages = 8388607;
	}
	else if (mcb->mcb_structure == TCB_BTREE || mcb->mcb_merge)
	{
            if (DMZ_AM_MACRO(16) && !mcb->mcb_temporary)
            {
                /* DM616 -- forces index compression to be used: */
                mcb->mcb_index_compressed = TRUE;
            }

	    if (mcb->mcb_i_fill == 0)
		mcb->mcb_i_fill = DM_FI_BTREE;
	    if (mcb->mcb_l_fill == 0)
		mcb->mcb_l_fill = DM_FL_BTREE;
	    if (mcb->mcb_d_fill == 0)
	    {
		if (mcb->mcb_compressed != TCB_C_NONE)
		    mcb->mcb_d_fill = DM_F_CBTREE;
		else
		    mcb->mcb_d_fill = DM_F_BTREE;
	    }
	}
	else if (truncate)
	{
	    if (mcb->mcb_d_fill == 0)
		mcb->mcb_d_fill = DM_F_HEAP;
	}

	if (mcb->mcb_structure == TCB_HASH && mcb->mcb_min_pages > mcb->mcb_max_pages)
	{
	    SETDBERR(&dmu->error, 0, E_DM000D_BAD_CHAR_ID);
	    break;
	}

	mcb->mcb_kcount = dmu->dmu_key_array.ptr_in_count;
	mcb->mcb_key = (DMU_KEY_ENTRY**) dmu->dmu_key_array.ptr_address;
	if (mcb->mcb_kcount && (mcb->mcb_key == (DMU_KEY_ENTRY**)NULL ||
              dmu->dmu_key_array.ptr_size != sizeof(DMU_KEY_ENTRY)))
	{
	    SETDBERR(&dmu->error, 0, E_DM002A_BAD_PARAMETER);
	    break;
	}

	if (truncate)
	{
	    mcb->mcb_kcount = 0;
	    mcb->mcb_modoptions |= DM2U_TRUNCATE;
	}
	if (duplicates == 1)
	    mcb->mcb_modoptions |= DM2U_DUPLICATES;
	else if (duplicates == 0)
	    mcb->mcb_modoptions |= DM2U_NODUPLICATES;
	/* else duplicates == -1, set neither flag */
	if (reorg)
	    mcb->mcb_modoptions |= DM2U_REORG;

	/* CLUSTERED implies and requires Unique */
	if ( mcb->mcb_clustered && mcb->mcb_structure == TCB_BTREE )
	    mcb->mcb_unique = TRUE;
	else
	    mcb->mcb_clustered = FALSE;


	if (mcb->mcb_verify)
	{
	    if (verify_options == 0)
	    {
		/*  Apply defaults. */

		switch (mcb->mcb_verify)
		{
		case DMU_V_VERIFY:
		    verify_options = DMU_T_LINK | DMU_T_RECORD | DMU_T_ATTRIBUTE;
		    break;

		case DMU_V_REPAIR:
		case DMU_V_DEBUG:
		    verify_options = DMU_T_BITMAP;
		    break;

		case DMU_V_PATCH:
		case DMU_V_FPATCH:
		    break;
		}
	    }
	    /* Shift modifiers into place */
	    mcb->mcb_verify |= (verify_options << DM1U_MODSHIFT);
	}
	is_table_debug = ((mcb->mcb_verify & DM1U_OPMASK) == DM1U_DEBUG);

	/* Check the location names for duplicates, too many. */

	mcb->mcb_location = (DB_LOC_NAME*)NULL;
	mcb->mcb_l_count = 0;
	if (dmu->dmu_location.data_address && 
	    (dmu->dmu_location.data_in_size >= sizeof(DB_LOC_NAME)) &&
	    mcb->mcb_temporary == FALSE)
	{
	    mcb->mcb_location = (DB_LOC_NAME *) dmu->dmu_location.data_address;
	    mcb->mcb_l_count = dmu->dmu_location.data_in_size/sizeof(DB_LOC_NAME);
	    if (mcb->mcb_l_count > DM_LOC_MAX)
	    {
		SETDBERR(&dmu->error, 0, E_DM0071_LOCATIONS_TOO_MANY);
		break;
	    }
	    bad_loc = FALSE;
	    for (i = 0; i < mcb->mcb_l_count; i++)
	    {
		for (j = 0; j < i; j++)
		{
		    /* 
                    ** Compare this location name against other 
                    ** already given, they cannot be the same.
                    */

		    if (MEcmp(mcb->mcb_location[j].db_loc_name,
                              mcb->mcb_location[i].db_loc_name,
			      sizeof(DB_LOC_NAME)) == 0 )
		    {
			SETDBERR(&dmu->error, i, E_DM001E_DUP_LOCATION_NAME);
			bad_loc = TRUE;
			break;
		    }	    	    
		}
		if (bad_loc == TRUE)
		    break;		    
	    }

	    if (bad_loc == TRUE)
		break;
	}
	else
	{
	    /* There must a location list if you are reorganizing
            ** to a different number of locations.
            */
	    if (reorg)
	    {
		if (dmu->dmu_location.data_address &&
					    dmu->dmu_location.data_in_size)
		    SETDBERR(&dmu->error, 0, E_DM001F_LOCATION_LIST_ERROR);
		else
		    SETDBERR(&dmu->error, 0, E_DM0072_NO_LOCATION);
		break;	    
	    }
	}

	mcb->mcb_partitions = (DMU_PHYPART_CHAR*)NULL;
	mcb->mcb_nparts = 0;
	if ( dmu->dmu_ppchar_array.data_address && 
	     dmu->dmu_ppchar_array.data_in_size >= sizeof(DMU_PHYPART_CHAR) )
	{
	    mcb->mcb_partitions = (DMU_PHYPART_CHAR*)dmu->dmu_ppchar_array.data_address;
	    mcb->mcb_nparts = dmu->dmu_ppchar_array.data_in_size
			/ sizeof(DMU_PHYPART_CHAR);
	}
    
	if ((xcb->xcb_x_type & XCB_RONLY) && !is_table_debug)
	{
	    SETDBERR(&dmu->error, 0, E_DM006A_TRAN_ACCESS_CONFLICT);
	    break;
	}

	/*
	** If this is the first write operation for this transaction,
	** then we need to write the begin transaction record.
	*/
	if ((xcb->xcb_flags & XCB_DELAYBT) != 0 && mcb->mcb_temporary == FALSE
	  && !is_table_debug)
	{
	    status = dmxe_writebt(xcb, TRUE, &dmu->error);
	    if (status != E_DB_OK)
	    {
		xcb->xcb_state |= XCB_TRANABORT;
		break;
	    }
	}

        /* Calls the physical layer to process the rest of the modify */

	status = dm2u_modify(mcb, &dmu->error);

	if (status == E_DB_OK && has_extensions)
	{
	    if ((mcb->mcb_mod_options2 & DM2U_2_ADD_EXTEND) && blob_add_extend == 0)
		status = E_DB_OK;
	    else
	    {
		/* FIX ME make modify etabs optional !! */
		/* Add flag to modify top make modify etabs optional */
		/* Add sysmod dbname tablename blob-column-name */
#ifdef xDEBUG
		TRdisplay("Modify etabs for %~t %~t\n",
		    sizeof(DB_TAB_NAME), table_name.db_tab_name,
		    sizeof(DB_OWN_NAME), table_owner.db_own_name);
#endif
		status = dmpe_modify(dmu, odcb->odcb_dcb_ptr, xcb,
			    &dmu->dmu_tbl_id, mcb->mcb_db_lockmode, mcb->mcb_temporary,
			    truncate, (i4)0, blob_add_extend, &dmu->error);
	    }
	}
	  
	    
	/*	Audit successful MODIFY/PATCH of TABLE. */

	if ( status == E_DB_OK && dmf_svcb->svcb_status & SVCB_C2SECURE )
	{
	    i4 msgid;
	    i4 access = SXF_A_SUCCESS;

	    if ((mcb->mcb_verify & DM1U_OPMASK) == DM1U_PATCH ||
		(mcb->mcb_verify & DM1U_OPMASK) == DM1U_FPATCH)
	    {
		access |= SXF_A_ALTER;
		msgid = I_SX271A_TABLE_PATCH;
	    }
	    else 
	    {
		access |= SXF_A_MODIFY;
		msgid = I_SX270F_TABLE_MODIFY;
	    }
	    /*
	    **	Audit success
	    */
	    status = dma_write_audit(
			SXF_E_TABLE,
			access,
			table_name.db_tab_name,	/* Table/view name */
			sizeof(table_name.db_tab_name),	/* Table/view name */
			&table_owner,	/* Table/view owner */
			msgid, 
			FALSE, /* Not force */
			&dmu->error, NULL);
	}

	if (status == E_DB_OK)
	{
	    /* If modify to reorg or merge then return no tuple count info. */
	    if (reorg || (mcb->mcb_merge) || (mcb->mcb_verify != 0))
	    {
		dmu->dmu_tup_cnt = DM_NO_TUPINFO;
	    }
	    return (E_DB_OK);
	}
	else
	{
            if (dmu->error.err_code > E_DM_INTERNAL)
            {
                uleFormat(&dmu->error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		    (char * )NULL, (i4)0, (i4 *)NULL, &local_error, 0);
		SETDBERR(&dmu->error, 0, E_DM0091_ERROR_MODIFYING_TABLE);
            }
	    switch (dmu->error.err_code)
	    {
		case E_DM004B_LOCK_QUOTA_EXCEEDED:
		case E_DM0112_RESOURCE_QUOTA_EXCEED:
		case E_DM0091_ERROR_MODIFYING_TABLE:
		case E_DM009B_ERROR_CHK_PATCH_TABLE:
		case E_DM0045_DUPLICATE_KEY:
		case E_DM0137_GATEWAY_ACCESS_ERROR:
	        case E_DM006A_TRAN_ACCESS_CONFLICT:
		    xcb->xcb_state |= XCB_STMTABORT;
		    break;

		case E_DM0042_DEADLOCK:
		case E_DM004A_INTERNAL_ERROR:
		case E_DM0100_DB_INCONSISTENT:
		    xcb->xcb_state |= XCB_TRANABORT;
		    break;
		case E_DM0065_USER_INTR:
		    xcb->xcb_state |= XCB_USER_INTR;
		    break;
		case E_DM010C_TRAN_ABORTED:
		    xcb->xcb_state |= XCB_FORCE_ABORT;
		    break;
		case E_DM007D_BTREE_BAD_KEY_LENGTH:
		    dmu->dmu_tup_cnt = dmu->dmu_tup_cnt; /* same for now */
		default:
                    break;
	    }
	}
    } while (FALSE);

    if (dmu->error.err_code > E_DM_INTERNAL)
    {
	uleFormat(&dmu->error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
	    (char * )NULL, (i4)0, (i4 *)NULL, &local_error, 0);
	SETDBERR(&dmu->error, 0, E_DM0091_ERROR_MODIFYING_TABLE);
    }

    return (E_DB_ERROR);
}
Beispiel #17
0
/*{
** Name: dmx_secure - Put a transaction into the WILLING COMMIT state.
**
** Description:
**	Prepares a distributed transaction for two-phase commit:
**	validates the transaction control block and the distributed
**	transaction id, forces a delayed Begin Transaction record if one
**	has not yet been written, distributes any pending replication
**	input-queue work (either via the replication queue management
**	thread or synchronously when the queue is unavailable/full),
**	closes all tables still open under the transaction, and finally
**	calls dmxe_secure() to log the willing-commit state.  On success
**	the XCB is marked XCB_DISTRIBUTED | XCB_WILLING_COMMIT.
**
** Inputs:
**	dmx_cb
**	    .dmx_tran_id	    Transaction identifier (a DML_XCB).
**	    .dmx_dis_tran_id	    Distributed transaction id; its type
**				    must be non-zero.
**
** Outputs:
**	dmx_cb
**	    .error.err_code	    Reason for error status, e.g.
**				    E_DM003B_BAD_TRAN_ID,
**				    E_DM002A_BAD_PARAMETER,
**				    E_DM0064_USER_ABORT,
**				    E_DM0132_ILLEGAL_STMT,
**				    E_DM009A_ERROR_SECURE_TRAN.
**	Returns:
**	    E_DB_OK		    Transaction is now in willing commit.
**	    E_DB_ERROR		    Validation or processing failed; the
**				    XCB may be marked XCB_TRANABORT.
*/
DB_STATUS
dmx_secure(
DMX_CB    *dmx_cb)
{
    DML_SCB             *scb;
    DMX_CB		*dmx = dmx_cb;
    DML_XCB		*xcb;
    DB_STATUS		status = E_DB_ERROR;
    i4		error,local_error;
    DMP_RCB		*rcb;
    CL_ERR_DESC		sys_err;
    STATUS		cl_stat;
    i4			qnext;
    /* Replication queue entries live immediately after the Dmc_rep header */
    DMC_REPQ		*repq = (DMC_REPQ *)(Dmc_rep + 1);
    i4			rep_iq_lock;
    i4			rep_maxlocks;
    DB_TAB_TIMESTAMP	timestamp;
    LK_EVENT		lk_event;

    CLRDBERR(&dmx->error);

    for (;;)
    {
	/* Validate the caller-supplied transaction id. */
	xcb = (DML_XCB *)dmx->dmx_tran_id;
	if (dm0m_check((DM_OBJECT *)xcb, (i4)XCB_CB) != E_DB_OK)
	{
	    SETDBERR(&dmx->error, 0, E_DM003B_BAD_TRAN_ID);
	    break;
	}

	/* A distributed transaction id is mandatory for two-phase commit. */
	if (dmx->dmx_dis_tran_id.db_dis_tran_id_type == 0)
	{
	    SETDBERR(&dmx->error, 0, E_DM002A_BAD_PARAMETER);
	    break;
	}
	if (xcb->xcb_state & (XCB_STMTABORT | XCB_TRANABORT))
	{
	    SETDBERR(&dmx->error, 0, E_DM0064_USER_ABORT);
	    break;
	}
	/* Securing an already-secured transaction is an illegal statement. */
	if (xcb->xcb_state & XCB_WILLING_COMMIT)
	{
	    SETDBERR(&dmx->error, 0, E_DM0132_ILLEGAL_STMT);
	    break;
	}
        scb = xcb->xcb_scb_ptr;

	/*  Clear user interrupt state in SCB. */

	scb->scb_ui_state &= ~SCB_USER_INTR;

	/* Clear the force abort state of the SCB. */

	if (xcb->xcb_state & XCB_FORCE_ABORT)
	    scb->scb_ui_state &= ~SCB_FORCE_ABORT;

	/*
	** If the transaction has written no Begin Transaction record,
	** then force one to the log file now.
	*/
	if (xcb->xcb_flags & XCB_DELAYBT)
	{
	    status = dmxe_writebt(xcb, TRUE, &dmx->error);
	    if (status != E_DB_OK)
	    {
		xcb->xcb_state |= XCB_TRANABORT;
		break;
	    }
	}

	/*
	** If the database is replicated, the input queue must
	** be processed before putting the transaction in a
	** willing commit state.
	**
	** All of this transaction's tables, including those
	** associated with replication, must be closed and
	** their pages tossed from the cache.
	**
	** This is the same processing as that for a 
	** "normal" commit.
	*/
	if ((xcb->xcb_odcb_ptr->odcb_dcb_ptr->dcb_status & DCB_S_REPLICATE) &&
	    (DMZ_SES_MACRO(32) == 0 || dmd_reptrace() == FALSE) &&
	    ((xcb->xcb_rep_input_q == NULL) || 
		(xcb->xcb_rep_input_q == (DMP_RCB *)-1)))
	{
	    /* Map the configured input-queue lock level to a dm2t lock mode. */
	    if (dmf_svcb->svcb_rep_iqlock == DMC_C_ROW)
		rep_iq_lock = DM2T_RIX;
	    else if (dmf_svcb->svcb_rep_iqlock == DMC_C_TABLE)
		rep_iq_lock = DM2T_X;
	    else
		rep_iq_lock = DM2T_IX;
 
	    /* allow a minimum maxlocks value of 50 ... */
	    if (dmf_svcb->svcb_rep_dtmaxlock > 50)
		rep_maxlocks = dmf_svcb->svcb_rep_dtmaxlock;
	    /* ...but default to 100 */
	    else if (dmf_svcb->svcb_lk_maxlocks > 100)
		rep_maxlocks = dmf_svcb->svcb_lk_maxlocks;
	    else
		rep_maxlocks = 100;
	    
	    /* Open the replication input queue table for write. */
	    status = dm2t_open(xcb->xcb_odcb_ptr->odcb_dcb_ptr, 
		&xcb->xcb_odcb_ptr->odcb_dcb_ptr->rep_input_q, rep_iq_lock,
                DM2T_UDIRECT, DM2T_A_WRITE, (i4)0, rep_maxlocks,
                (i4)0, xcb->xcb_log_id, xcb->xcb_lk_id, (i4)0, 
		(i4)0, (i4)0, &(xcb->xcb_tran_id), &timestamp, 
		&(xcb->xcb_rep_input_q), (DML_SCB *)0, &dmx->error);
	    if (status != E_DB_OK)
	    {
		xcb->xcb_state |= XCB_TRANABORT;
		break;
	    }
	}
        /*
        ** clean up replication input queue RCB
        */
        if (xcb->xcb_rep_input_q)
        {
	    HRSYSTIME		curtime;
	    bool	list_ok = TRUE;
	    i4		semaphore_status = -1;

	    (VOID)TMhrnow(&curtime);

	    /* If rep_txq_size=0, Dmc_rep will be NULL and we have to
	    ** do the distribution synchronously now (kibro01) b118566
	    */
	    if (Dmc_rep == NULL)
		list_ok = FALSE;

	    /*
	    ** if we had the input queue open, then we need to distribute
	    */
	    if (list_ok)
	    {
	        semaphore_status = CSp_semaphore(TRUE, &Dmc_rep->rep_sem);
		if (semaphore_status == E_DB_OK)
		{
		    /* Compute the next free queue slot; an empty queue
		    ** (start == end with an inactive, zero-tx entry)
		    ** reuses queue_end itself.
		    */
		    qnext = (Dmc_rep->queue_start == Dmc_rep->queue_end &&
			repq[Dmc_rep->queue_start].active == 0 &&
			repq[Dmc_rep->queue_start].tx_id == 0) ?
			  Dmc_rep->queue_end :
			  (Dmc_rep->queue_end + 1) % Dmc_rep->seg_size;
		    /* Wrap-around onto queue_start means the queue is full. */
		    if (qnext == Dmc_rep->queue_start && 
			Dmc_rep->queue_start != Dmc_rep->queue_end)
		    {
			list_ok = FALSE;
		    }
		} else
		{
	            TRdisplay("%@ dmxsecure() CSp_semaphore failed %d\n",
                        semaphore_status);

		    /* Error getting the semaphore, can't rely on the list */
		    list_ok = FALSE;
		}
	    }
	    if (!list_ok)
	    {
		/* queue is full, print warning and distribute manually */
		if (semaphore_status == E_DB_OK)
			CSv_semaphore(&Dmc_rep->rep_sem);

		/* If we have a queue and it's full, log that fact */
		if (Dmc_rep)
		{
            	    uleFormat(NULL, W_DM9561_REP_TXQ_FULL, NULL, ULE_LOG, NULL, 
		        (char *)NULL, (i4)0, (i4 *)NULL, &local_error, 0);
		}

		/* Synchronous distribution; prefer the remote tx id set by
		** the replication server, else the local low tran id.
		*/
		status = dm2rep_qman(xcb->xcb_odcb_ptr->odcb_dcb_ptr,
		    xcb->xcb_rep_remote_tx ? xcb->xcb_rep_remote_tx :
			(i4)xcb->xcb_tran_id.db_low_tran,
		    &curtime, xcb->xcb_rep_input_q, xcb->xcb_lk_id, 
		    &dmx->error, FALSE);
		if (status != E_DB_OK)
		{
		    if (dmx->error.err_code > E_DM004A_INTERNAL_ERROR)
		    {
		        uleFormat( &dmx->error, 0, NULL, ULE_LOG , NULL, 
				    (char * )NULL, 0L, (i4 *)NULL, 
				    &local_error, 0);
			SETDBERR(&dmx->error, 0, E_DM957F_REP_DISTRIB_ERROR);
		    }
		    xcb->xcb_state |= XCB_TRANABORT;
		    break;
		}
	    }
	    else
	    {
	        /* we have mutex and an entry we can use, so fill out entry */
	        /* we assume we are only accessing one db in this tx */
	        STRUCT_ASSIGN_MACRO(xcb->xcb_odcb_ptr->odcb_dcb_ptr->dcb_name,
		    repq[qnext].dbname);
	        /*
	        ** use remote TX if it's set (by the replication server)
	        */
	        if (xcb->xcb_rep_remote_tx)
		    repq[qnext].tx_id = xcb->xcb_rep_remote_tx;
	        else
	            repq[qnext].tx_id = (i4)xcb->xcb_tran_id.db_low_tran;
	        repq[qnext].active = FALSE;
	        MEcopy((char *)&curtime, sizeof(HRSYSTIME), 
		       (char *)&repq[qnext].trans_time);
	        Dmc_rep->queue_end = qnext;
	        CSv_semaphore(&Dmc_rep->rep_sem);
	        /*
	        ** signal the queue management thread(s)
	        */
		lk_event.type_high = REP_READQ;
		lk_event.type_low = 0;
		lk_event.value = REP_READQ_VAL;
		
	        cl_stat = LKevent(LK_E_CLR | LK_E_CROSS_PROCESS, xcb->xcb_lk_id,
		    &lk_event, &sys_err);
                if (cl_stat != OK)
                {
                    uleFormat(NULL, cl_stat, &sys_err, ULE_LOG, NULL,
                        (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
                    uleFormat(&dmx->error, E_DM904B_BAD_LOCK_EVENT, &sys_err, ULE_LOG, 
			NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 3,
                        0, LK_E_CLR, 0, REP_READQ, 0, xcb->xcb_lk_id);
                    status = E_DB_ERROR;
                    break;
                }
	    }
	    /*
	    ** user updates to the input queue will cause xcb_rep_input_q to
	    ** be set to -1, the table will already be closed by the user in
	    ** this case
	    */
	    if (xcb->xcb_rep_input_q != (DMP_RCB *)-1)
	    {
                status = dm2t_close(xcb->xcb_rep_input_q, (i4)0, &dmx->error);

		if (status != E_DB_OK)
		{
		    if (dmx->error.err_code != E_DM004A_INTERNAL_ERROR)
		    {
			uleFormat( &dmx->error, 0, NULL, ULE_LOG , NULL,
			    (char * )NULL, 0L, (i4 *)NULL,
			    &local_error, 0);
			SETDBERR(&dmx->error, 0, E_DM009A_ERROR_SECURE_TRAN);
		    }
		    xcb->xcb_state |= XCB_TRANABORT;
		    return (status);
		}
		xcb->xcb_rep_input_q = (DMP_RCB*)NULL;
	    }
        }

	/*  Close all open tables and destroy all open temporary tables.    */

	while (xcb->xcb_rq_next != (DMP_RCB*) &xcb->xcb_rq_next)
	{
	    /*	Get next RCB. */

	    /* Convert the embedded queue link back to its containing RCB. */
	    rcb = (DMP_RCB *)((char *)xcb->xcb_rq_next -
		(char *)&(((DMP_RCB*)0)->rcb_xq_next));

	    /*	Remove from the XCB. */

	    rcb->rcb_xq_next->rcb_q_prev = rcb->rcb_xq_prev;
	    rcb->rcb_xq_prev->rcb_q_next = rcb->rcb_xq_next;

	    /*	Deallocate the RCB. */

	    status = dm2t_close(rcb, (i4)0, &dmx->error);
	    if (status != E_DB_OK)
	    {
		if (dmx->error.err_code != E_DM004A_INTERNAL_ERROR)
		{
		    uleFormat( &dmx->error, 0, NULL, ULE_LOG , NULL, 
			(char * )NULL, 0L, (i4 *)NULL, 
			&local_error, 0);
		    SETDBERR(&dmx->error, 0, E_DM009A_ERROR_SECURE_TRAN);
		}
		xcb->xcb_state |= XCB_TRANABORT;
		return (status);
	    }
	}

	/*  Now prepare to commit the transaction. */

	STRUCT_ASSIGN_MACRO(dmx->dmx_dis_tran_id, xcb->xcb_dis_tran_id);

	status = dmxe_secure(&xcb->xcb_tran_id, &xcb->xcb_dis_tran_id,
				xcb->xcb_log_id, xcb->xcb_lk_id, &dmx->error);
	if (status != E_DB_OK)
	{
	    if (dmx->error.err_code > E_DM_INTERNAL)
	    {
		uleFormat(&dmx->error, 0, NULL, ULE_LOG, NULL, 
			    (char * )NULL, (i4)0, (i4 *)NULL, &error, 0);
		SETDBERR(&dmx->error, 0, E_DM009A_ERROR_SECURE_TRAN);
	    }

	    xcb->xcb_state |= XCB_TRANABORT;
	    return (E_DB_ERROR);
	}

	/* Mark the state of the transaction in xcb to WILLING COMMIT. */

	xcb->xcb_x_type |= XCB_DISTRIBUTED;
	xcb->xcb_state |= XCB_WILLING_COMMIT;
	return (E_DB_OK);
    }
    return (status);
}
Beispiel #18
0
/*{
** Name: dmx_resume - Resume a willing-commit distributed transaction.
**
** Description:
**	Re-adopts a distributed transaction that was left in the
**	WILLING COMMIT state (e.g. after a server restart).  Validates
**	the ODCB/SCB, requires that the session currently owns no
**	transaction, allocates a new XCB, calls dmxe_resume() to
**	reconnect to the logging/locking context, then initializes the
**	XCB, links it onto the session's transaction list, and returns
**	its handle in dmx_tran_id.
**
** Inputs:
**	dmx_cb
**	    .dmx_db_id		    Open database id (a DML_ODCB).
**	    .dmx_dis_tran_id	    Distributed transaction id to resume.
**
** Outputs:
**	dmx_cb
**	    .dmx_tran_id	    New transaction handle (the XCB).
**	    .dmx_phys_tran_id	    Physical transaction id.
**	    .error.err_code	    Reason for error status, e.g.
**				    E_DM0010_BAD_DB_ID,
**				    E_DM002F_BAD_SESSION_ID,
**				    E_DM0060_TRAN_IN_PROGRESS,
**				    E_DM0099_ERROR_RESUME_TRAN.
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
*/
DB_STATUS
dmx_resume(
DMX_CB    *dmx_cb)
{
    char		sem_name[10+16+10+1];	/* last +10 is slop */
    DMX_CB		*dmx = dmx_cb;
    DML_SCB		*scb;
    DML_ODCB		*odcb;
    DML_XCB		*x;
    DML_XCB		*xcb = (DML_XCB *)0;
    i4		btflags = 0;
    i4		mode;
    i4		error,local_error;
    DB_STATUS		status;

    CLRDBERR(&dmx->error);

    status = E_DB_ERROR;
    for (;;)
    {
	mode = DMXE_WRITE;

	/* Validate the open-database control block. */
	if ( (odcb = (DML_ODCB *)dmx->dmx_db_id) &&
	     dm0m_check((DM_OBJECT *)odcb, (i4)ODCB_CB) == 0 )
	{
	    /* Validate the session control block hanging off the ODCB. */
	    if ( (scb = odcb->odcb_scb_ptr) &&
	         dm0m_check((DM_OBJECT *)scb, (i4)SCB_CB) == 0 )
	    {
		/* Resume is only legal when no transaction is active. */
		if (scb->scb_x_ref_count == 0)
		{
		    /* Clear user interrupt state in SCB. */
		    scb->scb_ui_state &= ~SCB_USER_INTR;
		    status = dm0m_allocate((i4)sizeof(DML_XCB), 0, 
			(i4)XCB_CB, (i4)XCB_ASCII_ID, (char *)scb, 
			(DM_OBJECT **)&xcb, &dmx->error);
		    if (status == E_DB_OK)
		    {
			if (odcb->odcb_dcb_ptr->dcb_status & DCB_S_JOURNAL)
			    btflags |= DMXE_JOURNAL;
			x = xcb;

			/* Reconnect to the log/lock context of the
			** willing-commit transaction.
			*/
			status = dmxe_resume(&dmx->dmx_dis_tran_id,
			    odcb->odcb_dcb_ptr,
			    &scb->scb_lock_list, &x->xcb_log_id, 
			    &x->xcb_tran_id, &x->xcb_lk_id, &dmx->error);
		    }

		    if (status == E_DB_OK)
		    {
			dm0s_minit(&x->xcb_cq_mutex,
				STprintf(sem_name, "XCB cq %p", x));
			x->xcb_x_type = XCB_UPDATE | XCB_DISTRIBUTED;
			STRUCT_ASSIGN_MACRO(dmx->dmx_dis_tran_id, x->xcb_dis_tran_id);
			x->xcb_state = XCB_WILLING_COMMIT;
			x->xcb_flags = 0;
			if (btflags & DMXE_JOURNAL)
			    x->xcb_flags |= XCB_JOURNAL;
			/* Link the new XCB at the head of the session's
			** transaction queue.
			*/
			x->xcb_q_next = scb->scb_x_next;
			x->xcb_q_prev = (DML_XCB*)&scb->scb_x_next;
			scb->scb_x_next->xcb_q_prev = xcb;
			scb->scb_x_next = x;
			x->xcb_scb_ptr = scb;
			/* Initialize the RCB, SPCB and XCCB queues to empty
			** (self-referencing sentinels).
			*/
			x->xcb_rq_next = (DMP_RCB*) &x->xcb_rq_next;
			x->xcb_rq_prev = (DMP_RCB*) &x->xcb_rq_next;
			x->xcb_sq_next = (DML_SPCB*) &x->xcb_sq_next;
			x->xcb_sq_prev = (DML_SPCB*) &x->xcb_sq_next;
			x->xcb_cq_next = (DML_XCCB*) &x->xcb_cq_next;
			x->xcb_cq_prev = (DML_XCCB*) &x->xcb_cq_next;
			x->xcb_odcb_ptr = odcb;
			dmx->dmx_tran_id = (char *)x;
			STRUCT_ASSIGN_MACRO(x->xcb_tran_id, dmx->dmx_phys_tran_id);
			scb->scb_x_ref_count++;
			STRUCT_ASSIGN_MACRO(scb->scb_user, x->xcb_username);

			/* Initialize remaining XCB fields */
			x->xcb_sp_id = 0;
			x->xcb_rep_seq = 0;
			x->xcb_rep_input_q = 0;
			x->xcb_rep_remote_tx = 0;
			x->xcb_s_open = 0;
			x->xcb_s_fix = 0;
			x->xcb_s_get = 0;
			x->xcb_s_replace = 0;
			x->xcb_s_delete = 0;
			x->xcb_s_insert = 0;
			x->xcb_s_cpu = 0;
			x->xcb_s_dio = 0;
			x->xcb_seq  = (DML_SEQ*)NULL;
			x->xcb_cseq = (DML_CSEQ*)NULL;
			x->xcb_pcb_list = NULL;
			x->xcb_crib_ptr = NULL;
			x->xcb_lctx_ptr = NULL;
			x->xcb_jctx_ptr = NULL;

			return (E_DB_OK);
		    }
		    /* dmxe_resume failed: release the half-built XCB. */
		    if (xcb != (DML_XCB *)0)
			dm0m_deallocate((DM_OBJECT **)&xcb);
		}
		else
		{
		    status = E_DB_ERROR;
		    SETDBERR(&dmx->error, 0, E_DM0060_TRAN_IN_PROGRESS);
		}
	    }
	    else
	    {
		status = E_DB_ERROR;
		SETDBERR(&dmx->error, 0, E_DM002F_BAD_SESSION_ID);
	    }
	}
	else
	{
	    status = E_DB_ERROR;
	    SETDBERR(&dmx->error, 0, E_DM0010_BAD_DB_ID);
	}

	break;
    }
    
    /* Map unexpected internal errors to the generic resume error. */
    if (dmx->error.err_code > E_DM_INTERNAL)
    {
	uleFormat( &dmx->error, 0, NULL, ULE_LOG , NULL, 
	    (char * )NULL, 0L, (i4 *)NULL, 
	    &local_error, 0);
	SETDBERR(&dmx->error, 0, E_DM0099_ERROR_RESUME_TRAN);
    }

    return (status);
}
Beispiel #19
0
/*{
** Name: dmve_ufmap - The recovery of an Fmap Update operation.
**
** Description:
**
** Inputs:
**	dmve_cb
**	    .dmve_log_rec	    The log record of the fmap operation.
**	    .dmve_action	    Should be DMVE_DO, DMVE_REDO, or DMVE_UNDO
**	    .dmve_dcb_ptr	    Pointer to DCB.
**	    .dmve_tran_id	    The physical transaction id.
**	    .dmve_lk_id		    The transaction lock list id.
**	    .dmve_log_id	    The logging system database id.
**	    .dmve_db_lockmode	    The lockmode of the database. Should be 
**				    DM2T_X or DM2T_S.
**
** Outputs:
**	dmve_cb
**	    .dmve_error.err_code    The reason for error status.
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	25-Feb-2009 (hanal04) Bug 121652
**          Created.
*/
/* Recovery of a DM0LUFMAP (FMAP update) log record; see block comment
** above for the full contract.  Acquires table/page locks as needed,
** fixes the FMAP page, checks page-vs-log LSNs, then dispatches to the
** redo/undo worker.  Cleanup (unfix page, release FHDR lock, unfix
** tabio) always runs, in reverse acquisition order.
*/
DB_STATUS
dmve_ufmap(
DMVE_CB		*dmve_cb)
{
    DMVE_CB		*dmve = dmve_cb;
    DM0L_FMAP		*log_rec = (DM0L_FMAP *)dmve->dmve_log_rec;
    LG_LSN		*log_lsn = &log_rec->fmap_header.lsn; 
    DMP_DCB		*dcb = dmve->dmve_dcb_ptr;
    DMP_TABLE_IO	*tbio = NULL;
    DM1P_FMAP		*fmap = NULL;
    DB_STATUS		status = E_DB_OK;
    DB_STATUS		tmp_status;
    DMPP_ACC_PLV	*loc_plv;
    LK_LKID		lockid;
    LK_LKID             fhdr_lockid;
    i4			lock_action;
    i4			grant_mode;
    i4			recovery_action;
    i4			error;
    i4			loc_error;
    i4  		fmap_page_recover;
    i4                  physical_fhdr_page_lock = FALSE;
    i4			fix_option = 0;
    i4			page_type = log_rec->fmap_pg_type;
    DB_ERROR		local_dberr;
    DMP_PINFO		*fmappinfo = NULL;

    CLRDBERR(&dmve->dmve_error);
    DMVE_CLEAR_TABINFO_MACRO(dmve);

    /* Zero lock ids so lock requests start fresh. */
    MEfill(sizeof(LK_LKID), 0, &lockid);
    MEfill(sizeof(LK_LKID), 0, &fhdr_lockid);

    for (;;)
    {
	/*
	** Consistency Check:  check for illegal log records.
	*/
	if ((log_rec->fmap_header.type != DM0LUFMAP) ||
	    (log_rec->fmap_header.length != sizeof(DM0L_UFMAP)))
	{
	    SETDBERR(&dmve->dmve_error, 0, E_DM9601_DMVE_BAD_PARAMETER);
	    break;
	}

	/*
	** Get handle to a tableio control block with which to read
	** and write pages needed during recovery.
	**
	** Warning return indicates that no tableio control block was
	** built because no recovery is needed on any of the locations 
	** described in this log record.
	*/
	status = dmve_fix_tabio(dmve, &log_rec->fmap_tblid, &tbio);
	if (DB_FAILURE_MACRO(status))
	    break;
	if (status == E_DB_WARN && 
		dmve->dmve_error.err_code == W_DM9660_DMVE_TABLE_OFFLINE)
	{
	    /* Table is offline: nothing to recover here. */
	    CLRDBERR(&dmve->dmve_error);
	    return (E_DB_OK);
	}

	/*
	** Get page accessors for page recovery actions.
	*/
	dm1c_get_plv(page_type, &loc_plv);

	/*
	** Check recovery requirements for this operation.  If partial
	** recovery is in use, then we may not need to recover all
	** the pages touched by the original update.
	*/
	fmap_page_recover = dmve_location_check(dmve, 
					(i4)log_rec->fmap_fmap_cnf_loc_id);

	/*
	** Get required Table/Page locks before we can start the updates.
	**
	** FHDR pages are locked using temporary physical locks.
	** Unlike many other recovery operations, it is possible that
	** this page lock request may block temporarily since the page
	** lock is not necessarily already held by the current transaction.
	** FMAP pages are not locked and are protected by the FHDR page lock.
	** (which means that we must lock the FHDR even if the FHDR is not
	** being recovered).
	**
	** Note that if the database is locked exclusively, or if an X table
	** lock is granted then no page lock is requried.
	*/
	if ((dcb->dcb_status & DCB_S_EXCLUSIVE) == 0)
	{
	    /*
	    ** Request IX lock in preparation of requesting an X page lock
	    ** below.  If the transaction already holds an exclusive table
	    ** lock, then an X lock will be granted.  In this case we can
	    ** bypass the page lock request.
	    */
	    status = dm2t_lock_table(dcb, &log_rec->fmap_tblid, DM2T_IX,
		dmve->dmve_lk_id, (i4)0, &grant_mode, &lockid, &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;

	    if (grant_mode != DM2T_X)
	    {               
		/*
		** Page lock required.
		*/
		lock_action = LK_PHYSICAL;
	
		/*
		** Lock the FHDR page.  Use a physical lock.
		** We must request this lock even if we are only
		** processing the FMAP page because the FMAP is
		** implicitly protected by the FHDR lock.
	 	*/
		if (fmap_page_recover)
		{
		    status = dm0p_lock_page(dmve->dmve_lk_id, dcb, 
			&log_rec->fmap_tblid, log_rec->fmap_fhdr_pageno,
			LK_PAGE, LK_X, LK_PHYSICAL, (i4)0, tbio->tbio_relid,
			tbio->tbio_relowner, &dmve->dmve_tran_id,
			&fhdr_lockid, (i4 *)0, (LK_VALUE *)0, &dmve->dmve_error);
		    if (status != E_DB_OK)
			break;

		    /* Remember to release this lock during cleanup. */
		    physical_fhdr_page_lock = TRUE;
		}
	    }
            else
               fix_option |= DM0P_TABLE_LOCKED_X;
	}

	/*
	** Fix the pages we need to recover in cache for write.
	*/
	if (fmap_page_recover)
	{
	    status = dmve_cachefix_page(dmve, log_lsn,
					tbio, log_rec->fmap_fmap_pageno,
					fix_option, loc_plv,
					&fmappinfo);
	    if (status != E_DB_OK)
		break;
	    fmap = (DM1P_FMAP*)fmappinfo->page;

	    /* Have to set page type if fixing in scratch mode */
	    if (dmve->dmve_action != DMVE_UNDO)
	    {
		dm0p_pagetype(tbio, (DMPP_PAGE *)fmap, 
					    dmve->dmve_log_id, DMPP_FMAP);
	    }
	}

	/*
	** Dump debug trace info about pages if such tracing is configured.
	*/
	if (DMZ_ASY_MACRO(15))
	{
	    dmve_trace_page_info(log_rec->fmap_pg_type, log_rec->fmap_page_size,
		(DMPP_PAGE *) fmap, loc_plv, "FMAP"); 
	}

	/*
	** Compare the LSN's on the pages with that of the log record
	** to determine what recovery will be needed.
	**
	**   - During Forward processing, if the page's LSN is greater than
	**     the log record then no recovery is needed.
	**
	**   - During Backward processing, it is an error for a page's LSN
	**     to be less than the log record LSN.
	**
	**   - Currently, during rollforward processing it is unexpected
	**     to find that a recovery operation need not be applied because
	**     of the page's LSN.  This is because rollforward must always
	**     begin from a checkpoint that is previous to any journal record
	**     begin applied.  In the future this requirement may change and
	**     Rollforward will use the same expectations as Redo.
	*/
	switch (dmve->dmve_action)
	{
	case DMVE_DO:
	case DMVE_REDO:
	    if (fmap && LSN_GTE(
		DM1P_VPT_ADDR_FMAP_PG_LOGADDR_MACRO(page_type, fmap),
		log_lsn) && ((dmve->dmve_flags & DMVE_ROLLDB_BOPT) == 0))
	    {
		if (dmve->dmve_action == DMVE_DO)
		{
		    uleFormat(NULL, E_DM9665_PAGE_OUT_OF_DATE, (CL_ERR_DESC *)NULL,
			ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL,
			&loc_error, 8,
			sizeof(DB_TAB_NAME), tbio->tbio_relid->db_tab_name,
			sizeof(DB_OWN_NAME), tbio->tbio_relowner->db_own_name,
			0, DM1P_VPT_GET_FMAP_PAGE_PAGE_MACRO(page_type, fmap),
			0, DM1P_VPT_GET_FMAP_PAGE_STAT_MACRO(page_type, fmap),
			0, DM1P_VPT_GET_FMAP_LOGADDR_HI_MACRO(page_type, fmap),
			0, DM1P_VPT_GET_FMAP_LOGADDR_LOW_MACRO(page_type, fmap),
			0, log_lsn->lsn_high, 0, log_lsn->lsn_low);
		}
		/* Page already reflects this update: skip recovery. */
		fmap = NULL;
	    }
	    break;

	case DMVE_UNDO:
	    if (fmap && LSN_LT(
		DM1P_VPT_ADDR_FMAP_PG_LOGADDR_MACRO(page_type, fmap),
		log_lsn))
	    {
		uleFormat(NULL, E_DM9665_PAGE_OUT_OF_DATE, (CL_ERR_DESC *)NULL,
		    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL,
		    &loc_error, 8,
		    sizeof(DB_TAB_NAME), tbio->tbio_relid->db_tab_name,
		    sizeof(DB_OWN_NAME), tbio->tbio_relowner->db_own_name,
		    0, DM1P_VPT_GET_FMAP_PAGE_PAGE_MACRO(page_type, fmap),
		    0, DM1P_VPT_GET_FMAP_PAGE_STAT_MACRO(page_type, fmap),
		    0, DM1P_VPT_GET_FMAP_LOGADDR_HI_MACRO(page_type, fmap),
		    0, DM1P_VPT_GET_FMAP_LOGADDR_LOW_MACRO(page_type, fmap),
		    0, log_lsn->lsn_high, 0, log_lsn->lsn_low);
		SETDBERR(&dmve->dmve_error, 0, E_DM9666_PAGE_LSN_MISMATCH);
		status = E_DB_ERROR;
	    }
	    break;
	}
	if (status != E_DB_OK || !fmap)
	    break;

	/*
	** Call appropriate recovery action depending on the recovery type
	** and record flags.  CLR actions are always executed as an UNDO
	** operation.
	*/
	recovery_action = dmve->dmve_action;
	if (log_rec->fmap_header.flags & DM0L_CLR)
	    recovery_action = DMVE_UNDO;

	switch (recovery_action)
	{
	case DMVE_DO:
	case DMVE_REDO:
	    status = dmv_reufmap(dmve, tbio, fmappinfo, log_rec);
	    break;

	case DMVE_UNDO:
	    status = dmv_unufmap(dmve, tbio, fmappinfo, log_rec, loc_plv);
	    break;
	}

	break;
    }

    if (status != E_DB_OK)
    {
	uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
	    (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
    }

    /*
    ** Unfix and unlock the pages in the opposite order (FMAP, FHDR)
    ** in which they were acquired (FHDR, FMAP).
    ** No need to force them to disk - they
    ** will be tossed out through normal cache protocols if Fast
    ** Commit or at the end of the abort if non Fast Commit.
    */

    if (fmappinfo)
    {
	tmp_status = dm0p_uncache_fix(tbio, DM0P_UNFIX, dmve->dmve_lk_id, 
		dmve->dmve_log_id, &dmve->dmve_tran_id, 
		fmappinfo, &local_dberr);
	if (tmp_status != E_DB_OK)
	{
	    uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &loc_error, 0);
	    /* Keep the most severe status seen so far. */
	    if (tmp_status > status)
		status = tmp_status;
	}
    }

    if (physical_fhdr_page_lock)
    {
	tmp_status = dm0p_unlock_page(dmve->dmve_lk_id, dcb, 
	    &log_rec->fmap_tblid, log_rec->fmap_fhdr_pageno, LK_PAGE,
	    tbio->tbio_relid, &dmve->dmve_tran_id, &fhdr_lockid,
	    (LK_VALUE *)NULL, &local_dberr);
	if (tmp_status != E_DB_OK)
	{
	    uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &loc_error, 0);
	    if (tmp_status > status)
		status = tmp_status;
	}
    }

    if (tbio)
    {
	tmp_status = dmve_unfix_tabio(dmve, &tbio, 0);
	if (tmp_status > status)
	    status = tmp_status;
    }

    if (status != E_DB_OK)
	SETDBERR(&dmve->dmve_error, 0, E_DM9641_DMVE_FMAP);

    return(status);
}
Beispiel #20
0
/*{
** Name: dmve_ext_alter - The recovery of an extent alteration.
**
** Description:
**	This function performs the recovery of the extent alteration
**	done when iiqef_alter_extension (an internal procedure) changes
**	bits in the config file to alter an extent type.
**
**	Currently the only operation supported is changing a defaultable
**	work location (DU_EXT_WORK) to an auxiliary work location (DU_EXT_AWORK)
**	and vice-versa.
**
**	For UNDO, we read the config file and if the update has been made
**	we reverse it, otherwise we just close it and continue.  For DO, we
**	read the config file and if the update was done we ignore, otherwise
**	we make the update.
**
** Inputs:
**	dmve_cb
**	    .dmve_log_rec	    The rename file log record.
**	    .dmve_action	    Should be DMVE_UNDO, DMVE_REDO or DMVE_DO.
**	    .dmve_dcb_ptr	    Pointer to DCB.
**
** Outputs:
**	dmve_cb
**	    .dmve_error.err_code    The reason for error status.
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	15-sep-93 (jrb)
**          Created for MLSort project.
**	15-apr-1994 (chiku)
**	    Bug56702: return logfull indication.
**	06-may-1996 (nanpr01)
**	    Get rid of compiler warning message.
*/
/* Recovery of a DM0LEXTALTER (extent alteration) log record; see the
** block comment above for the full contract.  UNDO reverses the config
** file flag change (writing a CLR first when logging); DO replays the
** alteration via dm2d_alter_db(); REDO is a no-op.
*/
DB_STATUS
dmve_ext_alter(
DMVE_CB		*dmve_cb)
{
    DMVE_CB		*dmve = dmve_cb;
    DB_STATUS		status = E_DB_OK;
    DB_STATUS		local_status = E_DB_OK;
    i4		error = E_DB_OK, local_error = E_DB_OK;
    DM0L_EXT_ALTER	*log_rec = (DM0L_EXT_ALTER *)dmve_cb->dmve_log_rec;
    LG_LSN		*log_lsn = &log_rec->ext_header.lsn; 
    DMP_DCB             *dcb;
    DM0C_CNF		*config = 0;	/* non-zero while config file is open */
    DM0C_CNF		*cnf = 0;
    i4             lock_list;
    DMP_LOC_ENTRY       *l;
    i4	        loc_count;
    i4	        i;
    i4	        recovery_action;
    i4	        dm0l_flags;
    DM2D_ALTER_INFO	dm2d;
    DB_ERROR		local_dberr;

    CLRDBERR(&dmve->dmve_error);

    for (;;)
    {
	/* Consistency check: reject malformed log records. */
	if (log_rec->ext_header.length != sizeof(DM0L_EXT_ALTER) || 
	    log_rec->ext_header.type != DM0LEXTALTER)
	{
	    SETDBERR(&dmve->dmve_error, 0, E_DM9601_DMVE_BAD_PARAMETER);
	    break;
	}

	dcb = dmve->dmve_dcb_ptr;
	lock_list = dmve->dmve_lk_id; 

	/* CLR records are always processed as UNDO. */
	recovery_action = dmve->dmve_action;
	if (log_rec->ext_header.flags & DM0L_CLR)
	    recovery_action = DMVE_UNDO;

        switch (recovery_action)
	{
	  case DMVE_UNDO:

	    /*
	    ** Write CLR if necessary
	    */
	    if ((dmve->dmve_logging) &&
	        ((log_rec->ext_header.flags & DM0L_CLR) == 0))
	    {
		dm0l_flags = log_rec->ext_header.flags | DM0L_CLR;

		status = dm0l_ext_alter(dmve->dmve_log_id, dm0l_flags, 
		    log_rec->ext_otype, log_rec->ext_ntype,
		    &log_rec->ext_lname, log_lsn, &dmve->dmve_error);
		if (status != E_DB_OK)
		{
		    /* XXXX Better error message and continue after logging. */
		    TRdisplay(
		 "dmve_ext_alter: dm0l_ext_alter error, status: %d, error: %d\n", 
			status, dmve->dmve_error.err_code);
		    /*
		     * Bug56702: return logfull indication.
		     */
		    dmve->dmve_logfull = dmve->dmve_error.err_code;
		    break;
		}
	    }

	    /*  
	    ** Open the configuration file. 
	    */
            status = dm0c_open(dcb, DM0C_NOLOCK, lock_list, &cnf, &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;
	    config = cnf;

	    /* 
	    ** Change bits for location named in the log_rec
	    */
	    loc_count = cnf->cnf_dsc->dsc_ext_count;
	    for (i = 0; i < loc_count; i++)
	    {	    
		l = &cnf->cnf_ext[i].ext_location;
		if (MEcmp((char *)&l->logical, (char *)&log_rec->ext_lname,
		    sizeof(DB_LOC_NAME)) == 0)
			break;
	    }
	    if (i >= loc_count)
	    {
		/* No entry found; this is bad... */
		TRdisplay(
	       "dmve_ext_alter: UNDO location '%s' not found in config file.\n",
		    (char *)&log_rec->ext_lname);

		SETDBERR(&dmve->dmve_error, 0, E_DM92A0_DMVE_ALTER_UNDO);
		status = E_DB_ERROR;
		break;
	    }

	    /* Undo changes to bits of the current location if necessary */
	    if (l->flags & log_rec->ext_ntype)
	    {
		l->flags &= ~(log_rec->ext_ntype);
		l->flags |= log_rec->ext_otype;
	    }

	    /*  Close the configuration file. */

	    status = dm0c_close(cnf, DM0C_UPDATE | DM0C_COPY, &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;
	    config = 0;
	    break;    

	case DMVE_REDO:
	    break;

	case DMVE_DO:
	    /* Fill in dm2d block in preparation for calling dm2d_alter_db
	    ** to rollforward the extent alteration
	    **
	    ** lock_no_wait doesn't matter because we won't do any logging
	    ** or locking in dm2d_alter_db when calling it from here.
	    */
	    dm2d.lock_list	= lock_list;
	    dm2d.lock_no_wait	= 1;
	    dm2d.logging	= 0;
	    dm2d.locking	= 0;
	    dm2d.name		= &dcb->dcb_name;
	    dm2d.db_loc		= (char *) &dcb->dcb_location.physical;
	    dm2d.l_db_loc	= dcb->dcb_location.phys_length;
	    dm2d.location_name  = &log_rec->ext_lname;
	    dm2d.alter_op	= DM2D_EXT_ALTER;

	    dm2d.alter_info.ext_info.drop_loc_type = log_rec->ext_otype; 
	    dm2d.alter_info.ext_info.add_loc_type  = log_rec->ext_ntype;

	    status = dm2d_alter_db(&dm2d, &dmve->dmve_error);

	    break;    
	} /* end switch. */

	/* If an error path left the config file open, close it without
	** writing any updates.
	*/
	if (config != 0)
	{
	    (void) dm0c_close(cnf, 0, &local_dberr);
	}	
	if (status != E_DB_OK)
	{
	    uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    break;
	}
	return(E_DB_OK);

    } /* end for. */

    /* Map unexpected internal errors to the generic location error. */
    if (dmve->dmve_error.err_code > E_DM_INTERNAL)
    {
	uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
	    (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	SETDBERR(&dmve->dmve_error, 0, E_DM9617_DMVE_LOCATION);
    }
    return(status);

}
Beispiel #21
0
/*{
** Name: sxf_call	- The main SXF entry point.
**
** Description:
**      The routine checks that the arguments to sxf_call look reasonable.
**	The implementing function is then called and operation completion
**	status is returned to the caller.
**
** Inputs:
**      op_code			The SXF operation code.
**      rcb			The SXF request control block for the operation.
**
** Outputs:
**	Returns:
**	    DB_STATUS
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	9-July-1992 (markg)
**	    Initial creation.
**	03-sep-1992 (pholman)
**	    Give calls NULL functionality for initial integration
**	20-oct-1992 (markg)
**	    Updated to have entry points for all SXF operations.
*/
DB_STATUS
sxf_call(
    SXF_OPERATION	op_code, 
    SXF_RCB		*rcb)
{
    DB_STATUS	    	status = E_DB_OK;
    EX_CONTEXT	    	context;
    i4	    	error;

    CLRDBERR(&rcb->sxf_error);

    /*
    ** Validate the request before arming the exception handler.
    ** The checks are made (and diagnosed) in a fixed order: opcode range,
    ** control block type, control block length, then server active.
    */
    if (op_code < SXF_MIN_OPCODE || op_code > SXF_MAX_OPCODE)
    {
	SETDBERR(&rcb->sxf_error, 0, E_SX0001_BAD_OP_CODE);
	return (E_DB_ERROR);
    }
    if (rcb->sxf_cb_type != SXFRCB_CB)
    {
	SETDBERR(&rcb->sxf_error, 0, E_SX0002_BAD_CB_TYPE);
	return (E_DB_ERROR);
    }
    if (rcb->sxf_length != sizeof (SXF_RCB))
    {
	TRdisplay("Bad SXF CB length. Input length %d expected %d\n",
	    rcb->sxf_length, sizeof(SXF_RCB));
	SETDBERR(&rcb->sxf_error, 0, E_SX0003_BAD_CB_LENGTH);
	return (E_DB_ERROR);
    }
    /* Only SXC_STARTUP may be issued before the server control block exists. */
    if (Sxf_svcb == NULL && op_code != SXC_STARTUP)
    {
	SETDBERR(&rcb->sxf_error, 0, E_SX000F_SXF_NOT_ACTIVE);
	return (E_DB_ERROR);
    }

    /*
    ** Fatal path: an exception fired during a previous dispatch attempt,
    ** or the SXF_SVCB has already been marked inconsistent (SXF_CHECK).
    ** In most cases returning a server-fatal error and letting the caller
    ** handle it is sufficient.  However, for an audit record write we must
    ** terminate the server ourselves: if client code mishandled the return
    ** status, system security could be compromised.
    */
    if (EXdeclare(ex_handler, &context) != OK
     || (Sxf_svcb != NULL && (Sxf_svcb->sxf_svcb_status & SXF_CHECK) != 0))
    {
	EXdelete();

	if (op_code == SXR_WRITE)
	{
	    _VOID_ ule_format(E_SX0005_INTERNAL_ERROR, NULL, ULE_LOG,
			NULL, NULL, 0L, NULL, &error, 0);
	    _VOID_ ule_format(E_SX1048_SERVER_TERMINATE, NULL, ULE_LOG,
			NULL, NULL, 0L, NULL, &error, 0);
	    _VOID_ CSterminate(CS_KILL, NULL);
	}

	SETDBERR(&rcb->sxf_error, 0, E_SX0005_INTERNAL_ERROR);
	return (E_DB_FATAL);
    }

    /* Route the request to its implementing function. */
    switch (op_code)
    {
    /* Control */
    case SXC_STARTUP:
	status = sxc_startup(rcb);
	break;

    case SXC_SHUTDOWN:
	status = sxc_shutdown(rcb);
	break;

    case SXC_BGN_SESSION:
	status = sxc_bgn_session(rcb);
	break;

    case SXC_END_SESSION:
	status = sxc_end_session(rcb);
	break;

    case SXC_ALTER_SESSION:
	status = sxc_alter_session(rcb);
	break;

    case SXC_AUDIT_THREAD:
	status = sxac_audit_thread(rcb);
	break;

    case SXC_AUDIT_WRITER_THREAD:
	status = sxac_audit_writer_thread(rcb);
	break;

    /* Audit file */
    case SXA_OPEN:
	status = sxaf_open(rcb);
	break;

    case SXA_CLOSE:
	status = sxaf_close(rcb);
	break;

    /* Audit record */
    case SXR_WRITE:
	status = sxar_write(rcb);
	break;

    case SXR_POSITION:
	status = sxar_position(rcb);
	break;

    case SXR_READ:
	status = sxar_read(rcb);
	break;

    case SXR_FLUSH:
	status = sxar_flush(rcb);
	break;

    /* Audit state */
    case SXS_ALTER:
	status = sxas_alter(rcb);
	break;

    case SXS_SHOW:
	status = sxas_show(rcb);
	break;
    }

    EXdelete();
    return (status);
}
Beispiel #22
0
/*{
** Name: dmve_del_location - The recovery of an delete location operation.
**
** Description:
**	This function performs the recovery of the delete location 
**      update operation.  This is to update the config file 
**      with any new locations deleted by unextenddb.
**	In the case of UNDO, reads the config file, if update has been
**      made, then it adds it otherwise it just closes and continues.
**      In case of DO, reads the config file, deletes and then closes.
**
** Inputs:
**	dmve_cb
**	    .dmve_log_rec	    The rename file log record.
**	    .dmve_action	    Should be DMVE_UNDO, DMVE_REDO or DMVE_DO.
**	    .dmve_dcb_ptr	    Pointer to DCB.
**
** Outputs:
**	dmve_cb
**	    .dmve_error.err_code    The reason for error status.
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	29-apr-2004 (gorvi01)
**          Created for UNEXTENDDB.
**	09-Nov-2004 (jenjo02)
**	    Relocated misplaced logging of CLR from DMVE_DO 
**	    to DMVE_UNDO
*/
DB_STATUS
dmve_del_location(
DMVE_CB		*dmve_cb)
{
    DMVE_CB		*dmve = dmve_cb;
    DB_STATUS		status = E_DB_OK;
    DB_STATUS		local_status = E_DB_OK;
    i4		error = E_DB_OK, local_error = E_DB_OK;
    DM0L_DEL_LOCATION	*log_rec = (DM0L_DEL_LOCATION *)dmve_cb->dmve_log_rec;
    LG_LSN		*log_lsn = &log_rec->loc_header.lsn; 
    DMP_DCB             *dcb;
    DM0C_CNF		*config = 0;	/* Non-zero while the config file is open;
					** used to force a close on error paths. */
    DM0C_CNF		*cnf = 0;
    i4             lock_list;
    DMP_LOC_ENTRY       *l;
    i4	        loc_count;
    i4	        i;
    i4	        recovery_action;
    i4	        dm0l_flags;
    DB_ERROR		local_dberr;

    CLRDBERR(&dmve->dmve_error);

    /* One-trip loop: any "break" below jumps to the common error
    ** reporting after the loop. */
    for (;;)
    {
	/* Sanity-check the log record before trusting its contents. */
	if (log_rec->loc_header.length != sizeof(DM0L_DEL_LOCATION) || 
	    log_rec->loc_header.type != DM0LDELLOCATION)
	{
	    SETDBERR(&dmve->dmve_error, 0, E_DM9601_DMVE_BAD_PARAMETER);
	    break;
	}

	dcb = dmve->dmve_dcb_ptr;
	lock_list = dmve->dmve_lk_id; 

	/* A CLR record is always processed as UNDO, whatever dmve_action says. */
	recovery_action = dmve->dmve_action;
	if (log_rec->loc_header.flags & DM0L_CLR)
	    recovery_action = DMVE_UNDO;

        switch (recovery_action)
	{

	   case DMVE_REDO:
	     /* Nothing to redo; the DO path below performs the change. */
	     break;

    	   case DMVE_DO:

	    /* 
	    ** Remove the location entry from the DCB, if it exists.
	    */
	    if (dcb->dcb_ext && dcb->dcb_ext->ext_count)
		loc_count = dcb->dcb_ext->ext_count;
	    else
	  	loc_count = 0;

	    /* Find the DCB extent entry matching the logged logical name. */
	    for (i = 0; i < loc_count; i++)
	    {	    
		l = &dcb->dcb_ext->ext_entry[i];
		if (MEcmp((char *)&l->logical, (char *)&log_rec->loc_name,
		    sizeof(DB_LOC_NAME)) == 0)
			break;
	    }
	    if (i >= loc_count)
	    {
		/* No entry found, nothing to remove. */
		;
#ifdef xDEBUG
		/* NOTE(review): message says "UNDO" but this is the DO path. */
		TRdisplay(
		    "dmve_del_location: UNDO location '%s' not found in DCB.\n",
		    (char *)&log_rec->loc_name);
#endif
	    }
	    else if (i == (loc_count - 1))
	    {
		/* This is last entry, easy. */
		dcb->dcb_ext->ext_entry[i].phys_length = 0;
	        dcb->dcb_ext->ext_count--;
	    }		
	    else
	    {	    
		/* In middle of list, compress (shift the tail down one slot). */
		loc_count--;
		MEcopy((char *)&dcb->dcb_ext->ext_entry[i+1].logical,
		   sizeof(DMP_LOC_ENTRY) * (loc_count-i),
		   (char *)&dcb->dcb_ext->ext_entry[i].logical);

		/* Mark the end of list. */

		dcb->dcb_ext->ext_entry[loc_count].phys_length = 0;
	        dcb->dcb_ext->ext_count--;
	    }

	    /*  
	    ** Open the configuration file. 
	    */
            status = dm0c_open(dcb, DM0C_NOLOCK, lock_list, 
                                       &cnf, &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;
	    config = cnf;

	    /* 
	    ** Delete this entry from the list. 
	    */
	    loc_count = cnf->cnf_dsc->dsc_ext_count;
	    for (i = 0; i < loc_count; i++)
	    {	    
		l = &cnf->cnf_ext[i].ext_location;
		if (MEcmp((char *)&l->logical, (char *)&log_rec->loc_name,
		    sizeof(DB_LOC_NAME)) == 0)
			break;
	    }
	    if (i >= loc_count)
	    {
		/* No entry found, nothing to delete; the break leaves the
		** switch and the config file is closed (no update) below. */
		break;
	    }

	    if (i == (loc_count - 1))
	    {
		/* This is last entry, easy. */
		cnf->cnf_ext[i].length = 0;
		cnf->cnf_ext[i].type = 0;
	        cnf->cnf_dsc->dsc_ext_count--;
		
	    }		
	    else
	    {	    
		/* In middle of list, compress (shift the tail down one slot). */
		loc_count--;
		MEcopy((char *)&cnf->cnf_ext[i+1].ext_location.logical,
			       sizeof(DMP_LOC_ENTRY)*(loc_count-i),
                               (char *)&cnf->cnf_ext[i].ext_location.logical);

		/* Mark the end of list. */

		cnf->cnf_ext[loc_count].length = 0;
		cnf->cnf_ext[loc_count].type = 0;
	        cnf->cnf_dsc->dsc_ext_count--;

	    }
	    /*  Close the configuration file, writing the update back. */

	    status = dm0c_close(cnf, DM0C_UPDATE | DM0C_COPY, &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;
	    config = 0;
	    break;    

	case DMVE_UNDO:

	    /*
	    ** Write CLR if necessary (only when logging and this record is
	    ** not itself a CLR).
	    */
	    if ((dmve->dmve_logging) &&
	        ((log_rec->loc_header.flags & DM0L_CLR) == 0))
	    {
		dm0l_flags = log_rec->loc_header.flags | DM0L_CLR;

		status = dm0l_del_location(dmve->dmve_log_id, dm0l_flags, 
		    log_rec->loc_type, &log_rec->loc_name, 
		    log_rec->loc_l_extent, &log_rec->loc_extent,
		    log_lsn, &dmve->dmve_error);
		if (status != E_DB_OK)
		{
		    /* XXXX Better error message and continue after logging. */
		    TRdisplay(
		 "dmve_del_location: dm0l_del_location error, status: %d, error: %d\n", 
			status, dmve->dmve_error.err_code);
		    /*
		     * Bug56702: return logfull indication.
		     */
		    dmve->dmve_logfull = dmve->dmve_error.err_code; 
		    break;
		}
	    }

	    /* If the location is already present in the DCB, the delete was
	    ** never applied; treat re-adding it as an error. */

	    l = dcb->dcb_ext->ext_entry;
	    loc_count = dcb->dcb_ext->ext_count;
	    for (i = 0; i < loc_count; i++, l++)
	    if ((MEcmp((char *)&l->logical, (char *)&log_rec->loc_name,
		sizeof(DB_LOC_NAME)) == 0)
		&& (l->flags == log_rec->loc_type))
		break;
	    if (i < loc_count)
	    {
		/* Found this entry, return error. */
		SETDBERR(&dmve->dmve_error, 0, E_DM007E_LOCATION_EXISTS);
		break;
	    }
		    
	    /* Open the configuration file (locked this time). */
            status = dm0c_open(dcb, 0, lock_list, 
                                       &cnf, &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;
	    config = cnf;

	    /* Check if there is room; extend the config file if not. */

            if (cnf->cnf_free_bytes	< sizeof(DM0C_EXT))
	    {
		status = dm0c_extend(cnf, &dmve->dmve_error);
		if (status != E_DB_OK)
		{
		    SETDBERR(&dmve->dmve_error, 0, E_DM0071_LOCATIONS_TOO_MANY);
		    break;
		}
	    }


	    /* Re-append the deleted location from the log record, and
	    ** zero-terminate the extent list. */
	    i = cnf->cnf_dsc->dsc_ext_count++;
	    cnf->cnf_ext[i].length = sizeof(DM0C_EXT);
	    cnf->cnf_ext[i].type = DM0C_T_EXT;
	    MEcopy((char *)&log_rec->loc_name, sizeof(DB_LOC_NAME),
                               (char *)&cnf->cnf_ext[i].ext_location.logical);
	    MEcopy((char *)&log_rec->loc_extent,  sizeof(DM_FILENAME),
			       (char *)&cnf->cnf_ext[i].ext_location.physical);
	    cnf->cnf_ext[i].ext_location.flags = log_rec->loc_type;
	    cnf->cnf_ext[i].ext_location.phys_length = log_rec->loc_l_extent;
	    cnf->cnf_ext[i+1].length = 0;
	    cnf->cnf_ext[i+1].type = 0;

	    /* Add new location info to DCB so RFP will be able to use it. */

	    dcb->dcb_ext->ext_count = cnf->cnf_dsc->dsc_ext_count;
	    STRUCT_ASSIGN_MACRO(cnf->cnf_ext[i].ext_location,
				dcb->dcb_ext->ext_entry[i]);

	    /*  Close the configuration file. */

	    status = dm0c_close(cnf, DM0C_UPDATE, &dmve->dmve_error);
	    if (status != E_DB_OK)
		break;
	    config = 0;
	    break;    

	} /* end switch. */
	/* If a break above left the config file open, close it without
	** updating (errors ignored; the primary error is what we report). */
	if (config != 0)
	{
	    (void) dm0c_close(cnf, 0, &local_dberr);
	}	
	if (status != E_DB_OK)
	{
	    uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	    break;
	}
	return(E_DB_OK);

    } /* end for. */
    /* Map any internal error to the generic location-recovery error. */
    if (dmve->dmve_error.err_code > E_DM_INTERNAL)
    {
	uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
	    (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
	SETDBERR(&dmve->dmve_error, 0, E_DM9617_DMVE_LOCATION);
    }
    return(status);

}
Beispiel #23
0
/*{
** Name: dmu_pindex - Creates multiple indices on a table.
**
**  INTERNAL DMF call format:      status = dmu_pindex(&dmu_cb);
**
**  EXTERNAL call format:          status = dmf_call(DMU_PINDEX_TABLE,&dmu_cb);
**
** Description:
**    The dmu_index function handles the creation of indices.
**    This dmu function is allowed inside a user specified transaction.
**    The table name must not be the same as a system table name and must 
**    not be the same name as any other table owned by the same user. 
**    The table that this index is defined on must exist and must be
**    identified by use of the internal identifier obtained from a dmt_show
**    operation.  It is assumed that the caller has verified that the
**    base table is owned by the same user.
**
** Inputs:
**      .dmu_cb
**          .type                           Must be set to DMU_UTILITY_CB.
**          .length                         Must be at least sizeof(DMU_CB).
**          .dmu_tran_id                    Must be the current transaction 
**                                          identifier returned from the begin 
**                                          transaction operation.
**          .dmu_flags_mask                 Must be zero.
**          .dmu_tbl_id                     Internal name of table to be 
**                                          indexed.
**          .dmu_index_name                 External name of index to be 
**                                          created.
**          .dmu_location.data_address      Pointer to array of locations.
**                                          Each entry in array is of type
**                                          DB_LOC_NAME.  
**	    .dmu_location.data_in_size      The size of the location array
**                                          in bytes.
**          .dmu_olocation.data_address     This holds source of gateway
**					    table if this is a gateway register.
**          .dmu_key_array.ptr_address      Pointer to an area used to input
**                                          an array of pointer to entries
**					    of type DMU_KEY_ENTRY.  
**                                          See below for description of 
**                                          <dmu_key_list> entries.
**          .dmu_key_array.ptr_size         Size of an entry.
**          .dmu_key_array.ptr_in_count     Count of entries.
**	    .dmu_attr_array.ptr_address	    Pointer to to area used to input
**					    an array or pointers to entries
**					    of type DMU_KEY_ENTRY.
**					    If this entry is not passed in
**					    all the key's given in the key
**					    array are considered part of the
**					    key.  If this pass in, only the
**					    keys in this list are considered
**					    part of the key.  The keys listed
**					    in this list must be a prefix set
**					    of the keys listed in the key array.
**          .dmu_attr_array.ptr_size        Size of an entry.
**          .dmu_attr_array.ptr_in_count    Count of entries.
**          .dmu_char_array.data_address    Pointer to an area used to input
**                                          an array of entries of type
**                                          DMU_CHAR_ENTRY.
**                                          See below for description of 
**                                          <dmu_char_array> entries.
**          .dmu_char_array.data_in_size    Length of char_array in bytes.
**	    .dmu_gwchar_array.data_address  Pointer to an array of gateway table
**					    characteristics.  These are used
**					    if the table is a DMU_GATEWAY type
**					    table.  These characteristics are
**					    passed directly down to the Ingres
**					    Gateway system.
**	    .dmu_gwchar_array.data_in_size  Length of gwchar_array in bytes.
**	    .dmu_gwattr_array.ptr_address   Pointer to array of pointers, each
**					    of which describes gateway specific
**					    information about a table column.
**					    This is used only if the table is
**					    a DMU_GATEWAY type table.  These
**					    entries are passed directly down to
**					    the Ingres Gateway system.
**	    .dmu_gwattr_array.ptr_size	    The size of each element in array.
**	    .dmu_gwattr_array.ptr_address   The number of pointers in the array.
**
**          <dmu_key_array> entries are of type DMU_KEY_ENTRY and
**          must have following format:
**          key_attr_name                   Name of attribute.
**          key_order                       Must be DMU_ASCENDING.
**
**          <dmu_char_array> entries are of type DMU_CHAR_ENTRY and
**          must have following format:
**          char_id                         Must be one of the dmu 
**                                          characteristics like 
**                                          DMU_STRUCTURE,
**                                          DMU_IFILL,
**					    DMU_DATAFILL,
**					    DMU_LEAFILL,
**                                          DMU_MINPAGES,
**                                          DMU_MAXPAGES,
**                                          DMU_UNIQUE,
**					    DMU_COMPRESSED,
**					    DMU_GATEWAY,
**					    DMU_INDEX_COMP.
**					    DMU_CONCURRENT_ACCESS
**					    DMU_DIMENSION
**					    DMU_TABLE_PRIORITY
**          char_value                      The value to associate with above
**                                          characteristic.
**
** Output:
**      dmu_cb 
**          .dmu_idx_id                     The internal table identifier 
**                                          assigned to this index.
**          .error.err_code                 One of the following error numbers.
**                                          E_DM0000_OK                
**                                          E_DM0006_BAD_ATTR_FLAG
**                                          E_DM0007_BAD_ATTR_NAME
**                                          E_DM0009_BAD_ATTR_SIZE
**                                          E_DM0008_BAD_ATTR_PRECISION
**                                          E_DM000A_BAD_ATTR_TYPE
**                                          E_DM000B_BAD_CB_LENGTH
**                                          E_DM000C_BAD_CB_TYPE
**					    E_DM000D_BAD_CHAR_ID
**					    E_DM000E_BAD_CHAR_VALUE
**                                          E_DM0010_BAD_DB_ID
**                                          E_DM001C_BAD_KEY_SEQUENCE.
**                                          E_DM001D_BAD_LOCATION_NAME.
**                                          E_DM001E_DUP_LOCATION_NAME.
**                                          E_DM001A_BAD_FLAG
**                                          E_DM0021_TABLES_TOO_MANY
**                                          E_DM002A_BAD_PARAMETER
**                                          E_DM0039_BAD_TABLE_NAME
**                                          E_DM003A_BAD_TABLE_OWNER
**                                          E_DM003B_BAD_TRAN_ID
**                                          E_DM0042_DEADLOCK
**                                          E_DM0045_DUPLICATE_KEY
**                                          E_DM004A_INTERNAL_ERROR
**                                          E_DM004B_LOCK_QUOTA_EXCEEDED
**                                          E_DM0054_NONEXISTENT_TABLE
**                                          E_DM0059_NOT_ALL_KEYS
**                                          E_DM005D_TABLE_ACCESS_CONFLICT
**                                          E_DM005E_CANT_UPDATE_SYSCAT
**                                          E_DM005F_CANT_INDEX_CORE_SYSCAT
**                                          E_DM0064_USER_INTR
**                                          E_DM0065_USER_ABORT
**	    				    E_DM006A_TRAN_ACCESS_CONFLICT
**                                          E_DM0071_LOCATIONS_TOO_MANY
**                                          E_DM0072_NO_LOCATION
**                                          E_DM0078_TABLE_EXISTS
**					    E_DM007D_BTREE_BAD_KEY_LENGTH
**					    E_DM010F_ISAM_BAD_KEY_LENGTH
**					    E_DM0110_COMP_BAD_KEY_LENGTH
**					    E_DM0092_ERROR_INDEXING_TABLE
**                                          E_DM009F_ILLEGAL_OPERATION
**                                          E_DM0100_DB_INCONSISTENT
**                                          E_DM0103_TUPLE_TOO_WIDE
**                                          E_DM010C_TRAN_ABORTED
**                                          E_DM0125_DDL_SECURITY_ERROR
**
**          .error.err_data                 Set to attribute in error by 
**                                          returning index into attribute list.
**      Returns:
**          E_DB_OK                         Function completed normally. 
**          E_DB_WARN                       Function completed normally with 
**                                          a termination status which is in 
**                                          dmu_cb.error.err_code.
**          E_DB_ERROR                      Function completed abnormally with 
**                                          a termination status which is in
**                                          dmu_cb.error.err_code.
**          E_DB_FATAL                      Function completed with a fatal
**                                          error which must be handled
**                                          immediately.  The fatal status is in
**                                          dmu_cb.error.err_code.
**
** History:
**      01-may-98 (nanpr01) 
**          Created from dmu_index.
**      18-mar-1999 (stial01)
**          Copy err_code, err_data into corresponding dmu cb, not 1st. 
**      01-may-1999 (stial01)
**          Fixed error handling.
**      17-Apr-2001 (horda03) Bug 104402
**          Added support for TCB_NOT_UNIQUE attribute.
**	15-jul-2002 (hayke02)
**	    Initialise systemGenerated et al at the beginning of the
**	    dmu/indx_cb for loop. This prevents non-system generated
**	    (constraint) persistent indices being marked as such after a
**	    system generated index has already been processed. This change
**	    fixes bug 107621.
**      24-jul-2002 (hanal04) Bug 108330 INGSRV 1847
**          Initialise the new indxcb_dmveredo field.
**	22-Dec-2003 (jenjo02)
**	    Added DMU_GLOBAL_INDEX for Partitioned Table Project.
**	6-Feb-2004 (schka24)
**	    Get rid of DMU statement count and its limit.
**	08-jul-2004 (thaju02)
**	    Online Modify - init indxcb_online_fhdr_pageno.
**	    (B112610)
**	11-Mar-2005 (thaju02)
**	    Use $online idxs relation info. (B114069)
**	11-Nov-2005 (jenjo02)
**	    Replaced dmx_show() with the more robust 
**	    dmxCheckForInterrupt() to standardize external
**	    interrupt handling.
**	15-Aug-2006 (jonj)
**	    Moved guts to common dmuIndexSetup().
**      25-oct-2006 (stial01)
**          Fixed initialization of DM2U_INDEX_CB
**	24-Oct-2008 (jonj)
**	    Cleaned up readability, fix potential memory leak.
**	22-Jul-2009 (thaju02)
**	    For E_DM007D, dmu_tup_cnt was not getting set. 
*/
DB_STATUS
dmu_pindex(DMU_CB    *dmu_cbs)
{
    DMU_CB		*dmu = dmu_cbs;
    DMU_CB		*ndmu;
    DM2U_INDEX_CB	*indx_cb, *indx_cbs, *curr_indx_cb = NULL;
    DB_OWN_NAME		table_owner;
    DB_TAB_NAME		table_name;
    DML_XCB		*xcb;
    i4			local_error;
    i4			NiX, k, tot_size;
    DB_STATUS		status;

    CLRDBERR(&dmu->error);

    /* Count how many control blocks were passed, chained via q_next. */
    for ( NiX = 0, ndmu = dmu; ndmu; NiX++ )
	ndmu = (DMU_CB*)ndmu->q_next;

    /* A single index request needs none of the parallel machinery. */
    if (NiX == 1)
	return (dmu_index(dmu));

    /* Allocate one DM2U_INDEX_CB per request in a single chunk. */
    tot_size = sizeof(DM2U_INDEX_CB) * NiX; 
    status = dm0m_allocate(tot_size, 0, 
			   (i4)DM2U_IND_CB, (i4)DM2U_IND_ASCII_ID, 
			   (char *)dmu, (DM_OBJECT **)&indx_cbs, &dmu->error);
    if (status != E_DB_OK)
    {
	uleFormat(&dmu->error, 0, NULL, ULE_LOG, NULL, 
		NULL, 0, NULL, &local_error, 0);
	return(E_DB_ERROR);
    }

    ndmu = dmu;
    indx_cb = indx_cbs;

    /* Set up one index control block per DMU_CB and chain them forward. */
    for (k = 0; k < NiX; k++)
    {
	indx_cb->indxcb_tab_name = &table_name;
	indx_cb->indxcb_tab_owner = &table_owner;

	status = dmuIndexSetup(ndmu, (PTR)indx_cb);
	if (status != E_DB_OK)
	{
	    /* copy error info to "first" dmu, which we report through */
	    dmu->error = ndmu->error;
	    break;
	}

	/* Now link up the control blocks.
	** NOTE(review): q_prev is always left NULL; the chain is
	** traversed forward-only. */
	indx_cb->q_next = NULL;
	indx_cb->q_prev = NULL;
	if (curr_indx_cb != NULL)
	    curr_indx_cb->q_next = indx_cb;
	curr_indx_cb = indx_cb;

	ndmu = (DMU_CB*)ndmu->q_next;
	indx_cb++;
    }

    /*
    ** Bail out on any setup failure.  The status check also covers the
    ** (defensive) case where dmuIndexSetup failed without recording an
    ** err_code; previously that would have fallen through to
    ** dm2u_pindex() with a partially built list.
    */
    if (status != E_DB_OK || dmu->error.err_code)
    {
	if (dmu->error.err_code > E_DM_INTERNAL)
	{
	    uleFormat(&dmu->error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		(char * )NULL, (i4)0, (i4 *)NULL, &local_error, 0);
	    SETDBERR(&dmu->error, 0, E_DM0092_ERROR_INDEXING_TABLE);
	}

	/* No leaking! */
	dm0m_deallocate((DM_OBJECT **) &indx_cbs);

	return(E_DB_ERROR);
    }

    /* Calls the physical layer to process the rest of the index create */
    status = dm2u_pindex(indx_cbs);

    /*
    ** Post-process each request: audit successful creates, and on
    ** failure locate the first DMU_CB carrying an error so the
    ** transaction state can be adjusted accordingly.
    */
    for (k = 0, ndmu = dmu, indx_cb = indx_cbs; k < NiX; 
	 k++, indx_cb++, ndmu = (DMU_CB *)ndmu->q_next)
    {
	/* Audit successful index on TABLE. */
	if ( status == E_DB_OK && dmf_svcb->svcb_status & SVCB_C2SECURE )
	{
	    status = dma_write_audit(
	       SXF_E_TABLE,
	       SXF_A_SUCCESS | SXF_A_INDEX,
	       ndmu->dmu_index_name.db_tab_name, /* index name */
	       sizeof(ndmu->dmu_index_name.db_tab_name),
	       &ndmu->dmu_owner,		   /* Table/view owner */
	       I_SX2011_INDEX_CREATE,
	       FALSE, /* Not force */
	       &ndmu->error, NULL);
	}

	if ( status )
	{
	    /* Find the first one that got an error */
	    if ( ndmu->error.err_code == 0 )
		continue;

	    if (ndmu->error.err_code > E_DM_INTERNAL)
	    {
		uleFormat(&ndmu->error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		    (char * )NULL, (i4)0, (i4 *)NULL, &local_error, 0);
		SETDBERR(&ndmu->error, 0, E_DM0092_ERROR_INDEXING_TABLE);
	    }
	    else if ( (xcb = indx_cb->indxcb_xcb) != NULL )
	    {
		/* Map the error onto the appropriate transaction state. */
		switch (ndmu->error.err_code)
		{
		case E_DM004B_LOCK_QUOTA_EXCEEDED:
		case E_DM0112_RESOURCE_QUOTA_EXCEED:
		case E_DM0092_ERROR_INDEXING_TABLE:
		case E_DM0045_DUPLICATE_KEY:
		case E_DM006A_TRAN_ACCESS_CONFLICT:
		    xcb->xcb_state |= XCB_STMTABORT;
		    break;

		case E_DM0042_DEADLOCK:
		case E_DM004A_INTERNAL_ERROR:
		case E_DM0100_DB_INCONSISTENT:
		    xcb->xcb_state |= XCB_TRANABORT;
		    break;
		case E_DM0065_USER_INTR:
		    xcb->xcb_state |= XCB_USER_INTR;
		    break;
		case E_DM010C_TRAN_ABORTED:
		    xcb->xcb_state |= XCB_FORCE_ABORT;
		    break;
		case E_DM007D_BTREE_BAD_KEY_LENGTH:
		    /* Report the maximum key length back to the caller. */
		    ndmu->dmu_tup_cnt = indx_cb->indxcb_maxklen;
		    break;
		default:
		    break;
		}
	    }
	    break;
	}
    } 

    dm0m_deallocate((DM_OBJECT **) &indx_cbs);

    return(status);
}
Beispiel #24
0
/*{
** Name:  dmr_get - Get a record.
**
**  INTERNAL DMF call format:      status = dmr_get(&dmr_cb);
**
**  EXTERNAL call format:          status = dmf_call(DMR_GET,&dmr_cb);
**
** Description:
**      This function gets a record from a table.  It can either get a record
**      by tuple identifier, re-get the last record returned, or get the
**      next record that passes the qualification specified by the dmr_position
**      operation.  If no more records meet the qualification the caller is
**      returned an error indicating no next record.
**
**	As a special case for aggregate optimization, this function may
**	be called to obtain the count of records in the table, which prior
**	to this change required getting all records with multiple calls
**	to this function.
** 
**      Note:  When a B1 secure server is running, this interface only 
**      returns records that pass the B1 MAC assurances.  Namely, only
**      records with a security label that is dominated by the security 
**      label of the requester are returned.
**
** Inputs:
**      dmr_cb
**          .type                           Must be set to DMR_RECORD_CB.
**          .length                         Must be at least
**                                          sizeof(DMR_RECORD_CB) bytes.
**          .dmr_flags_mask                 Must be DMR_NEXT, DMR_PREV,        
**					    DMR_CURRENT_POS or DMR_BY_TID.
**          .dmr_access_id                  Record access identifer returned
**                                          from DMT_OPEN that identifies a
**                                          table.
**          .dmr_tid                        If dmr_flags_mask = DMR_BY_TID, then
**                                          field is used as a tuple identifer.
**          .dmr_data.data_address          Pointer to area to return the
**                                          requested record.
**          .dmr_data.data_in_size          Size of area for record.
**
** Outputs:
**      dmr_cb
**          .dmr_tid                        The tuple identifier of the record
**                                          being returned.
**          .dmr_data.data_address          The record is stored here.
**          .dmr_data.data_out_size         The size of the returned record.
**          .error.err_code                 One of the following error numbers.
**                                          E_DM0000_OK                
**                                          E_DM000B_BAD_CB_LENGTH
**                                          E_DM000C_BAD_CB_TYPE
**                                          E_DM000F_BAD_DB_ACCESS_MODE
**                                          E_DM0011_BAD_DB_NAME
**                                          E_DM001A_BAD_FLAG
**                                          E_DM001D_BAD_LOCATION_NAME
**                                          E_DM002B_BAD_RECORD_ID
**                                          E_DM003C_BAD_TID
**                                          E_DM0042_DEADLOCK
**                                          E_DM0044_DELETED_TID
**					    E_DM0047_UPDATED_TUPLE
**                                          E_DM004A_INTERNAL_ERROR
**                                          E_DM004B_LOCK_QUOTA_EXCEEDED
**					    E_DM004D_LOCK_TIMER_EXPIRED
**                                          E_DM0055_NONEXT
**                                          E_DM0065_USER_INTR
**                                          E_DM0064_USER_ABORT
**                                          E_DM0073_RECORD_ACCESS_CONFLICT
**                                          E_DM0074_NOT_POSITIONED
**					    E_DM008A_ERROR_GETTING_RECORD
**                                          E_DM0100_DB_INCONSISTENT
**                                          E_DM010C_TRAN_ABORTED
**                                          E_DM0112_RESOURCE_QUOTA_EXCEEDED
**					    E_DM006E_NON_BTREE_GETPREV
**
**      Returns:
**         E_DB_OK                          Function completed normally.
**         E_DB_WARN                        Function completed normally with a 
**                                          termination status which is in
**                                          dmr_cb.err_code.
**         E_DB_ERROR                       Function completed abnormally 
**                                          with a 
**                                          termination status which is in
**                                          dmr_cb.err_code.
**         E_DB_FATAL                       Function completed with a fatal
**                                          error which must be handled
**                                          immediately.  The fatal status is in
**                                          dmr_cb.err_code.
** History:
**      01-sep-85 (jennifer)
**	    Created new for jupiter.
**	17-dec-1985 (derek)
**	    Completed code.
**	28-jul-1989 (mikem)
**	    Added logging of database, table, and owner when we get an internal
**	    error.
**	15-aug-1989 (rogerk)
**	    Added support for Non-SQL Gateway.  If getting record from a 
**	    gateway secondary index, then make sure that record buffer is
**	    large enough to hold a record from the base table, as that is
**	    what the gateway returns.  This is somewhat hokey, and would be
**	    better if the secondary index could actually describe the records
**	    being returned back, but...
**	15-oct-90 (linda)
**	    Integrate bug fix for gateway secondary index support:  perform
**	    sanity check on table width *after* switching tcb's.
**	11-feb-1991 (linda)
**	    Check for dmr_char->char_id == DMR_TIDJOIN, if it does then set
**	    rcb->rcb_tidjoin = RCB_TIDJOIN.  Part of gateway secondary index
**	    support.
**      22-apr-92 (schang)
**          GW merge
**	    30-apr-1991 (rickh)
**	        Removed the 11-feb-1991 tidjoin logic.  Let stand the change in
**	        where table width calculation occurs.
**	    22-jul-1991 (rickh)
**	        And now remove the table width calculation change that went in
**	        with the 11-feb-1991 tidjoin logic.
**	28-may-1993 (robf)
**	   Secure 2.0: Reworked old ORANGE code.
**	23-aug-1993 (bryanp)
**	    Fix a few cut-and-paste errors in some error message parameters.
**	31-jan-1994 (bryanp) B58487
**	    Handle failures in both dm2r_get and dm2r_unfix_pages.
**      30-aug-1994 (cohmi01)
**          Add DMR_PREV support for FASTPATH rel. Error if not btree.
**	22-may-1995 (cohmi01)
**	    Add support for count-only, for aggregate optimisation.
**	21-aug-1995 (cohmi01)
**	    count-only aggregate code moved to dml!dmragg.c
**      22-nov-96 (stial01,dilma04)
**          Row Locking Project:
**          Unfix all pages before leaving DMF if row locking.
**      14-may-97 (dilma04)
**          Cursor Stability Project:
**          - upgrade isolation level from CS to RR, if DMR_SORT flag is set;
**          - if isolation level is CS or RR, set RCB_CSRR_LOCK locking mode 
**          for the time of dm2r_get() call.
**      21-may-97 (stial01)
**          Row locking: No more LK_PH_PAGE locks, so page(s) can stay fixed.
**	19-dec-97 (inkdo01)
**	    Changes for sorts which do NOT materialize results in temp tables.
**	    get is now directed straight to DMF sorter.
**      08-oct-98 (stial01)
**          Deallocate load context after all records read from DMF sorter.
**      09-dec-98 (stial01)
**          DMR_PKEY_PROJECTION: check for relspec BTREE not table_type which
**          may be GATEWAY
**      11-aug-2003 (chash01)
**          For RMS Gateway index table, add specific test to make sure
**          dmr->dmr_data.data_in_size + sizeof(DM_TID)
**          is no more than the value in table_width
**	12-Feb-2004 (schka24)
**	    Defend against someone doing a get on a partitioned master.
**	03-Nov-2004 (jenjo02)
**	    Relocated CSswitch from dmf_call to here; don't waste the
**	    call if Factotum thread.
**	11-Nov-2005 (jenjo02)
**	    Replaced dmx_show() with the more robust 
**	    dmxCheckForInterrupt() to standardize external
**	    interrupt handling.
**	11-Sep-2006 (jonj)
**	    Don't dmxCheckForInterrupt if extended table as txn is
**	    likely in a recursive call and not at an atomic
**	    point in execution as required for LOGFULL_COMMIT.
**	13-Feb-2007 (kschendel)
**	    Replace CSswitch with cancel check.
**	11-Apr-2008 (kschendel)
**	    Roll arithmetic exceptions into caller specified ADFCB.
**	    This is part of getting DMF qual context out of QEF.
**	15-Jan-2010 (jonj)
**	    SIR 121619 MVCC: Don't change isolation level if crow_locking()
**	03-Mar-2010 (jonj)
**	    SIR 121619 MVCC, blob support:
**	    Set rcb_dmr_opcode here; dmpe bypasses dmf_call,
**	    which used to set it.
*/
/*
** dmr_get - Fetch a record from an open table.
**
** Translates the caller's DMR_* positioning flags into DM2R_* get
** flags, then either pulls the next row from the DMF sorter
** (DMR_SORTGET) or calls dm2r_get() for a normal retrieval.  On
** failure the error code is mapped into the appropriate transaction
** (XCB) abort state before returning.
**
** Fixes relative to the previous version:
**  - The NULL test on the RCB pointer is now made BEFORE dm0m_check()
**    inspects the control block; the old test sat inside the
**    dm0m_check()==OK branch and could never fire.
**  - 'flag' is zero-initialized: the DMR_NEXT+DMR_SORTGET path
**    deliberately sets no flag, but the DMR_PKEY_PROJECTION block
**    below reads it, which was an uninitialized read.
*/
DB_STATUS
dmr_get(
DMR_CB  *dmr_cb)
{
    DMR_CB		*dmr = dmr_cb;
    DMP_RCB		*rcb;
    DMP_TCB		*tcb;
    DML_XCB		*xcb;
    i4		flag = 0;	/* DM2R_* get flags; see note above */
    i4		table_width;
    DB_STATUS		status, local_status;
    i4		error, local_error;
    DB_ERROR		local_dberr;

    CLRDBERR(&dmr->error);

    for (status = E_DB_ERROR;;)
    {
	rcb = (DMP_RCB *)dmr->dmr_access_id;

	/* Validate the pointer before dm0m_check() examines the block. */
	if (rcb == NULL)
	{
	    uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
		NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1,
		sizeof("record")-1, "record");
	    SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
	    break;
	}

	if (dm0m_check((DM_OBJECT *)rcb, (i4)RCB_CB) == E_DB_OK)
	{
	    /* dmpe (blob) code bypasses dmf_call, so record the opcode here */
	    rcb->rcb_dmr_opcode = DMR_GET;

	    tcb = rcb->rcb_tcb_ptr;
	    if (tcb == NULL)
	    {
		uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
		    NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1,
		    sizeof("table")-1, "table");
		SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		break;
	    }
	    /* Gets must target a physical partition, never a partitioned
	    ** master table.
	    */
	    if (tcb->tcb_rel.relstat & TCB_IS_PARTITIONED)
	    {
		uleFormat(&dmr->error, E_DM0022_BAD_MASTER_OP, NULL, ULE_LOG, NULL,
			NULL, 0, NULL, &error, 3,
			0, "dmrget",
			sizeof(DB_OWN_NAME),tcb->tcb_rel.relowner.db_own_name,
			sizeof(DB_TAB_NAME),tcb->tcb_rel.relid.db_tab_name);
		break;
	    }
	    xcb = rcb->rcb_xcb_ptr;
	    if (xcb == NULL)
	    {
		uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
		    NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1,
		    sizeof("transaction")-1, "transaction");
		SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		break;
	    }

	    /* Map the caller's positioning request to a dm2r_get() flag. */
	    if (dmr->dmr_flags_mask & DMR_NEXT)
	    {
		/* SORTGET rows come straight from the DMF sorter below,
		** so no dm2r flag is needed in that case.
		*/
		if ((dmr->dmr_flags_mask & DMR_SORTGET) == 0)
		    flag = DM2R_GETNEXT;
	    }
	    else if (dmr->dmr_flags_mask & DMR_BY_TID)
		flag = DM2R_BYTID;
	    else if (dmr->dmr_flags_mask & DMR_CURRENT_POS)
	    {
		/* "Current position" is implemented as a by-TID get of
		** the row the RCB is currently positioned on.
		*/
		flag = DM2R_BYTID;
		dmr->dmr_tid = rcb->rcb_currenttid.tid_i4;
	    }
	    else if (dmr->dmr_flags_mask & DMR_PREV)
	    {
		flag = DM2R_GETPREV;
		if (dmr->dmr_flags_mask & DMR_RAAT)
		    flag |= DM2R_RAAT;
		/* Backward scans are only supported on btrees */
		if (tcb->tcb_table_type != TCB_BTREE)
		{
		    SETDBERR(&dmr->error, 0, E_DM006E_NON_BTREE_GETPREV);
		    break;
		}
	    }
	    else
	    {
		SETDBERR(&dmr->error, 0, E_DM001A_BAD_FLAG);
		break;
	    }

	    /* Check for btree primary key projection: only legal on a
	    ** btree base table (check relspec, not table_type, which may
	    ** say GATEWAY) during a forward or backward scan.
	    */
	    if (dmr->dmr_flags_mask & DMR_PKEY_PROJECTION)
	    {
		if ((tcb->tcb_rel.relspec == TCB_BTREE) &&
		    ((tcb->tcb_rel.relstat & TCB_INDEX) == 0) &&
		    (flag == DM2R_GETNEXT || flag == DM2R_GETPREV))
		    flag |= DM2R_PKEY_PROJ;
		else
		{
		    SETDBERR(&dmr->error, 0, E_DM001A_BAD_FLAG);
		    break;
		}
	    }

	    if (xcb->xcb_scb_ptr == NULL )
	    {
		uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
		    NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1,
		    sizeof("session")-1, "session");
		SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		break;
	    }

	    /* Check for external interrupts.  Skip extended (blob) tables:
	    ** the txn is likely in a recursive call and not at an atomic
	    ** point in execution as required for LOGFULL_COMMIT.
	    */
	    if ( xcb->xcb_scb_ptr->scb_ui_state && !tcb->tcb_extended )
		dmxCheckForInterrupt(xcb, &error);

	    if (xcb->xcb_state == 0)
	    {
		/*
		** Make sure the caller's buffer is large enough to hold
		** a record from this table.
		**
		** Note that for Gateway secondary index's, retrieves done
		** will actually return records from the base table!!!! So
		** if this is a get on a gateway 2nd index, make sure the
		** buffer is large enough.
		** Aug-4-2003 (chash01)  The value relwid is sizeof(DM_TID) +
		** sizeof(index column's length)., but the value in
		** data_in_size is the size of base table columns.  This leads
		** to serious problem (looping) in DMFCALL() if the size of
		** base table columns is less than the value in relwid. This
		** RMS gateway specific problem will be tested specifically.
		*/
		table_width = tcb->tcb_rel.relwid;

		if ( (dmr->dmr_data.data_address) &&
		       (
			 (tcb->tcb_rel.relgwid != GW_RMS &&
			  dmr->dmr_data.data_in_size >= table_width) ||
			   ( tcb->tcb_rel.relgwid == GW_RMS &&
			     dmr->dmr_data.data_in_size + sizeof(DM_TID)
				>= table_width)
		       )
		   )
		{
		    dmr->dmr_data.data_out_size = table_width;

		    /* Upgrade isolation level to repeatable read if a 
		    ** cursor stability transaction is getting tuples 
		    ** to sort them for further update of this table,
		    ** but not if MVCC crow_locking().
		    */
		    if ( !crow_locking(rcb) )
		    {
			if (dmr->dmr_flags_mask & DMR_SORT &&
			    rcb->rcb_access_mode == RCB_A_WRITE &&
			    rcb->rcb_iso_level == RCB_CURSOR_STABILITY)
			{
			    rcb->rcb_iso_level = RCB_REPEATABLE_READ;
			}

			if (rcb->rcb_iso_level == RCB_CURSOR_STABILITY ||
			    rcb->rcb_iso_level == RCB_REPEATABLE_READ)
			{
			     rcb->rcb_state |= RCB_CSRR_LOCK;
			}
		    }

		    /*
		    ** Quick troll for external interrupts.
		    */
		    CScancelCheck(rcb->rcb_sid);

		    /* If this is a SORTGET, call DMF sorter to retrieve
		    ** next row. 
		    */
		    if (dmr->dmr_flags_mask & DMR_SORTGET)
		    {
			DM2R_L_CONTEXT	*lct;
			lct = (DM2R_L_CONTEXT *)tcb->tcb_lct_ptr;
			status = dmse_get_record(lct->lct_srt,
					&lct->lct_record, &dmr->error);

			if (status == E_DB_OK)
			{
			    MEcopy((PTR)lct->lct_record, 
				dmr->dmr_data.data_in_size, 
				(PTR)dmr->dmr_data.data_address);
			}
			else
			{
			    /* eof or error, call dmse to finish up. */
			    local_status = dmse_end(lct->lct_srt, &local_dberr);
			    if (local_status != E_DB_OK)
			    {
				dmr->error = local_dberr;
				status = local_status;
			    }

			    /* Deallocate load context */
			    if (lct->lct_mct.mct_buffer != (PTR)0)
			    {
				dm0m_deallocate((DM_OBJECT **)&lct->lct_mct.mct_buffer);
			    }

			    dm0m_deallocate((DM_OBJECT **)&tcb->tcb_lct_ptr);
			    tcb->tcb_lct_ptr = 0;
			    rcb->rcb_state &= ~RCB_LSTART;
			}

			return(status);
		    }

		    /* Regular ol' gets are handled here. */

		    xcb->xcb_scb_ptr->scb_qfun_errptr = &dmr->error;
		    status = dm2r_get(rcb, (DM_TID*)&dmr->dmr_tid, flag, 
			dmr->dmr_data.data_address, &dmr->error);

		    xcb->xcb_scb_ptr->scb_qfun_errptr = NULL;

		    /* If any arithmetic warnings to the RCB ADFCB during
		    ** position, roll them into the caller's ADFCB.
		    */
		    if (dmr->dmr_q_fcn != NULL && dmr->dmr_qef_adf_cb != NULL
		      && rcb->rcb_adf_cb->adf_warncb.ad_anywarn_cnt > 0)
			dmr_adfwarn_rollup((ADF_CB *)dmr->dmr_qef_adf_cb, rcb->rcb_adf_cb);

		    /* System catalogs: unfix pages before returning to
		    ** avoid saturating the cache.
		    */
		    if ((tcb->tcb_rel.relstat & TCB_CONCUR))
		    {
			local_status = dm2r_unfix_pages(rcb, &local_dberr);
			if (local_status != E_DB_OK)
			{
			    if (status == E_DB_OK)
			    {
				status = local_status;
				dmr->error = local_dberr;
			    }
			    else
			    {
				uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL,
					ULE_LOG, NULL, (char * )NULL, (i4)0,
					(i4 *)NULL, &local_error, 0);
			    }
			}
		    }

		    rcb->rcb_state &= ~RCB_CSRR_LOCK;

		    if (status == E_DB_OK)
			return (status);
		}
		else
		    SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
	    }
	    else
	    {
		/* Transaction already in some abnormal state. */
		if (xcb->xcb_state & XCB_USER_INTR)
		    SETDBERR(&dmr->error, 0, E_DM0065_USER_INTR);
		else if (xcb->xcb_state & XCB_FORCE_ABORT)
		    SETDBERR(&dmr->error, 0, E_DM010C_TRAN_ABORTED);
		else if (xcb->xcb_state & XCB_ABORT)
		    SETDBERR(&dmr->error, 0, E_DM0064_USER_ABORT);
		else if (xcb->xcb_state & XCB_WILLING_COMMIT)
		    SETDBERR(&dmr->error, 0, E_DM0132_ILLEGAL_STMT);
	    }
	}	    
	else
	    SETDBERR(&dmr->error, 0, E_DM002B_BAD_RECORD_ID);
	break;
    }	    

    /* Map the failure into the appropriate transaction abort state. */
    if (dmr->error.err_code == E_DM0022_BAD_MASTER_OP ||
	dmr->error.err_code == E_DM004B_LOCK_QUOTA_EXCEEDED ||
	dmr->error.err_code == E_DM0112_RESOURCE_QUOTA_EXCEED)
    {
	rcb->rcb_xcb_ptr->xcb_state |= XCB_STMTABORT;
    }
    else if (dmr->error.err_code == E_DM0042_DEADLOCK ||
	dmr->error.err_code == E_DM004A_INTERNAL_ERROR ||
	dmr->error.err_code == E_DM0100_DB_INCONSISTENT)
    {
	rcb->rcb_xcb_ptr->xcb_state |= XCB_TRANABORT;
    }
    else if (dmr->error.err_code == E_DM010C_TRAN_ABORTED)
    {
	rcb->rcb_xcb_ptr->xcb_state |= XCB_FORCE_ABORT;
    }
    else if (dmr->error.err_code == E_DM0065_USER_INTR)
    {
	/* Interrupt: lose the position and clear the pending-interrupt
	** bit in the session CB.
	*/
	rcb->rcb_xcb_ptr->xcb_state |= XCB_USER_INTR;
	rcb->rcb_state &= ~RCB_POSITIONED;
	*(rcb->rcb_uiptr) &= ~SCB_USER_INTR;
    }
    else if (dmr->error.err_code > E_DM_INTERNAL)
    {
	/* Internal error: log it with db/owner/table context and map to
	** the generic external error code.
	*/
	uleFormat( &dmr->error, 0, NULL, ULE_LOG , NULL,
	    (char * )NULL, 0L, (i4 *)NULL, 
	    &error, 0);
	uleFormat(NULL, E_DM904C_ERROR_GETTING_RECORD, NULL, ULE_LOG, NULL,
	    (char * )NULL, 0L, (i4 *)NULL, &error, 3,
	    sizeof(DB_DB_NAME), &rcb->rcb_tcb_ptr->tcb_dcb_ptr->dcb_name,
	    sizeof(DB_OWN_NAME), &rcb->rcb_tcb_ptr->tcb_rel.relowner,
	    sizeof(DB_TAB_NAME), &rcb->rcb_tcb_ptr->tcb_rel.relid
	    );
	SETDBERR(&dmr->error, 0, E_DM008A_ERROR_GETTING_RECORD);
    }

    return (status);
}
Beispiel #25
0
/*{
** Name: dm1ibput - Adds a record to ISAM file. 
**
** Description:
**    This routine builds a ISAM table.  Called by modify. 
**    The records are assumed to be in sorted order by key.
**    There are two phases to building an ISAM file.  First
**    it builds only the main pages.  Duplicates that exceed the 
**    main page space are written to pages of an overflow file.
**    These overflow pages point back to the main pages to which
**    they need to be linked after the entire data file with index
**    has been built.
**    After all main pages have been processed, the ISAM index
**    is built.  When this is finished, the overflow pages are
**    read from the overflow file, written to the end of the
**    main ISAM file and linked to the appropriate main page.
**
**    This routine also assumes that the record is not compressed.
**    It also assumes that the DUP flag will be set if this is a 
**    duplicate key. The record is added to the page if there is 
**    room otherwise a new page is added(based on criteria above).
**    Currently the TCB of the table modifying is used for
**    attribute information needed by the low level routines.
**    If attributes are allowed to be modified, then these
**    build routines will not work. 
**
** Inputs:
**      mct                             Pointer to modify context.
**      record                          Pointer to an area containing
**                                      record to add.
**      record_size                     Size of record in bytes.
**      dup                             A flag indicating if this
**                                      record is a duplicate key.
**
** Outputs:
**      err_code                        A pointer to an area to return error
**                                      codes if return status not E_DB_OK.
**      
**	Returns:
**
**	    E_DB_OK
**          E_DB_ERROR
**          E_DB_FATAL
**
**	Exceptions:
**	    none
**
** Side Effects:
**          none.
**
** History:
**	07-feb-86 (jennifer)
**          Created for Jupiter.
**	29-may-89 (rogerk)
**	    Check status from dm1c_comp_rec calls.
**	08-Jun-1992 (kwatts)
**	    6.5 MPF project. Replaced dm1c_add with dmpp_load calls and a
**	    dm1c_comp_rec with a call to the dmpp_compress accessor.
**	08-feb-1993 (rmuth)
**	    On overflow pages set DMPP_OVFL instead of DMPP_CHAIN which is
**	    used for overflow leaf pages.
**	21-June-1993 (rmuth)
**	    ISAM overflow chains were built the highest page number at
**	    the head of the list not allowing us to exploit readahead. 
**	    This has been reversed, hence a chain of the form
**	    main_page.page_ovfl->289->288->287->nil, will now be
**	    main_page.page_ovfl->287->288->289->nil.
**	28-mar-1994 (mikem)
**	    bug #59831
**	    Fixed dm1bput() to check for errors from dm1xnewpage().  Previous
**	    to this change if you got a hard error (rather than a warning, ie.
**	    out-of-disk space) from dm1xnewpage() you would AV in dm1ibput(),
**	    due to a reference through a nil mct->mct_curdata pointer.
**	06-mar-1996 (stial01 for bryanp)
**	    Don't allocate tuple buffers on the stack.
**	06-may-1996 (thaju02)
**	    New Page Format Support:
**		Change page header references to use macros.
**	20-may-1996 (ramra01)
**	    Added new param to the load accessor
**      03-june-1996 (stial01)
**          Use DMPP_INIT_TUPLE_INFO_MACRO to init DMPP_TUPLE_INFO
**      18-jul-1996 (ramra01 for bryanp)
**          Pass 0 as the current table version to dmpp_load.
**	09-oct-1996 (nanpr01)
**	    Donot return E_DB_OK always rather return status. Otherwise
**	    diskfull condition on modify of isam get segmentation fault.
**      22-nov-96 (stial01,dilma04)
**          Row Locking Project:
**          load accessor: changed DMPP_TUPLE_INFO param to table_version
**      10-mar-97 (stial01)
**          dm1ibput: Use mct_crecord to compress a record
**	28-Feb-2001 (thaju02)
**	    Pass mct->mct_ver_number to dmpp_load. (B104100)
*/
DB_STATUS
dm1ibput(
DM2U_M_CONTEXT  *mct,
char            *record,
i4         record_size,
i4         dup,
DB_ERROR        *dberr)
{
    DB_STATUS       status;
    char	    *rec = record;	/* row image actually loaded; switched to the compressed buffer below if needed */
    i4	    next_page;
    i4         rec_size;
    DB_ERROR	    local_dberr;	/* NOTE(review): declared but never used in this routine */

    CLRDBERR(dberr);

    /* 
    ** See if there is room on current page,
    ** there is always room for one record. 
    */

    rec_size = record_size;
    if (mct->mct_data_rac.compression_type != TCB_C_NONE)
    {
	rec = mct->mct_crecord;
	/* Note that the following accessor comes from the MCT and not the
	** TCB.  This is because we want the compression scheme of the table
	** we are building, not the one we are building from.
	*/
	status = (*mct->mct_data_rac.dmpp_compress)(&mct->mct_data_rac, 
		record, record_size, rec, &rec_size);
	if (status != E_DB_OK)
	{
	    SETDBERR(dberr, 0, E_DM938B_INCONSISTENT_ROW);
	    return (status);
	}
    }

    /*	Add record to current main page. */

    /* dm1xbput() returns E_DB_WARN when the target page is full.  Each
    ** pass of this loop makes room (a new main page for a new key, or an
    ** overflow page for a duplicate) and retries until the row is placed
    ** or a hard error occurs.
    */
    while ((status = dm1xbput(mct, mct->mct_curdata, rec, rec_size,
			DM1C_LOAD_NORMAL, mct->mct_db_fill, 0,
			mct->mct_ver_number, dberr))
	    == E_DB_WARN)
    {
	if (dup == 0)
	{
	    /*
	    **	Record is not a duplicate of the previous and the main
	    **  page is full, start a new main page.
	    */
	    status = dm1xnewpage(mct, 
			DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type,
			mct->mct_curdata) + 1,
			&mct->mct_curdata, dberr);
	    if (status == E_DB_INFO)
	    {
		/*  Current reserved area is full, allocate a new area. */

		status = dm1xnextpage(mct, &mct->mct_startmain, dberr);
		if (status != E_DB_OK)
		    break;

		/*  Update main page pointer for current chain. */

		/* Walk the overflow chain of the old main page so every
		** overflow page points at the new main page number.
		*/
		DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, mct->mct_curdata, 
		    mct->mct_startmain);
		for (next_page = DMPP_VPT_GET_PAGE_OVFL_MACRO(mct->mct_page_type, 
			mct->mct_curdata);
		    next_page;
		    next_page = DMPP_VPT_GET_PAGE_OVFL_MACRO(mct->mct_page_type,
			mct->mct_curovfl))
		{
		    status = dm1xreadpage(mct, DM1X_FORUPDATE, next_page,
			&mct->mct_curovfl, dberr);
		    if (status != E_DB_OK)
			break;
		    DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, 
			mct->mct_curovfl, mct->mct_startmain);
		}

		/*  Get new main page. */

		/* Reserve a fresh run of mct_kperpage main pages, then
		** allocate the first of them as the current data page.
		*/
		if (status == E_DB_OK)
		    status = dm1xreserve(mct,
			mct->mct_startmain + mct->mct_kperpage, dberr);
		if (status == E_DB_OK)
		    status = dm1xnewpage(mct, mct->mct_startmain,
			&mct->mct_curdata, dberr);
		if (status != E_DB_OK)
		    break;
	    }
	    else if (status)
	    {
		/* Hard error from dm1xnewpage() (e.g. out of disk);
		** bail out rather than dereference a bad page pointer
		** (bug 59831).
		*/
		break;
	    }

	    DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, mct->mct_curdata, 
		DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, 
		mct->mct_curdata) + 1);
	    continue;
	}

	/*
	**  Record is a duplicate, keep adding to the overflow page for
	**  this main page.
	*/
	status = E_DB_WARN;

	/*
	** If first overflow page then link it to the main page
	*/
	if ( DMPP_VPT_GET_PAGE_OVFL_MACRO(mct->mct_page_type, mct->mct_curdata) == 
	    0 )
	{
	    status = dm1xnewpage(mct, 0, &mct->mct_curovfl, dberr);
	    if ( status != E_DB_OK )
		break;
	    /* New overflow page inherits the main-page back pointer and is
	    ** spliced in at the head of the data page's overflow chain.
	    */
	    DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, mct->mct_curovfl,
		DMPP_VPT_GET_PAGE_MAIN_MACRO(mct->mct_page_type, mct->mct_curdata)); 
	    DMPP_VPT_SET_PAGE_OVFL_MACRO(mct->mct_page_type, mct->mct_curovfl,
		DMPP_VPT_GET_PAGE_OVFL_MACRO(mct->mct_page_type, mct->mct_curdata));
	    DMPP_VPT_SET_PAGE_STAT_MACRO(mct->mct_page_type, mct->mct_curovfl, 
		DMPP_DATA | DMPP_OVFL );
	    DMPP_VPT_SET_PAGE_OVFL_MACRO(mct->mct_page_type, mct->mct_curdata,
		DMPP_VPT_GET_PAGE_PAGE_MACRO(mct->mct_page_type, mct->mct_curovfl));
	}

	/*
	** If the current buffered overflow page is for this main data
	** page then add the data.
	*/
	if ( DMPP_VPT_GET_PAGE_MAIN_MACRO(mct->mct_page_type, mct->mct_curovfl) == 
	    DMPP_VPT_GET_PAGE_MAIN_MACRO(mct->mct_page_type, mct->mct_curdata))
	{
	    status = dm1xbput(mct, mct->mct_curovfl, rec, rec_size,
			DM1C_LOAD_NORMAL, 0, 0,
			mct->mct_ver_number, dberr);
	}

	/*
	** If current overflow page is full then allocate a new overflow
	** page, link it to the end of the overflow chain and add the
	** data to the page
	*/
	if (status == E_DB_WARN)
	{
	    /*
	    ** Find out next page number we will allocate so that
	    ** can fix up the overflow chain ptr on current overflow
	    ** page before we release it
	    */
	    status = dm1xnextpage( mct, &next_page, dberr );
	    if ( status != E_DB_OK )
		break;
	    
	    DMPP_VPT_SET_PAGE_OVFL_MACRO(mct->mct_page_type, mct->mct_curovfl, 
		next_page);

	    status = dm1xnewpage(mct, 0, &mct->mct_curovfl, dberr);
	    if ( status != E_DB_OK )
		break;
	    DMPP_VPT_SET_PAGE_MAIN_MACRO(mct->mct_page_type, mct->mct_curovfl, 
		DMPP_VPT_GET_PAGE_MAIN_MACRO(mct->mct_page_type, mct->mct_curdata));
	    DMPP_VPT_SET_PAGE_OVFL_MACRO(mct->mct_page_type, mct->mct_curovfl, 0); /* end of chain */
	    DMPP_VPT_SET_PAGE_STAT_MACRO(mct->mct_page_type, mct->mct_curovfl, 
		DMPP_DATA | DMPP_OVFL );
	    /*
	    ** Add data to new page
	    */
	    status = dm1xbput(mct, mct->mct_curovfl, rec, rec_size,
			DM1C_LOAD_NORMAL, 0, 0,
			mct->mct_ver_number, dberr);
	}

	break;

    }

    if (status != E_DB_OK)
	log_error(E_DM9253_DM1I_BOTTOM, dberr);
    return (status); 
}
Beispiel #26
0
/*
** qss_bgn_session - Begin a QSF session.
**
** Initializes the caller-supplied session CB header and fields,
** then links it at the head of the server CB's session list while
** holding the qsr_psem semaphore.  With xDEBUG tracing enabled it
** also verifies that the session list is intact.
**
** Fix: the xDEBUG consistency-check paths previously returned
** E_DB_FATAL while still holding qsr_psem, leaking the semaphore
** and hanging every later session begin/end.  The semaphore is
** now released on those paths.
**
** Returns E_DB_OK, or E_DB_SEVERE/E_DB_ERROR/E_DB_FATAL with
** qsf_rb->qsf_error set.
*/
DB_STATUS
qss_bgn_session( QSF_RCB *qsf_rb )
{
    STATUS		status;
    QSF_CB		*scb = qsf_rb->qsf_scb;
#ifdef    xDEBUG
    i4		n;
    QSF_CB		*prevses;
    QSF_CB		*nextses;
#endif /* xDEBUG */

    CLRDBERR(&qsf_rb->qsf_error);

    Qsr_scb = (QSR_CB *) qsf_rb->qsf_server;

    if (scb == NULL)
    {
        SETDBERR(&qsf_rb->qsf_error, 0, E_QS001B_NO_SESSION_CB);
        return (E_DB_SEVERE);
    }

    /* Before anything, set up the session CB's standard header portion */
    /* ---------------------------------------------------------------- */
    scb->qsf_ascii_id = QSFCB_ASCII_ID;
    scb->qsf_type = QSFCB_CB;
    scb->qsf_length = sizeof(QSF_CB);
    scb->qsf_prev = NULL;

    /* Update the other structure information.  */
    /* ---------------------------------------- */
    scb->qss_obj_list = (QSO_OBJ_HDR *) NULL;
    scb->qss_master = (QSO_MASTER_HDR *) NULL;
    scb->qss_snamed_list = (QSO_OBJ_HDR *) NULL;

    /* Init the tracing vector */
    /* ----------------------- */
    ult_init_macro(&scb->qss_trstruct.trvect, 128, 0, 8);

    /* Get exclusive access to QSF's SERVER CONTROL BLOCK */
    /* -------------------------------------------------- */
    if (CSp_semaphore((i4) TRUE, &Qsr_scb->qsr_psem))
    {
        SETDBERR(&qsf_rb->qsf_error, 0, E_QS0008_SEMWAIT);
        /* Return now, instead of attempting to do a v() */
        return (E_DB_ERROR);
    }

    /* Make the session known to QSF's server CB (push at list head) */
    /* ------------------------------------------------------------- */
    if (Qsr_scb->qsr_se1st != (QSF_CB *) NULL)
        Qsr_scb->qsr_se1st->qsf_prev = scb;
    scb->qsf_next = Qsr_scb->qsr_se1st;
    Qsr_scb->qsr_se1st = scb;
    Qsr_scb->qsr_nsess++;
    if (Qsr_scb->qsr_nsess > Qsr_scb->qsr_mxsess)
        Qsr_scb->qsr_mxsess = Qsr_scb->qsr_nsess;

#ifdef    xDEBUG
    if (Qsr_scb->qsr_tracing && qst_trcheck(&scb, QSF_003_CHK_SCB_LIST))
    {
        /* This code just verifies that the session CB list is intact */
        /* ---------------------------------------------------------- */
        n = Qsr_scb->qsr_nsess;
        prevses = NULL;
        nextses = Qsr_scb->qsr_se1st;

        while (n-- > 0)
        {
            if (nextses == NULL)
            {
                TRdisplay("*** Not enough QSF session CBs found");
                TRdisplay(" while beginning a session.\n");
                SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
                /* Release the server CB before bailing out; this path
                ** previously leaked qsr_psem.
                */
                (void) CSv_semaphore(&Qsr_scb->qsr_psem);
                return (E_DB_FATAL);
            }

            if (	(nextses->qsf_prev != prevses)
                    ||	(nextses->qsf_ascii_id != QSFCB_ASCII_ID)
                    ||	(nextses->qsf_type != QSFCB_CB)
                    ||	(nextses->qsf_length != sizeof(QSF_CB))
               )
            {
                TRdisplay("*** QSF's session CB list found trashed");
                TRdisplay(" while beginning a session.\n");
                SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
                (void) CSv_semaphore(&Qsr_scb->qsr_psem);
                return (E_DB_FATAL);
            }

            prevses = nextses;
            nextses = nextses->qsf_next;
        }

        if (nextses != NULL)
        {
            TRdisplay("*** Too many QSF session CBs detected");
            TRdisplay(" while beginning a session.\n");
            SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
            (void) CSv_semaphore(&Qsr_scb->qsr_psem);
            return (E_DB_FATAL);
        }
    }
#endif /* xDEBUG */


    status = E_DB_OK;

    /* Release exclusive access to QSF's SERVER CONTROL BLOCK */
    /* ------------------------------------------------------ */
    if (CSv_semaphore(&Qsr_scb->qsr_psem))
    {
        status = E_DB_ERROR;
        SETDBERR(&qsf_rb->qsf_error, 0, E_QS0004_SEMRELEASE);
    }

    return (status);
}
Beispiel #27
0
/*{
** Name: dmv_rertree_rep - Redo the replace of a rtree key 
**
** Description:
**      This function replaces a new key in a rtree index for the recovery of a
**	replace record operation.
**
** Inputs:
**      dmve				Pointer to dmve control block.
**      tabio				Pointer to table io control block
**      page				Pointer to the page to which to insert
**	log_record			Pointer to the log record
**	plv				Pointer to page level accessor
**	recovery_action			Recovery type
**
** Outputs:
**	error				Pointer to Error return area
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	19-sep-1996 (shero03)
**		Created from dmvebtpt.c
**      21-may-1997 (stial01)
**          Added flags arg to dm0p_unmutex call(s).
*/
/*
** dmv_rertree_rep (body) - apply the redo/undo of an rtree key replace.
**
** Fix: the switch on dmve_action had no default, so any unexpected
** action value left 'key' uninitialized and then wrote it to the
** page (undefined behavior).  An unexpected action now unmutexes
** the page and returns E_DB_ERROR.
*/
static DB_STATUS
dmv_rertree_rep(
DMVE_CB             *dmve,
DMP_TABLE_IO	    *tabio,
DMP_PINFO	    *pinfo,
DM_TID		    *bid,
DM0L_RTREP	    *log_rec,
DMPP_ACC_PLV 	    *plv,
i4		    recovery_action)
{
    LG_LSN		*log_lsn = &log_rec->rtr_header.lsn; 
    DB_STATUS		status = E_DB_OK;
    i4		child = bid->tid_tid.tid_line;	/* entry slot being replaced */
    i4		page_type = log_rec->rtr_pg_type;
    i4		ix_compressed;
    char	*old_key;
    char	*new_key;
    char	*key;
    DMPP_PAGE		*page = pinfo->page;

    CLRDBERR(&dmve->dmve_error);

    /*
    ** If there is nothing to recover, just return.
    */
    if (page == NULL)
	return (E_DB_OK);

    /* The log record's variable buffer holds the ancestor stack followed
    ** by the old key, then the new key.
    ** NOTE(review): new_key's offset adds rtr_stack_size a second time —
    ** looks suspicious; confirm against the DM0L_RTREP layout.
    */
    old_key = &log_rec->rtr_vbuf[log_rec->rtr_stack_size];
    new_key = old_key + log_rec->rtr_stack_size + log_rec->rtr_okey_size;

    ix_compressed = DM1CX_UNCOMPRESSED;
    if (log_rec->rtr_cmp_type != TCB_C_NONE)
	ix_compressed = DM1CX_COMPRESSED;

    /* ****** FIXME (kschendel)
    ** Maybe I'm missing something obvious, but I don't see it writing the
    ** rtrep CLR when it's an UNDO...
    ** Do we somehow not need it?  or is it a bug?
    */

    /*
    ** Mutex the page while updating it.
    */
    dmveMutex(dmve, pinfo);

    /*
    ** Redo (DO/REDO) installs the new key value;
    ** undo restores the old key value.
    */
    switch (dmve->dmve_action)
    {
      case DMVE_DO:
      case DMVE_REDO:
	key = new_key;
	break;
      case DMVE_UNDO:
	key = old_key;
	break;
      default:
	/* Unexpected recovery action: fail cleanly instead of writing
	** an uninitialized key pointer to the page.
	*/
	dmveUnMutex(dmve, pinfo);
	SETDBERR(&dmve->dmve_error, 0, E_DM9650_REDO_BTREE_PUT);
	return (E_DB_ERROR);
    }

    /*
    ** Replace the entry's mbr with the chosen key value.
    */
    status = dm1cxput(page_type, log_rec->rtr_page_size, page,
		    ix_compressed, DM1C_DIRECT,
		    &dmve->dmve_tran_id, (u_i2)0, (i4)0, child,
		    key, log_rec->rtr_nkey_size, &log_rec->rtr_tid,
		    (i4)0);
    if (status != E_DB_OK)
    {
	dm1cxlog_error(E_DM93E4_BAD_INDEX_PUT, (DMP_RCB *)NULL, page, 
			page_type, log_rec->rtr_page_size, child);
    }

    /*
    ** Write the LSN of the Replace log record to the updated page.
    */
    DM1B_VPT_SET_PAGE_LOG_ADDR_MACRO(page_type, page, *log_lsn);

    DM1B_VPT_SET_PAGE_STAT_MACRO(page_type, page, DMPP_MODIFY);
    dmveUnMutex(dmve, pinfo);

    if (status != E_DB_OK)
    {
	SETDBERR(&dmve->dmve_error, 0, E_DM9650_REDO_BTREE_PUT);
	return (E_DB_ERROR);
    }

    return (E_DB_OK);
}
Beispiel #28
0
/*
** qss_end_session - End a QSF session.
**
** Destroys any orphaned / unshareable session-owned objects, clears
** this session from the global LRU object list, and unlinks the
** session CB from the server CB's session list under qsr_psem.
**
** Fixes:
**  - The xDEBUG consistency-check paths previously returned
**    E_DB_FATAL while still holding qsr_psem (semaphore leak);
**    the semaphore is now released first.
**  - Removed an inner 'STATUS status;' declaration that shadowed
**    the function-level one (behavior unchanged).
**
** Returns E_DB_OK, or E_DB_ERROR/E_DB_FATAL with
** qsf_rb->qsf_error set.
*/
DB_STATUS
qss_end_session( QSF_RCB *qsf_rb)
{
    STATUS		 status = E_DB_OK;
    QSF_CB		*scb = qsf_rb->qsf_scb;
    QSF_RCB		int_qsf_rb;
    QSO_OBJ_HDR		*objects = (QSO_OBJ_HDR *) scb->qss_obj_list;
    QSO_OBJ_HDR		*obj;
    i4		 error;
    i4		 count;
#ifdef    xDEBUG
    QSF_CB		*prevses;
    QSF_CB		*nextses;
    i4			 trace_003;
#endif /* xDEBUG */

    CLRDBERR(&qsf_rb->qsf_error);

    /* Toss any uncommitted named session-owned objects first.
    ** There shouldn't be any, if QEF did its job right, but who knows.
    ** The important thing is that we NOT have objects pretending to be
    ** on a session list when the session has ended.
    */
    int_qsf_rb.qsf_sid = qsf_rb->qsf_sid;
    int_qsf_rb.qsf_scb = scb;
    CLRDBERR(&int_qsf_rb.qsf_error);
    (void) qsf_clrsesobj(&int_qsf_rb);

    /*
    ** Look for orphaned objects that should not exist and destroy unshareable
    ** named objects.  'count' caps the walk in case the list is corrupt.
    */
    count = QSF_MAX_ORPHANS;
    while (objects != (QSO_OBJ_HDR *) NULL && --count > 0)
    {
        QSO_OBJ_HDR	*current_obj = objects;

        objects = objects->qso_obnext;

        if (current_obj->qso_obid.qso_lname == 0)
        {
            char	*type;

            switch (current_obj->qso_obid.qso_type)
            {
            case QSO_QTEXT_OBJ:
                type = "Query Text";
                break;
            case QSO_QTREE_OBJ:
                type = "Query Tree";
                break;
            case QSO_QP_OBJ:
                type = "Query Plan";
                break;
            case QSO_SQLDA_OBJ:
                type = "SQLDA";
                break;
            case QSO_ALIAS_OBJ:
                type = "Alias";
                break;
            default:
                type = "Unknown";
                break;
            }

            /* Report the orphaned object. */
            uleFormat( &int_qsf_rb.qsf_error, E_QS001E_ORPHANED_OBJ,
                       NULL, (i4) ULE_LOG, NULL,
                       NULL, (i4) 0, NULL, &error, 1, 0, type);
        }

        /* Lock the object exclusively, then destroy it; log (but don't
        ** stop on) any failure so the rest of the list is still cleaned.
        */
        int_qsf_rb.qsf_obj_id = current_obj->qso_obid;
        int_qsf_rb.qsf_lk_state = QSO_EXLOCK;
        int_qsf_rb.qsf_sid = qsf_rb->qsf_sid;
        int_qsf_rb.qsf_scb = scb;
        status = qso_lock(&int_qsf_rb);
        if (DB_FAILURE_MACRO(status))
        {
            uleFormat( &int_qsf_rb.qsf_error, 0, NULL, (i4) ULE_LOG,
                       NULL, NULL, (i4) 0, NULL, &error, 0);
        }

        int_qsf_rb.qsf_lk_id = current_obj->qso_lk_id;
        status = qso_destroy(&int_qsf_rb);
        if (DB_FAILURE_MACRO(status))
        {
            uleFormat( &int_qsf_rb.qsf_error, 0, NULL, (i4) ULE_LOG,
                       NULL, NULL, (i4) 0, NULL, &error, 0);
        }
    }

    if (count <= 0)
    {
        uleFormat( &int_qsf_rb.qsf_error, E_QS001F_TOO_MANY_ORPHANS,
                   NULL, (i4) ULE_LOG,
                   NULL, NULL, (i4) 0, NULL, &error, 0);
    }

#ifdef    xDEBUG
    if (Qsr_scb->qsr_tracing && qst_trcheck(&scb, QSF_001_ENDSES_OBJQ))
    {
        DB_ERROR		save_err = qsf_rb->qsf_error;

        TRdisplay("<<< Dumping QSF object queue before ending session >>>\n");
        (void) qsd_obq_dump(qsf_rb);

        qsf_rb->qsf_error = save_err;
    }


    trace_003 = (   Qsr_scb->qsr_tracing
                    && qst_trcheck(&scb, QSF_003_CHK_SCB_LIST)
                );					    /* Must check before
							    ** we blow away the
							    ** the session CB.
							    */
#endif /* xDEBUG */


    /* First, wait to get exclusive access to QSF's SERVER CONTROL BLOCK */
    /* ----------------------------------------------------------------- */
    if (CSp_semaphore((i4) TRUE, &Qsr_scb->qsr_psem))
    {
        SETDBERR(&qsf_rb->qsf_error, 0, E_QS0008_SEMWAIT);
        /* Return now, instead of attempting to do a v() */
        return (E_DB_ERROR);
    }

    /* Do a quick run through the global LRU-able (persistent) object list
    ** and clear the session if it's us.  This indicates that the object
    ** is not on any session list, and prevents someone else from believing
    ** a stale qso_session.
    */

    obj = Qsr_scb->qsr_1st_lru;
    while (obj != NULL)
    {
        if (obj->qso_session == scb)
            obj->qso_session = NULL;
        obj = obj->qso_lrnext;
    }

    /* Now remove this session from QSF's server CB */
    /* -------------------------------------------- */
    if (scb == Qsr_scb->qsr_se1st)
        Qsr_scb->qsr_se1st = scb->qsf_next;
    else
        scb->qsf_prev->qsf_next = scb->qsf_next;

    if (scb->qsf_next != NULL)
        scb->qsf_next->qsf_prev = scb->qsf_prev;

    scb->qsf_prev = scb->qsf_next = NULL;
    Qsr_scb->qsr_nsess--;


#ifdef    xDEBUG
    if (trace_003)
    {
        /* This code just verifies that the session CB list is intact */
        /* ---------------------------------------------------------- */
        count = Qsr_scb->qsr_nsess;
        prevses = NULL;
        nextses = Qsr_scb->qsr_se1st;

        while (count-- > 0)
        {
            if (nextses == NULL)
            {
                TRdisplay("*** Not enough QSF session CBs found");
                TRdisplay(" while ending a session.\n");
                SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
                /* Release the server CB before bailing out; this path
                ** previously leaked qsr_psem.
                */
                (void) CSv_semaphore(&Qsr_scb->qsr_psem);
                return (E_DB_FATAL);
            }

            if (	(nextses->qsf_prev != prevses)
                    ||	(nextses->qsf_ascii_id != QSFCB_ASCII_ID)
                    ||	(nextses->qsf_type != QSFCB_CB)
                    ||	(nextses->qsf_length != sizeof(QSF_CB))
               )
            {
                TRdisplay("*** QSF's session CB list found trashed");
                TRdisplay(" while ending a session.\n");
                SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
                (void) CSv_semaphore(&Qsr_scb->qsr_psem);
                return (E_DB_FATAL);
            }
            prevses = nextses;
            nextses = nextses->qsf_next;
        }

        if (nextses != NULL)
        {
            TRdisplay("*** Too many QSF session CBs detected");
            TRdisplay(" while ending a session.\n");
            SETDBERR(&qsf_rb->qsf_error, 0, E_QS9999_INTERNAL_ERROR);
            (void) CSv_semaphore(&Qsr_scb->qsr_psem);
            return (E_DB_FATAL);
        }
    }
#endif /* xDEBUG */


    status = E_DB_OK;

    /* Release exclusive access to QSF's SERVER CONTROL BLOCK */
    /* ------------------------------------------------------ */
    if (CSv_semaphore(&Qsr_scb->qsr_psem))
    {
        status = E_DB_ERROR;
        SETDBERR(&qsf_rb->qsf_error, 0, E_QS0004_SEMRELEASE);
    }

    return (status);
}
Beispiel #29
0
/*{
** Name:  dmr_position - Position a record.
**
**  INTERNAL DMF call format:      status = dmr_position(&dmr_cb);
**
**  EXTERNAL call format:          status = dmf_call(DMR_POSITION,&dmr_cb);
**
** Description:
**
**      Note: A position must be done before a table can be read.
**     
**	The act of positioning a table is used to convey information about the
**	set of records that are to be retrieved by succeeding get operations.
**	There are two basic modes of operation for position: position for 
**	full scan of table or position based on primary key qualifications.
**	Positioning for a full scan means that all records of the table will be
**	returned.  Positioning for qualification will return records that
**	satisfy the qualification.  A qualification involves comparisons of
**	attributes that are member of the primary key.  These attributes can
**	be compared for >, >=, ==, < or <=.  The least restrictive set of
**	qualification is used if there are conflicts.  (Example: If position
**	is presented the two following qualifications: (a >= ?, b > ?) where 
**      a and b are members of the key, position would use (a >= ? ,b >= ?).
**	If an attribute is mentioned more than once the extra qualifications are
**	ignored.  The data type of the value passed is determined from the
**	attribute number.  The value passed is assumed to be of the
**	same length and type as the attribute it is operating on.
**
**	For each of the position operations a qualification function can be
**	given.  The qualification function will be called when a record has
**	been retrieved and it has passed the key qualification if one was
**	given.  If the qualification function returns TRUE, the record is
**	returned, otherwise the next record is processed.  This allows the
**	caller to add qualifications on non-key fields and have them processed
**	by the get operation.
**
**	The special position operation DMR_TID allows the position of a
**	heap temporary to be returned to a previous tid value.  The next
**	fetch will return the record associated with that tid, and successive
**	fetches will see successive records.
**
**	The special position operation DMR_REPOSITION allows the position of
**	a scan on any structure to be repositioned using the same key used
**	in the last position request if this request was for DMR_QUAL or
**	DMR_REPOSITION.  The same set of records is returned except for
**	records that have been changed or deleted.
**
** Inputs:
**      dmr_cb
**          .type                           Must be set to DMR_RECORD_CB
**          .length                         Must be at least
**                                          sizeof(DMR_RECORD_CB) bytes.
**          .dmr_flags_mask                 Must be zero.
**          .dmr_position_type              Either DMR_TID, DMR_ALL, DMR_QUAL,
**					    DMR_REPOSITION, DMR_LAST or
**					    DMR_ENDQUAL. 
**					    (See above description).
**          .dmr_access_id                  Record access identifier returned
**                                          from DMT_OPEN that identifies a
**                                          table.
**          .dmr_tid                        If dmr_position_type = DMR_TID,
**                                          field is used as a tuple identifier.
**          .dmr_attr_desc.ptr_address      If dmr_position_type = DMR_QUAL, 
**                                          then
**                                          this points to an area containing
**                                          an array of DMR_ATTR_ENTRY's.  See
**                                          below for a description of 
**                                          <dmr_attr_desc>.
**          .dmr_attr_desc.ptr_size         Length of entry of 
**                                          dmr_attr_desc.data_address
**                                          in bytes.
**          .dmr_attr_desc.ptr_in_count     Number of entries in attr array.
**          .dmr_q_fcn                      Zero or address of function used to
**                                          qualify records.
**          .dmr_q_arg                      Argument to be passed to 
**                                          dmr_qual_fcn when called.
**
**          <dmr_attr_desc> is of type DMR_ATTR_ENTRY
**              .attr_number                The attribute number that this
**                                          restriction applies to.
**              .attr_operation             A binary comparison operator
**                                          in the set: DMR_OP_EQ, DMR_OP_LTE
**                                          DMR_OP_GTE, DMR_OP_INTERSECTS,
**					    DMR_OP_OVERLAY, DMR_OP_INSIDE,
**					    DMR_OP_CONTAINS.
**              .attr_value_ptr             A pointer to the value for this 
**                                          restriction.
**
** Outputs:
**      dmr_cb
**          .error.err_code                 One of the following error numbers.
**                                          E_DM0000_OK                
**                                          E_DM000B_BAD_CB_LENGTH
**                                          E_DM000C_BAD_CB_TYPE
**                                          E_DM001A_BAD_FLAG
**                                          E_DM002B_BAD_RECORD_ID
**                                          E_DM003C_BAD_TID
**                                          E_DM0042_DEADLOCK
**                                          E_DM0044_DELETED_TID
**                                          E_DM0047_CHANGED_TUPLE
**                                          E_DM004A_INTERNAL_ERROR
**                                          E_DM004B_LOCK_QUOTA_EXCEEDED
**					    E_DM004D_LOCK_TIMER_EXPIRED
**                                          E_DM0065_USER_INTR
**                                          E_DM0064_USER_ABORT
**					    E_DM008E_ERROR_POSITIONING
**                                          E_DM004B_LOCK_QUOTA_EXCEEDED
**                                          E_DM0100_DB_INCONSISTENT
**                                          E_DM010C_TRAN_ABORTED
**                                          E_DM0112_RESOURCE_QUOTA_EXCEEDED
**
**      Returns:
**         E_DB_OK                          Function completed normally.
**         E_DB_WARN                        Function completed normally with a 
**                                          termination status which is in
**                                          dmr_cb.err_code.
**         E_DB_ERROR                       Function completed abnormally 
**                                          with a 
**                                          termination status which is in
**                                          dmr_cb.err_code.
**         E_DB_FATAL			    Function completed with a fatal
**                                          error which must be handled
**                                          immediately.  The fatal status is in
**                                          dmr_cb.err_code.
** History:
**      01-sep-1985 (jennifer)
**	    Created new for jupiter.
**	17-dec-1985 (derek)
**	    Completed code.
**	 1-dec-1988 (rogerk)
**	    Don't continue after finding bad attribute array entry.
**	17-dec-1990 (jas)
**	    Smart Disk project integration:
**	    Support DMR_SDSCAN, requesting a Smart Disk scan.  In a Smart
**	    Disk scan, the "key pointer" becomes a pointer to a
**	    (hardware-dependent) block encoding the search condition;
**	    the "key count" is the length in bytes of the encoded
**	    search condition.
**	05-dec-1992 (kwatts)
**	    Smart Disk enhancements. There is now a different structure in
**	    the DMR CB to set key_descriptor to.
**	31-jan-1994 (bryanp) B58493
**	    Handle failures in both dm2r_position and dm2r_unfix_pages.
**	5-may-95 (stephenb/lewda02)
**	    Add support for DMR_LAST using DM2R_LAST.
**      22-nov-96 (stial01,dilma04)
**          Row Locking Project:
**          Unfix all pages before leaving DMF if row locking.
**      14-may-97 (dilma04)
**          Cursor Stability Project:
**          If isolation level is CS or RR, set RCB_CSRR_LOCK locking mode 
**          for the time of dm2r_position() call.
**      21-may-97 (stial01)
**          Row locking: No more LK_PH_PAGE locks, so page(s) can stay fixed.
**	23-jul-1998 (nanpr01)
**	    Making position code aware of the scanning direction for backward
**	    scan.
**	01-Mar-2004 (jenjo02)
**	    Position requests on Partition Masters are illegal.
**	11-Nov-2005 (jenjo02)
**	    Replaced dmx_show() with the more robust 
**	    dmxCheckForInterrupt() to standardize external
**	    interrupt handling.
**	11-Sep-2006 (jonj)
**	    Don't dmxCheckForInterrupt if extended table as txn is
**	    likely in a recursive call and not at an atomic
**	    point in execution as required for LOGFULL_COMMIT.
**	15-Jan-2010 (jonj)
**	    SIR 121619 MVCC: No CSRR_LOCK when crow_locking().
**	03-Mar-2010 (jonj)
**	    SIR 121619 MVCC, blob support:
**	    Set rcb_dmr_opcode here; dmpe bypasses dmf_call,
**	    which used to set it.
*/
DB_STATUS
dmr_position(
DMR_CB  *dmr_cb)
{
    DM_TID              tid;
    DMR_CB		*dmr = dmr_cb;
    DMP_RCB		*rcb;
    DMP_TCB		*t;
    DML_XCB		*xcb;
    DMR_ATTR_ENTRY	*k;
    DMR_ATTR_ENTRY	*ka;
    DMR_ATTR_ENTRY      karray[DB_MAX_COLS];
    i4		i;
    i4		flag;
    DB_STATUS		status, local_status;
    i4		error,local_error;
    char		*key_descriptor = (char *) karray;
    i4			key_count = dmr->dmr_attr_desc.ptr_in_count;
    DB_ERROR		local_dberr;

    CLRDBERR(&dmr->error);

    /* One-trip loop: any validation failure "break"s out to the common
    ** error-mapping code at the bottom of this routine.
    */
    for (status = E_DB_ERROR;;)
    {
	rcb = (DMP_RCB *)dmr->dmr_access_id;
	/* Verify that the caller's access id really identifies an RCB */
	if (dm0m_check((DM_OBJECT *)rcb, (i4)RCB_CB) == E_DB_OK)
	{
	    /* Record this DMF operation in the RCB; dmpe bypasses dmf_call,
	    ** which used to set it.
	    */
	    rcb->rcb_dmr_opcode = DMR_POSITION;

	    t = rcb->rcb_tcb_ptr;

	    /* Position requests on Partition Masters are illegal */
	    if (t->tcb_rel.relstat & TCB_IS_PARTITIONED)
	    {
		uleFormat(&dmr->error, E_DM0022_BAD_MASTER_OP, NULL, ULE_LOG, NULL,
			NULL, 0, NULL, &error, 3,
			0, "dmrpos",
			sizeof(DB_OWN_NAME),t->tcb_rel.relowner.db_own_name,
			sizeof(DB_TAB_NAME),t->tcb_rel.relid.db_tab_name);
		break;
	    }

	    if ((dmr->dmr_position_type == DMR_QUAL) ||
	        (dmr->dmr_position_type == DMR_ENDQUAL))
	    {
	        if (dmr->dmr_position_type == DMR_QUAL) 
		    flag = DM2R_QUAL;
		else
		    flag = DM2R_ENDQUAL;
		/* Keyed positioning requires a sane attribute array */
		if (dmr->dmr_attr_desc.ptr_address == 0 ||
		    dmr->dmr_attr_desc.ptr_size < sizeof(DMR_ATTR_ENTRY)||
		    dmr->dmr_attr_desc.ptr_in_count == 0 ||
		    dmr->dmr_attr_desc.ptr_in_count > DB_MAX_COLS)
		{
		    SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		    break;
		}
		/* Copy the caller's attribute entries into the local karray,
		** validating each as we go.  A bad entry sets BAD_PARAMETER
		** and stops the copy; the error is re-checked after the loop.
		*/
		ka = karray;
		for (i = 0; i < dmr->dmr_attr_desc.ptr_in_count; i++) 
		{
		    DMR_ATTR_ENTRY	*k = 
			((DMR_ATTR_ENTRY **)dmr->dmr_attr_desc.ptr_address)[i];

		    ka->attr_number = k->attr_number;
		    ka->attr_operator = k->attr_operator;
		    ka->attr_value = k->attr_value;
		    ka++;
		    if (k->attr_number == 0 ||
			k->attr_number > t->tcb_rel.relatts ||
			k->attr_operator == 0 ||
			k->attr_operator > DMR_OP_CONTAINS)
		    {
			SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
			break;
		    }
		}
		if (dmr->error.err_code)
		    break;
	    }
	    else if (dmr->dmr_position_type == DMR_ALL)
		flag = DM2R_ALL;
	    else if (dmr->dmr_position_type == DMR_TID)
	    {
		/* Reposition to a caller-supplied tid value */
		flag = DM2R_BYTID;
		tid.tid_i4 = ((DM_TID *)&dmr->dmr_tid)->tid_i4;
	    }
	    else if (dmr->dmr_position_type == DMR_REPOSITION)
		flag = DM2R_REPOSITION;	
	    else if (dmr->dmr_position_type == DMR_LAST)
		flag = DM2R_LAST;
	    else
	    {
		SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		break;
	    }

	    /* Validate the transaction and session control blocks */
	    xcb = rcb->rcb_xcb_ptr;
	    if ( dm0m_check((DM_OBJECT *)xcb, (i4)XCB_CB) )
	    {
		uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
		    NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1,
		    sizeof("transaction")-1, "transaction");
		SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		break;
	    }
	    if ( dm0m_check((DM_OBJECT *)xcb->xcb_scb_ptr, (i4)SCB_CB) )
	    {
		uleFormat(NULL, E_DM00E0_BAD_CB_PTR, (CL_ERR_DESC *)NULL, ULE_LOG, 
		    NULL, (char *)NULL, (i4)0, (i4 *)NULL, &error, 1,
		    sizeof("session")-1, "session");
		SETDBERR(&dmr->error, 0, E_DM002A_BAD_PARAMETER);
		break;
	    }

	    /* Check for external interrupts.  Skipped for extended (etab)
	    ** tables: the txn is likely in a recursive call and not at an
	    ** atomic point in execution as required for LOGFULL_COMMIT.
	    */
	    if ( xcb->xcb_scb_ptr->scb_ui_state && !t->tcb_extended )
		dmxCheckForInterrupt(xcb, &error);

	    if (xcb->xcb_state == 0)
	    {
		/* CS/RR isolation uses CSRR locking for the duration of the
		** position call — but not when MVCC crow-locking.
		*/
		if ( !crow_locking(rcb) &&
		       (rcb->rcb_iso_level == RCB_CURSOR_STABILITY ||
			rcb->rcb_iso_level == RCB_REPEATABLE_READ) )
                {
                     rcb->rcb_state |= RCB_CSRR_LOCK;
                }

		/* Informed Read-Ahead: suppress read-ahead when the caller's
		** estimated qualifying-record count is small relative to the
		** cache's scan factor for this page size.
		*/
		if ((flag & DM2R_QUAL || flag & DM2R_ENDQUAL) &&
		    (dmr->dmr_s_estimated_records > 0))
		{
		    f8   	scanfactor = 
		   dmf_svcb->svcb_scanfactor[DM_CACHE_IX(t->tcb_rel.relpgsize)];

		    /* Override the read-ahead decisions */
		    if (((t->tcb_rel.relstat & TCB_INDEX) == 0) &&
			(t->tcb_tperpage != 0) &&
			(dmr->dmr_s_estimated_records/t->tcb_tperpage < 
				0.5 * scanfactor)) 
			flag |= DM2R_NOREADAHEAD;
		    else {
			if ((t->tcb_rel.relstat & TCB_INDEX) &&
			    (t->tcb_rel.relspec == TCB_BTREE) &&
			    (t->tcb_kperleaf != 0) &&
			    (dmr->dmr_s_estimated_records/t->tcb_kperleaf < 
				0.5 * scanfactor)) 
			    flag |= DM2R_NOREADAHEAD;
			else
		    	    if ((t->tcb_rel.relstat & TCB_INDEX) &&
			        (t->tcb_tperpage != 0) &&
				(dmr->dmr_s_estimated_records/t->tcb_tperpage < 
				 0.5 * scanfactor)) 
				flag |= DM2R_NOREADAHEAD;
		    }
		}

		/* Set qualification stuff in RCB from caller DMR_CB.
		** This will persist thru all upcoming gets as well.
		*/
		rcb->rcb_f_rowaddr = dmr->dmr_q_rowaddr;
		rcb->rcb_f_qual = dmr->dmr_q_fcn;
		rcb->rcb_f_arg = dmr->dmr_q_arg;
		rcb->rcb_f_retval = dmr->dmr_q_retval;
		MEfill(sizeof(ADI_WARN), 0, &rcb->rcb_adf_cb->adf_warncb);

		/* Route qualification-function errors into dmr->error
		** for the duration of the dm2r_position call.
		*/
		xcb->xcb_scb_ptr->scb_qfun_errptr = &dmr->error;
		status = dm2r_position(rcb, flag, 
		    (DM2R_KEY_DESC *)key_descriptor, key_count, 
		    &tid, &dmr->error);

		xcb->xcb_scb_ptr->scb_qfun_errptr = NULL;

		/* If any arithmetic warnings to the RCB ADFCB during
		** position, roll them into the caller's ADFCB.
		*/
		if (dmr->dmr_q_fcn != NULL && dmr->dmr_qef_adf_cb != NULL
		  && rcb->rcb_adf_cb->adf_warncb.ad_anywarn_cnt > 0)
		    dmr_adfwarn_rollup((ADF_CB *)dmr->dmr_qef_adf_cb, rcb->rcb_adf_cb);

		/* For core-catalog (TCB_CONCUR) tables, unfix any pages left
		** fixed by the position; preserve the first error seen and
		** just log any secondary unfix error.
		*/
		if ((t->tcb_rel.relstat & TCB_CONCUR))
		{
		    local_status = dm2r_unfix_pages(rcb, &local_dberr);
		    if (local_status != E_DB_OK)
		    {
			if (status == E_DB_OK)
			{
			    status = local_status;
			    dmr->error = local_dberr;
			}
			else
			{
			    uleFormat(&local_dberr, 0, (CL_ERR_DESC *)NULL,
				    ULE_LOG, NULL, (char * )NULL, (i4)0,
				    (i4 *)NULL, &local_error, 0);
			}
		    }
		}
  
                rcb->rcb_state &= ~RCB_CSRR_LOCK;

		if (status == E_DB_OK)
		    return (status);
	    }
	    else
	    {
		/* Transaction is in an abnormal state; report why */
		if (xcb->xcb_state & XCB_USER_INTR)
		    SETDBERR(&dmr->error, 0, E_DM0065_USER_INTR);
		else if (xcb->xcb_state & XCB_FORCE_ABORT)
		    SETDBERR(&dmr->error, 0, E_DM010C_TRAN_ABORTED);
		else if (xcb->xcb_state & XCB_ABORT)
		    SETDBERR(&dmr->error, 0, E_DM0064_USER_ABORT);
		else if (xcb->xcb_state & XCB_WILLING_COMMIT)
		    SETDBERR(&dmr->error, 0, E_DM0132_ILLEGAL_STMT);
	    }
	}	    
	else
	    SETDBERR(&dmr->error, 0, E_DM002B_BAD_RECORD_ID);
	break;
    }	    

    /* Map the failure onto the statement/transaction state.
    ** NOTE(review): these branches dereference rcb->rcb_xcb_ptr.  When the
    ** RCB itself failed dm0m_check the error is E_DM002B, which matches none
    ** of the tested codes and is below E_DM_INTERNAL — confirm this holds if
    ** new error codes are ever routed through here.
    */
    if (dmr->error.err_code == E_DM004B_LOCK_QUOTA_EXCEEDED ||
	dmr->error.err_code == E_DM0112_RESOURCE_QUOTA_EXCEED)
    {
	rcb->rcb_xcb_ptr->xcb_state |= XCB_STMTABORT;
    }
    else if (dmr->error.err_code == E_DM0042_DEADLOCK ||
	dmr->error.err_code == E_DM004A_INTERNAL_ERROR ||
	dmr->error.err_code == E_DM0100_DB_INCONSISTENT)
    {
	rcb->rcb_xcb_ptr->xcb_state |= XCB_TRANABORT;
    }
    else if (dmr->error.err_code == E_DM010C_TRAN_ABORTED)
    {
	rcb->rcb_xcb_ptr->xcb_state |= XCB_FORCE_ABORT;
    }
    else if (dmr->error.err_code == E_DM0065_USER_INTR)
    {
	/* Interrupted: drop the position and consume the interrupt flag */
	rcb->rcb_xcb_ptr->xcb_state |= XCB_USER_INTR;
	rcb->rcb_state &= ~RCB_POSITIONED;
	*(rcb->rcb_uiptr) &= ~SCB_USER_INTR;
    }
    else if (dmr->error.err_code > E_DM_INTERNAL)
    {
	/* Log internal errors; return the generic positioning error */
	uleFormat( &dmr->error, 0, NULL, ULE_LOG , NULL, 
	    (char * )NULL, 0L, (i4 *)NULL, &error, 0);
	SETDBERR(&dmr->error, 0, E_DM008E_ERROR_POSITIONING);
    }

    return (status);
}
Beispiel #30
0
/*{
** Name: dmv_unbtree_del - UNDO of a delete key operation.
**
** Description:
**      This function removes a key from a btree index for the recovery of a
**	delete record operation.
**
** Inputs:
**      dmve				Pointer to dmve control block.
**      tabio				Pointer to table io control block
**      page				Pointer to page on which row was inserted
**
** Outputs:
**	error				Pointer to Error return area
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	14-dec-1992 (rogerk)
**	    Written for 6.5 recovery.
**	18-jan-1992 (rogerk)
**	    Add check in undo routine for case when null page pointer is
**	    passed because undo was found to be not needed.
**	15-mar-1993 (jnash)
**	    Check dmve->dmve_logging to determine if logging required.
**      26-apr-1993 (bryanp)
**          6.5 Cluster support:
**              Replace all uses of DM_LOG_ADDR with LG_LA or LG_LSN.
**	15-apr-1994 (chiku)
**	    Bug56702: return logfull indication.
**	06-may-1996 (thaju02 & nanpr01)
**	    New page format support: change page header references to 
**	    use macros.
**      22-nov-96 (stial01,dilma04)
**          Row Locking Project:
**          Allocate space only if space reclaimed
**      27-feb-97 (stial01)
**          dmv_unbtree_del() allocate parameter TRUE If dm1cxallocate needed
**          Log key in CLR, needed for row locking
**      21-may-1997 (stial01)
**          Added flags arg to dm0p_unmutex call(s).
*/
static DB_STATUS
dmv_unbtree_del(
DMVE_CB             *dmve,
DMP_TABLE_IO	    *tabio,
DMP_PINFO	    *pinfo,
DM_TID		    *bid,
bool                 allocate)
{
    DM0L_BTDEL		*log_rec = (DM0L_BTDEL *)dmve->dmve_log_rec;
    DB_STATUS		status = E_DB_OK;
    DM_LINE_IDX		childkey;
    DM_LINE_IDX		childtid;
    LG_LSN		lsn;
    DM_TID		temptid;
    i4			temppartno;
    i4			page_type = log_rec->btd_pg_type;
    i4			ix_compressed;
    char		*key;
    i4		flags;
    i4		loc_id;
    i4		loc_config_id;
    bool		index_update;
    i4             update_mode;
    i4			local_err;
    i4			*err_code = &dmve->dmve_error.err_code;
    LG_LRI		lri;
    DMPP_PAGE		*page = pinfo->page;

    CLRDBERR(&dmve->dmve_error);

    /*
    ** If there is nothing to recover, just return.
    ** (A null page pointer is passed when undo was found to be not needed.)
    */
    if (page == NULL)
	return (E_DB_OK);

    /* The deleted key value is logged in the record's variable buffer */
    key = &log_rec->btd_vbuf[0];

    index_update = ((DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, page) & 
	DMPP_INDEX) != 0);
    ix_compressed = DM1CX_UNCOMPRESSED;
    if (log_rec->btd_cmp_type != TCB_C_NONE)
	ix_compressed = DM1CX_COMPRESSED;

    /*
    ** Get information on the location to which the update is being made.
    */
    loc_id = DM2F_LOCID_MACRO(tabio->tbio_loc_count, 
	(i4) DM1B_VPT_GET_PAGE_PAGE_MACRO(page_type, page));
    loc_config_id = tabio->tbio_location_array[loc_id].loc_config_id;

    /*
    ** Deletes to non-leaf index pages actually effect more than one entry
    ** on the page.  The logged bid describes the entry from which the
    ** TID pointer is deleted.  The key entry is deleted from the previous
    ** position (if there is one).
    */
    if (index_update)
    {
	childtid = log_rec->btd_bid_child;
	childkey = log_rec->btd_bid_child;
    }
    else
    {
	childtid = bid->tid_tid.tid_line;
	childkey = bid->tid_tid.tid_line;
    }

    /* Index pages do not contain partition numbers */
    temppartno = 0;

    /* For a non-leaf update, save the tid currently at the previous
    ** position — it is re-put below after the key is reinserted.
    */
    if (index_update && (childkey != 0))
    {
	childkey--;
        dm1cxtget(page_type, log_rec->btd_page_size, page, 
		childkey, &temptid, &temppartno); 
    }


    /*
    ** It would be nice to verify that the child position logged (or calculated
    ** by recovery) is the correct spot in the table, but since we have
    ** no attribute or key information to go on, we cannot do key comparisons.
    ** We must trust that the values are correct.
    ** 
    ** We assume here that there is sufficient space on the page.  If not,
    ** then the dm1cx calls below will catch the error.
    ** Since we will have backed out any inserts to this page that may have
    ** occurred after the delete, we should be assured that the row will
    ** still fit.  If it doesn't, then a fatal recovery error will occur.
    */

    /* 
    ** Mutex the page.  This must be done prior to the log write.
    */
    dmveMutex(dmve, pinfo);

    /*
    ** Check direction of recovery operation:
    **
    **     If this is a normal Undo, then we log the CLR for the operation
    **     and write the LSN of this CLR onto the newly updated page (unless
    **     dmve_logging is turned off - in which case the rollback is not
    **     logged and the page lsn is unchanged).
    **
    **     If the record being processed is itself a CLR, then we are REDOing
    **     an update made during rollback processing.  Updates are not relogged
    **     in redo processing and the LSN is moved forward to the LSN value of
    **     of the original update.
    **
    ** As of release OpenIngres 2.0, we need the key value in CLRs as well,
    ** because of row locking.
    */
    if ((log_rec->btd_header.flags & DM0L_CLR) == 0)
    {
	if (dmve->dmve_logging)
	{
	    flags = (log_rec->btd_header.flags | DM0L_CLR);

	    /* Extract previous page change info */
	    DM1B_VPT_GET_PAGE_LRI_MACRO(log_rec->btd_pg_type, page, &lri);

	    /* Write the CLR before updating the page */
	    status = dm0l_btdel(dmve->dmve_log_id, flags, 
		&log_rec->btd_tbl_id, 
		tabio->tbio_relid, 0,
		tabio->tbio_relowner, 0,
		log_rec->btd_pg_type, log_rec->btd_page_size,
		log_rec->btd_cmp_type, 
		log_rec->btd_loc_cnt, loc_config_id,
		bid, childkey, &log_rec->btd_tid, log_rec->btd_key_size, key, 
		&log_rec->btd_header.lsn, &lri, log_rec->btd_partno, 
		log_rec->btd_btflags, &dmve->dmve_error);
	    if (status != E_DB_OK)
	    {
		/* Release the page mutex before bailing out */
		dmveUnMutex(dmve, pinfo);
		/*
		 * Bug56702: return logfull indication.
		 */
		dmve->dmve_logfull = dmve->dmve_error.err_code;

		uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL,
		    (char *)NULL, (i4)0, (i4 *)NULL, &local_err, 0); 

		SETDBERR(&dmve->dmve_error, 0, E_DM9652_UNDO_BTREE_DEL);
		return(E_DB_ERROR);
	    }
	}
    }
    else
    {
	/*
	** If we are processing recovery of an Insert CLR (redo-ing the undo
	** of an insert) then we don't log a CLR but instead save the LSN
	** of the log record we are processing with which to update the
	** page lsn's.
	*/
	DM0L_MAKE_LRI_FROM_LOG_RECORD(&lri, log_rec);
    }

    /* 
    ** Write the LSN, etc, of the delete record onto the page, unless nologging
    */
    if (dmve->dmve_logging)
	DM1B_VPT_SET_PAGE_LRI_MACRO(page_type, page, &lri);

    /* Choose row-level update mode when row/crow locking, or when page
    ** locking a non-V1 leaf under a physical page lock.
    */
    update_mode = DM1C_DIRECT;
    if ((dmve->dmve_lk_type == LK_ROW || dmve->dmve_lk_type == LK_CROW) ||
	(!index_update && log_rec->btd_pg_type != TCB_PG_V1 &&
	dmve->dmve_lk_type == LK_PAGE && (log_rec->btd_header.flags & DM0L_PHYS_LOCK)))
	update_mode |= DM1C_ROWLK;

    /*
    ** Undo the delete operation.
    ** (One-trip loop: errors "break" to the common cleanup below.)
    */
    for (;;)
    {
	/*
	** Allocate space if necessary
	** (only when the original delete reclaimed the space).
	*/
	if ( allocate == TRUE )
	{
	    status = dm1cxallocate(page_type, log_rec->btd_page_size,
			    page, update_mode, ix_compressed,
			    &dmve->dmve_tran_id, (i4)0, childkey,
			    log_rec->btd_key_size + 
				DM1B_VPT_GET_BT_TIDSZ_MACRO(
					page_type, page));
	    if (status != E_DB_OK)
	    {
		dm1cxlog_error(E_DM93E0_BAD_INDEX_ALLOC, (DMP_RCB *)NULL, page,
			page_type, log_rec->btd_page_size, childkey);
		break;
	    }
	}

	/*
	** Reinsert the key,tid,partition values.
	*/
    /* If leaf overflow look for entry with matching tid */
    /* skip key comparison all the keys on overflow are the same */
    if (DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, page) & DMPP_CHAIN)
    {
	i4 i;
	DM_TID	tmptid;
	i4	tmppart;
	LG_LSN lsn;
	for (i = 0; i < DM1B_VPT_GET_BT_KIDS_MACRO(page_type, page); i++)
	{
	    dm1cxtget(page_type, log_rec->btd_page_size, page, i, &tmptid, &tmppart); 
	lsn = DMPP_VPT_GET_PAGE_LSN_MACRO(page_type, page);
	    /* Diagnostic only: report a live duplicate entry for this tid */
	    if (log_rec->btd_tid.tid_i4 == tmptid.tid_i4 &&
		dmpp_vpt_test_free_macro(page_type,
		DM1B_VPT_BT_SEQUENCE_MACRO(page_type, page), 
		(i4)i + DM1B_OFFSEQ) == FALSE)
	    TRdisplay("dmvebtdl: dup entry on overflow %d for tid %d,%d CRPAGE %d page lsn %x\n",
		DM1B_VPT_GET_PAGE_PAGE_MACRO(page_type, page),
		log_rec->btd_tid.tid_tid.tid_page,
		log_rec->btd_tid.tid_tid.tid_line,
	        DMPP_VPT_IS_CR_PAGE(page_type, page), lsn.lsn_low);
/*
** NOTE(review): commented-out early return kept for reference —
** does this trigger the dm1bxreserve failure from dm1bxovfl_alloc?
	    return (E_DB_ERROR);
*/
	}
    }
	/* Put the key/tid/partition back at the logged position */
	status = dm1cxput(page_type, log_rec->btd_page_size, page,
			ix_compressed, update_mode, 
			&dmve->dmve_tran_id,
			LOG_ID_ID(dmve->dmve_log_id),
			(i4)0, childkey, 
			key, log_rec->btd_key_size, &log_rec->btd_tid,
			log_rec->btd_partno);
	if (status != E_DB_OK)
	{
	    dm1cxlog_error(E_DM93E4_BAD_INDEX_PUT, (DMP_RCB *)NULL, page,
			page_type, log_rec->btd_page_size, childkey);
	    break;
	}

	/*
	** If the insert is to a non-leaf index page, then the logged tid
	** value must actually be insert to the position after the one
	** to which we just put the key.  Replace the old tidp from that
	** position and insert the new one to the next entry.
	*/
	if (index_update && (childkey != childtid))
	{
	    status = dm1cxtput(page_type, log_rec->btd_page_size, 
				page, childtid, &log_rec->btd_tid,
				log_rec->btd_partno);
	    if (status != E_DB_OK)
	    {
		dm1cxlog_error(E_DM93EB_BAD_INDEX_TPUT, (DMP_RCB *)NULL, page,
			page_type, log_rec->btd_page_size, childtid);
		break;
	    }
	    /* Restore the tid saved from the previous position */
	    status = dm1cxtput(page_type, log_rec->btd_page_size, 
			page, childkey, &temptid, temppartno);
	    if (status != E_DB_OK)
	    {
		dm1cxlog_error(E_DM93EB_BAD_INDEX_TPUT, (DMP_RCB *)NULL, page,
			page_type, log_rec->btd_page_size, childkey);
		break;
	    }
	}

	break;
    }

    /* Mark the page modified and release the mutex taken above */
    DM1B_VPT_SET_PAGE_STAT_MACRO(page_type, page, DMPP_MODIFY);
    dmveUnMutex(dmve, pinfo);
    
    if (status != E_DB_OK)
    {
	SETDBERR(&dmve->dmve_error, 0, E_DM9652_UNDO_BTREE_DEL);
	return(E_DB_ERROR);
    }

    return(E_DB_OK);
}