Example #1
/*{
** Name: LKconnect	- Connect to an existing lock list.
**
** Description:
**	This function connects to an existing lock list for the
**	purpose of sharing a transaction's lock context amongst
**	several threads so that the threads do not conflict with
**	each other for lock resources, yet may each independently
**	wait for lock resources.
**
**	On the first connect to the lock list, the list is
**	converted to a SHARED LLB which inherits all the
**	attributes and locks of the original list, and the
**	original LLB is transformed into a PSHARED LLB, thenceforth
**	acting as a "handle" to the shared LLB.
**
**	On each call, a new PSHARED LLB is allocated on behalf
**	of the caller and linked to the SHARED list.
**
** Inputs:
**	clock_list_id		The lock list id to connect to
**
** Outputs:
**      lock_list_id            The lock list identifier assigned to 
**                              the PSHARED list.
**	Returns:
**	    OK			Successful completion.
**	    LK_BADPARAM		Something wrong with a parameter.
**	    LK_NOLOCKS		No more lock resources available.
**
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	17-Dec-2003 (jenjo02)
**	    Invented for the Partitioned Table, Parallel query
**	    project.
**	12-Aug-2004 (jenjo02)
**	    Move LKB/RSB stash to SHARED list.
**	17-May-2010 (kschendel) SIR 123565
**	    When converting to SHARED, don't mark status as SHARED until the
**	    very end, because that's what other bits of LK look at (unmutexed).
**	23-Jul-2010 (kschendel) b124007
**	    Don't move any related list to the new SHARED LLB.  Related lists
**	    stay with the handle LLB.
*/
STATUS
LKconnect(
LK_LLID			clock_list_id,
LK_LLID			*lock_list_id,
CL_ERR_DESC		*sys_err)
{
    LKD		*lkd = (LKD *)LGK_base.lgk_lkd_ptr;
    LLB		*cllb, *sllb, *next_llb, *prev_llb;
    STATUS	status;
    SIZE_TYPE	*lbk_table;
    SIZE_TYPE	llb_offset;
    SIZE_TYPE	llbq_offset;
    SIZE_TYPE	end_offset;
    i4		err_code;
    i4		assigned_llbname[2];
    LK_UNIQUE	connect_id;
    LLB_ID	*connect_to_id = (LLB_ID*)&clock_list_id;
    LLBQ	*next_llbq, *prev_llbq;
    i4		flags, count;
    LKH		*lkh_table, *lkb_hash_bucket, *old_hash_bucket;
    LKB		*lkb;
    RSH		*rsh_table, *rsb_hash_bucket;
    RSB		*rsb;
    LKBQ	*lkbq, *next_lkbq, *prev_lkbq;
    u_i4	rsb_hash_value;

    LK_WHERE("LKconnect")

    if ( connect_to_id->id_id > lkd->lkd_lbk_count )
    {   
	uleFormat(NULL, E_CL106D_LK_CONNECT_BADPARAM, (CL_ERR_DESC *)NULL, ULE_LOG,
			NULL, (char *)NULL, 0L, (i4 *)NULL, &err_code, 2,
			0, connect_to_id->id_id, 0, lkd->lkd_lbk_count);
       return (LK_BADPARAM);
    }

    lbk_table = (SIZE_TYPE *)LGK_PTR_FROM_OFFSET(lkd->lkd_lbk_table);
    cllb = (LLB *)LGK_PTR_FROM_OFFSET(lbk_table[connect_to_id->id_id]);

    if ( cllb->llb_type != LLB_TYPE ||
	cllb->llb_id.id_instance != connect_to_id->id_instance )
    {
	uleFormat(NULL, E_CL106E_LK_CONNECT_BADPARAM, (CL_ERR_DESC *)NULL, ULE_LOG,
			NULL, (char *)NULL, 0L, (i4 *)NULL, &err_code, 3,
			0, *(u_i4*)&connect_to_id, 
			0, cllb->llb_type,
			0, *(u_i4*)&cllb->llb_id);
	return (LK_BADPARAM);
    }

    (VOID)LK_mutex(SEM_EXCL, &cllb->llb_mutex);

    /* If not already converted to a SHARED list, do it now */
    if ( (cllb->llb_status & LLB_PARENT_SHARED) == 0 )
    {
	/* Allocate an LLB for the SHARED list */
	if ( (sllb = (LLB*)LK_allocate_cb(LLB_TYPE, (LLB*)NULL)) == 0 )
	{
	    uleFormat(NULL, E_DMA011_LK_NO_LLBS, (CL_ERR_DESC*)NULL, ULE_LOG,
			NULL, (char*)NULL, 0L, (i4*)NULL, &err_code, 0);
	    (VOID)LK_unmutex(&cllb->llb_mutex);
	    return(LK_NOLOCKS);
	}

	/* Save the "assigned" llb name for later */
	assigned_llbname[0] = sllb->llb_name[0];
	assigned_llbname[1] = sllb->llb_name[1];

	/*
	** "sllb" becomes the SHARED lock list and
	** inherits its characteristics and locks 
	** from the "connect to" list (cllb).
	*/

	sllb->llb_cpid = cllb->llb_cpid;

	/* SHARED LLB will start with one connected handle */
	sllb->llb_connect_count = 1;
	sllb->llb_shared_llb = 0;
	sllb->llb_max_lkb = cllb->llb_max_lkb;
	/* SHARED list is multithreaded, handles are not */
	sllb->llb_status |= (LLB_SHARED | LLB_MULTITHREAD);
	sllb->llb_status &= 
	    ~(LLB_WAITING | LLB_EWAIT | LLB_ESET | LLB_EDONE);
	/* Caller's LLB is not flagged until everything else is done.
	** Other parts of LK look at llb_status unmutexed.
	*/
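	/* Remember the SHARED list in the handle LLB */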
	cllb->llb_shared_llb = LGK_OFFSET_FROM_PTR(sllb);

	/*
	** Move the LKB/RSB stash to the SHARED list.
	** Subsequent lock requests will be filled 
	** from the SHARED LLB, not the handles.
	*/
	sllb->llb_lkb_stash = cllb->llb_lkb_stash;
	sllb->llb_rsb_stash = cllb->llb_rsb_stash;
	sllb->llb_lkb_alloc = cllb->llb_lkb_alloc;
	sllb->llb_rsb_alloc = cllb->llb_rsb_alloc;
	cllb->llb_lkb_stash = 0;
	cllb->llb_rsb_stash = 0;
	cllb->llb_lkb_alloc = 0;
	cllb->llb_rsb_alloc = 0;

	/*
	** Replace the "connect to" LLB on the active list
	** with the SHARED LLB, insert the "connect to"
	** LLB in its collated spot to maintain the
	** descending sequence (by llb_name) of the list.
	**
	** For this we need to hold the llb_q_mutex:
	*/
	(VOID)LK_mutex(SEM_EXCL, &lkd->lkd_llb_q_mutex);

	/*
	** The SHARED LLB's "name" (tran id) becomes that
	** of the "connect" LLB.
	**
	** The "connect" LLB's "name" is replaced with
	** the assigned name.
	*/
	sllb->llb_name[0] = cllb->llb_name[0];
	sllb->llb_name[1] = cllb->llb_name[1];
	cllb->llb_name[0] = assigned_llbname[0];
	cllb->llb_name[1] = assigned_llbname[1];

	/*
	** The SHARED LLB replaces the "connect" LLB
	** on the LLB queue, in the same position.
	*/
	sllb->llb_q_next = cllb->llb_q_next;
	sllb->llb_q_prev = cllb->llb_q_prev;

	next_llb = (LLB*)LGK_PTR_FROM_OFFSET(sllb->llb_q_next);
	prev_llb = (LLB*)LGK_PTR_FROM_OFFSET(sllb->llb_q_prev);

	next_llb->llb_q_prev = prev_llb->llb_q_next =
	    LGK_OFFSET_FROM_PTR(sllb);

	/*
	** Find the insertion point in the active LLB list
	** for the "connect to" LLB.
	*/
	end_offset = LGK_OFFSET_FROM_PTR(&lkd->lkd_llb_next);
	next_llb = (LLB*)LGK_PTR_FROM_OFFSET(lkd->lkd_llb_next);

	for ( llb_offset = lkd->lkd_llb_next;
	      llb_offset != end_offset;
	      llb_offset = next_llb->llb_q_next )
	{
	    next_llb = (LLB*)LGK_PTR_FROM_OFFSET(llb_offset);

	    if ( cllb->llb_name[0] > next_llb->llb_name[0] ||
		(cllb->llb_name[0] == next_llb->llb_name[0] &&
		 cllb->llb_name[1] > next_llb->llb_name[1]) )
	    {
		break;
	    }
	}
	/* Connect "connect to" LLB into its new position */
	cllb->llb_q_next = LGK_OFFSET_FROM_PTR(next_llb);
	cllb->llb_q_prev = next_llb->llb_q_prev;
	prev_llb = (LLB*)LGK_PTR_FROM_OFFSET(next_llb->llb_q_prev);
	prev_llb->llb_q_next = next_llb->llb_q_prev =
	    LGK_OFFSET_FROM_PTR(cllb);

	(VOID)LK_unmutex(&lkd->lkd_llb_q_mutex);

	/* Relocate the cllb's lock list to the SHARED LLB: */

	sllb->llb_lkb_count = cllb->llb_lkb_count;
	sllb->llb_llkb_count = cllb->llb_llkb_count;

	if ( cllb->llb_llbq.llbq_next == 
		LGK_OFFSET_FROM_PTR(&cllb->llb_llbq) )
	{
	    /* No locks held */
	    sllb->llb_llbq.llbq_next = sllb->llb_llbq.llbq_prev
		= LGK_OFFSET_FROM_PTR(&sllb->llb_llbq);
	}
	else
	{
	    sllb->llb_llbq.llbq_next = cllb->llb_llbq.llbq_next;
	    sllb->llb_llbq.llbq_prev = cllb->llb_llbq.llbq_prev;
	    next_llbq = (LLBQ*)LGK_PTR_FROM_OFFSET(sllb->llb_llbq.llbq_next);
	    prev_llbq = (LLBQ*)LGK_PTR_FROM_OFFSET(sllb->llb_llbq.llbq_prev);
	    next_llbq->llbq_prev = 
		LGK_OFFSET_FROM_PTR(&sllb->llb_llbq.llbq_next);
	    prev_llbq->llbq_next = 
		LGK_OFFSET_FROM_PTR(&sllb->llb_llbq.llbq_next);
	}

	/* The handles (cllb) never hold locks */
	cllb->llb_llbq.llbq_next = cllb->llb_llbq.llbq_prev =
	    LGK_OFFSET_FROM_PTR(&cllb->llb_llbq.llbq_next);
	cllb->llb_lkb_count = 0;
	cllb->llb_llkb_count = 0;

	/* 
	** For each lock on the list:
	**	o rehash LKH using sllb
	**	o if hash to new bucket
	**	  - remove LKB from old lkb_hash
	**	  - insert into new lkb_hash
	**	o change lkbq_llb "owner" to sllb
	*/

	rsh_table = (RSH*)LGK_PTR_FROM_OFFSET(lkd->lkd_rsh_table);
	lkh_table = (LKH*)LGK_PTR_FROM_OFFSET(lkd->lkd_lkh_table);

	end_offset = LGK_OFFSET_FROM_PTR(&sllb->llb_llbq);

	for( llbq_offset = sllb->llb_llbq.llbq_next;
	     llbq_offset != end_offset;
	     llbq_offset = next_llbq->llbq_next )
	{
	    next_llbq = (LLBQ*)LGK_PTR_FROM_OFFSET(llbq_offset);
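	    /* Back up from the embedded lkb_llbq member to its containing LKB */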
	    lkb = (LKB*)((char*)next_llbq - CL_OFFSETOF(LKB,lkb_llbq));
	    lkbq = &lkb->lkb_lkbq;

	    /* Compute new LKH hash bucket using sllb */
	    rsb = (RSB*)LGK_PTR_FROM_OFFSET(lkbq->lkbq_rsb);
	    rsb_hash_value = (rsb->rsb_name.lk_type + 
		rsb->rsb_name.lk_key1 + rsb->rsb_name.lk_key2 + 
		rsb->rsb_name.lk_key3 + rsb->rsb_name.lk_key4 + 
		rsb->rsb_name.lk_key5 + rsb->rsb_name.lk_key6);

	    rsb_hash_bucket = (RSH *)&rsh_table
		[(unsigned)rsb_hash_value % lkd->lkd_rsh_size];
	    lkb_hash_bucket = (LKH *)&lkh_table
		[((unsigned)rsb_hash_value + (unsigned)LGK_OFFSET_FROM_PTR(sllb)) %
					    lkd->lkd_lkh_size];
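	    /* The LKB hash includes the owning LLB's offset, so the bucket
	    ** may change now that the lock's owner becomes the SHARED list.
	    */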

	    /* If hash to another bucket, relocate LKB to new hash */
	    old_hash_bucket = (LKH*)LGK_PTR_FROM_OFFSET(lkbq->lkbq_lkh);
	    if ( lkb_hash_bucket != old_hash_bucket )
	    {
		/* Remove from old hash bucket */
		(VOID)LK_mutex(SEM_EXCL, &old_hash_bucket->lkh_mutex);
		if ( lkbq->lkbq_next )
		{
		    next_lkbq = (LKBQ*)LGK_PTR_FROM_OFFSET(lkbq->lkbq_next);
		    next_lkbq->lkbq_prev = lkbq->lkbq_prev;
		}
		prev_lkbq = (LKBQ*)LGK_PTR_FROM_OFFSET(lkbq->lkbq_prev);
		prev_lkbq->lkbq_next = lkbq->lkbq_next;
		(VOID)LK_unmutex(&old_hash_bucket->lkh_mutex);

		/* Insert into new hash bucket */
		(VOID)LK_mutex(SEM_EXCL, &lkb_hash_bucket->lkh_mutex);
		lkbq->lkbq_lkh  = LGK_OFFSET_FROM_PTR(lkb_hash_bucket);
		lkbq->lkbq_next = lkb_hash_bucket->lkh_lkbq_next;
		lkbq->lkbq_prev = LGK_OFFSET_FROM_PTR(lkb_hash_bucket);
		if ( lkb_hash_bucket->lkh_lkbq_next )
		{
		    next_lkbq = (LKBQ*)LGK_PTR_FROM_OFFSET(lkb_hash_bucket->lkh_lkbq_next);
		    next_lkbq->lkbq_prev = LGK_OFFSET_FROM_PTR(lkbq);
		}
		lkb_hash_bucket->lkh_lkbq_next = LGK_OFFSET_FROM_PTR(lkbq);
		(VOID)LK_unmutex(&lkb_hash_bucket->lkh_mutex);
	    }

	    /* Change ownership of LKB to sllb */
	    lkbq->lkbq_llb = LGK_OFFSET_FROM_PTR(sllb);
	}
	cllb->llb_status |= (LLB_PARENT_SHARED | LLB_NONPROTECT);
	cllb->llb_status &= ~LLB_MULTITHREAD;
	/* Conversion of cllb is complete */
	(VOID)LK_unmutex(&cllb->llb_mutex);

	/* Keep the sllb mutex */
    }
    else
    {
	/* Already converted, sanity check sllb */
	if (cllb->llb_shared_llb == 0)
	{
	    uleFormat(NULL, E_CL106F_LK_CONNECT_BADPARAM, (CL_ERR_DESC *)NULL, ULE_LOG,
			NULL, (char *)NULL, 0L, (i4 *)NULL, &err_code, 1,
			0, connect_to_id->id_id);
	    (VOID)LK_unmutex(&cllb->llb_mutex);
	    return (LK_BADPARAM);
	}

	sllb = (LLB *)LGK_PTR_FROM_OFFSET(cllb->llb_shared_llb);

	if ( sllb->llb_type != LLB_TYPE ||
	    (sllb->llb_status & LLB_SHARED) == 0 )
	{
	    uleFormat(NULL, E_CL1070_LK_CONNECT_BADPARAM, (CL_ERR_DESC *)NULL, ULE_LOG,
			NULL, (char *)NULL, 0L, (i4 *)NULL, &err_code, 3,
			0, connect_to_id->id_id, 
			0, sllb->llb_type, 
			0, sllb->llb_status);
	    (VOID)LK_unmutex(&cllb->llb_mutex);
	    return (LK_BADPARAM);
	}

	(VOID)LK_mutex(SEM_EXCL, &sllb->llb_mutex);
	(VOID)LK_unmutex(&cllb->llb_mutex);
    }

    /*
    ** This is the "name" (tran id) of the lock list
    ** we want to connect to.
    */
    connect_id.lk_uhigh = sllb->llb_name[0];
    connect_id.lk_ulow  = sllb->llb_name[1];

    /* Reverse engineer the flags */
    flags = LK_CONNECT;
    if ( sllb->llb_status & LLB_NOINTERRUPT )
	flags |= LK_NOINTERRUPT;
    if ( sllb->llb_status & LLB_MASTER )
	flags |= LK_MASTER;
    if ( sllb->llb_status & LLB_NONPROTECT )
	flags |= LK_NONPROTECT;
    if ( sllb->llb_status & LLB_MASTER_CSP )
	flags |= LK_MASTER_CSP;

    /* "maxlocks" for the handle will be the same as the sllb */
    count = sllb->llb_max_lkb;

    (VOID)LK_unmutex(&sllb->llb_mutex);

    /* Create a handle to this SHARED list */
    return(LKcreate_list(flags,
			  (LK_LLID)0,
			  &connect_id,
			  lock_list_id,
			  count,
			  sys_err));
}
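
/*
** Illustrative sketch (not part of the original source): how a child
** thread might attach to a parent transaction's lock list with LKconnect.
** The function name, "parent_list" and "child_list" are assumptions made
** for illustration only.
*/
static STATUS
example_connect_child(
LK_LLID		parent_list,
LK_LLID		*child_list)
{
    CL_ERR_DESC	sys_err;
    STATUS	status;

    /* On success, *child_list is a PSHARED handle onto the SHARED list */
    status = LKconnect(parent_list, child_list, &sys_err);

    if ( status != OK )
    {
	/* LK_BADPARAM or LK_NOLOCKS; let the caller decide how to report it */
	return (status);
    }
    return (OK);
}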
Example #2
{
    /* control objects */

    { 0, "exp.glf.mo.oid_map.file_name", sizeof( MO_oid_map ),
	  MO_READ|MO_SERVER_WRITE, 0,
	  0, MOstrget, MO_oidmap_set, (PTR)&MO_oid_map, MOcdata_index },

    { 0, "exp.glf.mo.oid_map.file_time", sizeof( MO_map_time ),
	  MO_READ, 0,
	  0, MOintget, MOnoset, (PTR)&MO_map_time, MOcdata_index },

    /* index class */

   { MO_INDEX_CLASSID, mometa_index_class,
	 0, MO_READ, 0,
	 CL_OFFSETOF( MO_CLASS, node.key ), MOstrpget, MOnoset,
	 0, MO_classid_index },

    /* special methods for handling twins */

   { 0, MO_META_CLASS_CLASS,
	 0, MO_READ, mometa_index_class,
	 0, MO_class_get, MOnoset,
	 0, MO_classid_index },

   { 0, MO_META_OID_CLASS,
	 0, MO_READ|MO_WRITE, mometa_index_class,
	 0, MO_oid_get, MO_oid_set,
	 0, MO_classid_index },

   /* vanilla methods */
Example #3
    { 0, "exp.gwf.gwm.session.control.del_vnode",
	  0, MO_SES_READ|MO_SES_WRITE, 0,
	  0, MOblankget, GM_deldomset,
	  0, MOcdata_index },

    { 0, "exp.gwf.gwm.session.control.reset_domain",
	  0, MO_SES_READ|MO_SES_WRITE, 0,
	  0, MOblankget, GM_resdomset,
	  0, MOcdata_index },

    /* query objects -- only column is index of nodes for this session */

    { MO_INDEX_CLASSID|MO_CDATA_INDEX, domain_index,
	  MO_SIZEOF_MEMBER(SPBLK, key), MO_SES_READ, 0,
	  CL_OFFSETOF(SPBLK, key), MOstrpget, MOnoset,
	  0, GM_domain_index },

    { 0 }
} ;


/*{
** Name:	GM_scd_startup	- define SCB MIB classes.
**
** Description:
**	Call this once at facility startup to define all the SCB
**	related classes.
**
** Re-entrancy:
**	yes, but shouldn't be called more than once.
Example #4
  /* special methods for special formatting */

  { 0, "exp.clf.vms.cs.scb_self",
	0, MO_READ, index_name,
	0, CS_scb_self_get, MOnoset,
	0, CS_scb_index },

  { 0, "exp.clf.vms.cs.scb_state",
	0, MO_READ, index_name,
	0, CS_scb_state_get, MOnoset,
	0, CS_scb_index },

  { 0, "exp.clf.vms.cs.scb_state_num",
	MO_SIZEOF_MEMBER(CS_SCB, cs_state), MO_READ, index_name,
	CL_OFFSETOF(CS_SCB, cs_state), MOuintget, MOnoset,
	0, CS_scb_index },

  { 0, "exp.clf.vms.cs.scb_memory",
	MO_SIZEOF_MEMBER(CS_SCB, cs_memory), MO_READ, index_name,
	CL_OFFSETOF(CS_SCB, cs_memory), CS_scb_memory_get, MOnoset,
	0, CS_scb_index },

  { 0, "exp.clf.vms.cs.scb_thread_type",
	0, MO_READ, index_name,
	0, CS_scb_thread_type_get, MOnoset,
	0, CS_scb_index },

  { 0, "exp.clf.vms.cs.scb_thread_type_num",
	MO_SIZEOF_MEMBER(CS_SCB, cs_thread_type), MO_READ, index_name,
	CL_OFFSETOF(CS_SCB, cs_thread_type), MOuintget, MOnoset,
Example #5
MO_SET_METHOD asct_start_set;
MO_SET_METHOD asct_suspend_set;
MO_SET_METHOD asct_flush_set;
MO_SET_METHOD asct_logfile_set;
MO_SET_METHOD asct_logsize_set;
MO_SET_METHOD asct_logmsgsize_set;
MO_SET_METHOD asct_threshold_set;

MO_GET_METHOD asctfl_state_get;
MO_GET_METHOD asct_tracemask_get;

GLOBALDEF MO_CLASS_DEF asct_class[] =
{

    { 0, "exp.ascf.asct.thread", MO_SIZEOF_MEMBER(ASCTFL, self),
      MO_READ, 0, CL_OFFSETOF(ASCTFL, self),
      MOintget, MOnoset, (PTR)&asctfile, MOcdata_index },

    { 0, "exp.ascf.asct.entry", MO_SIZEOF_MEMBER(ASCTFR, freebuf),
      MO_READ, 0, CL_OFFSETOF(ASCTFR, freebuf),
      MOintget, MOnoset, (PTR)&asctentry, MOcdata_index },

    { 0, "exp.ascf.asct.state", MO_SIZEOF_MEMBER(ASCTFL, state),
      MO_READ, 0, CL_OFFSETOF(ASCTFL, state),
      asctfl_state_get, MOnoset, (PTR)&asctfile, MOcdata_index },

    { 0, "exp.ascf.asct.start", MO_SIZEOF_MEMBER(ASCTFL, start),
      MO_READ, 0, CL_OFFSETOF(ASCTFL, start),
      MOintget, MOnoset, (PTR)&asctfile, MOcdata_index },

    { 0, "exp.ascf.asct.end", MO_SIZEOF_MEMBER(ASCTFL, end),
Example #6
*/
static MO_GET_METHOD ADFmo_adg_op_type_get;
static MO_GET_METHOD ADFmo_adg_op_use_get;
static MO_GET_METHOD ADFmo_adg_fi_type_get;

static char dt_index[] = "exp.adf.adg.dt_ix";

static MO_CLASS_DEF ADFmo_adg_dt_classes[] =
{
    {0, dt_index,
	0, MO_READ, dt_index,
	0, MOpvget, MOnoset, (PTR)0, ADFmo_dt_index
    },
    {0, "exp.adf.adg.dt_name",
	MO_SIZEOF_MEMBER(ADI_DATATYPE, adi_dtname), MO_READ, dt_index,
	CL_OFFSETOF(ADI_DATATYPE, adi_dtname),
	MOstrget, MOnoset, (PTR)0, ADFmo_dt_index
    },
    {0, "exp.adf.adg.dt_id",
	MO_SIZEOF_MEMBER(ADI_DATATYPE, adi_dtid), MO_READ, dt_index,
	CL_OFFSETOF(ADI_DATATYPE, adi_dtid),
	MOintget, MOnoset, (PTR)0, ADFmo_dt_index
    },
    {0, "exp.adf.adg.dt_stat",
	MO_SIZEOF_MEMBER(ADI_DATATYPE, adi_dtstat_bits), MO_READ, dt_index,
	CL_OFFSETOF(ADI_DATATYPE, adi_dtstat_bits),
	MOintget, MOnoset, (PTR)0, ADFmo_dt_index
    },
    { 0 }
};
Example #7
/*{
** Name: dmve_btdel - The recovery of a delete key operation.
**
** Description:
**	This function is used to do, redo and undo a delete key
**	operation to a btree index/leaf page. This function adds or
**	deletes the key from the index depending on the recovery mode.
**
** Inputs:
**	dmve_cb
**	    .dmve_log_rec	    The log record of the btree delete operation
**	    .dmve_action	    Should be DMVE_DO, DMVE_REDO, or DMVE_UNDO.
**	    .dmve_dcb_ptr	    Pointer to DCB.
**	    .dmve_tran_id	    The physical transaction id.
**	    .dmve_lk_id		    The transaction lock list id.
**	    .dmve_log_id	    The logging system database id.
**	    .dmve_db_lockmode	    The lockmode of the database. Should be 
**				    DM2T_X or DM2T_S.
**
** Outputs:
**	dmve_cb
**	    .dmve_error.err_code    The reason for error status.
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**	    E_DB_FATAL			Operation was partially completed,
**					the transaction must be aborted.
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	14-dec-1992 (rogerk)
**	    Written for 6.5 recovery.
**	15-mar-1993 (rogerk)
**	    Reduced Logging - Phase IV:  Added lsn argument to btundo_check.
**      26-apr-1993 (bryanp)
**          6.5 Cluster support:
**              Replace all uses of DM_LOG_ADDR with LG_LA or LG_LSN.
**		Correct the message parameter lengths for E_DM9665.
**	26-jul-1993 (bryanp)
**	    Replace all uses of DM_LKID with LK_LKID.
**      11-sep-1997 (thaju02) bug#85156 - on recovery lock extension
**          table with DM2T_S.  Attempting to lock table with DM2T_IX
**          causes deadlock, dbms resource deadlock error during recovery,
**          pass abort, rcp resource deadlock error during recovery,
**          rcp marks database inconsistent.
**	06-may-1996 (thaju02 & nanpr01)
**	    New Page Format Support: 
**		Change page header references to use macros.
**		Pass page size to dm1c_getaccessors().
**      22-nov-96 (stial01,dilma04)
**          Row Locking Project:
**          Do not get page lock if row locking.
**          Check/adjust bid if row locking.
**     12-dec-96 (dilma04)
**          Corrected mistake which caused inconsistency between row_locking
**          value and DM0L_ROW_LOCK flag after lock escalation.
**      27-feb-97 (stial01)
**          Whenever row locking, entries can shift requiring btundo_check
**          Fix page without locking and then mutex it if we have IX page lock.
**          Clean ALL deleted leaf entries if X page lock.
**      14-may-97 (dilma04)
**          Cursor Stability Project:
**          Add lock_id argument to dm0p_lock_page and dm0p_unlock_page.
**      21-may-1997 (stial01)
**          Added flags arg to dm0p_unmutex call(s).
**      18-jun-1997 (stial01)
**          dmve_btdel() Request IX page lock if row locking.
**      30-jun-97 (dilma04)
**          Bug 83301. Set DM0P_INTENT_LK fix action if row locking.
**      30-jun-97 (stial01)
**          Use dm0p_lock_buf_macro to lock buffer to reduce mutex duration
**          Don't unfix tabio for bid check, changed args to dmve_bid_check()
**      29-jul-97 (stial01)
**          Validate/adjust bid if page size != DM_COMPAT_PGSIZE
**      06-jan-98 (stial01)
**          If page size 2k, unfix pages,tbio before bid check (B87385) 
**      19-Jun-2002 (horda03) Bug 108074
**          If the table is locked exclusively, then indicate that SCONCUR
**          pages don't need to be flushed immediately.
**      23-feb-2004 (thaju02) Bug 111470 INGSRV2635
**          For rollforwarddb -b option, do not compare the LSN's on the
**          page to that of the log record.
**	13-Apr-2006 (jenjo02)
**	    CLUSTERED Btree leaf pages don't have attributes - check
**	    that before calling dmve_bid_check.
**	13-Jun-2006 (jenjo02)
**	    Deletes of leaf entries may be up to
**	    DM1B_MAXLEAFLEN, not DM1B_KEYLENGTH, bytes.
**	01-Dec-2006 (kiria01) b117225
**	    Initialise the lockid parameters that will be passed to LKrequest
**	    to avoid random implicit lock conversions.
*/
DB_STATUS
dmve_btdel(
DMVE_CB		*dmve)
{
    DM0L_BTDEL		*log_rec = (DM0L_BTDEL *)dmve->dmve_log_rec;
    LG_LSN		*log_lsn = &log_rec->btd_header.lsn; 
    DMP_DCB		*dcb = dmve->dmve_dcb_ptr;
    DMP_TABLE_IO	*tbio = NULL;
    DMPP_PAGE		*page = NULL;
    DB_STATUS		status = E_DB_OK;
    DB_STATUS		tmp_status;
    DM_TID		leaf_bid;
    char		*insert_key;
    i4		recovery_action;
    i4		error;
    i4		loc_error;
    i4		page_type = log_rec->btd_pg_type;
    bool		bid_check_done = FALSE;
    bool                bid_check = FALSE;
    bool                index_update;
    DB_TRAN_ID          zero_tranid;
    i4             opflag;
    bool                allocate = TRUE;
    DB_ERROR		local_dberr;
    DMP_TCB		*t = NULL;
    DMP_PINFO		*pinfo = NULL;

    CLRDBERR(&dmve->dmve_error);
    DMVE_CLEAR_TABINFO_MACRO(dmve);

    if (dmve->dmve_flags & DMVE_MVCC)
	log_rec->btd_bid.tid_tid.tid_page = 
	DM1B_VPT_GET_PAGE_PAGE_MACRO(log_rec->btd_pg_type, dmve->dmve_pinfo->page);

    /*
    ** Store BID of insert into a local variable.  The insert BID may
    ** be modified in undo recovery by the dmve_bid_check routine.
    */
    leaf_bid = log_rec->btd_bid;

    /*
    ** Call appropriate recovery action depending on the recovery type
    ** and record flags.  CLR actions are always executed as an UNDO
    ** operation.
    */
    recovery_action = dmve->dmve_action;
    if (log_rec->btd_header.flags & DM0L_CLR)
	recovery_action = DMVE_UNDO;

    for (;;)
    {
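	/* Single-trip loop: 'break' falls through to the common cleanup below */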
	/* Consistency Check:  check for illegal log records */
	if (log_rec->btd_header.type != DM0LBTDEL)
	{
	    SETDBERR(&dmve->dmve_error, 0, E_DM9601_DMVE_BAD_PARAMETER);
	    break;
	}

	/*
	** Check for partial recovery during UNDO operations.
	** We cannot bypass recovery of index updates even if the page
	** is on a location which is not being recovered.  This is because
	** the logged page number may not be the page that actually needs
	** the UNDO action!!!  If the page which was originally updated
	** has been since SPLIT, then the logged key may have moved to a
	** new index/leaf page.  It is that new page to which this undo
	** action must be applied.  And that new page may be on a location
	** being recovered even if the original page is not.
	**
	** If recovery is being bypassed on the entire table then no recovery
	** needs to be done.
	*/
	if ((dmve->dmve_action == DMVE_UNDO) &&
	    (dmve_location_check(dmve, (i4)log_rec->btd_cnf_loc_id) == FALSE))
	{
	    uleFormat(NULL, E_DM9668_TABLE_NOT_RECOVERED, (CL_ERR_DESC *)NULL, 
		ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL, 
		&loc_error, 2, 0, log_rec->btd_tbl_id.db_tab_base,
		0, log_rec->btd_tbl_id.db_tab_index);
	    SETDBERR(&dmve->dmve_error, 0, E_DM9667_NOPARTIAL_RECOVERY);
	    break;
	}

	/*
	** Get handle to a tableio control block with which to read
	** and write pages needed during recovery.
	**
	** Warning return indicates that no tableio control block was
	** built because no recovery is needed on any of the locations 
	** described in this log record.  Note that check above prevents
	** this case from occurring during UNDO recovery.
	*/
	status = dmve_fix_tabio(dmve, &log_rec->btd_tbl_id, &tbio);
	if (DB_FAILURE_MACRO(status))
	    break;
	if ((status == E_DB_WARN) && (dmve->dmve_error.err_code == W_DM9660_DMVE_TABLE_OFFLINE))
	{
	    CLRDBERR(&dmve->dmve_error);
	    return (E_DB_OK);
	}

	/* This might be a partial tcb, but it will always have basic table info */
	t = (DMP_TCB *)((char *)tbio - CL_OFFSETOF(DMP_TCB, tcb_table_io));

	/*
	** Lock/Fix the page we need to recover in cache for write.
	*/
	status = dmve_fix_page(dmve, tbio, leaf_bid.tid_tid.tid_page,&pinfo);
	if (status != E_DB_OK)
	    break;
	page = pinfo->page;

	/*
	** Dump debug trace info about pages if such tracing is configured.
	*/
	if (DMZ_ASY_MACRO(15))
	    dmve_trace_page_info(log_rec->btd_pg_type, log_rec->btd_page_size,
		page, dmve->dmve_plv, "Page"); 

	index_update = ((DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, page)
				& DMPP_INDEX) != 0);

	if (dmve->dmve_action == DMVE_UNDO)
	{
	    /*
	    ** UNDO Recovery SPLIT check:
	    **
	    ** If doing UNDO recovery, we need to verify that this is the 
	    ** correct page from which to undo the delete operation.
	    ** If the row in question has been moved to a different page by
	    ** a subsequent SPLIT operation, then we have to find that new page
	    ** in order to perform the undo.
	    **
	    ** The bid_check will search for the correct BID to which to do
	    ** the recovery action and return with that value in 'leaf_bid'.
	    */
	   if ((LSN_GT(
		DM1B_VPT_ADDR_BT_SPLIT_LSN_MACRO(page_type, page), log_lsn)) &&
	    (DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, page) & DMPP_LEAF))
	    {
		bid_check = TRUE;
	    }

	    /*
	    ** UNDO: if page type != TCB_PG_V1, validate/adjust bid
	    */
	    if ( (page_type != TCB_PG_V1)
		&& (index_update == FALSE) )
	    {
		bid_check = TRUE;
	    }
	}
	else
	{
	    /*
	    ** REDO recovery, the logged page is correct, but
	    ** due to page cleans the logged bid may be incorrect.
	    ** Only do the bid check if we need to REDO this update
	    */
	    if ( (page_type != TCB_PG_V1) 
		&& (index_update == FALSE)
		&&  (LSN_LT(
		    DM1B_VPT_ADDR_PAGE_LOG_ADDR_MACRO(page_type, page), 
		    log_lsn)))
	    {
		bid_check = TRUE;
	    }
	}

	if (bid_check && !bid_check_done)
	{
	    insert_key = &log_rec->btd_vbuf[0]; 
	    if (recovery_action == DMVE_UNDO)
	    {
		/*
		** Undo leaf delete when row locking or
		** VPS physical page lock (for etabs)
		** Space should not have been reclaimed after delete
		** Undo delete needs to FIND the deleted key
		*/
		if (dmve->dmve_lk_type == LK_ROW || dmve->dmve_lk_type == LK_CROW ||
		    (page_type != TCB_PG_V1 &&
		    dmve->dmve_lk_type == LK_PAGE && 
		    (log_rec->btd_header.flags & DM0L_PHYS_LOCK)))
		{
		    opflag = DMVE_FINDKEY;
		    allocate = FALSE;
		}
		else
		{
		    opflag = DMVE_FINDPOS;
		}
	    }
	    else
		opflag = DMVE_FINDKEY;

	    status = dmve_bid_check(dmve, opflag,
		&log_rec->btd_tbl_id, &log_rec->btd_bid, 
		&log_rec->btd_tid, insert_key, log_rec->btd_key_size, 
		&tbio, &leaf_bid, &pinfo); 

	    if (status == E_DB_WARN && (dmve->dmve_flags & DMVE_MVCC))
		return (E_DB_OK);

	    if (status != E_DB_OK)
		break;

            bid_check_done = TRUE;

	    /* Reset local tcb, page pointers */
	    /* This might be a partial tcb, but it will always have basic table info */
	    t = (DMP_TCB *)((char *)tbio - CL_OFFSETOF(DMP_TCB, tcb_table_io));
            page = pinfo->page;

	}


	/*
	** Compare the LSN on the page with that of the log record
	** to determine what recovery will be needed.
	**
	**   - During Forward processing, if the page's LSN is greater than
	**     the log record then no recovery is needed.
	**
	**   - During Backward processing, it is an error for a page's LSN
	**     to be less than the log record LSN.
	**
	**   - Currently, during rollforward processing it is unexpected
	**     to find that a recovery operation need not be applied because
	**     of the page's LSN.  This is because rollforward must always
	**     begin from a checkpoint that is previous to any journal record
	**     being applied.  In the future this requirement may change and
	**     Rollforward will use the same expectations as Redo.
	*/
	switch (dmve->dmve_action)
	{
	case DMVE_DO:
	case DMVE_REDO:
            if (LSN_GTE(
		DM1B_VPT_ADDR_PAGE_LOG_ADDR_MACRO(page_type, page),
		log_lsn) && ((dmve->dmve_flags & DMVE_ROLLDB_BOPT) == 0))
	    {
		if (dmve->dmve_action == DMVE_DO)
		{
		    uleFormat(NULL, E_DM9665_PAGE_OUT_OF_DATE, (CL_ERR_DESC *)NULL,
			ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL,
			&loc_error, 8,
			sizeof(DB_TAB_NAME), tbio->tbio_relid->db_tab_name,
			sizeof(DB_OWN_NAME), tbio->tbio_relowner->db_own_name,
			0, DM1B_VPT_GET_PAGE_PAGE_MACRO(page_type, page),
			0, DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, page),
			0, DM1B_VPT_GET_LOG_ADDR_HIGH_MACRO(page_type, page),
			0, DM1B_VPT_GET_LOG_ADDR_LOW_MACRO(page_type, page),
			0, log_lsn->lsn_high, 0, log_lsn->lsn_low);
		}
		page = NULL;
	    }
	    break;

	case DMVE_UNDO:
            if (LSN_LT(
		DM1B_VPT_ADDR_PAGE_LOG_ADDR_MACRO(page_type, page), log_lsn))
	    {
		uleFormat(NULL, E_DM9665_PAGE_OUT_OF_DATE, (CL_ERR_DESC *)NULL,
		    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL,
		    &loc_error, 8,
		    sizeof(DB_TAB_NAME), tbio->tbio_relid->db_tab_name,
		    sizeof(DB_OWN_NAME), tbio->tbio_relowner->db_own_name,
		    0, DM1B_VPT_GET_PAGE_PAGE_MACRO(page_type, page),
		    0, DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, page),
		    0, DM1B_VPT_GET_LOG_ADDR_HIGH_MACRO(page_type, page),
		    0, DM1B_VPT_GET_LOG_ADDR_LOW_MACRO(page_type,page),
		    0, log_lsn->lsn_high, 0, log_lsn->lsn_low);
		SETDBERR(&dmve->dmve_error, 0, E_DM9666_PAGE_LSN_MISMATCH);
		status = E_DB_ERROR;
	    }
	    break;
	}
	if (status != E_DB_OK || !page)
	    break;

	switch (recovery_action)
	{
	case DMVE_DO:
	case DMVE_REDO:
	    status = dmv_rebtree_del(dmve, tbio, pinfo, &leaf_bid);
	    break;

	case DMVE_UNDO:
	    status = dmv_unbtree_del(dmve, tbio, pinfo, &leaf_bid, allocate);
	    break;
	}

	break;
    }

    if (status != E_DB_OK)
    {
	uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
		(char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
    }

    /*
    ** Error handling: this implies that the abort action failed and the
    ** database will be marked inconsistent by the caller.
    */
    if (pinfo)
    {
	tmp_status = dmve_unfix_page(dmve, tbio, pinfo);
	if (tmp_status > status)
	    status = tmp_status;
    }

    if (tbio)
    {
	tmp_status = dmve_unfix_tabio(dmve, &tbio, 0);
	if (tmp_status > status)
	    status = tmp_status;
    }

    if (status != E_DB_OK)
	SETDBERR(&dmve->dmve_error, 0, E_DM9651_DMVE_BTREE_DEL);

    return(status);
}
Example #8
/*{
** Name: dmve_btupdovfl - The recovery of a Btree update overflow operation.
**
** Description:
**	This function handles do, redo and undo recovery of an update to
**	a btree overflow page, as described by a DM0L_BTUPDOVFL log record.
**
** Inputs:
**	dmve_cb
**	    .dmve_log_rec	    The log record of the update overflow operation.
**	    .dmve_action	    Should be DMVE_DO, DMVE_REDO, or DMVE_UNDO
**	    .dmve_dcb_ptr	    Pointer to DCB.
**	    .dmve_tran_id	    The physical transaction id.
**	    .dmve_lk_id		    The transaction lock list id.
**	    .dmve_log_id	    The logging system database id.
**	    .dmve_db_lockmode	    The lockmode of the database. Should be 
**				    DM2T_X or DM2T_S.
**
** Outputs:
**	dmve_cb
**	    .dmve_error.err_code    The reason for error status.
**	Returns:
**	    E_DB_OK
**	    E_DB_ERROR
**	Exceptions:
**	    none
**
** Side Effects:
**	    none
**
** History:
**	14-dec-1992 (jnash & rogerk)
**	    Reduced Logging Project: Written with new 6.5 recovery protocols.
**      26-apr-1993 (bryanp)
**          6.5 Cluster support:
**              Replace all uses of DM_LOG_ADDR with LG_LA or LG_LSN.
**		Correct the message parameter lengths for E_DM9665.
**	26-jul-1993 (bryanp)
**	    Replace all uses of DM_LKID with LK_LKID.
**	06-may-1996 (thaju02 & nanpr01)
**	    New Page Format Support: 
**		Change page header references to use macros.
**		Pass page size to dm1c_getaccessors().
**      22-nov-96 (dilma04)
**          Row Locking Project:
**          Add lock_type argument to dm0p_lock_page and dm0p_unlock_page
**      14-may-97 (dilma04)
**          Cursor Stability Project:
**          Add lock_id argument to dm0p_lock_page and dm0p_unlock_page.
**      11-sep-1997 (thaju02) bug#85156 - on recovery lock extension
**          table with DM2T_S.  Attempting to lock table with DM2T_IX
**          causes deadlock, dbms resource deadlock error during recovery,
**          pass abort, rcp resource deadlock error during recovery,
**          rcp marks database inconsistent.
**      19-Jun-2002 (horda03) Bug 108074
**          If the table is locked exclusively, then indicate that SCONCUR
**          pages don't need to be flushed immediately.
**      23-feb-2004 (thaju02) Bug 111470 INGSRV2635
**          For rollforwarddb -b option, do not compare the LSN's on the
**          page to that of the log record.
**	01-Dec-2006 (kiria01) b117225
**	    Initialise the lockid parameters that will be passed to LKrequest
**	    to avoid random implicit lock conversions.
*/
DB_STATUS
dmve_btupdovfl(
DMVE_CB		*dmve_cb)
{
    DMVE_CB		*dmve = dmve_cb;
    DM0L_BTUPDOVFL	*log_rec = (DM0L_BTUPDOVFL *)dmve->dmve_log_rec;
    LG_LSN		*log_lsn = &log_rec->btu_header.lsn; 
    DMP_DCB		*dcb = dmve->dmve_dcb_ptr;
    DMP_TABLE_IO	*tbio = NULL;
    DMPP_PAGE		*ovfl = NULL;
    DB_STATUS		status = E_DB_OK;
    DB_STATUS		tmp_status;
    i4		recovery_action;
    i4		error;
    i4		loc_error;
    i4		page_type = log_rec->btu_pg_type;
    bool		ovfl_page_recover;
    DB_ERROR		local_dberr;
    DMP_TCB		*t = NULL;
    DMP_PINFO		*ovflpinfo = NULL;

    CLRDBERR(&dmve->dmve_error);
    DMVE_CLEAR_TABINFO_MACRO(dmve);

    for (;;)
    {
	/*
	** Consistency Check:  check for illegal log records.
	*/
	if ((log_rec->btu_header.type != DM0LBTUPDOVFL) ||
	    (log_rec->btu_header.length != 
                (sizeof(DM0L_BTUPDOVFL) -
                        (DM1B_KEYLENGTH - log_rec->btu_lrange_len) - 
                        (DM1B_KEYLENGTH - log_rec->btu_olrange_len) - 
                        (DM1B_KEYLENGTH - log_rec->btu_rrange_len) - 
                        (DM1B_KEYLENGTH - log_rec->btu_orrange_len))))
	{
	    SETDBERR(&dmve->dmve_error, 0, E_DM9601_DMVE_BAD_PARAMETER);
	    break;
	}

	/*
	** Get handle to a tableio control block with which to read
	** and write pages needed during recovery.
	**
	** Warning return indicates that no tableio control block was
	** built because no recovery is needed on any of the locations 
	** described in this log record.
	*/
	status = dmve_fix_tabio(dmve, &log_rec->btu_tbl_id, &tbio);
	if (DB_FAILURE_MACRO(status))
	    break;
	if (status == E_DB_WARN && 
		dmve->dmve_error.err_code == W_DM9660_DMVE_TABLE_OFFLINE)
	{
	    CLRDBERR(&dmve->dmve_error);
	    return (E_DB_OK);
	}

	/* This might be a partial tcb, but it will always have basic info */
	t = (DMP_TCB *)((char *)tbio - CL_OFFSETOF(DMP_TCB, tcb_table_io));

	/*
	** Check recovery requirements for this operation.  If partial
	** recovery is in use, then we may not need to recover all
	** the pages touched by the original update.
	**
	** Also, not all split operations require a data page to be allocated,
	** only those to leaves in a non-secondary index.  If the logged data
	** page is page # 0, then there is no data page to recover.
	*/
	ovfl_page_recover = dmve_location_check(dmve, 
					(i4)log_rec->btu_cnf_loc_id);

	/*
	** Fix the pages we need to recover in cache for write.
	*/
	if (ovfl_page_recover)
	{
	    status = dmve_fix_page(dmve, tbio, log_rec->btu_pageno, &ovflpinfo);
	    if (status != E_DB_OK)
		break;
	    ovfl = ovflpinfo->page;
	}

	/*
	** Dump debug trace info about pages if such tracing is configured.
	*/
	if (DMZ_ASY_MACRO(15))
	    dmve_trace_page_info(log_rec->btu_pg_type, log_rec->btu_page_size,
		ovfl, dmve->dmve_plv, "OVFL"); 

	/*
	** Compare the LSN's on the pages with that of the log record
	** to determine what recovery will be needed.
	**
	**   - During Forward processing, if the page's LSN is greater than
	**     the log record then no recovery is needed.
	**
	**   - During Backward processing, it is an error for a page's LSN
	**     to be less than the log record LSN.
	**
	**   - Currently, during rollforward processing it is unexpected
	**     to find that a recovery operation need not be applied because
	**     of the page's LSN.  This is because rollforward must always
	**     begin from a checkpoint that is previous to any journal record
	**     being applied.  In the future this requirement may change and
	**     Rollforward will use the same expectations as Redo.
	*/
	switch (dmve->dmve_action)
	{
	case DMVE_DO:
	case DMVE_REDO:
	    if (ovfl && LSN_GTE(
		DM1B_VPT_ADDR_PAGE_LOG_ADDR_MACRO(page_type, ovfl), 
		log_lsn) && ((dmve->dmve_flags & DMVE_ROLLDB_BOPT) == 0))
	    {
		if (dmve->dmve_action == DMVE_DO)
		{
		    uleFormat(NULL, E_DM9665_PAGE_OUT_OF_DATE, (CL_ERR_DESC *)NULL,
			ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL,
			&loc_error, 8,
			sizeof(DB_TAB_NAME), tbio->tbio_relid->db_tab_name,
			sizeof(DB_OWN_NAME), tbio->tbio_relowner->db_own_name,
			0, DM1B_VPT_GET_PAGE_PAGE_MACRO(page_type, ovfl),
			0, DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, ovfl),
			0, DM1B_VPT_GET_LOG_ADDR_HIGH_MACRO(page_type, ovfl),
			0, DM1B_VPT_GET_LOG_ADDR_LOW_MACRO(page_type, ovfl),
			0, log_lsn->lsn_high, 0, log_lsn->lsn_low);
		}
		ovfl = NULL;
	    }
	    break;

	case DMVE_UNDO:
	    if (ovfl && (LSN_LT(
		DM1B_VPT_ADDR_PAGE_LOG_ADDR_MACRO(page_type, ovfl), 
		log_lsn)))
	    {
		uleFormat(NULL, E_DM9665_PAGE_OUT_OF_DATE, (CL_ERR_DESC *)NULL,
		    ULE_LOG, NULL, (char *)NULL, (i4)0, (i4 *)NULL,
		    &loc_error, 8,
		    sizeof(DB_TAB_NAME), tbio->tbio_relid->db_tab_name,
		    sizeof(DB_OWN_NAME), tbio->tbio_relowner->db_own_name,
		    0, DM1B_VPT_GET_PAGE_PAGE_MACRO(page_type, ovfl),
		    0, DM1B_VPT_GET_PAGE_STAT_MACRO(page_type, ovfl),
		    0, DM1B_VPT_GET_LOG_ADDR_HIGH_MACRO(page_type, ovfl),
		    0, DM1B_VPT_GET_LOG_ADDR_LOW_MACRO(page_type, ovfl),
		    0, log_lsn->lsn_high, 0, log_lsn->lsn_low);
	        SETDBERR(&dmve->dmve_error, 0, E_DM9666_PAGE_LSN_MISMATCH);
		status = E_DB_ERROR;
	    }
	    break;
	}
	if (status != E_DB_OK || !ovfl)
	    break;

	/*
	** Call appropriate recovery action depending on the recovery type
	** and record flags.  CLR actions are always executed as an UNDO
	** operation.
	*/
	recovery_action = dmve->dmve_action;
	if (log_rec->btu_header.flags & DM0L_CLR)
	    recovery_action = DMVE_UNDO;

	switch (recovery_action)
	{
	case DMVE_DO:
	case DMVE_REDO:
	    status = dmv_rebtupdovfl(dmve, tbio, ovflpinfo);
	    break;

	case DMVE_UNDO:
	    status = dmv_unbtupdovfl(dmve, tbio, ovflpinfo);
	    break;
	}

	break;
    }

    if (status != E_DB_OK)
    {
	uleFormat(&dmve->dmve_error, 0, (CL_ERR_DESC *)NULL, ULE_LOG, NULL, 
	    (char *)NULL, (i4)0, (i4 *)NULL, &error, 0);
    }

    /*
    ** Unfix/Unlock the updated pages.  No need to force them to disk - they
    ** will be tossed out through normal cache protocols if Fast
    ** Commit or at the end of the abort if non Fast Commit.
    */
    if (ovflpinfo)
    {
	tmp_status = dmve_unfix_page(dmve, tbio, ovflpinfo);
	if (tmp_status > status)
	    status = tmp_status;
    }

    /* 
    ** Release our handle to the table control block.
    */
    if (tbio)
    {
	tmp_status = dmve_unfix_tabio(dmve, &tbio, 0);
	if (tmp_status > status)
	    status = tmp_status;
    }

    if (status != E_DB_OK)
	SETDBERR(&dmve->dmve_error, 0, E_DM9630_DMVE_UPDATE_OVFL);

    return(status);
}
Example #9
** Description:
**	MO definitions for Bridge MIB objects.
**
** History:
**      22-Apr-96 (rajus01)
**          Created.
*/
static MO_CLASS_DEF	gcb_classes[] =
{
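    /*
    ** Each initializer below supplies, in order: flags, class name,
    ** object size, permissions, index class name, member offset,
    ** get method, set method, instance (cdata) pointer, and index
    ** method (field order inferred from the entries themselves).
    */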
    {
	0,
	GCB_MIB_CONN_COUNT,
	MO_SIZEOF_MEMBER( GCC_GLOBAL, gcc_conn_ct ),
	MO_READ,
	0,
	CL_OFFSETOF( GCC_GLOBAL, gcc_conn_ct ),
	MOintget,
	MOnoset,
	(PTR) -1,
	MOcdata_index
    },
    {
	0,
	GCB_MIB_IB_CONN_COUNT,
	MO_SIZEOF_MEMBER( GCC_GLOBAL, gcc_ib_conn_ct ),
	MO_READ,
	0,
	CL_OFFSETOF( GCC_GLOBAL, gcc_ib_conn_ct ),
	MOintget,
	MOnoset,
	(PTR) -1,