/*
 * Disable one MPC860 hardware instruction breakpoint.
 *
 * which_bp: comparator number, 1..MAX_BREAK_POINTS.
 *
 * Zeroes the selected comparator register and clears the matching
 * compare-type / instruction-watchpoint / enable bits in ICTRL.
 *
 * Returns 0 on success, -1 if which_bp is out of range.
 */
int bedbug860_clear( int which_bp )
{
  /* -------------------------------------------------- */

  /* Reject comparator numbers outside the supported range */
  if(( which_bp < 1 ) || ( which_bp > MAX_BREAK_POINTS ))
  {
    printf( "Invalid break point # (%d)\n", which_bp );
    return -1;
  }

  if( which_bp == 1 )
  {
    SET_CMPA( 0 );
    SET_ICTRL( GET_ICTRL() & ~0x80080800 ); /* clear CTA=Equal,IW0=Match A,SIW0EN */
  }
  else if( which_bp == 2 )
  {
    SET_CMPB( 0 );
    SET_ICTRL( GET_ICTRL() & ~0x10020400 ); /* clear CTB=Equal,IW1=Match B,SIW1EN */
  }
  else if( which_bp == 3 )
  {
    SET_CMPC( 0 );
    SET_ICTRL( GET_ICTRL() & ~0x02008200 ); /* clear CTC=Equal,IW2=Match C,SIW2EN */
  }
  else if( which_bp == 4 )
  {
    SET_CMPD( 0 );
    SET_ICTRL( GET_ICTRL() & ~0x00404100 ); /* clear CTD=Equal,IW3=Match D,SIW3EN */
  }

  return 0;
} /* bedbug860_clear */
/* Arm an MPC860 hardware instruction breakpoint at 'addr'.
 *
 * which_bp: comparator number 1..MAX_BREAK_POINTS, or 0 to have an
 *           unused comparator chosen via the bug_ctx.find_empty callback.
 * addr:     instruction address to match.
 *
 * Returns the breakpoint number actually used, or 0 on failure
 * (no free comparator, or which_bp out of range).
 */
int bedbug860_set( int which_bp, unsigned long addr )
{
  /* -------------------------------------------------- */

  /* Only look if which_bp == 0, else use which_bp */
  /* NOTE: the condition deliberately assigns to which_bp — the free slot
   * returned by find_empty becomes the breakpoint number used below. */
  if(( bug_ctx.find_empty ) && ( !which_bp ) &&
     ( which_bp = (*bug_ctx.find_empty)()) == 0 )
  {
    printf( "All breakpoints in use\n" );
    return 0;
  }

  if( which_bp < 1 || which_bp > MAX_BREAK_POINTS )
  {
    printf( "Invalid break point # %d\n", which_bp );
    return 0;
  }

  /* On the first breakpoint ever set, flip the context flag and OR the
   * debug-enable bit (0x00000004) into the DER register. */
  if( ! bug_ctx.hw_debug_enabled )
  {
    bug_ctx.hw_debug_enabled = 1;
    SET_DER( GET_DER() | 0x00000004 );
  }

  /* Load the chosen comparator with the address and set its compare-type /
   * instruction-watchpoint / enable bits in ICTRL (inverse of bedbug860_clear). */
  switch( which_bp )
  {
  case 1:
    SET_CMPA( addr );
    SET_ICTRL( GET_ICTRL() | 0x80080800 ); /* CTA=Equal,IW0=Match A,SIW0EN */
    break;

  case 2:
    SET_CMPB( addr );
    SET_ICTRL( GET_ICTRL() | 0x10020400 ); /* CTB=Equal,IW1=Match B,SIW1EN */
    break;

  case 3:
    SET_CMPC( addr );
    SET_ICTRL( GET_ICTRL() | 0x02008200 ); /* CTC=Equal,IW2=Match C,SIW2EN */
    break;

  case 4:
    SET_CMPD( addr );
    SET_ICTRL( GET_ICTRL() | 0x00404100 ); /* CTD=Equal,IW3=Match D,SIW3EN */
    break;
  }

  return which_bp;
} /* bedbug860_set */
Exemplo n.º 3
0
/***********************************************************************************************
	Input Parameters:
		cur_level: Working block's level
		d_max_fill: Database fill factor
		i_max_fill: Index fill factor
	Output Parameters:
		blks_created: how many new blocks are created
		lvls_increased: number of levels by which the tree depth increased
	Input/Output Parameters:
		gv_target: History of working block
	Here it is assumed that both i_max_fill and d_max_fill are strictly less than the block size.
	Returns:
		cdb_sc_normal: if successful
		cdb_sc status otherwise
 ************************************************************************************************/
enum cdb_sc mu_split(int cur_level, int i_max_fill, int d_max_fill, int *blks_created, int *lvls_increased)
{
    boolean_t	first_copy, new_rtblk_star_only, create_root = FALSE, split_required, insert_in_left;
    unsigned char	curr_prev_key[MAX_KEY_SZ+1], new_blk1_last_key[MAX_KEY_SZ+1];
    unsigned short  temp_ushort;
    int		rec_size, new_ins_keycmpc, tkeycmpc, new_ances_currkeycmpc, old_ances_currkeycmpc;
    int		tmp_cmpc;
    block_index	left_index, right_index;
    block_offset 	ins_off, ins_off2;
    int		level;
    int		new_ins_keysz, new_ances_currkeysz, new_blk1_last_keysz, newblk2_first_keysz, next_gv_currkeysz;
    int		old_ances_currkeylen, new_ins_keylen, new_ances_currkeylen, tkeylen, newblk2_first_keylen;
    int		old_blk1_last_rec_size, old_blk1_sz, save_blk_piece_len, old_right_piece_len;
    int		delta, max_fill;
    enum cdb_sc	status;
    int		blk_seg_cnt, blk_size, new_leftblk_top_off;
    block_id	allocation_clue;
    sm_uc_ptr_t 	rPtr1, rPtr2, rec_base, key_base, next_gv_currkey,
                    bn_ptr1, bn_ptr2, save_blk_piece,
                    old_blk_after_currec, ances_currkey,
                    old_blk1_base,
                    new_blk1_top, new_blk2_top,
                    new_blk2_frec_base, new_blk2_rem,
                    newblk2_first_key, new_ins_key;
    blk_segment     *bs_ptr1, *bs_ptr2;
    cw_set_element  *cse;
    rec_hdr_ptr_t	star_rec_hdr, new_rec_hdr1a, new_rec_hdr1b, new_rec_hdr2, root_hdr;
    blk_hdr_ptr_t	blk_hdr_ptr;

    blk_size = cs_data->blk_size;
    CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */

    BLK_ADDR(star_rec_hdr, SIZEOF(rec_hdr), rec_hdr);
    star_rec_hdr->rsiz = BSTAR_REC_SIZE;
    SET_CMPC(star_rec_hdr, 0);
    level = cur_level;
    max_fill = (0 == level)? d_max_fill : i_max_fill;

    /*  -------------------
     *  Split working block.
     *  -------------------
     *  new_blk1_last_key = last key of the new working block after split
     *  new_blk1_last_keysz = size of new_blk1_last_key
     *  old_blk1_last_rec_size = last record size of the new working block after split (for old block)
     *  new_blk2_frec_base = base of first record of right block created after split
     *  newblk2_first_key = first key of new block created after split
     *  newblk2_first_keysz = size of newblk2_first_key
     *  new_blk2_rem = pointer to new block to be created after split exclude 1st record header + key
     */
    blk_hdr_ptr = (blk_hdr_ptr_t)(gv_target->hist.h[level].buffaddr);
    old_blk1_base = (sm_uc_ptr_t)blk_hdr_ptr;
    old_blk1_sz = blk_hdr_ptr->bsiz;
    new_blk2_top = old_blk1_base + old_blk1_sz;
    if (cdb_sc_normal != (status = locate_block_split_point (old_blk1_base, level, old_blk1_sz, max_fill,
                                   &old_blk1_last_rec_size, new_blk1_last_key, &new_blk1_last_keysz, &new_leftblk_top_off)))
    {
        assert(t_tries < CDB_STAGNATE);
        return cdb_sc_blkmod;
    }
    if (new_leftblk_top_off + BSTAR_REC_SIZE >= old_blk1_sz)
        /* Avoid split to create a small right sibling. Note this should not happen often when tolerance is high */
        return cdb_sc_oprnotneeded;
    old_right_piece_len = old_blk1_sz - new_leftblk_top_off;
    new_blk2_frec_base = old_blk1_base + new_leftblk_top_off;
    BLK_ADDR(newblk2_first_key, gv_cur_region->max_rec_size + 1, unsigned char);
    READ_RECORD(level, new_blk2_frec_base, tkeycmpc, rec_size, newblk2_first_key, newblk2_first_keylen, status);
    if (cdb_sc_normal != status) /* restart for cdb_sc_starrecord too, because we eliminated the possibility already */
    {
        assert(t_tries < CDB_STAGNATE);
        return cdb_sc_blkmod;
    }
    memcpy(newblk2_first_key, &new_blk1_last_key[0], tkeycmpc); /* copy the compressed key piece */
    new_blk2_rem = new_blk2_frec_base + SIZEOF(rec_hdr) + newblk2_first_keylen;
    newblk2_first_keysz = newblk2_first_keylen + tkeycmpc;

    /* gv_currkey_next_reorg will be saved for next iteration in mu_reorg */
    next_gv_currkey = newblk2_first_key;
    next_gv_currkeysz = newblk2_first_keysz;

    BLK_ADDR(new_rec_hdr1b, SIZEOF(rec_hdr), rec_hdr);
    new_rec_hdr1b->rsiz = rec_size + tkeycmpc;
    SET_CMPC(new_rec_hdr1b, 0);

    /* Create new split piece, we already know that this will not be *-rec only.
     * Note that this has to be done BEFORE modifying working block as building this buffer relies on the
     * working block to be pinned which is possible only if this cw-set-element is created ahead of that
     * of the working block (since order in which blocks are built is the order in which cses are created).
     */
    BLK_INIT(bs_ptr2, bs_ptr1);
    BLK_SEG(bs_ptr2, (sm_uc_ptr_t)new_rec_hdr1b, SIZEOF(rec_hdr));
    BLK_SEG(bs_ptr2, newblk2_first_key, newblk2_first_keysz);
    BLK_SEG(bs_ptr2, new_blk2_rem, new_blk2_top - new_blk2_rem);
    if (!BLK_FINI(bs_ptr2, bs_ptr1))
    {
        assert(t_tries < CDB_STAGNATE);
        return cdb_sc_blkmod;
    }
    allocation_clue = ALLOCATION_CLUE(cs_data->trans_hist.total_blks);
    right_index = t_create(allocation_clue++, (unsigned char *)bs_ptr1, 0, 0, level);
    (*blks_created)++;

    /* Modify working block removing split piece */
    BLK_INIT(bs_ptr2, bs_ptr1);
    if (0 == level)
    {
        BLK_SEG(bs_ptr2, old_blk1_base + SIZEOF(blk_hdr), new_leftblk_top_off - SIZEOF(blk_hdr));
    }
    else
    {
        BLK_SEG(bs_ptr2, old_blk1_base + SIZEOF(blk_hdr),
                new_leftblk_top_off - SIZEOF(blk_hdr) - old_blk1_last_rec_size);
        BLK_SEG(bs_ptr2, (sm_uc_ptr_t)star_rec_hdr, SIZEOF(rec_hdr) );
        BLK_ADDR(bn_ptr1, SIZEOF(block_id), unsigned char);
        memcpy(bn_ptr1, old_blk1_base + new_leftblk_top_off - SIZEOF(block_id), SIZEOF(block_id));
        BLK_SEG(bs_ptr2, bn_ptr1, SIZEOF(block_id));
    }
    if ( !BLK_FINI(bs_ptr2, bs_ptr1))
    {
        assert(t_tries < CDB_STAGNATE);
        return cdb_sc_blkmod;
    }
    t_write(&gv_target->hist.h[level], (unsigned char *)bs_ptr1, 0, 0, level, FALSE, TRUE, GDS_WRITE_KILLTN);

    /*
    ----------------------------------------------------------------------------
    Modify ancestor block for the split in current level.
    new_ins_key = new key to be inserted in parent because of split in child
    new_ins_key will be inserted after gv_target->hist.h[level].prev_rec and
                                before gv_target->hist.h[level].curr_rec
        new_ins_keysz = size of new_ins_key
        Note: A restriction of the algorithm is to have current key and new_ins_key
    	in the same block, either left or, new right block
    ----------------------------------------------------------------------------
    */
    BLK_ADDR(new_ins_key, new_blk1_last_keysz, unsigned char);
    memcpy(new_ins_key, &new_blk1_last_key[0], new_blk1_last_keysz);
    new_ins_keysz = new_blk1_last_keysz;
    for(;;) 	/* ========== loop through ancestors as necessary ======= */
    {
        level ++;
        max_fill = i_max_fill;
        /*
        old_blk_after_currec = remaining of current block after currec
        ances_currkey = old real value of currkey in ancestor block
        */
        blk_hdr_ptr = (blk_hdr_ptr_t)(gv_target->hist.h[level].buffaddr);
        old_blk1_base = (sm_uc_ptr_t)blk_hdr_ptr;
        old_blk1_sz = blk_hdr_ptr->bsiz;
        new_blk2_top = old_blk1_base + old_blk1_sz;
        rec_base = old_blk1_base + gv_target->hist.h[level].curr_rec.offset;
        GET_RSIZ(rec_size, rec_base);
        old_blk_after_currec = rec_base + rec_size;
        old_ances_currkeycmpc = EVAL_CMPC((rec_hdr_ptr_t)rec_base);
        old_ances_currkeylen = rec_size - BSTAR_REC_SIZE;
        if (INVALID_RECORD(level, rec_size,  old_ances_currkeylen, old_ances_currkeycmpc))
        {
            assert(t_tries < CDB_STAGNATE);
            return cdb_sc_blkmod;
        }
        if (0 == old_ances_currkeylen)
        {
            if (0 != old_ances_currkeycmpc)
            {
                assert(t_tries < CDB_STAGNATE);
                return cdb_sc_blkmod;
            }
            new_ances_currkeycmpc = new_ances_currkeylen = 0;
        }
        else
        {
            BLK_ADDR(ances_currkey, gv_cur_region->max_rec_size + 1, unsigned char);
            key_base = rec_base +  SIZEOF(rec_hdr);
        }
        new_ances_currkeysz = old_ances_currkeycmpc + old_ances_currkeylen;
        if (SIZEOF(blk_hdr) != gv_target->hist.h[level].curr_rec.offset) /* cur_rec is not first key */
        {
            if (cdb_sc_normal != (status = gvcst_expand_any_key(old_blk1_base,
                                           old_blk1_base + gv_target->hist.h[level].curr_rec.offset,
                                           &curr_prev_key[0], &rec_size, &tkeylen, &tkeycmpc, NULL)))
            {
                assert(t_tries < CDB_STAGNATE);
                return cdb_sc_blkmod;
            }
            if (old_ances_currkeycmpc)
                memcpy(ances_currkey, &curr_prev_key[0], old_ances_currkeycmpc);
        }
        if (old_ances_currkeylen)
        {
            memcpy(ances_currkey + old_ances_currkeycmpc, key_base, old_ances_currkeylen);
            GET_CMPC(new_ances_currkeycmpc, new_ins_key, ances_currkey);
            new_ances_currkeylen = new_ances_currkeysz - new_ances_currkeycmpc;
        }
        if (SIZEOF(blk_hdr) != gv_target->hist.h[level].curr_rec.offset)
        {
            /* new_ins_key will be inseted after curr_prev_key */
            GET_CMPC(new_ins_keycmpc, &curr_prev_key[0], new_ins_key);
        }
        else
            new_ins_keycmpc = 0; /* new_ins_key will be the 1st key */
        new_ins_keylen = new_ins_keysz - new_ins_keycmpc ;

        delta = BSTAR_REC_SIZE + new_ins_keylen - old_ances_currkeylen + new_ances_currkeylen;
        if (old_blk1_sz + delta > blk_size - cs_data->reserved_bytes) /* split required */
        {
            split_required = TRUE;
            if (level == gv_target->hist.depth)
            {
                create_root = TRUE;
                if (MAX_BT_DEPTH - 1 <= level)  /* maximum level reached */
                    return cdb_sc_maxlvl;
            }
            if (max_fill + BSTAR_REC_SIZE > old_blk1_sz)
            {
                if (SIZEOF(blk_hdr) + BSTAR_REC_SIZE == old_blk1_sz)
                    return cdb_sc_oprnotneeded; /* Improve code to avoid this */
                max_fill = old_blk1_sz - BSTAR_REC_SIZE;
            }
            status = locate_block_split_point(old_blk1_base, level, old_blk1_sz, max_fill,
                                              &old_blk1_last_rec_size, new_blk1_last_key, &new_blk1_last_keysz, &new_leftblk_top_off);
            if (cdb_sc_normal != status || new_leftblk_top_off >= old_blk1_sz
                    || 0 == new_blk1_last_keysz)
            {
                assert(t_tries < CDB_STAGNATE);
                return cdb_sc_blkmod;
            }
            assert(BSTAR_REC_SIZE != old_blk1_last_rec_size);
            old_right_piece_len = old_blk1_sz - new_leftblk_top_off;
            new_blk2_frec_base = new_blk1_top = old_blk1_base + new_leftblk_top_off;
            if (BSTAR_REC_SIZE == old_right_piece_len)
                new_rtblk_star_only = TRUE;
            else
                new_rtblk_star_only = FALSE;
            if (new_leftblk_top_off == gv_target->hist.h[level].curr_rec.offset)
            {
                /* inserted key will be the first record of new right block */
                new_ins_keylen = new_ins_keysz;
                new_ins_keycmpc = 0;
            }
            else
                /* process 1st record of new right block */
            {
                BLK_ADDR(newblk2_first_key, gv_cur_region->max_rec_size + 1, unsigned char);
                READ_RECORD(level, new_blk2_frec_base, tkeycmpc, rec_size,
                            newblk2_first_key, newblk2_first_keylen, status);
                if (cdb_sc_normal == status)
                {
                    memcpy(newblk2_first_key, &new_blk1_last_key[0], tkeycmpc); /* compressed piece */
                    new_blk2_rem =  new_blk2_frec_base + SIZEOF(rec_hdr) + newblk2_first_keylen;
                    newblk2_first_keysz = newblk2_first_keylen + tkeycmpc;
                    BLK_ADDR(new_rec_hdr2, SIZEOF(rec_hdr), rec_hdr);
                    new_rec_hdr2->rsiz = newblk2_first_keysz + BSTAR_REC_SIZE;
                    SET_CMPC(new_rec_hdr2, 0);
                }
                else if (cdb_sc_starrecord != status || !new_rtblk_star_only)
                {
                    assert(t_tries < CDB_STAGNATE);
                    return cdb_sc_blkmod;
                }
            }
            /* else gv_target->hist.h[level].curr_rec will be newblk2_first_key */

            if (new_leftblk_top_off >  gv_target->hist.h[level].curr_rec.offset +
                    old_ances_currkeylen + BSTAR_REC_SIZE)
            {
                /* in this case prev_rec (if exists), new key and curr_rec should go into left block */
                if (new_leftblk_top_off + delta - old_blk1_last_rec_size + BSTAR_REC_SIZE
                        <= blk_size - cs_data->reserved_bytes)
                    insert_in_left = TRUE;
                else
                {
                    /* cannot handle it now */
                    return cdb_sc_oprnotneeded;
                }
            }
            else if (new_leftblk_top_off <  gv_target->hist.h[level].curr_rec.offset +
                     old_ances_currkeylen + BSTAR_REC_SIZE)
            {
                /* if gv_target->hist.h[level].curr_rec is the first key in old_blk1
                   then in new right block,
                   	new_ins_key will be the 1st record key and
                	curr_rec will be 2nd record and
                	there will be no prev_rec in right block.
                   Else (if curr_rec is not first key)
                	there will be some records before new_ins_key, at least prev_rec */
                delta = (int)(BSTAR_REC_SIZE + new_ins_keylen
                              - old_ances_currkeylen + new_ances_currkeylen
                              + ((0 == new_ins_keycmpc) ? 0 : (EVAL_CMPC((rec_hdr_ptr_t)new_blk2_frec_base))));
                if (SIZEOF(blk_hdr) + old_right_piece_len + delta <= blk_size - cs_data->reserved_bytes)
                {
                    insert_in_left = FALSE;
                    if (new_leftblk_top_off + BSTAR_REC_SIZE >= old_blk1_sz)
                    {
                        /* cannot handle it now */
                        return cdb_sc_oprnotneeded;
                    }
                }
                else
                {
                    /* cannot handle it now */
                    return cdb_sc_oprnotneeded;
                }
            }
            else
            {
                /* in this case prev_rec (if exists), new key and curr_rec should go into left block
                	and curr_rec will be the last record (*-key) of left new block */
                delta = BSTAR_REC_SIZE + new_ins_keylen;
                if (new_leftblk_top_off + delta <= blk_size - cs_data->reserved_bytes)
                    insert_in_left = TRUE;
                else
                {
                    /* cannot handle it now */
                    return cdb_sc_oprnotneeded;
                }
            }
        } /* end if split required */
        else
Exemplo n.º 4
0
/* Remove the records lying strictly between low.offset and high.offset from the
 * block described by blkhist, building the replacement block image and queueing
 * it via t_write.
 *
 *   blkhist     : search history entry for the target block (buffer + block number)
 *   level       : level of the block in the tree (0 = data block)
 *   search_key  : key driving the kill; its bytes are used to re-expand the
 *                 compressed key of the first surviving right-hand record
 *   low, high   : search results bracketing the records to delete
 *   right_extra : if set, one extra record past 'high' is also deleted
 *   cseptr      : out — cw-set element created by t_write (NULL if none)
 *
 * Returns cdb_sc_normal on success; cdb_sc_delete_parent when the whole block
 * empties and the parent must drop its pointer; various restart codes
 * (cdb_sc_rmisalign, cdb_sc_blknumerr, cdb_sc_mkblk) on integrity trouble.
 */
enum cdb_sc	gvcst_kill_blk(srch_blk_status	*blkhist,
			       char		level,
			       gv_key  		*search_key,
			       srch_rec_status	low,
			       srch_rec_status	high,
			       boolean_t	right_extra,
			       cw_set_element	**cseptr)
{
	typedef sm_uc_ptr_t		bytptr;

	unsigned short			temp_ushort;
	int4				temp_long;
	int				tmp_cmpc;
	int				blk_size, blk_seg_cnt, lmatch, rmatch, targ_len, prev_len, targ_base, next_rec_shrink,
					temp_int, blkseglen;
	bool				kill_root, first_copy;
	blk_hdr_ptr_t			old_blk_hdr;
	rec_hdr_ptr_t			left_ptr;	/*pointer to record before first record to delete*/
	rec_hdr_ptr_t			del_ptr;	/*pointer to first record to delete*/
	rec_hdr_ptr_t	       		right_ptr;	/*pointer to record after last record to delete*/
	rec_hdr_ptr_t			right_prev_ptr;
	rec_hdr_ptr_t			rp, rp1;	/*scratch record pointer*/
	rec_hdr_ptr_t			first_in_blk, top_of_block, new_rec_hdr, star_rec_hdr;
	blk_segment			*bs1, *bs_ptr;
	block_index			new_block_index;
	unsigned char			*skb;
	static readonly block_id	zeroes = 0;
	cw_set_element			*cse, *old_cse;
	bytptr				curr, prev, right_bytptr;
	off_chain			chain1, curr_chain, prev_chain;
	block_id			blk;
	sm_uc_ptr_t			buffer;
	srch_blk_status			*t1;

	*cseptr = NULL;
	/* Nothing between the bounds: nothing to kill */
	if (low.offset == high.offset)
		return cdb_sc_normal;
	blk = blkhist->blk_num;
	if (dollar_tlevel)
	{	/* In TP a block number may encode a cw-set chain reference (flag bit set);
		 * its cw_index must then be within the current cw-set depth. */
		PUT_LONG(&chain1, blk);
		if ((1 == chain1.flag) && ((int)chain1.cw_index >= sgm_info_ptr->cw_set_depth))
		{
			assert(sgm_info_ptr->tp_csa == cs_addrs);
			assert(FALSE == cs_addrs->now_crit);
			return cdb_sc_blknumerr;
		}
	}
	buffer = blkhist->buffaddr;
	old_blk_hdr = (blk_hdr_ptr_t)buffer;
	kill_root = FALSE;
	blk_size = cs_data->blk_size;
	first_in_blk = (rec_hdr_ptr_t)((bytptr)old_blk_hdr + SIZEOF(blk_hdr));
	top_of_block = (rec_hdr_ptr_t)((bytptr)old_blk_hdr + old_blk_hdr->bsiz);
	left_ptr = (rec_hdr_ptr_t)((bytptr)old_blk_hdr + low.offset);
	right_ptr = (rec_hdr_ptr_t)((bytptr)old_blk_hdr + high.offset);
	if (right_extra && right_ptr < top_of_block)
	{	/* caller asked to delete one extra record past 'high'; remember the
		 * record we step over — its header is needed later for key re-expansion */
		right_prev_ptr = right_ptr;
		GET_USHORT(temp_ushort, &right_ptr->rsiz);
		right_ptr = (rec_hdr_ptr_t)((bytptr)right_ptr + temp_ushort);
	}
	/* Sanity: deletion bounds must lie inside the block and be properly ordered */
	if ((bytptr)left_ptr < (bytptr)old_blk_hdr ||
		(bytptr)right_ptr > (bytptr)top_of_block ||
		(bytptr)left_ptr >= (bytptr)right_ptr)
	{
		assert(CDB_STAGNATE > t_tries);
		return cdb_sc_rmisalign;
	}
	if ((bytptr)left_ptr == (bytptr)old_blk_hdr)
	{	/* deletion starts at the very first record of the block */
		if ((bytptr)right_ptr == (bytptr)top_of_block)
		{	/* ... and runs to the end: the whole block empties */
			if ((bytptr)first_in_blk == (bytptr)top_of_block)
			{
				if (0 != level)
				{
					assert(CDB_STAGNATE > t_tries);
					return cdb_sc_rmisalign;
				}
				return cdb_sc_normal;
			}
			if (!gv_target->hist.h[level + 1].blk_num)
				kill_root = TRUE;
			else
			{	/* We are about to free up the contents of this entire block. If this block corresponded to
				 * a global that has NOISOLATION turned on and has a non-zero recompute list (i.e. some SETs
				 * already happened in this same TP transaction), make sure we disable the NOISOLATION
				 * optimization in this case as that is applicable only if one or more SETs happened in this
				 * data block and NOT if a KILL happens. Usually this is done by a t_write(GDS_WRITE_KILLTN)
				 * call but since in this case the entire block is being freed, "t_write" wont be invoked
				 * so we need to explicitly set GDS_WRITE_KILLTN like t_write would have (GTM-8269).
				 * Note: blkhist->first_tp_srch_status is not reliable outside of TP. Thankfully the recompute
				 * list is also maintained only in case of TP so a check of dollar_tlevel is enough to
				 * dereference both "first_tp_srch_status" and "recompute_list_head".
				 */
				if (dollar_tlevel)
				{
					t1 = blkhist->first_tp_srch_status ? blkhist->first_tp_srch_status : blkhist;
					cse = t1->cse;
					if ((NULL != cse) && cse->recompute_list_head)
						cse->write_type |= GDS_WRITE_KILLTN;
				}
				return cdb_sc_delete_parent;
			}
		}
		del_ptr = first_in_blk;
	} else
	{	/* deletion starts after left_ptr: step over that record to find del_ptr */
		GET_USHORT(temp_ushort, &left_ptr->rsiz);
		del_ptr = (rec_hdr_ptr_t)((bytptr)left_ptr + temp_ushort);
		if ((bytptr)del_ptr <= (bytptr)(left_ptr + 1)  ||  (bytptr)del_ptr > (bytptr)right_ptr)
		{
			assert(CDB_STAGNATE > t_tries);
			return cdb_sc_rmisalign;
		}
	}
	if ((bytptr)del_ptr == (bytptr)right_ptr)
		return cdb_sc_normal;
	lmatch = low.match;
	rmatch = high.match;
	if (level)
	{	/* index block: each doomed record holds a child block_id in its last
		 * SIZEOF(block_id) bytes — recursively free those subtrees */
		for (rp = del_ptr ;  rp < right_ptr ;  rp = rp1)
		{
			GET_USHORT(temp_ushort, &rp->rsiz);
			rp1 = (rec_hdr_ptr_t)((bytptr)rp + temp_ushort);
			if (((bytptr)rp1 < (bytptr)(rp + 1) + SIZEOF(block_id)) ||
				((bytptr)rp1 < buffer) || ((bytptr)rp1 > (buffer + blk_size)))
			{
				assert(CDB_STAGNATE > t_tries);
				return cdb_sc_rmisalign;
			}
			GET_LONG(temp_long, ((bytptr)rp1 - SIZEOF(block_id)));
			if (dollar_tlevel)
			{	/* same chain-reference validation as at function entry */
				chain1 = *(off_chain *)&temp_long;
				if ((1 == chain1.flag) && ((int)chain1.cw_index >= sgm_info_ptr->cw_set_depth))
				{
					assert(sgm_info_ptr->tp_csa == cs_addrs);
					assert(FALSE == cs_addrs->now_crit);
					return cdb_sc_blknumerr;
				}
			}
			gvcst_delete_blk(temp_long, level - 1, FALSE);
		}
	}
	if (kill_root)
	{	/* create an empty data block */
		BLK_INIT(bs_ptr, bs1);
		if (!BLK_FINI(bs_ptr, bs1))
		{
			assert(CDB_STAGNATE > t_tries);
			return cdb_sc_mkblk;
		}
		new_block_index = t_create(blk, (uchar_ptr_t)bs1, 0, 0, 0);
		/* create index block */
		BLK_ADDR(new_rec_hdr, SIZEOF(rec_hdr), rec_hdr);
		new_rec_hdr->rsiz = SIZEOF(rec_hdr) + SIZEOF(block_id);
		SET_CMPC(new_rec_hdr, 0);
		BLK_INIT(bs_ptr, bs1);
		BLK_SEG(bs_ptr, (bytptr)new_rec_hdr, SIZEOF(rec_hdr));
		BLK_SEG(bs_ptr, (bytptr)&zeroes, SIZEOF(block_id));
		if (!BLK_FINI(bs_ptr, bs1))
		{
			assert(CDB_STAGNATE > t_tries);
			return cdb_sc_mkblk;
		}
		cse = t_write(blkhist, (unsigned char *)bs1, SIZEOF(blk_hdr) + SIZEOF(rec_hdr), new_block_index, 1,
			TRUE, FALSE, GDS_WRITE_KILLTN);
		assert(!dollar_tlevel || !cse->high_tlevel);
		*cseptr = cse;
		if (NULL != cse)
			cse->first_off = 0;
		return cdb_sc_normal;
	}
	/* Size the block will have once [del_ptr, right_ptr) is cut out */
	next_rec_shrink = (int)(old_blk_hdr->bsiz + ((bytptr)del_ptr - (bytptr)right_ptr));
	if (SIZEOF(blk_hdr) >= next_rec_shrink)
	{
		assert(CDB_STAGNATE > t_tries);
		return cdb_sc_rmisalign;
	}
	if ((bytptr)right_ptr == (bytptr)top_of_block)
	{
		if (level)
		{	/* the record before the cut becomes the *-key: account for its resize */
			GET_USHORT(temp_ushort, &left_ptr->rsiz);
			next_rec_shrink += SIZEOF(rec_hdr) + SIZEOF(block_id) - temp_ushort;
		}
	} else
	{	/* right_ptr survives: compute how many key bytes (targ_len from search_key,
		 * prev_len from the skipped right_prev record) must be re-expanded because
		 * its compression count was relative to a now-deleted predecessor.
		 * NOTE(review): targ_base derivation from the lesser of the two match values
		 * follows the GDS key-compression scheme — confirm against gvcst_search. */
		targ_base = (rmatch < lmatch) ? rmatch : lmatch;
		prev_len = 0;
		if (right_extra)
		{
			EVAL_CMPC2(right_prev_ptr, tmp_cmpc);
			targ_len = tmp_cmpc - targ_base;
			if (targ_len < 0)
				targ_len = 0;
			temp_int = tmp_cmpc - EVAL_CMPC(right_ptr);
			if (0 >= temp_int)
				prev_len = - temp_int;
			else
			{
				if (temp_int < targ_len)
					targ_len -= temp_int;
				else
					targ_len = 0;
			}
		} else
		{
			targ_len = EVAL_CMPC(right_ptr) - targ_base;
			if (targ_len < 0)
				targ_len = 0;
		}
		next_rec_shrink += targ_len + prev_len;
	}
	/* Assemble the replacement block image: surviving left piece, then the
	 * (possibly re-expanded) surviving right piece */
	BLK_INIT(bs_ptr, bs1);
	first_copy = TRUE;
	blkseglen = (int)((bytptr)del_ptr - (bytptr)first_in_blk);
	if (0 < blkseglen)
	{
		if (((bytptr)right_ptr != (bytptr)top_of_block)  ||  (0 == level))
		{
			BLK_SEG(bs_ptr, (bytptr)first_in_blk, blkseglen);
			first_copy = FALSE;
		} else
		{	/* index block truncated at the end: left_ptr's record must be
			 * rewritten as a *-record (header + block_id only) */
			blkseglen = (int)((bytptr)left_ptr - (bytptr)first_in_blk);
			if (0 < blkseglen)
			{
				BLK_SEG(bs_ptr, (bytptr)first_in_blk, blkseglen);
				first_copy = FALSE;
			}
			BLK_ADDR(star_rec_hdr, SIZEOF(rec_hdr), rec_hdr);
			SET_CMPC(star_rec_hdr, 0);
			star_rec_hdr->rsiz = (unsigned short)(SIZEOF(rec_hdr) + SIZEOF(block_id));
			BLK_SEG(bs_ptr, (bytptr)star_rec_hdr, SIZEOF(rec_hdr));
			GET_USHORT(temp_ushort, &left_ptr->rsiz);
			BLK_SEG(bs_ptr, ((bytptr)left_ptr + temp_ushort - SIZEOF(block_id)), SIZEOF(block_id));
		}
	}
	blkseglen = (int)((bytptr)top_of_block - (bytptr)right_ptr);
	assert(0 <= blkseglen);
	if (0 != blkseglen)
	{	/* right piece survives; next_rec_shrink now reused as the number of key
		 * bytes to splice back into right_ptr's record */
		next_rec_shrink = targ_len + prev_len;
		if (0 >= next_rec_shrink)
		{
			BLK_SEG(bs_ptr, (bytptr)right_ptr, blkseglen);
		} else
		{	/* rebuild right_ptr's record with a smaller compression count and the
			 * re-expanded key bytes taken from search_key / right_prev */
			BLK_ADDR(new_rec_hdr, SIZEOF(rec_hdr), rec_hdr);
			SET_CMPC(new_rec_hdr, EVAL_CMPC(right_ptr) - next_rec_shrink);
			GET_USHORT(temp_ushort, &right_ptr->rsiz);
			new_rec_hdr->rsiz = temp_ushort + next_rec_shrink;
			BLK_SEG(bs_ptr, (bytptr)new_rec_hdr, SIZEOF(rec_hdr));
			if (targ_len)
			{
				BLK_ADDR(skb, targ_len, unsigned char);
				memcpy(skb, &search_key->base[targ_base], targ_len);
				BLK_SEG(bs_ptr, skb, targ_len);
			}
			if (prev_len)
				BLK_SEG(bs_ptr, (bytptr)(right_prev_ptr + 1) , prev_len);
			right_bytptr = (bytptr)(right_ptr + 1);
			blkseglen = (int)((bytptr)top_of_block - right_bytptr);
			if (0 < blkseglen)
			{
				BLK_SEG(bs_ptr, right_bytptr, blkseglen);
			} else
			{
				assert(CDB_STAGNATE > t_tries);
				return cdb_sc_rmisalign;
			}
		}
	}
	if (!BLK_FINI(bs_ptr, bs1))
	{
		assert(CDB_STAGNATE > t_tries);
		return cdb_sc_mkblk;
	}
	cse = t_write(blkhist, (unsigned char *)bs1, 0, 0, level, first_copy, TRUE, GDS_WRITE_KILLTN);
	assert(!dollar_tlevel || !cse->high_tlevel);
	*cseptr = cse;
	if (horiz_growth)
	{	/* horizontal growth: undo info is recorded in the prior-tlevel cse below
		 * before the buffer's chain fields are modified */
		old_cse = cse->low_tlevel;
		assert(old_cse && old_cse->done);
		assert(2 == (SIZEOF(old_cse->undo_offset) / SIZEOF(old_cse->undo_offset[0])));
		assert(2 == (SIZEOF(old_cse->undo_next_off) / SIZEOF(old_cse->undo_next_off[0])));
		assert(!old_cse->undo_next_off[0] && !old_cse->undo_offset[0]);
		assert(!old_cse->undo_next_off[1] && !old_cse->undo_offset[1]);
	}
        if ((NULL != cse)  &&  (0 != cse->first_off))
	{	/* fix up chains in the block to account for deleted records */
		prev = NULL;
		curr = buffer + cse->first_off;
		GET_LONGP(&curr_chain, curr);
		while (curr < (bytptr)del_ptr)
		{	/* follow chain to first deleted record */
			if (0 == curr_chain.next_off)
				break;
			if (right_ptr == top_of_block  &&  (bytptr)del_ptr - curr == SIZEOF(off_chain))
				break;	/* special case described below: stop just before the first deleted record */
			prev = curr;
			curr += curr_chain.next_off;
			GET_LONGP(&curr_chain, curr);
		}
		if (right_ptr == top_of_block  &&  (bytptr)del_ptr - curr == SIZEOF(off_chain))
		{
			/* if the right side of the block is gone and our last chain is in the last record,
			 * terminate the chain and adjust the previous entry to point at the new *-key
			 * NOTE: this assumes there's NEVER a TP delete of records in the GVT
			 */
			assert(0 != level);
			/* store next_off in old_cse before actually changing it in the buffer(for rolling back) */
			if (horiz_growth)
			{
				old_cse->undo_next_off[0] = curr_chain.next_off;
				old_cse->undo_offset[0] = (block_offset)(curr - buffer);
				assert(old_cse->undo_offset[0]);
			}
			curr_chain.next_off = 0;
			GET_LONGP(curr, &curr_chain);
			if (NULL != prev)
			{	/* adjust previous chain next_off to reflect the fact that the record it refers to is now a *-key */
				GET_LONGP(&prev_chain, prev);
				/* store next_off in old_cse before actually changing it in the buffer(for rolling back) */
				if (horiz_growth)
				{
					old_cse->undo_next_off[1] = prev_chain.next_off;
					old_cse->undo_offset[1] = (block_offset)(prev - buffer);
					assert(old_cse->undo_offset[1]);
				}
				prev_chain.next_off = (unsigned int)((bytptr)left_ptr - prev + (unsigned int)(SIZEOF(rec_hdr)));
				GET_LONGP(prev, &prev_chain);
			} else	/* it's the first (and only) one */
				cse->first_off = (block_offset)((bytptr)left_ptr - buffer + SIZEOF(rec_hdr));
		} else if (curr >= (bytptr)del_ptr)
		{	/* may be more records on the right that aren't deleted */
			while (curr < (bytptr)right_ptr)
			{	/* follow chain past last deleted record */
				if (0 == curr_chain.next_off)
					break;
				curr += curr_chain.next_off;
				GET_LONGP(&curr_chain, curr);
			}
			/* prev :   ptr to chain record immediately preceding the deleted area,
			 *	    or 0 if none.
			 *
			 * curr :   ptr to chain record immediately following the deleted area,
			 *	    or to last chain record.
			 */
			if (curr < (bytptr)right_ptr)
			{	/* the former end of the chain is going, going, gone */
				if (NULL != prev)
				{	/* terminate the chain before the delete */
					GET_LONGP(&prev_chain, prev);
					/* store next_off in old_cse before actually changing it in the buffer(for rolling back) */
					if (horiz_growth)
					{
						old_cse->undo_next_off[0] = prev_chain.next_off;
						old_cse->undo_offset[0] = (block_offset)(prev - buffer);
						assert(old_cse->undo_offset[0]);
					}
					prev_chain.next_off = 0;
					GET_LONGP(prev, &prev_chain);
				} else
					cse->first_off = 0;		/* the whole chain is gone */
			} else
			{	/* stitch up the left and right to account for the hole in the middle */
				/* next_rec_shrink is the change in record size due to the new compression count */
				if (NULL != prev)
				{
					GET_LONGP(&prev_chain, prev);
					/* ??? new compression may be less (ie +) so why are negative shrinks ignored? */
					/* store next_off in old_cse before actually changing it in the buffer(for rolling back) */
					if (horiz_growth)
					{
						old_cse->undo_next_off[0] = prev_chain.next_off;
						old_cse->undo_offset[0] = (block_offset)(prev - buffer);
						assert(old_cse->undo_offset[0]);
					}
					prev_chain.next_off = (unsigned int)(curr - prev - ((bytptr)right_ptr - (bytptr)del_ptr)
						+ (next_rec_shrink > 0 ? next_rec_shrink : 0));
					GET_LONGP(prev, &prev_chain);
				} else	/* curr remains first: adjust the head */
					cse->first_off = (block_offset)(curr - buffer - ((bytptr)right_ptr - (bytptr)del_ptr)
						+ (next_rec_shrink > 0 ? next_rec_shrink : 0));
			}
		}
	}
	horiz_growth = FALSE;
	return cdb_sc_normal;
}