Example no. 1
0
/* Parse a comma-separated list of global-name selections out of cli_buff and fold each one,
 * as a (begin,end) name range, into the gmap array via global_map().  Items may be surrounded
 * by double quotes and may optionally start with '^'; a bare name selects that one global, a
 * lone "*" selects everything, "name*" selects every global with that prefix, and "beg:end"
 * selects an explicit range.  Any syntax error issues ERR_SELECTSYNTAX (tagged with opname)
 * and exits via ERR_MUNOACTION.
 * NOTE(review): this excerpt is truncated just after the freeze hash-table initialization, so
 * the use of n_len, gl_head and the reg_max_rec/reg_max_key/reg_max_blk outputs (and of several
 * locals declared below) is not visible here -- presumably they are filled in by the missing
 * remainder of the function; confirm against the full source.
 */
void gv_select(char *cli_buff, int n_len, boolean_t freeze, char opname[], glist *gl_head,
	       int *reg_max_rec, int *reg_max_key, int *reg_max_blk)
{
	bool				stashed = FALSE;
	int				num_quote, len, gmap_size, new_gmap_size, estimated_entries, count, rslt;
	char				*ptr, *ptr1, *c;
	mstr				gmap[512], *gmap_ptr, *gmap_ptr_base, gmap_beg, gmap_end;
	mval				val, curr_gbl_name;
	glist				*gl_tail, *gl_ptr;
#ifdef GTM64
	hash_table_int8	        	ext_hash;
	ht_ent_int8                   	*tabent;
#else
	hash_table_int4	        	ext_hash;
	ht_ent_int4                   	*tabent;
#endif /* GTM64 */

	error_def(ERR_FREEZE);
	error_def(ERR_DBRDONLY);
	error_def(ERR_SELECTSYNTAX);
	error_def(ERR_MUNOFINISH);
	error_def(ERR_MUNOACTION);
	error_def(ERR_FREEZECTRL);

	memset(gmap, 0, SIZEOF(gmap));
	gmap_size = SIZEOF(gmap) / SIZEOF(gmap[0]);
	gmap_ptr_base = &gmap[0];
	/* "estimated_entries" is a conservative estimate of the # of entries that could be used up in the gmap array */
	estimated_entries = 1;	/* take into account the NULL gmap entry at the end of the array */
	for (ptr = cli_buff; *ptr; ptr = ptr1)
	{
		/* Isolate the next comma-separated item: on exit [ptr, ptr+len) is the item and
		 * ptr1 points at the start of the following item (or at the terminating NUL).
		 */
		for (ptr1 = ptr; ; ptr1++)
		{
			if (',' == *ptr1)
			{
				len = (int)(ptr1 - ptr);
				ptr1++;
				break;
			} else if (!*ptr1)
			{
				len = (int)(ptr1 - ptr);
				break;
			}
		}
		gmap_beg.addr = ptr;
		/* Count and strip trailing double quotes; the same number of leading quotes is
		 * required below, so unbalanced quoting is caught as a syntax error.
		 */
		c = gmap_beg.addr + len - 1;
		num_quote = 0;
		while ('"' == *c)
		{
			len--;
			c--;
			num_quote++;
		}
		if (0 >= len)
		{	/* item was empty, or consisted of nothing but quotes */
			gtm_putmsg(VARLSTCNT(4) ERR_SELECTSYNTAX, 2, LEN_AND_STR(opname));
			mupip_exit(ERR_MUNOACTION);
		}
		c = gmap_beg.addr;
		while (0 < num_quote)
		{
			if ('"' == *c)
			{
				c++;
				len--;
			} else
			{
				gtm_putmsg(VARLSTCNT(4) ERR_SELECTSYNTAX, 2, LEN_AND_STR(opname));
				mupip_exit(ERR_MUNOACTION);
			}
			num_quote--;
		}
		gmap_beg.addr = c;
		if ('^' == *c)
		{	/* the leading '^' global sigil is optional; skip it */
			gmap_beg.addr++;
			len--;
		}
		gmap_beg.len = len;
		/* mu_extr_ident presumably scans the leading run of valid identifier characters
		 * and returns a pointer just past it -- TODO confirm against its definition.
		 */
		c = mu_extr_ident(&gmap_beg);
		len -= INTCAST(c - gmap_beg.addr);
		assert(len >= 0);
		if (0 == len)
			gmap_end = gmap_beg;	/* plain name: the range is that single global */
		else if (gmap_beg.len == 1 && '*' == *c)
		{	/* the item is a lone "*": select the full range "%" through "~" */
			gmap_beg.addr = (char*)&percent_lit;
			gmap_beg.len = SIZEOF(percent_lit);
			gmap_end.addr =  (char*)&tilde_lit;
			gmap_end.len = SIZEOF(tilde_lit);
		} else if (1 == len && '*' == *c)
		{	/* "name*" prefix wildcard: range becomes [name, name~] -- the '*' is
			 * overwritten in place with '~' to form the end key.
			 */
			gmap_end = gmap_beg;
			gmap_beg.len--;
			*c = '~';
		} else if (':' != *c)
		{	/* leftover characters that are not the range separator: syntax error */
			gtm_putmsg(VARLSTCNT(4) ERR_SELECTSYNTAX, 2, LEN_AND_STR(opname));
			mupip_exit(ERR_MUNOACTION);
		} else
		{	/* explicit "beg:end" range */
			gmap_beg.len = INTCAST(c - gmap_beg.addr);
			c++;
			gmap_end.addr = c;
			gmap_end.len = len - 1;
			if ('^' == *c)
			{
				gmap_end.addr++;
				gmap_end.len--;
			}
			c = mu_extr_ident(&gmap_end);
			MSTR_CMP(gmap_beg, gmap_end, rslt);
			/* the end name must be entirely a valid identifier and must not
			 * collate before the begin name
			 */
			if (((c - gmap_end.addr) != gmap_end.len) || (0 < rslt))
			{
				gtm_putmsg(VARLSTCNT(4) ERR_SELECTSYNTAX, 2, LEN_AND_STR(opname));
				mupip_exit(ERR_MUNOACTION);
			}
		}
		/* "estimated_entries" is the maximum number of entries that could be used up in the gmap array including the
		 * next global_map call. The actual number of used entries could be much lower than this.
		 * But since determining the actual number would mean scanning the gmap array for the first NULL pointer (a
		 * performance overhead), we do an approximate check instead.
		 */
		estimated_entries += MAX_GMAP_ENTRIES_PER_ITER;
		if (estimated_entries >= gmap_size)
		{	/* Current gmap array does not have enough space. Double size before calling global_map */
			new_gmap_size = gmap_size * 2;	/* double size of gmap array */
			gmap_ptr = (mstr *)malloc(SIZEOF(mstr) * new_gmap_size);
			memcpy(gmap_ptr, gmap_ptr_base, SIZEOF(mstr) * gmap_size);
			if (gmap_ptr_base != &gmap[0])	/* never free the on-stack initial array */
				free(gmap_ptr_base);
			gmap_size = new_gmap_size;
			gmap_ptr_base = gmap_ptr;
		}
		global_map(gmap_ptr_base, &gmap_beg, &gmap_end);
		DEBUG_ONLY(
			/* debug builds verify that the estimate above really did leave room */
			count = 1;
			for (gmap_ptr = gmap_ptr_base; gmap_ptr->addr; gmap_ptr++)
				count++;
			assert(count < gmap_size);
		)
	}
	if (freeze)
	{	/* Freeze requested: initialize a hash table (8-byte keys on 64-bit platforms,
		 * 4-byte otherwise); its population and use lie beyond this truncated excerpt.
		 */
		GTM64_ONLY(init_hashtab_int8(&ext_hash, 0, HASHTAB_COMPACT, HASHTAB_SPARE_TABLE);)
		NON_GTM64_ONLY(init_hashtab_int4(&ext_hash, 0, HASHTAB_COMPACT, HASHTAB_SPARE_TABLE);)
	}
Example no. 2
0
/* Initialize the TP structures we will be using for the successive TP operations */
void gvcst_tp_init(gd_region *greg)
{
	sgm_info		*si;
	sgmnt_addrs		*csa;

	csa = (sgmnt_addrs *)&FILE_INFO(greg)->s_addrs;
	if (NULL == csa->sgm_info_ptr)
	{
		si = csa->sgm_info_ptr = (sgm_info *)malloc(sizeof(sgm_info));
		assert(32768 > sizeof(sgm_info));
		memset(si, 0, sizeof(sgm_info));
		si->tp_hist_size = TP_MAX_MM_TRANSIZE;
		si->cur_tp_hist_size = INIT_CUR_TP_HIST_SIZE;	/* should be very much less than si->tp_hist_size */
		assert(si->cur_tp_hist_size <= si->tp_hist_size);
		si->blks_in_use = (hash_table_int4 *)malloc(sizeof(hash_table_int4));
		init_hashtab_int4(si->blks_in_use, BLKS_IN_USE_INIT_ELEMS);
		/* See comment in tp.h about cur_tp_hist_size for details */
		si->first_tp_hist = si->last_tp_hist =
			(srch_blk_status *)malloc(sizeof(srch_blk_status) * si->cur_tp_hist_size);
		si->cw_set_list = (buddy_list *)malloc(sizeof(buddy_list));
		initialize_list(si->cw_set_list, sizeof(cw_set_element), CW_SET_LIST_INIT_ALLOC);
		si->tlvl_cw_set_list = (buddy_list *)malloc(sizeof(buddy_list));
		initialize_list(si->tlvl_cw_set_list, sizeof(cw_set_element), TLVL_CW_SET_LIST_INIT_ALLOC);
		si->tlvl_info_list = (buddy_list *)malloc(sizeof(buddy_list));
		initialize_list(si->tlvl_info_list, sizeof(tlevel_info), TLVL_INFO_LIST_INIT_ALLOC);
		si->new_buff_list = (buddy_list *)malloc(sizeof(buddy_list));
		initialize_list(si->new_buff_list, SIZEOF(que_ent) + csa->hdr->blk_size, NEW_BUFF_LIST_INIT_ALLOC);
		si->recompute_list = (buddy_list *)malloc(sizeof(buddy_list));
		initialize_list(si->recompute_list, sizeof(key_cum_value), RECOMPUTE_LIST_INIT_ALLOC);
		/* The size of the si->cr_array can go up to TP_MAX_MM_TRANSIZE, but usually is quite less.
		 * Therefore, initially allocate a small array and expand as needed later.
		 */
		if (dba_bg == greg->dyn.addr->acc_meth)
		{
			si->cr_array_size = si->cur_tp_hist_size;
			si->cr_array = (cache_rec_ptr_ptr_t)malloc(sizeof(cache_rec_ptr_t) * si->cr_array_size);
		} else
		{
			si->cr_array_size = 0;
			si->cr_array = NULL;
		}
		si->fresh_start = TRUE;
	} else
		si = csa->sgm_info_ptr;
	si->gv_cur_region = greg;
	si->tp_csa = csa;
	si->tp_csd = csa->hdr;
	si->start_tn = csa->ti->curr_tn;
	if (JNL_ALLOWED(csa))
	{
		si->total_jnl_rec_size = csa->min_total_tpjnl_rec_size;	/* Reinitialize total_jnl_rec_size */
		/* Since the following jnl-mallocs are independent of any dynamically-changeable parameter of the
		 * database, we can as well use the existing malloced jnl structures if at all they exist.
		 */
		if (NULL == si->jnl_tail)
		{
			si->jnl_tail = &si->jnl_head;
			si->jnl_list = (buddy_list *)malloc(sizeof(buddy_list));
			initialize_list(si->jnl_list, sizeof(jnl_format_buffer), JNL_LIST_INIT_ALLOC);
			si->format_buff_list = (buddy_list *)malloc(sizeof(buddy_list));
			/* Minimum value of elemSize is 8 due to alignment requirements of the returned memory location.
			 * Therefore, we request an elemSize of 8 bytes for the format-buffer and will convert as much
			 * bytes as we need into as many 8-byte multiple segments (see code in jnl_format).
			 */
			initialize_list(si->format_buff_list, JFB_ELE_SIZE,
					DIVIDE_ROUND_UP(JNL_FORMAT_BUFF_INIT_ALLOC, JFB_ELE_SIZE));
		}
	} else if (NULL != si->jnl_tail)
	{	/* journaling is currently disallowed although it was allowed (non-zero si->jnl_tail)
		 * during the prior use of this region. Free up unnecessary region-specific structures now.
		 */
		FREEUP_BUDDY_LIST(si->jnl_list);
		FREEUP_BUDDY_LIST(si->format_buff_list);
		si->jnl_tail = NULL;
	}
}