Example #1
tagi_t *tl_vlist2(tag_type_t tag, tag_value_t value, va_list ap)
{
    tagi_t *t, *rv;
    tagi_t tagi[1];
    size_t size;

    tagi->t_tag = tag, tagi->t_value = value;

    if (!t_end(tagi)) {
        va_list aq;
        va_copy(aq, ap);
        size = sizeof(tagi) + tl_vlen(aq);
        va_end(aq);
    }
    else
        size = sizeof(tagi);

    t = rv = malloc(size);

    for (; t;) {
        *t++ = *tagi;

        if (t_end(tagi))
            break;

        tagi->t_tag = va_arg(ap, tag_type_t);
        tagi->t_value = va_arg(ap, tag_value_t);
    }

    assert((char *)rv + size == (char *)t);

    return rv;
}
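For context, tl_vlist2() above is designed to sit behind a variadic front end that reads the first tag/value pair from named parameters and forwards the rest as a va_list. A minimal sketch of such a wrapper follows; the name tl_list_example() and its exact calling convention are assumptions for illustration, not necessarily the library's real entry point.

#include <stdarg.h>

/* Hypothetical variadic front end for tl_vlist2(): the first pair comes from
 * the named parameters, the remaining pairs travel in the va_list. */
tagi_t *tl_list_example(tag_type_t tag, tag_value_t value, ...)
{
    va_list ap;
    tagi_t *rv;

    va_start(ap, value);
    rv = tl_vlist2(tag, value, ap);
    va_end(ap);

    return rv;
}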
Example #2
int main(int argc, char **argv)
{
	int ret, r2;

	runcmd_init();
	t_set_colors(0);
	t_start("exec output comparison");
	{
		int i;
		char *out = calloc(1, BUF_SIZE);
		for (i = 0; cases[i].input != NULL; i++) {
			memset(out, 0, BUF_SIZE);
			int pfd[2] = {-1, -1}, pfderr[2] = {-1, -1};
			int fd;
			char *cmd;
			asprintf(&cmd, "/bin/echo -n %s", cases[i].input);
			fd = runcmd_open(cmd, pfd, pfderr, NULL);
			read(pfd[0], out, BUF_SIZE);
			ok_str(cases[i].output, out, "Echoing a command should give expected output");
			close(pfd[0]);
			close(pfderr[0]);
			close(fd);
		}
	}
	ret = t_end();
	t_reset();
	t_start("anomaly detection");
	{
		int i;
		for (i = 0; anomaly[i].cmd; i++) {
			int out_argc;
			char *out_argv[256];
			int result = runcmd_cmd2strv(anomaly[i].cmd, &out_argc, out_argv);
			ok_int(result, anomaly[i].ret, anomaly[i].cmd);
		}
	}
	r2 = t_end();
	ret = r2 ? r2 : ret;
	t_reset();
	t_start("argument splitting");
	{
		int i;
		for (i = 0; parse_case[i].cmd; i++) {
			int x, out_argc;
			char *out_argv[256];
			int result = runcmd_cmd2strv(parse_case[i].cmd, &out_argc, out_argv);
			out_argv[out_argc] = NULL;
			ok_int(result, 0, parse_case[i].cmd);
			ok_int(out_argc, parse_case[i].argc_exp, parse_case[i].cmd);
			for (x = 0; x < parse_case[i].argc_exp && out_argv[x]; x++) {
				ok_str(parse_case[i].argv_exp[x], out_argv[x], "argv comparison test");
			}
		}
	}

	r2 = t_end();
	return r2 ? r2 : ret;
}
Example #3
/** Move a tag list.
 *
 * The function tl_tmove() moves the tag list arguments to @a dst.  The @a
 * dst buffer must be big enough for all the arguments.
 *
 * @param dst   pointer to the destination buffer
 * @param size  sizeof @a dst
 * @param t_tag,t_value,... tag list
 *
 * @return
 * The function tl_tmove() returns the number of tag list items initialized.
 */
size_t tl_tmove(tagi_t *dst, size_t size,
                tag_type_t t_tag, tag_value_t t_value, ...)
{
    size_t n = 0, N = size / sizeof(tagi_t);
    tagi_t tagi[1];
    va_list ap;

    va_start(ap, t_value);

    tagi->t_tag = t_tag, tagi->t_value = t_value;

    for (;;) {
        assert((size_t)((char *)&dst[n] - (char *)dst) < size);
        if (n < N)
            dst[n] = *tagi;
        n++;
        if (t_end(tagi))
            break;

        tagi->t_tag = va_arg(ap, tag_type_t);
        tagi->t_value = va_arg(ap, tag_value_t);
    }

    va_end(ap);

    return n;
}
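A hedged usage sketch for the contract documented above: @a dst must have room for every item, including the terminating one, and the return value counts the items copied. TAG_EXAMPLE() and TAG_END() are placeholders here, assumed to expand to a tag_type_t/tag_value_t pair and to the terminating pair that t_end() recognizes; they are not taken from the code above.

/* Sketch only: the tag macros are hypothetical stand-ins. */
void tl_tmove_usage_sketch(void)
{
    tagi_t dst[4];
    size_t n;

    /* Two ordinary items plus the terminator; dst has room for all three. */
    n = tl_tmove(dst, sizeof dst, TAG_EXAMPLE(1), TAG_EXAMPLE(2), TAG_END());
    (void)n;    /* n is the number of items copied, terminator included */
}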
int main(int argc, char **argv)
{
	unsigned long k;
	struct test_data *td;
	t_set_colors(0);
	t_start("fanout tests");

	run_tests(10, 64);
	run_tests(512, 64);
	run_tests(64, 64);
	run_tests(511, 17);

	destroyed = 0;
	fot = fanout_create(512);
	ok_int(fanout_remove(fot, 12398) == NULL, 1,
	       "remove on empty table must yield NULL");
	ok_int(fanout_get(fot, 123887987) == NULL, 1,
	       "get on empty table must yield NULL");
	for (k = 0; k < 16385; k++) {
		struct test_data *tdata = calloc(1, sizeof(*td));
		tdata->key = k;
		asprintf(&tdata->name, "%lu", k);
		fanout_add(fot, k, tdata);
	}
	td = fanout_get(fot, k - 1);
	ok_int(td != NULL, 1, "get must get what add inserts");
	ok_int(fanout_remove(fot, k + 1) == NULL, 1,
	       "remove on non-inserted key must yield NULL");
	ok_int(fanout_get(fot, k + 1) == NULL, 1,
	       "get on non-inserted must yield NULL");
	fanout_destroy(fot, pdest);
	ok_int((int)destroyed, (int)k, "destroy counter while free()'ing");

	return t_end();
}
Example #5
void MInput::flush(void)
{
	// keys
	map<string, int>::iterator
		mit (m_keys.begin()),
		mend(m_keys.end());

	for(; mit!=mend; mit++)
	{
		if(mit->second == 1)
			mit->second = 2;
		else if(mit->second == 3)
			mit->second = 0;
	}
    
	// axis
	unsigned int a, aSize = m_axisToFlush.size();
	for(a=0; a<aSize; a++)
		(*m_axisToFlush[a]) = 0;
	
    // touches
    map<int, TouchData>::iterator
		t_it(m_touches.begin()),
		t_end(m_touches.end());
    
    for(; t_it!=t_end; t_it++)
    {
		TouchData* data = &(t_it->second);
        data->phase = M_TOUCH_NONE;
        data->touchPoint = MVector2(0.0f);
    }
}
Example #6
int main(int argc, char **argv)
{
	unsigned int i;
	struct strcode sc[] = {
		ADDSTR("\n"),
		ADDSTR("\0\0"),
		ADDSTR("XXXxXXX"),
		ADDSTR("LALALALALALALAKALASBALLE\n"),
	};

	t_set_colors(0);
	t_start("iocache_use_delim() test");
	for (i = 0; i < ARRAY_SIZE(sc); i++) {
		t_start("Testing delimiter of len %d", sc[i].len);
		test_delimiter(sc[i].str, sc[i].len);
		t_end();
	}

	return t_end();
}
Example #7
int mu_extr_getblk(unsigned char *ptr)
{
	error_def(ERR_GVGETFAIL);
	enum cdb_sc	status;
	rec_hdr_ptr_t	rp;
	bool		two_histories, end_of_tree;
	blk_hdr_ptr_t	bp;
	srch_blk_status	*bh;
	srch_hist	*rt_history;

	t_begin(ERR_GVGETFAIL, FALSE);
	for (;;)
	{
		if (cdb_sc_normal != (status = gvcst_search(gv_currkey, NULL)))
		{
			t_retry(status);
			continue;
		}
		end_of_tree = two_histories
			    = FALSE;
		bh = gv_target->hist.h;
		rp = (rec_hdr_ptr_t)(bh->buffaddr + bh->curr_rec.offset);
		bp = (blk_hdr_ptr_t)bh->buffaddr;
		if (rp >= (rec_hdr_ptr_t)CST_TOB(bp))
		{
			rt_history = gv_target->alt_hist;
			if (cdb_sc_normal == (status = gvcst_rtsib(rt_history, 0)))
			{
				two_histories = TRUE;
				if (cdb_sc_normal != (status = gvcst_search_blk(gv_currkey, rt_history->h)))
				{
					t_retry(status);
					continue;
				}
				bp = (blk_hdr_ptr_t)rt_history->h[0].buffaddr;
			} else if (cdb_sc_endtree == status)
					end_of_tree = TRUE;
			else
			{
				t_retry(status);
				continue;
			}
		}
		memcpy(ptr, bp, bp->bsiz);
		if (t_end(&gv_target->hist, two_histories ? rt_history : NULL) != 0)
		{
			if (two_histories)
				memcpy(gv_target->hist.h, rt_history->h, sizeof(srch_blk_status) * (rt_history->depth + 1));
			return !end_of_tree;
		}
	}
}
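Most of the database examples in this listing share the restartable-transaction shape seen above: open the transaction, build one or two search histories, route any validation failure through t_retry() and restart, and treat a non-zero return from t_end() as a validated read. The following is a minimal, self-contained sketch of that control flow using hypothetical stand-ins; none of these stubs are the real declarations used by the code above.

/* Hypothetical stand-ins for the primitives used in the examples. */
typedef enum { cdb_sc_normal_stub, cdb_sc_restart_stub } cdb_sc_stub_t;
typedef unsigned long trans_num_stub_t;

static void t_begin_stub(void) { /* open a non-TP transaction */ }
static void t_retry_stub(cdb_sc_stub_t status) { (void)status; /* restart bookkeeping */ }
static cdb_sc_stub_t search_stub(void) { return cdb_sc_normal_stub; }
static trans_num_stub_t t_end_stub(void) { return 1; /* non-zero: read validated */ }

/* The control-flow skeleton shared by mu_extr_getblk(), gvcst_get(), etc. */
int retry_skeleton(void)
{
    cdb_sc_stub_t status;

    t_begin_stub();
    for (;;) {
        if (cdb_sc_normal_stub != (status = search_stub())) {
            t_retry_stub(status);   /* restart the whole attempt */
            continue;
        }
        /* ... read from the blocks recorded in the search history ... */
        if (0 != t_end_stub())
            return 1;               /* validation succeeded; result usable */
        /* t_end() returned 0: the transaction restarted; loop and try again */
    }
}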
Example #8
/** Calculate length of a tag list with a @c va_list. */
size_t tl_vlen(va_list ap)
{
    size_t len = 0;
    tagi_t tagi[2] = {{ NULL }};

    do {
        tagi->t_tag = va_arg(ap, tag_type_t );
        tagi->t_value = va_arg(ap, tag_value_t);
        len += sizeof(tagi_t);
    } while (!t_end(tagi));

    return len;
}
Example #9
/* create a biarc for a bezier curve.
 *
 * extends the tangent lines to the bezier curve at its first and last control
 * points, and intersects them to find a third point.
 * the biarc passes through the first and last control points, and the incenter
 * of the circle defined by the first, last and intersection points.
 */
HIDDEN ON_Arc
make_biarc(const ON_BezierCurve& bezier)
{
    ON_2dPoint isect, arc_pt;
    ON_2dPoint p_start(bezier.PointAt(0)), p_end(bezier.PointAt(1.0));
    ON_2dVector t_start(bezier.TangentAt(0)), t_end(bezier.TangentAt(1.0));
    ON_Ray r_start(p_start, t_start), r_end(p_end, t_end);

    r_start.IntersectRay(r_end, isect);
    arc_pt = incenter(p_start, p_end, isect);

    return ON_Arc(p_start, arc_pt, p_end);
}
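The comment above states the construction in OpenNURBS terms; the underlying 2-D geometry is small enough to sketch without those types. A hedged, self-contained version of the same two steps (tangent-line intersection, then the incenter of the triangle formed by the endpoints and that intersection, computed as the side-length-weighted average of the vertices):

#include <math.h>

typedef struct { double x, y; } pt2;

static double dist2d(pt2 a, pt2 b)
{
    return hypot(a.x - b.x, a.y - b.y);
}

/* Incenter of triangle (a, b, c): each vertex weighted by the length of the
 * side opposite it. */
static pt2 incenter2d(pt2 a, pt2 b, pt2 c)
{
    double wa = dist2d(b, c), wb = dist2d(c, a), wc = dist2d(a, b);
    double w = wa + wb + wc;
    pt2 r = { (wa * a.x + wb * b.x + wc * c.x) / w,
              (wa * a.y + wb * b.y + wc * c.y) / w };
    return r;
}

/* Intersection of the two tangent lines p0 + t*d0 and p1 + s*d1; assumes the
 * tangents are not parallel, as make_biarc() above does implicitly. */
static pt2 line_isect2d(pt2 p0, pt2 d0, pt2 p1, pt2 d1)
{
    double den = d0.x * d1.y - d0.y * d1.x;
    double t = ((p1.x - p0.x) * d1.y - (p1.y - p0.y) * d1.x) / den;
    pt2 r = { p0.x + t * d0.x, p0.y + t * d0.y };
    return r;
}

The arc through p_start, incenter2d(p_start, p_end, line_isect2d(...)), and p_end is what the ON_Arc three-point constructor builds above.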
Example #10
void prof_pmk(char *essid)
{
  TIME_STRUCT p1,p2;

  char key[16][128];
  unsigned char pmk_sol[16][40];
  unsigned char pmk_fast[16][40];

  int i,j;
  for(i=0;i<16;i++)
    {
      strcpy(key[i],"atest");
      key[i][0]+=i;
    }

  p1 = t_start();
  for(i=0;i<16;i++)
    calc_pmk(key[i],essid,pmk_sol[i]); // compute the PMK from the key and the ESSID
  t_end(&p1);

  p2 = t_start();
  calc_16pmk(key,essid,pmk_fast);
  t_end(&p2);

  //diff
  for(i=0;i<16;i++)
    {
      if(memcmp(pmk_sol[i],pmk_fast[i],sizeof(pmk_sol[i])) != 0)
	{
	  printf("* %d wrong case (key:%s)\n",i,key[i]);
	  dump_key("pmk_sol",pmk_sol[i],sizeof(pmk_sol[i]));
	  dump_key("pmk_fst",pmk_fast[i],sizeof(pmk_fast[i]));	  
	}
    }  
  printf("original : %0.2lf ms\n",t_get(p1)/1000);
  printf("simd ver : %0.2lf ms\n",t_get(p2)/1000);
  printf("performance : x%0.2lf\n",t_get(p1)/t_get(p2));
}
Example #11
int main(int argc, char **argv)
{
	int listen_fd, flags, sockopt = 1;
	struct sockaddr_in sain;
	int error;
	const char *err_msg;

	t_set_colors(0);
	t_start("iobroker ipc test");

	error = iobroker_get_max_fds(NULL);
	ok_int(error, IOBROKER_ENOSET, "test errors when passing null");

	err_msg = iobroker_strerror(error);
	test(err_msg && !strcmp(err_msg, iobroker_errors[(~error) + 1].string), "iobroker_strerror() returns the right string");

	iobs = iobroker_create();
	error = iobroker_get_max_fds(iobs);
	test(iobs && error >= 0, "max fd's for real iobroker set must be > 0");

	listen_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	flags = fcntl(listen_fd, F_GETFD);
	flags |= FD_CLOEXEC;
	fcntl(listen_fd, F_SETFD, flags);

	(void)setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &sockopt, sizeof(sockopt));

	memset(&sain, 0, sizeof(sain));
	sain.sin_port = htons(9123);
	sain.sin_family = AF_INET;
	bind(listen_fd, (struct sockaddr *)&sain, sizeof(sain));
	listen(listen_fd, 128);
	iobroker_register(iobs, listen_fd, iobs, listen_handler);

	if (argc == 1)
		conn_spam(&sain);

	for (;;) {
		iobroker_poll(iobs, -1);
		if (iobroker_get_num_fds(iobs) <= 1) {
			break;
		}
	}

	iobroker_close(iobs, listen_fd);
	iobroker_destroy(iobs, 0);

	t_end();
	return 0;
}
Example #12
void TextRenderer::fillVerts(const std::string &text,
                             const vec2 &origin,
                             const vec2 &scale,
                             std::vector<float> &coords, 
                             std::vector<float> &tcs) 
{
  const int ROWS = 16;
  const int COLS = 16;

  const int WIDTH = 256/COLS * scale.x;
  const int HEIGHT = 256/ROWS * scale.y;

  vec2 offset = origin;
  offset.y -= HEIGHT / 2;
  offset.x -= (WIDTH * text.size()) / 4;
  
  
  for (size_t i = 0; i < text.size(); ++i) {
    char ascii = text[i];
    int map_x = ascii % COLS;
    int map_y = ascii / ROWS;

    vec2 c_start = offset;
    vec2 c_end = offset + vec2(WIDTH, HEIGHT);
    vec2 t_start(static_cast<float>(map_x) / COLS /*+ 1.0/256.0*/,
                 static_cast<float>(map_y) / ROWS /*+ 1.0/256.0*/);
    vec2 t_end(static_cast<float>(map_x + 1) / COLS /*+ 1.0/256.0*/,
               static_cast<float>(map_y + 1) / ROWS /*+ 1.0/256.0*/);

    coords.push_back(c_start.x);
    coords.push_back(c_start.y);
    coords.push_back(c_end.x);
    coords.push_back(c_start.y);
    coords.push_back(c_end.x);
    coords.push_back(c_end.y);
    coords.push_back(c_start.x);
    coords.push_back(c_end.y);
    
    tcs.push_back(t_start.x);
    tcs.push_back(t_start.y);
    tcs.push_back(t_end.x);
    tcs.push_back(t_start.y);
    tcs.push_back(t_end.x);
    tcs.push_back(t_end.y);
    tcs.push_back(t_start.x);
    tcs.push_back(t_end.y);
    
    offset.x += static_cast<float>(WIDTH) / 2.0f;
  }
}
Example #13
/** Convert va_list to tag list */
tagi_t *tl_vlist(va_list ap)
{
    tagi_t *t, *rv;
    va_list aq;

    va_copy(aq, ap);
    rv = malloc(tl_vlen(aq));
    va_end(aq);

    for (t = rv; t; t++) {
        t->t_tag = va_arg(ap, tag_type_t);
        t->t_value = va_arg(ap, tag_value_t);

        if (t_end(t))
            break;
    }

    return rv;
}
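The allocation above is exact because tl_vlen() (Example #8) counts every item up to and including the terminating one, walking the same va_list shape that the copy loop consumes afterwards. A hedged sketch of a helper tying the two together; the name tl_vdup_example() and the size_out parameter are assumptions for illustration:

#include <stdarg.h>
#include <stddef.h>

/* Hypothetical helper: report the byte size of a tag list and duplicate it,
 * mirroring how tl_vlist() pairs tl_vlen() with its copy loop.  The caller
 * frees the returned list. */
tagi_t *tl_vdup_example(va_list ap, size_t *size_out)
{
    va_list aq;

    va_copy(aq, ap);          /* tl_vlen() consumes its own copy ... */
    *size_out = tl_vlen(aq);
    va_end(aq);

    return tl_vlist(ap);      /* ... tl_vlist() consumes the original */
}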
Example #14
void gvcst_root_search(void)
{
	srch_blk_status	*h0;
	uchar_ptr_t	c, c1;
	sm_uc_ptr_t	rp;
	unsigned short	rlen, hdr_len;
	uchar_ptr_t	subrec_ptr;
	enum cdb_sc	status;
	boolean_t	gbl_target_was_set;
	gv_namehead	*save_targ;
	mname_entry	*gvent;
	int		altkeylen;

	assert((dba_bg == gv_cur_region->dyn.addr->acc_meth) || (dba_mm == gv_cur_region->dyn.addr->acc_meth));
	assert(gv_altkey->top == gv_currkey->top);
	assert(gv_altkey->top == gv_keysize);
	assert(gv_currkey->end < gv_currkey->top);
	for (c = gv_altkey->base, c1 = gv_currkey->base;  *c1;)
		*c++ = *c1++;
	*c++ = 0;
	*c = 0;
	gv_altkey->end = c - gv_altkey->base;
	assert(gv_altkey->end < gv_altkey->top);
	assert(gv_target != cs_addrs->dir_tree);
	save_targ = gv_target;
	/* Check if "gv_target->gvname" matches "gv_altkey->base". If not, there is a name mismatch (out-of-design situation).
	 * This check is temporary until we catch the situation that caused D9H02-002641 */
	/* --- Check BEGIN --- */
	gvent = &save_targ->gvname;
	altkeylen = gv_altkey->end - 1;
	if (!altkeylen || (altkeylen != gvent->var_name.len) || memcmp(gv_altkey->base, gvent->var_name.addr, gvent->var_name.len))
		GTMASSERT;
	/* --- Check END   --- */
	if (INVALID_GV_TARGET != reset_gv_target)
		gbl_target_was_set = TRUE;
	else
	{
		gbl_target_was_set = FALSE;
		reset_gv_target = save_targ;
	}
	gv_target = cs_addrs->dir_tree;
	if (is_standalone)  /* *&&  (0 != gv_target->clue.end)  &&  (FALSE == is_valid_hist(&gv_target->hist))) */
		gv_target->clue.end = 0;
	T_BEGIN_READ_NONTP_OR_TP(ERR_GVGETFAIL);
	assert(t_tries < CDB_STAGNATE || cs_addrs->now_crit);	/* we better hold crit in the final retry (TP & non-TP) */
	for (;;)
	{
		hdr_len = rlen = 0;
		gv_target = cs_addrs->dir_tree;
		if (dollar_trestart)
			gv_target->clue.end = 0;
		if (cdb_sc_normal == (status = gvcst_search(gv_altkey, 0)))
		{
			if (gv_altkey->end + 1 == gv_target->hist.h[0].curr_rec.match)
			{
				h0 = gv_target->hist.h;
				rp = (h0->buffaddr + h0->curr_rec.offset);
				hdr_len = sizeof(rec_hdr) + gv_altkey->end + 1 - ((rec_hdr_ptr_t)rp)->cmpc;
				GET_USHORT(rlen, rp);
				if (FALSE == (CHKRECLEN(rp, h0->buffaddr, rlen)) || (rlen < hdr_len + sizeof(block_id)))
				{
					gv_target->clue.end = 0;
					RESET_GV_TARGET_LCL(save_targ);
					t_retry(cdb_sc_rmisalign);
					continue;
				}
				GET_LONG(save_targ->root, (rp + hdr_len));
				if (rlen > hdr_len + sizeof(block_id))
				{
					assert(NULL != global_collation_mstr.addr || 0 == global_collation_mstr.len);
					if (global_collation_mstr.len < rlen - (hdr_len + sizeof(block_id)))
					{
						if (NULL != global_collation_mstr.addr)
							free(global_collation_mstr.addr);
						global_collation_mstr.len = rlen - (hdr_len + SIZEOF(block_id));
						global_collation_mstr.addr = (char *)malloc(global_collation_mstr.len);
					}
					/* the memcpy needs to be done here instead of out of for loop for
					 * concurrency consideration. We don't use s2pool because the pointer rp is 64 bits
					 */
					memcpy(global_collation_mstr.addr, rp + hdr_len + sizeof(block_id),
							rlen - (hdr_len + sizeof(block_id)));
				}
				if (0 != dollar_tlevel)
				{
					status = tp_hist(NULL);
					if (cdb_sc_normal != status)
					{
						gv_target->clue.end = 0;
						RESET_GV_TARGET_LCL(save_targ);
						gv_target->root = 0;
						t_retry(status);
						continue;
					}
					break;
				}
			}
			if (0 == dollar_tlevel)
			{
				if ((trans_num)0 != t_end(&gv_target->hist, 0))
					break;
			} else
			{
				status = tp_hist(NULL);
				if (cdb_sc_normal == status)
					break;
				gv_target->clue.end = 0;
				RESET_GV_TARGET_LCL(save_targ);
				gv_target->root = 0;
				t_retry(status);
				continue;
			}
			save_targ->root = 0;
		} else
		{
			gv_target->clue.end = 0;
			RESET_GV_TARGET_LCL(save_targ);
			t_retry(status);
			continue;
		}
	}
	RESET_GV_TARGET_LCL_AND_CLR_GBL(save_targ);
	if (rlen > hdr_len + sizeof(block_id))
	{
		assert(NULL != global_collation_mstr.addr);
		subrec_ptr = get_spec((uchar_ptr_t)global_collation_mstr.addr,
				      (int)(rlen - (hdr_len + sizeof(block_id))), COLL_SPEC);
		if (subrec_ptr)
		{
			gv_target->nct = *(subrec_ptr + COLL_NCT_OFFSET);
			gv_target->act = *(subrec_ptr + COLL_ACT_OFFSET);
			gv_target->ver = *(subrec_ptr + COLL_VER_OFFSET);
		} else
		{
			gv_target->nct = 0;
			gv_target->act = 0;
			gv_target->ver = 0;
		}
	} else
	{
		gv_target->nct = 0;
		gv_target->act = cs_addrs->hdr->def_coll;
		gv_target->ver = cs_addrs->hdr->def_coll_ver;
	}
	if (gv_target->act)
		act_in_gvt();
	assert(gv_target->act || NULL == gv_target->collseq);
	return;
}
Example #15
void	mu_swap_root(glist *gl_ptr, int *root_swap_statistic_ptr)
{
	sgmnt_data_ptr_t	csd;
	sgmnt_addrs		*csa;
	node_local_ptr_t	cnl;
	srch_hist		*dir_hist_ptr, *gvt_hist_ptr;
	gv_namehead		*save_targ;
	block_id		root_blk_id, child_blk_id, free_blk_id;
	sm_uc_ptr_t		root_blk_ptr, child_blk_ptr;
	kill_set		kill_set_list;
	trans_num		curr_tn, ret_tn;
	int			level, root_blk_lvl;
	block_id		save_root;
	boolean_t		tn_aborted;
	unsigned int		lcl_t_tries;
	enum cdb_sc		status;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	assert(mu_reorg_process);
	gv_target = gl_ptr->gvt;
	gv_target->root = 0;		/* reset root so we recompute it in DO_OP_GVNAME below */
	gv_target->clue.end = 0;	/* reset clue since reorg action on later globals might have invalidated it */
	reorg_gv_target->gvname.var_name = gv_target->gvname.var_name;	/* needed by SAVE_ROOTSRCH_ENTRY_STATE */
	dir_hist_ptr = gv_target->alt_hist;
	gvt_hist_ptr = &(gv_target->hist);
	inctn_opcode = inctn_invalid_op;
	DO_OP_GVNAME(gl_ptr);
		/* sets gv_target/gv_currkey/gv_cur_region/cs_addrs/cs_data to correspond to <globalname,reg> in gl_ptr */
	csa = cs_addrs;
	cnl = csa->nl;
	csd = cs_data;	/* Be careful to keep csd up to date. With MM, cs_data can change, and
			 * dereferencing an older copy can result in a SIG-11.
			 */
	if (gv_cur_region->read_only)
		return;	/* Cannot proceed for read-only data files */
	if (0 == gv_target->root)
	{	/* Global does not exist (online rollback). No problem. */
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr);
		return;
	}
	if (dba_mm == csd->acc_meth)
		/* return for now without doing any swapping operation because later mu_truncate
		 * is going to issue the MUTRUNCNOTBG message.
		 */
		return;
	SET_GV_ALTKEY_TO_GBLNAME_FROM_GV_CURRKEY;		/* set up gv_altkey to be just the gblname */
	/* ------------ Swap root block of global variable tree --------- */
	t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
	for (;;)
	{
		curr_tn = csa->ti->curr_tn;
		kill_set_list.used = 0;
		save_root = gv_target->root;
		gv_target->root = csa->dir_tree->root;
		gv_target->clue.end = 0;
		if (cdb_sc_normal != (status = gvcst_search(gv_altkey, dir_hist_ptr)))
		{	/* Assign directory tree path to dir_hist_ptr */
			assert(t_tries < CDB_STAGNATE);
			gv_target->root = save_root;
			t_retry(status);
			continue;
		}
		gv_target->root = save_root;
		gv_target->clue.end = 0;
		if (cdb_sc_normal != (status = gvcst_search(gv_currkey, NULL)))
		{	/* Assign global variable tree path to gvt_hist_ptr */
			assert(t_tries < CDB_STAGNATE);
			t_retry(status);
			continue;
		}
		/* We've already searched the directory tree in op_gvname/t_retry and obtained gv_target->root.
		 * Should restart with gvtrootmod2 if they don't agree. gvcst_root_search is the final arbiter.
		 * Really need that for debug info and also should assert(gv_currkey is global name).
		 */
		root_blk_lvl = gvt_hist_ptr->depth;
		assert(root_blk_lvl > 0);
		root_blk_ptr = gvt_hist_ptr->h[root_blk_lvl].buffaddr;
		root_blk_id = gvt_hist_ptr->h[root_blk_lvl].blk_num;
		assert((CDB_STAGNATE > t_tries) || (gv_target->root == gvt_hist_ptr->h[root_blk_lvl].blk_num));
		free_blk_id = swap_root_or_directory_block(0, root_blk_lvl, dir_hist_ptr, root_blk_id,
				root_blk_ptr, &kill_set_list, curr_tn);
		if (RETRY_SWAP == free_blk_id)
			continue;
		else if (ABORT_SWAP == free_blk_id)
			break;
		update_trans = UPDTRNS_DB_UPDATED_MASK;
		inctn_opcode = inctn_mu_reorg;
		assert(1 == kill_set_list.used);
		need_kip_incr = TRUE;
		if (!csa->now_crit)
			WAIT_ON_INHIBIT_KILLS(cnl, MAXWAIT2KILL);
		DEBUG_ONLY(lcl_t_tries = t_tries);
		TREF(in_mu_swap_root_state) = MUSWP_INCR_ROOT_CYCLE;
		assert(!TREF(in_gvcst_redo_root_search));
		if ((trans_num)0 == (ret_tn = t_end(gvt_hist_ptr, dir_hist_ptr, TN_NOT_SPECIFIED)))
		{
			TREF(in_mu_swap_root_state) = MUSWP_NONE;
			need_kip_incr = FALSE;
			assert(NULL == kip_csa);
			ABORT_TRANS_IF_GBL_EXIST_NOMORE(lcl_t_tries, tn_aborted);
			if (tn_aborted)
			{	/* It is not an error if the global (that once existed) doesn't exist anymore (due to ROLLBACK) */
				gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr);
				return;
			}
			continue;
		}
		TREF(in_mu_swap_root_state) = MUSWP_NONE;
		/* Note that this particular process's csa->root_search_cycle is now behind cnl->root_search_cycle.
		 * This forces a cdb_sc_gvtrootmod2 restart in gvcst_bmp_mark_free below.
		 */
		assert(cnl->root_search_cycle > csa->root_search_cycle);
		gvcst_kill_sort(&kill_set_list);
		GVCST_BMP_MARK_FREE(&kill_set_list, ret_tn, inctn_mu_reorg, inctn_bmp_mark_free_mu_reorg, inctn_opcode, csa);
		DECR_KIP(csd, csa, kip_csa);
		*root_swap_statistic_ptr += 1;
		break;
	}
	/* ------------ Swap blocks in branch of directory tree --------- */
	for (level = 0; level <= MAX_BT_DEPTH; level++)
	{
		t_begin(ERR_MUREORGFAIL, UPDTRNS_DB_UPDATED_MASK);
		for (;;)
		{
			curr_tn = csa->ti->curr_tn;
			kill_set_list.used = 0;
			save_root = gv_target->root;
			gv_target->root = csa->dir_tree->root;
			gv_target->clue.end = 0;
			if (cdb_sc_normal != (status = gvcst_search(gv_altkey, dir_hist_ptr)))
			{	/* assign branch path of directory tree into dir_hist_ptr */
				assert(t_tries < CDB_STAGNATE);
				gv_target->root = save_root;
				t_retry(status);
				continue;
			}
			gv_target->root = save_root;
			gv_target->clue.end = 0;
			if (level >= dir_hist_ptr->depth)
			{	/* done */
				t_abort(gv_cur_region, csa);
				return;
			}
			child_blk_ptr = dir_hist_ptr->h[level].buffaddr;
			child_blk_id = dir_hist_ptr->h[level].blk_num;
			assert(csa->dir_tree->root != child_blk_id);
			free_blk_id = swap_root_or_directory_block(level + 1, level, dir_hist_ptr, child_blk_id,
					child_blk_ptr, &kill_set_list, curr_tn);
			if (level == 0)
				/* set level as 1 to mark this kill set is for level-0 block in directory tree.
				 * The kill-set level later will be used in gvcst_bmp_markfree to assign a special value to
				 * cw_set_element, which will be eventually used by t_end to write the block to snapshot
				 */
				kill_set_list.blk[kill_set_list.used - 1].level = 1;
			if (RETRY_SWAP == free_blk_id)
				continue;
			else if (ABORT_SWAP == free_blk_id)
				break;
			update_trans = UPDTRNS_DB_UPDATED_MASK;
			inctn_opcode = inctn_mu_reorg;
			assert(1 == kill_set_list.used);
			need_kip_incr = TRUE;
			if (!csa->now_crit)
				WAIT_ON_INHIBIT_KILLS(cnl, MAXWAIT2KILL);
			DEBUG_ONLY(lcl_t_tries = t_tries);
			TREF(in_mu_swap_root_state) = MUSWP_DIRECTORY_SWAP;
			if ((trans_num)0 == (ret_tn = t_end(dir_hist_ptr, NULL, TN_NOT_SPECIFIED)))
			{
				TREF(in_mu_swap_root_state) = MUSWP_NONE;
				need_kip_incr = FALSE;
				assert(NULL == kip_csa);
				continue;
			}
			TREF(in_mu_swap_root_state) = MUSWP_NONE;
			gvcst_kill_sort(&kill_set_list);
			TREF(in_mu_swap_root_state) = MUSWP_FREE_BLK;
			GVCST_BMP_MARK_FREE(&kill_set_list, ret_tn, inctn_mu_reorg, inctn_bmp_mark_free_mu_reorg,
					inctn_opcode, csa);
			TREF(in_mu_swap_root_state) = MUSWP_NONE;
			DECR_KIP(csd, csa, kip_csa);
			break;
		}
	}
	return;
}
Example #16
boolean_t	gvcst_query2(void)
{
	boolean_t	found, two_histories;
	enum cdb_sc	status;
	blk_hdr_ptr_t	bp;
	rec_hdr_ptr_t	rp;
	unsigned char	*c1, *c2;
	srch_blk_status	*bh;
	srch_hist	*rt_history;

	T_BEGIN_READ_NONTP_OR_TP(ERR_GVQUERYFAIL);
	assert(t_tries < CDB_STAGNATE || cs_addrs->now_crit);	/* we better hold crit in the final retry (TP & non-TP) */
	for (;;)
	{
		two_histories = FALSE;
#		if defined(DEBUG) && defined(UNIX)
		if (gtm_white_box_test_case_enabled && (WBTEST_ANTIFREEZE_GVQUERYFAIL == gtm_white_box_test_case_number))
		{
			t_retry(cdb_sc_blknumerr);
			continue;
		}
#		endif
		if (cdb_sc_normal == (status = gvcst_search(gv_currkey, 0)))
		{
			found = TRUE;
			bh = &gv_target->hist.h[0];
			rp = (rec_hdr_ptr_t)(bh->buffaddr + bh->curr_rec.offset);
			bp = (blk_hdr_ptr_t)bh->buffaddr;
			if (rp >= (rec_hdr_ptr_t)CST_TOB(bp))
			{
				two_histories = TRUE;
				rt_history = gv_target->alt_hist;
				status = gvcst_rtsib(rt_history, 0);
				if (cdb_sc_endtree == status)		/* end of tree */
				{
					found = FALSE;
					two_histories = FALSE;		/* second history not valid */
				} else  if (cdb_sc_normal != status)
				{
					t_retry(status);
					continue;
				} else
				{
					bh = &rt_history->h[0];
					if (cdb_sc_normal != (status = gvcst_search_blk(gv_currkey, bh)))
					{
						t_retry(status);
						continue;
					}
					rp = (rec_hdr_ptr_t)(bh->buffaddr + bh->curr_rec.offset);
					bp = (blk_hdr_ptr_t)bh->buffaddr;
				}
			}
			if (found)
			{	/* !found indicates that the end of tree has been reached (see call to
				 *  gvcst_rtsib).  If there is no more tree, don't bother doing expansion.
				 */
				status = gvcst_expand_curr_key(bh, gv_currkey, gv_altkey);
				if (cdb_sc_normal != status)
				{
					t_retry(status);
					continue;
				}
			}
			if (!dollar_tlevel)
			{
				if ((trans_num)0 == t_end(&gv_target->hist, !two_histories ? NULL : rt_history, TN_NOT_SPECIFIED))
					continue;
			} else
			{
				status = tp_hist(!two_histories ? NULL : rt_history);
				if (cdb_sc_normal != status)
				{
					t_retry(status);
					continue;
				}
		    	}
			assert(cs_data == cs_addrs->hdr);
			INCR_GVSTATS_COUNTER(cs_addrs, cs_addrs->nl, n_query, 1);
			if (found)
			{
				c1 = &gv_altkey->base[0];
				c2 = &gv_currkey->base[0];
				for ( ; *c2; )
				{
					if (*c2++ != *c1++)
						break;
				}
				if (!*c2 && !*c1)
					return TRUE;
			}
			return FALSE;
		}
		t_retry(status);
	}
}
Example #17
mint	gvcst_data(void)
{
	blk_hdr_ptr_t	bp;
	enum cdb_sc	status;
	mint		val;
	rec_hdr_ptr_t	rp;
	unsigned short	rec_size;
	srch_blk_status *bh;
	srch_hist	*rt_history;
	sm_uc_ptr_t	b_top;

	assert((gv_target->root < cs_addrs->ti->total_blks) || (0 < dollar_tlevel));
	T_BEGIN_READ_NONTP_OR_TP(ERR_GVDATAFAIL);
	assert(t_tries < CDB_STAGNATE || cs_addrs->now_crit);	/* we better hold crit in the final retry (TP & non-TP) */
	for (;;)
	{
		rt_history = gv_target->alt_hist;
		rt_history->h[0].blk_num = 0;
		if ((status = gvcst_search(gv_currkey, NULL)) != cdb_sc_normal)
		{
			t_retry(status);
			continue;
		}
		bh = gv_target->hist.h;
		bp = (blk_hdr_ptr_t)bh->buffaddr;
		rp = (rec_hdr_ptr_t)(bh->buffaddr + bh->curr_rec.offset);
		b_top = bh->buffaddr + bp->bsiz;
		val = 0;
		if (gv_currkey->end + 1 == bh->curr_rec.match)
			val = 1;
		else if (bh->curr_rec.match >= gv_currkey->end)
			val = 10;
		if (1 == val  ||  rp == (rec_hdr_ptr_t)b_top)
		{
			GET_USHORT(rec_size, &rp->rsiz);
			if (rp == (rec_hdr_ptr_t)b_top  ||  (sm_uc_ptr_t)rp + rec_size == b_top)
			{
				if (cdb_sc_endtree != (status = gvcst_rtsib(rt_history, 0)))
				{
					if ((cdb_sc_normal != status)
						|| (cdb_sc_normal != (status = gvcst_search_blk(gv_currkey, rt_history->h))))
					{
						t_retry(status);
						continue;
					}
					if (rt_history->h[0].curr_rec.match >= gv_currkey->end)
						val += 10;
				}
			} else
			{
				if ((sm_uc_ptr_t)rp + rec_size > b_top)
				{
					t_retry(cdb_sc_rmisalign);
					continue;
				}
				rp = (rec_hdr_ptr_t)((sm_uc_ptr_t)rp + rec_size);
				if (rp->cmpc >= gv_currkey->end)
					val += 10;
			}
		}
		if (0 == dollar_tlevel)
		{
			if ((trans_num)0 == t_end(&gv_target->hist, 0 == rt_history->h[0].blk_num ? NULL : rt_history))
				continue;
		} else
		{
			status = tp_hist(0 == rt_history->h[0].blk_num ? NULL : rt_history);
			if (cdb_sc_normal != status)
			{
				t_retry(status);
				continue;
			}
		}
		INCR_GVSTATS_COUNTER(cs_addrs, cs_addrs->nl, n_data, 1);
		return val;
	}
}
Example #18
void crack_wpa(char *fn)
{
  hccap_t hc;
  int res, i, itr;

  unsigned char pmk[128];

  unsigned char pke[100];
  unsigned char ptk[80];
  unsigned char mic[20];

  char key[128]; // passphrase key
  int keylen;

  memset(key,0,sizeof(key));
  strcpy(key,"dekdekdek");

  keylen = strlen(key);
  // load from file
  if((res = hccap_load(fn,&hc)))
    {
      printf("hashcat file load failed! code : %d\n",res);
      return ;
    }
  
  memcpy( pke,"Pairwise key expansion", 23);
  if(memcmp(hc.mac2,hc.mac1,6) < 0) {
    memcpy( pke + 23, hc.mac2, 6);
    memcpy( pke + 29, hc.mac1,6 );
  } else {
    memcpy(pke + 23, hc.mac1, 6);
    memcpy(pke + 29, hc.mac2, 6);
  }
  
  if( memcmp( hc.nonce1,hc.nonce2, 32) < 0 ) {
    memcpy(pke + 35, hc.nonce1,32);
    memcpy(pke + 67, hc.nonce2,32);
  }
  else {
    memcpy(pke + 35, hc.nonce2,32);
    memcpy(pke + 67, hc.nonce1,32);    
  }

  #pragma omp parallel
  #pragma omp master
  printf("Initializing ... %d/%d threads\n",omp_get_num_threads(),omp_get_max_threads());


   prof_pmk(hc.essid);
  return;
  //#pragma omp parallel for private(i) shared(key,hc,pmk,pke,mic)

  {
    char key[16][128];
    unsigned char pmk_sol[16][40];
    unsigned char pmk_fast[16][40];

    TIME_STRUCT p1;

    int i,j;
    for(i=0;i<16;i++)
      {
	strcpy(key[i],"atest");
	key[i][0]+=i;
      }
    p1 = t_start();
#pragma omp parallel for
    for(itr=0;itr<16000;itr+=16)
      {
	calc_16pmk(key,hc.essid,pmk_fast); // compute the PMK values from the keys and the ESSID
	//t_end(&p1);

	//      p2 = t_start();
	for(i=0;i<4;i++)
	  {
	    pke[99] = i;
	    HMAC(EVP_sha1(), pmk, 32, pke, 100, ptk + i * 20, NULL);
	  }
      
	if(hc.keyver == 1)
	  HMAC(EVP_md5(), ptk, 16,hc.eapol,hc.eapol_size,mic,NULL);
	else
	  HMAC(EVP_sha1(), ptk, 16, hc.eapol,hc.eapol_size,mic,NULL);
      
	//      show_wpa_stats(key,keylen,pmk,ptk,mic);
      
	if(memcmp(mic,hc.keymic,16) == 0)
	  {
	    //	  printf("success\n");
	    //	  return ;
	  }
	//      t_end(&p2);
      }
    t_end(&p1);
    printf("time : %0.2lf ms\n",t_get(p1)/1000);
  }
  //  printf("failed\n");
  return;
}
boolean_t	gvcst_order2(void)
{
	blk_hdr_ptr_t	bp;
	boolean_t	found, two_histories;
	enum cdb_sc	status;
	rec_hdr_ptr_t	rp;
	unsigned short	rec_size;
	srch_blk_status	*bh;
	srch_hist	*rt_history;
	sm_uc_ptr_t	c1, c2, ctop, alt_top;
	int		tmp_cmpc;

	T_BEGIN_READ_NONTP_OR_TP(ERR_GVORDERFAIL);
	for (;;)
	{
		assert(t_tries < CDB_STAGNATE || cs_addrs->now_crit);	/* we better hold crit in the final retry (TP & non-TP) */
		two_histories = FALSE;
#if defined(DEBUG) && defined(UNIX)
		if (gtm_white_box_test_case_enabled && (WBTEST_ANTIFREEZE_GVORDERFAIL == gtm_white_box_test_case_number))
		{
			status = cdb_sc_blknumerr;
			t_retry(status);
			continue;
		}
#endif
		if (cdb_sc_normal == (status = gvcst_search(gv_currkey, NULL)))
		{
			found = TRUE;
			bh = gv_target->hist.h;
			rp = (rec_hdr_ptr_t)(bh->buffaddr + bh->curr_rec.offset);
			bp = (blk_hdr_ptr_t)bh->buffaddr;
			if ((rec_hdr_ptr_t)CST_TOB(bp) <= rp)
			{
				two_histories = TRUE;
				rt_history = gv_target->alt_hist;
				status = gvcst_rtsib(rt_history, 0);
				if (cdb_sc_normal == status)
				{
					bh = rt_history->h;
			       		if (cdb_sc_normal != (status = gvcst_search_blk(gv_currkey, bh)))
					{
						t_retry(status);
						continue;
					}
					rp = (rec_hdr_ptr_t)(bh->buffaddr + bh->curr_rec.offset);
					bp = (blk_hdr_ptr_t)bh->buffaddr;
				} else
				{
			  	     	if (cdb_sc_endtree == status)
					{
						found = FALSE;
						two_histories = FALSE;		/* second history not valid */
					} else
					{
						t_retry(status);
						continue;
					}
				}
			}
			if (found)
			{
				assert(gv_altkey->top == gv_currkey->top);
				assert(gv_altkey->top == gv_keysize);
				assert(gv_altkey->end < gv_altkey->top);
				/* store new subscript */
				c1 = gv_altkey->base;
				alt_top = gv_altkey->base + gv_altkey->top - 1;
					/* Make alt_top one less than gv_altkey->top to allow double-null at end of a key-name */
				/* 4/17/96
				 * HP compiler bug work-around.  The original statement was
				 * c2 = (unsigned char *)CST_BOK(rp) + bh->curr_rec.match - rp->cmpc;
				 *
				 * ...but this was sometimes compiled incorrectly (the lower 4 bits
				 * of rp->cmpc, sign extended, were subtracted from bh->curr_rec.match).
				 * I separated out the subtraction of rp->cmpc.
				 *
				 * -VTF.
				 */
				c2 = (sm_uc_ptr_t)CST_BOK(rp) + bh->curr_rec.match;
				memcpy(c1, gv_currkey->base, bh->curr_rec.match);
				c1 += bh->curr_rec.match;
				c2 -= EVAL_CMPC(rp);
				GET_USHORT(rec_size, &rp->rsiz);
				ctop = (sm_uc_ptr_t)rp + rec_size;
				for (;;)
				{
					if (c2 >= ctop  ||  c1 >= alt_top)
					{
						assert(CDB_STAGNATE > t_tries);
						status = cdb_sc_rmisalign;
						goto restart;	/* goto needed because of nested FOR loop */
					}
 					if (0 == (*c1++ = *c2++))
					{
						*c1 = 0;
						break;
					}
				}
				gv_altkey->end = c1 - gv_altkey->base;
				assert(gv_altkey->end < gv_altkey->top);
			}
                        if (!dollar_tlevel)
			{
				if ((trans_num)0 == t_end(&gv_target->hist, two_histories ? rt_history : NULL, TN_NOT_SPECIFIED))
					continue;
			} else
			{
				status = tp_hist(two_histories ? rt_history : NULL);
				if (cdb_sc_normal != status)
				{
					t_retry(status);
					continue;
				}
			}
			assert(cs_data == cs_addrs->hdr);
			INCR_GVSTATS_COUNTER(cs_addrs, cs_addrs->nl, n_order, 1);
			return (found && (bh->curr_rec.match >= gv_currkey->prev));
		}
restart:	t_retry(status);
	}
}
Example #20
void dse_chng_bhead(void)
{
	blk_hdr			new_hdr;
	blk_segment		*bs1, *bs_ptr;
	block_id		blk;
	boolean_t		chng_blk, ismap, was_hold_onto_crit;
	int4			blk_seg_cnt, blk_size;	/* needed for BLK_INIT,BLK_SEG and BLK_FINI macros */
	int4			x;
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t	csd;
	srch_blk_status		blkhist;
	trans_num		tn;
	uint4			mapsize;

	csa = cs_addrs;
        if (gv_cur_region->read_only)
                rts_error_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_DBRDONLY, 2, DB_LEN_STR(gv_cur_region));
	CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
	chng_blk = FALSE;
	if (BADDSEBLK == (blk = dse_getblk("BLOCK", DSEBMLOK, DSEBLKCUR)))		/* WARNING: assignment */
		return;
	csd = csa->hdr;
	assert(csd == cs_data);
	blk_size = csd->blk_size;
	ismap = IS_BITMAP_BLK(blk);
	mapsize = BM_SIZE(csd->bplmap);
	t_begin_crit(ERR_DSEFAIL);
	blkhist.blk_num = blk;
	if (!(blkhist.buffaddr = t_qread(blkhist.blk_num, &blkhist.cycle, &blkhist.cr)))
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(1) ERR_DSEBLKRDFAIL);
	new_hdr = *(blk_hdr_ptr_t)blkhist.buffaddr;
	if (CLI_PRESENT == cli_present("LEVEL"))
	{
		if (!cli_get_hex("LEVEL", (uint4 *)&x))
		{
			t_abort(gv_cur_region, csa);
			return;
		}
		if (ismap && (unsigned char)x != LCL_MAP_LEVL)
		{
			util_out_print("Error: invalid level for a bit map block.", TRUE);
			t_abort(gv_cur_region, csa);
			return;
		}
		if (!ismap && (x < 0 || x > MAX_BT_DEPTH + 1))
		{
			util_out_print("Error: invalid level.", TRUE);
			t_abort(gv_cur_region, csa);
			return;
		}
		new_hdr.levl = (unsigned char)x;
		chng_blk = TRUE;
		if (new_hdr.bsiz < SIZEOF(blk_hdr))
			new_hdr.bsiz = SIZEOF(blk_hdr);
		if (new_hdr.bsiz  > blk_size)
			new_hdr.bsiz = blk_size;
	}
	if (CLI_PRESENT == cli_present("BSIZ"))
	{
		if (!cli_get_hex("BSIZ", (uint4 *)&x))
		{
			t_abort(gv_cur_region, csa);
			return;
		}
		if (ismap && x != mapsize)
		{
			util_out_print("Error: invalid bsiz.", TRUE);
			t_abort(gv_cur_region, csa);
			return;
		} else if (x < SIZEOF(blk_hdr) || x > blk_size)
		{
			util_out_print("Error: invalid bsiz.", TRUE);
			t_abort(gv_cur_region, csa);
			return;
		}
		chng_blk = TRUE;
		new_hdr.bsiz = x;
	}
	if (!chng_blk)
		t_abort(gv_cur_region, csa);
	else
	{
		BLK_INIT(bs_ptr, bs1);
		BLK_SEG(bs_ptr, blkhist.buffaddr + SIZEOF(new_hdr), new_hdr.bsiz - SIZEOF(new_hdr));
		if (!BLK_FINI(bs_ptr, bs1))
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_AIMGBLKFAIL, 3, blk, DB_LEN_STR(gv_cur_region));
			t_abort(gv_cur_region, csa);
			return;
		}
		t_write(&blkhist, (unsigned char *)bs1, 0, 0, new_hdr.levl, TRUE, FALSE, GDS_WRITE_KILLTN);
		BUILD_AIMG_IF_JNL_ENABLED(csd, csa->ti->curr_tn);
		t_end(&dummy_hist, NULL, TN_NOT_SPECIFIED);
	}
	if (CLI_PRESENT == cli_present("TN"))
	{
		if (!cli_get_hex64("TN", &tn))
			return;
		t_begin_crit(ERR_DSEFAIL);
		CHECK_TN(csa, csd, csd->trans_hist.curr_tn);	/* can issue rts_error TNTOOLARGE */
		assert(csa->ti->early_tn == csa->ti->curr_tn);
		if (NULL == (blkhist.buffaddr = t_qread(blkhist.blk_num, &blkhist.cycle, &blkhist.cr)))
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(1) ERR_DSEBLKRDFAIL);
			t_abort(gv_cur_region, csa);
			return;
		}
		if (new_hdr.bsiz < SIZEOF(blk_hdr))
			new_hdr.bsiz = SIZEOF(blk_hdr);
		if (new_hdr.bsiz  > blk_size)
			new_hdr.bsiz = blk_size;
		BLK_INIT(bs_ptr, bs1);
		BLK_SEG(bs_ptr, blkhist.buffaddr + SIZEOF(new_hdr), new_hdr.bsiz - SIZEOF(new_hdr));
		BLK_FINI(bs_ptr, bs1);
		t_write(&blkhist, (unsigned char *)bs1, 0, 0,
			((blk_hdr_ptr_t)blkhist.buffaddr)->levl, TRUE, FALSE, GDS_WRITE_KILLTN);
		/* Pass the desired tn as argument to bg_update/mm_update below */
		BUILD_AIMG_IF_JNL_ENABLED_AND_T_END_WITH_EFFECTIVE_TN(csa, csd, tn, &dummy_hist);
	}
	return;
}
boolean_t	gvcst_gblmod(mval *v)
{
	boolean_t	gblmod, is_dummy;
	enum cdb_sc	status;
	int		key_size,  key_size2, data_len;
	srch_hist	*alt_history;
	blk_hdr_ptr_t	bp;
	rec_hdr_ptr_t	rp;
	unsigned short	match, match2, rsiz, offset_to_value, oldend;
	srch_blk_status	*bh;
	sm_uc_ptr_t	b_top;
	trans_num	tn_to_compare;

	T_BEGIN_READ_NONTP_OR_TP(ERR_GBLMODFAIL);
	assert(t_tries < CDB_STAGNATE || cs_addrs->now_crit);	/* we better hold crit in the final retry (TP & non-TP) */
	for (;;)
	{
		gblmod = TRUE;
		if (cdb_sc_normal == (status = gvcst_search(gv_currkey, NULL)))
		{
			alt_history = gv_target->alt_hist;
			alt_history->h[0].blk_num = 0;

			VMS_ONLY(
				if (cs_addrs->hdr->resync_tn >= ((blk_hdr_ptr_t)gv_target->hist.h[0].buffaddr)->tn)
					gblmod = FALSE;
			)
#			ifdef UNIX
				tn_to_compare = ((blk_hdr_ptr_t)gv_target->hist.h[0].buffaddr)->tn;
				bh = gv_target->hist.h;
				bp = (blk_hdr_ptr_t) bh->buffaddr;
				rp = (rec_hdr_ptr_t) (bh->buffaddr + bh->curr_rec.offset);
				b_top = bh->buffaddr + bp->bsiz;
				GET_USHORT(rsiz, &rp->rsiz);
				key_size = gv_currkey->end + 1;
				data_len = rsiz + EVAL_CMPC(rp) - SIZEOF(rec_hdr) - key_size;
				match = bh->curr_rec.match;
				if (key_size == match)
				{
					if ((0 > data_len) || ((sm_uc_ptr_t)rp + rsiz > b_top))
					{
						status = cdb_sc_rmisalign1;
						t_retry(status);
						continue;
					}
					offset_to_value = SIZEOF(rec_hdr) + key_size - EVAL_CMPC(rp);
					/* If it could be a spanning node, i.e., has special value, then try to get tn from the
					 * block that contains the first special subscript. Since dummy nodes always have the
					 * same value, the tn number is not updated. It is enough to do only the first piece
					 * since all pieces of a spanning node are killed before an update is applied.
					 */
					if (IS_SN_DUMMY(data_len, (sm_uc_ptr_t)rp + offset_to_value))
					{
						oldend = gv_currkey->end;
						APPEND_HIDDEN_SUB(gv_currkey);
						if (cdb_sc_normal == (status = gvcst_search(gv_currkey, alt_history)))
						{
							key_size2 = gv_currkey->end + 1;
							match = alt_history->h[0].curr_rec.match;
							if (key_size2 == match)
								tn_to_compare =  ((blk_hdr_ptr_t)alt_history->h[0].buffaddr)->tn;
						}
						else
						{
							gv_currkey->end = oldend;
							gv_currkey->base[gv_currkey->end - 1] = KEY_DELIMITER;
							gv_currkey->base[gv_currkey->end] = KEY_DELIMITER;
							t_retry(status);
							continue;
						}
						gv_currkey->end = oldend;
						gv_currkey->base[gv_currkey->end - 1] = KEY_DELIMITER;
						gv_currkey->base[gv_currkey->end] = KEY_DELIMITER;
					}
				}
				if (cs_addrs->hdr->zqgblmod_tn > tn_to_compare)
					gblmod = FALSE;
#			endif
			if (!dollar_tlevel)
			{
				if ((trans_num)0 == t_end(&gv_target->hist, 0 == alt_history->h[0].blk_num ? NULL : alt_history,
						TN_NOT_SPECIFIED))
					continue;
			} else
			{
				status = tp_hist(0 == alt_history->h[0].blk_num ? NULL : alt_history);
				if (cdb_sc_normal != status)
				{
					t_retry(status);
					continue;
				}
			}
			return gblmod;
		}
Example #22
boolean_t mu_truncate(int4 truncate_percent)
{
	sgmnt_addrs		*csa;
	sgmnt_data_ptr_t 	csd;
	int			num_local_maps;
	int 			lmap_num, lmap_blk_num;
	int			bml_status, sigkill;
	int			save_errno;
	int			ftrunc_status;
	uint4			jnl_status;
	uint4			old_total, new_total;
	uint4			old_free, new_free;
	uint4			end_blocks;
	int4			blks_in_lmap, blk;
	gtm_uint64_t		before_trunc_file_size;
	off_t			trunc_file_size;
	off_t			padding;
	uchar_ptr_t		lmap_addr;
	boolean_t		was_crit;
	uint4			found_busy_blk;
	srch_blk_status		bmphist;
	srch_blk_status 	*blkhist;
	srch_hist		alt_hist;
	trans_num		curr_tn;
	blk_hdr_ptr_t		lmap_blk_hdr;
	block_id		*blkid_ptr;
	unix_db_info    	*udi;
	jnl_private_control	*jpc;
	jnl_buffer_ptr_t	jbp;
	char			*err_msg;
	intrpt_state_t		prev_intrpt_state;
	off_t			offset;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	csa = cs_addrs;
	csd = cs_data;
	if (dba_mm == csd->acc_meth)
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCNOTBG, 2, REG_LEN_STR(gv_cur_region));
		return TRUE;
	}
	if ((GDSVCURR != csd->desired_db_format) || (csd->blks_to_upgrd != 0))
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCNOV4, 2, REG_LEN_STR(gv_cur_region));
		return TRUE;
	}
	if (csa->ti->free_blocks < (truncate_percent * csa->ti->total_blks / 100))
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_MUTRUNCNOSPACE, 3, REG_LEN_STR(gv_cur_region), truncate_percent);
		return TRUE;
	}
	/* already checked for parallel truncates on this region --- see mupip_reorg.c */
	gv_target = NULL;
	assert(csa->nl->trunc_pid == process_id);
	assert(dba_mm != csd->acc_meth);
	old_total = csa->ti->total_blks;
	old_free = csa->ti->free_blocks;
	sigkill = 0;
	found_busy_blk = 0;
	memset(&alt_hist, 0, SIZEOF(alt_hist)); /* null-initialize history */
	assert(csd->bplmap == BLKS_PER_LMAP);
	end_blocks = old_total % BLKS_PER_LMAP; /* blocks in the last lmap (first one we start scanning) */
	if (0 == end_blocks)
		end_blocks = BLKS_PER_LMAP;
	num_local_maps = DIVIDE_ROUND_UP(old_total, BLKS_PER_LMAP);
	/* ======================================== PHASE 1 ======================================== */
	for (lmap_num = num_local_maps - 1; (lmap_num > 0 && !found_busy_blk); lmap_num--)
	{
		if (mu_ctrly_occurred || mu_ctrlc_occurred)
			return TRUE;
		assert(csa->ti->total_blks >= old_total); /* otherwise, a concurrent truncate happened... */
		if (csa->ti->total_blks != old_total) /* Extend (likely called by mupip extend) -- don't truncate */
		{
			gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_MUTRUNCNOSPACE, 3, REG_LEN_STR(gv_cur_region),
					truncate_percent);
			return TRUE;
		}
		lmap_blk_num = lmap_num * BLKS_PER_LMAP;
		if (csa->nl->highest_lbm_with_busy_blk >= lmap_blk_num)
		{
			found_busy_blk = lmap_blk_num;
			break;
		}
		blks_in_lmap = (lmap_num == num_local_maps - 1) ? end_blocks : BLKS_PER_LMAP;
		/* Loop through non-bitmap blocks of this lmap, do recycled2free */
		DBGEHND((stdout, "DBG:: lmap_num = [%lu], lmap_blk_num = [%lu], blks_in_lmap = [%lu]\n",
			lmap_num, lmap_blk_num, blks_in_lmap));
		for (blk = 1; blk < blks_in_lmap && blk != -1 && !found_busy_blk;)
		{
			t_begin(ERR_MUTRUNCFAIL, UPDTRNS_DB_UPDATED_MASK);
			for (;;) /* retry loop for recycled to free transactions */
			{
				curr_tn = csd->trans_hist.curr_tn;
				/* Read the nth local bitmap into memory */
				bmphist.blk_num = lmap_blk_num;
				bmphist.buffaddr = t_qread(bmphist.blk_num, &bmphist.cycle, &bmphist.cr);
				lmap_blk_hdr = (blk_hdr_ptr_t)bmphist.buffaddr;
				if (!(bmphist.buffaddr) || (BM_SIZE(BLKS_PER_LMAP) != lmap_blk_hdr->bsiz))
				{ /* Could not read the block successfully. Retry. */
					t_retry((enum cdb_sc)rdfail_detail);
					continue;
				}
				lmap_addr = bmphist.buffaddr + SIZEOF(blk_hdr);
				/* starting from the hint (blk itself), find the first busy or recycled block */
				blk = bml_find_busy_recycled(blk, lmap_addr, blks_in_lmap, &bml_status);
				assert(blk < BLKS_PER_LMAP);
				if (blk == -1 || blk >= blks_in_lmap)
				{ /* done with this lmap, continue to next */
					t_abort(gv_cur_region, csa);
					break;
				}
				else if (BLK_BUSY == bml_status || csa->nl->highest_lbm_with_busy_blk >= lmap_blk_num)
				{ /* stop processing blocks... skip ahead to phase 2 */
					found_busy_blk = lmap_blk_num;
					t_abort(gv_cur_region, csa);
					break;
				}
				else if (BLK_RECYCLED == bml_status)
				{ /* Write PBLK records for recycled blocks only if before_image journaling is
				   * enabled. t_end() takes care of checking if journaling is enabled and
				   * writing PBLK record. We have to at least mark the recycled block as free.
				   */
					RESET_UPDATE_ARRAY;
					update_trans = UPDTRNS_DB_UPDATED_MASK;
					*((block_id *)update_array_ptr) = blk;
					update_array_ptr += SIZEOF(block_id);
					*(int *)update_array_ptr = 0;
					alt_hist.h[1].blk_num = 0;
					alt_hist.h[0].level = 0;
					alt_hist.h[0].cse = NULL;
					alt_hist.h[0].tn = curr_tn;
					alt_hist.h[0].blk_num = lmap_blk_num + blk;
					alt_hist.h[0].buffaddr = t_qread(alt_hist.h[0].blk_num,
							&alt_hist.h[0].cycle, &alt_hist.h[0].cr);
					if (!alt_hist.h[0].buffaddr)
					{
						t_retry((enum cdb_sc)rdfail_detail);
						continue;
					}
					if (!t_recycled2free(&alt_hist.h[0]))
					{
						t_retry(cdb_sc_lostbmlcr);
						continue;
					}
					t_write_map(&bmphist, (unsigned char *)update_array, curr_tn, 0);
					/* Set the opcode for INCTN record written by t_end() */
					inctn_opcode = inctn_blkmarkfree;
					if ((trans_num)0 == t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))
						continue;
					/* block processed, scan from the next one */
					blk++;
					break;
				} else
				{
					assert(t_tries < CDB_STAGNATE);
					t_retry(cdb_sc_badbitmap);
					continue;
				}
			} /* END recycled2free retry loop */
		} /* END scanning blocks of this particular lmap */
		/* Write PBLK for the bitmap block, in case it hasn't been written i.e. t_end() was never called above */
		/* Do a transaction that just increments the bitmap block's tn so that t_end() can do its thing */
		DBGEHND((stdout, "DBG:: bitmap block inctn -- lmap_blk_num = [%lu]\n", lmap_blk_num));
		t_begin(ERR_MUTRUNCFAIL, UPDTRNS_DB_UPDATED_MASK);
		for (;;)
		{
			RESET_UPDATE_ARRAY;
			BLK_ADDR(blkid_ptr, SIZEOF(block_id), block_id);
			*blkid_ptr = 0;
			update_trans = UPDTRNS_DB_UPDATED_MASK;
			inctn_opcode = inctn_mu_reorg; /* inctn_mu_truncate */
			curr_tn = csd->trans_hist.curr_tn;
			blkhist = &alt_hist.h[0];
			blkhist->blk_num = lmap_blk_num;
			blkhist->tn = curr_tn;
			blkhist->cse = NULL; /* start afresh (do not use value from previous retry) */
			/* Read the nth local bitmap into memory */
			blkhist->buffaddr = t_qread(lmap_blk_num, (sm_int_ptr_t)&blkhist->cycle, &blkhist->cr);
			lmap_blk_hdr = (blk_hdr_ptr_t)blkhist->buffaddr;
			if (!(blkhist->buffaddr) || (BM_SIZE(BLKS_PER_LMAP) != lmap_blk_hdr->bsiz))
			{ /* Could not read the block successfully. Retry. */
				t_retry((enum cdb_sc)rdfail_detail);
				continue;
			}
			t_write_map(blkhist, (unsigned char *)blkid_ptr, curr_tn, 0);
			blkhist->blk_num = 0; /* create empty history for bitmap block */
			if ((trans_num)0 == t_end(&alt_hist, NULL, TN_NOT_SPECIFIED))
				continue;
			break;
		}
	} /* END scanning lmaps */
	/* ======================================== PHASE 2 ======================================== */
	assert(!csa->now_crit);
	for (;;)
	{ /* wait for FREEZE, we don't want to truncate a frozen database */
		grab_crit(gv_cur_region);
		if (FROZEN_CHILLED(cs_data))
			DO_CHILLED_AUTORELEASE(csa, cs_data);
		if (!FROZEN(cs_data) && !IS_REPL_INST_FROZEN)
			break;
		rel_crit(gv_cur_region);
		while (FROZEN(cs_data) || IS_REPL_INST_FROZEN)
		{
			hiber_start(1000);
			if (FROZEN_CHILLED(cs_data) && CHILLED_AUTORELEASE(cs_data))
				break;
		}
	}
	assert(csa->nl->trunc_pid == process_id);
	/* Flush pending updates to disk. If this is not done, old updates can be flushed AFTER ftruncate, extending the file. */
	if (!wcs_flu(WCSFLU_FLUSH_HDR | WCSFLU_WRITE_EPOCH | WCSFLU_MSYNC_DB))
	{
		assert(FALSE);
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_BUFFLUFAILED, 4, LEN_AND_LIT("MUPIP REORG TRUNCATE"),
				DB_LEN_STR(gv_cur_region));
		rel_crit(gv_cur_region);
		return FALSE;
	}
	csa->nl->highest_lbm_with_busy_blk = MAX(found_busy_blk, csa->nl->highest_lbm_with_busy_blk);
	assert(IS_BITMAP_BLK(csa->nl->highest_lbm_with_busy_blk));
	new_total = MIN(old_total, csa->nl->highest_lbm_with_busy_blk + BLKS_PER_LMAP);
	if (mu_ctrly_occurred || mu_ctrlc_occurred)
	{
		rel_crit(gv_cur_region);
		return TRUE;
	} else if (csa->ti->total_blks != old_total || new_total == old_total)
	{
		assert(csa->ti->total_blks >= old_total); /* Better have been an extend, not a truncate... */
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(5) ERR_MUTRUNCNOSPACE, 3, REG_LEN_STR(gv_cur_region), truncate_percent);
		rel_crit(gv_cur_region);
		return TRUE;
	} else if (GDSVCURR != csd->desired_db_format || csd->blks_to_upgrd != 0 || !csd->fully_upgraded)
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCNOV4, 2, REG_LEN_STR(gv_cur_region));
		rel_crit(gv_cur_region);
		return TRUE;
	} else if (SNAPSHOTS_IN_PROG(csa->nl))
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCSSINPROG, 2, REG_LEN_STR(gv_cur_region));
		rel_crit(gv_cur_region);
		return TRUE;
	} else if (BACKUP_NOT_IN_PROGRESS != cs_addrs->nl->nbb)
	{
		gtm_putmsg_csa(CSA_ARG(csa) VARLSTCNT(4) ERR_MUTRUNCBACKINPROG, 2, REG_LEN_STR(gv_cur_region));
		rel_crit(gv_cur_region);
		return TRUE;
	}
	DEFER_INTERRUPTS(INTRPT_IN_TRUNC, prev_intrpt_state);
	if (JNL_ENABLED(csa))
	{ /* Write JRT_TRUNC and INCTN records */
		if (!jgbl.dont_reset_gbl_jrec_time)
			SET_GBL_JREC_TIME;	/* needed before jnl_ensure_open as that can write jnl records */
		jpc = csa->jnl;
		jbp = jpc->jnl_buff;
		/* Before writing to jnlfile, adjust jgbl.gbl_jrec_time if needed to maintain time order
		 * of jnl records. This needs to be done BEFORE the jnl_ensure_open as that could write
		 * journal records (if it decides to switch to a new journal file).
		 */
		ADJUST_GBL_JREC_TIME(jgbl, jbp);
		jnl_status = jnl_ensure_open(gv_cur_region, csa);
		if (SS_NORMAL != jnl_status)
			send_msg_csa(CSA_ARG(csa) VARLSTCNT(6) jnl_status, 4, JNL_LEN_STR(csd), DB_LEN_STR(gv_cur_region));
		else
		{
			if (0 == jpc->pini_addr)
				jnl_put_jrt_pini(csa);
			jnl_write_trunc_rec(csa, old_total, csa->ti->free_blocks, new_total);
			inctn_opcode = inctn_mu_reorg;
			jnl_write_inctn_rec(csa);
			jnl_status = jnl_flush(gv_cur_region);
			if (SS_NORMAL != jnl_status)
			{
				send_msg_csa(CSA_ARG(csa) VARLSTCNT(9) ERR_JNLFLUSH, 2, JNL_LEN_STR(csd),
					ERR_TEXT, 2, RTS_ERROR_TEXT("Error with journal flush during mu_truncate"),
					jnl_status);
				assert(NOJNL == jpc->channel); /* jnl file lost has been triggered */
			}
		}
	}
	/* Good to go ahead and REALLY truncate (reduce total_blks, clear cache_array, FTRUNCATE) */
	curr_tn = csa->ti->curr_tn;
	CHECK_TN(csa, csd, curr_tn);
	udi = FILE_INFO(gv_cur_region);
	/* Information used by recover_truncate to check if the file size and csa->ti->total_blks are INCONSISTENT */
	trunc_file_size = BLK_ZERO_OFF(csd->start_vbn) + ((off_t)csd->blk_size * (new_total + 1));
	csd->after_trunc_total_blks = new_total;
	csd->before_trunc_free_blocks = csa->ti->free_blocks;
	csd->before_trunc_total_blks = old_total; /* Flags interrupted truncate for recover_truncate */
	/* file size and total blocks: INCONSISTENT */
	csa->ti->total_blks = new_total;
	/* past the point of no return -- shared memory intact */
	assert(csa->ti->free_blocks >= DELTA_FREE_BLOCKS(old_total, new_total));
	csa->ti->free_blocks -= DELTA_FREE_BLOCKS(old_total, new_total);
	new_free = csa->ti->free_blocks;
	KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_1); /* 55 : Issue a kill -9 before 1st fsync */
	fileheader_sync(gv_cur_region);
	DB_FSYNC(gv_cur_region, udi, csa, db_fsync_in_prog, save_errno);
	CHECK_DBSYNC(gv_cur_region, save_errno);
	/* past the point of no return -- shared memory deleted */
	KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_2); /* 56 : Issue a kill -9 after 1st fsync */
	clear_cache_array(csa, csd, gv_cur_region, new_total, old_total);
	offset = (off_t)BLK_ZERO_OFF(csd->start_vbn) + (off_t)new_total * csd->blk_size;
	save_errno = db_write_eof_block(udi, udi->fd, csd->blk_size, offset, &(TREF(dio_buff)));
	if (0 != save_errno)
	{
		err_msg = (char *)STRERROR(errno);
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_MUTRUNCERROR, 4, REG_LEN_STR(gv_cur_region), LEN_AND_STR(err_msg));
		return FALSE;
	}
	KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_3); /* 57 : Issue a kill -9 after reducing csa->ti->total_blks, before FTRUNCATE */
	/* Execute an ftruncate() and truncate the DB file
	 * ftruncate() is a SYSTEM CALL on almost all platforms (except SunOS)
	 * It ignores kill -9 signal till its operation is completed.
	 * So we can safely assume that the result of ftruncate() will be complete.
	 */
	FTRUNCATE(FILE_INFO(gv_cur_region)->fd, trunc_file_size, ftrunc_status);
	if (0 != ftrunc_status)
	{
		err_msg = (char *)STRERROR(errno);
		rts_error_csa(CSA_ARG(csa) VARLSTCNT(6) ERR_MUTRUNCERROR, 4, REG_LEN_STR(gv_cur_region), LEN_AND_STR(err_msg));
		/* should go through recover_truncate now, which will again try to FTRUNCATE */
		return FALSE;
	}
	/* file size and total blocks: CONSISTENT (shrunk) */
	KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_4); /* 58 : Issue a kill -9 after FTRUNCATE, before 2nd fsync */
	csa->nl->root_search_cycle++;	/* Force concurrent processes to restart in t_end/tp_tend to make sure no one
					 * tries to commit updates past the end of the file. Bitmap validations together
					 * with highest_lbm_with_busy_blk should actually be sufficient, so this is
					 * just to be safe.
					 */
	csd->before_trunc_total_blks = 0; /* indicate CONSISTENT */
	/* Increment TN */
	assert(csa->ti->early_tn == csa->ti->curr_tn);
	csd->trans_hist.early_tn = csd->trans_hist.curr_tn + 1;
	INCREMENT_CURR_TN(csd);
	fileheader_sync(gv_cur_region);
	DB_FSYNC(gv_cur_region, udi, csa, db_fsync_in_prog, save_errno);
	KILL_TRUNC_TEST(WBTEST_CRASH_TRUNCATE_5); /* 58 : Issue a kill -9 after 2nd fsync */
	CHECK_DBSYNC(gv_cur_region, save_errno);
	ENABLE_INTERRUPTS(INTRPT_IN_TRUNC, prev_intrpt_state);
	curr_tn = csa->ti->curr_tn;
	rel_crit(gv_cur_region);
	send_msg_csa(CSA_ARG(csa) VARLSTCNT(7) ERR_MUTRUNCSUCCESS, 5, DB_LEN_STR(gv_cur_region), old_total, new_total, &curr_tn);
	util_out_print("Truncated region: !AD. Reduced total blocks from [!UL] to [!UL]. Reduced free blocks from [!UL] to [!UL].",
					FLUSH, REG_LEN_STR(gv_cur_region), old_total, new_total, old_free, new_free);
	return TRUE;
} /* END of mu_truncate() */
Example #23
void bm_setmap(block_id bml, block_id blk, int4 busy)
{
	sm_uc_ptr_t	bmp;
	trans_num	ctn;
	srch_hist	alt_hist;
	srch_blk_status	blkhist; /* block-history to fill in for t_write_map which uses "blk_num", "buffaddr", "cr", "cycle" */
	cw_set_element  *cse;
	int		lbm_status;	/* local bitmap status of input "blk" i.e. BUSY or FREE or RECYCLED  */
	int4		reference_cnt;
	uint4		bitnum;

	error_def(ERR_DSEFAIL);

	t_begin_crit(ERR_DSEFAIL);
	ctn = cs_addrs->ti->curr_tn;
	if (!(bmp = t_qread(bml, &blkhist.cycle, &blkhist.cr)))
		t_retry((enum cdb_sc)rdfail_detail);
	blkhist.blk_num = bml;
	blkhist.buffaddr = bmp;
	alt_hist.h[0].blk_num = 0;	/* Need for calls to T_END for bitmaps */
	CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
	bitnum = blk - bml;
	/* Find out current status in order to determine if there is going to be a state transition */
	assert(ROUND_DOWN2(blk, cs_data->bplmap) == bml);
	GET_BM_STATUS(bmp, bitnum, lbm_status);
	switch(lbm_status)
	{
		case BLK_BUSY:
			reference_cnt = busy ? 0 : -1;
			break;
		case BLK_FREE:
		case BLK_MAPINVALID:
		case BLK_RECYCLED:
			assert(BLK_MAPINVALID != lbm_status);
			reference_cnt = busy ? 1 : 0;
			break;
		default:
			assert(FALSE);
			break;
	}
	if (reference_cnt)
	{	/* Initialize update array with non-zero bitnum only if reference_cnt is non-zero. */
		assert(bitnum);
		*((block_id_ptr_t)update_array_ptr) = bitnum;
		update_array_ptr += sizeof(block_id);
	}
	/* Terminate update array unconditionally with zero bitnum. */
	*((block_id_ptr_t)update_array_ptr) = 0;
	update_array_ptr += sizeof(block_id);
	t_write_map(&blkhist, (uchar_ptr_t)update_array, ctn, reference_cnt);
	if (JNL_ENABLED(cs_data))
        {
                cse = (cw_set_element *)(&cw_set[0]);
                cse->new_buff = non_tp_jfb_buff_ptr;
                memcpy(non_tp_jfb_buff_ptr, bmp, ((blk_hdr_ptr_t)bmp)->bsiz);
                gvcst_map_build((uint4 *)cse->upd_addr, (uchar_ptr_t)cse->new_buff, cse, cs_addrs->ti->curr_tn);
                cse->done = TRUE;
        }
	/* Call t_end till it succeeds or aborts (error will be reported) */
	while ((trans_num)0 == t_end(&alt_hist, 0))
		;
	return;
}
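
bm_setmap() above (and gvcst_bmp_mark_free() further below) hands t_write_map() an "update array" with a simple shape: a flat run of non-zero bit numbers, i.e. block offsets within the local bitmap, closed by a mandatory zero entry. A standalone sketch of just that layout follows; the append_bitnums() helper and the block_id typedef are placeholders, not GT.M code.

#include <assert.h>
#include <stddef.h>

typedef int block_id;	/* placeholder for this sketch; GT.M defines its own block_id */

/* Append n bitmap offsets to the update array and close the list with the
 * mandatory zero bitnum, returning the new end of the array. */
static char *append_bitnums(char *updptr, const block_id *bitnums, size_t n)
{
	size_t	i;

	for (i = 0; i < n; i++) {
		assert(0 != bitnums[i]);	/* 0 is reserved as the terminator */
		*(block_id *)updptr = bitnums[i];
		updptr += sizeof(block_id);
	}
	*(block_id *)updptr = 0;		/* terminate unconditionally with a zero bitnum */
	return updptr + sizeof(block_id);
}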
Example #24
0
bool	gvcst_get(mval *v)
{
	srch_blk_status	*s;
	enum cdb_sc	status;
	int		key_size, data_len;
	unsigned short	rsiz;
	rec_hdr_ptr_t	rp;

	T_BEGIN_READ_NONTP_OR_TP(ERR_GVGETFAIL);
	assert(t_tries < CDB_STAGNATE || cs_addrs->now_crit);	/* we better hold crit in the final retry (TP & non-TP) */
	for (;;)
	{
		if (cdb_sc_normal == (status = gvcst_search(gv_currkey, NULL)))
		{
			if ((key_size = gv_currkey->end + 1) == gv_target->hist.h[0].curr_rec.match)
			{
				rp = (rec_hdr_ptr_t)(gv_target->hist.h[0].buffaddr + gv_target->hist.h[0].curr_rec.offset);
				GET_USHORT(rsiz, &rp->rsiz);
				data_len = rsiz + rp->cmpc - sizeof(rec_hdr) - key_size;
				if (data_len < 0  || (sm_uc_ptr_t)rp + rsiz >
					gv_target->hist.h[0].buffaddr + ((blk_hdr_ptr_t)gv_target->hist.h[0].buffaddr)->bsiz)
				{
					assert(CDB_STAGNATE > t_tries);
					status = cdb_sc_rmisalign1;
				} else
				{
					if (stringpool.top - stringpool.free < data_len)
						stp_gcol(data_len);
					assert(stringpool.top - stringpool.free >= data_len);
					memcpy(stringpool.free, (sm_uc_ptr_t)rp + rsiz - data_len, data_len);
					if (0 == dollar_tlevel)
					{
						if (0 == t_end(&gv_target->hist, NULL))
							continue;
					} else
					{
						status = tp_hist(NULL);
						if (cdb_sc_normal != status)
						{
							t_retry(status);
							continue;
						}
					}
					v->mvtype = MV_STR;
					v->str.addr = (char *)stringpool.free;
					v->str.len = data_len;
					stringpool.free += data_len;
					if (cs_addrs->read_write)
						cs_addrs->hdr->n_gets++;
					return TRUE;
				}
			} else
			{
				if (0 == dollar_tlevel)
				{
					if (0 == t_end(&gv_target->hist, NULL))
						continue;
				} else
				{
					status = tp_hist(NULL);
					if (cdb_sc_normal != status)
					{
						t_retry(status);
						continue;
					}
				}

				cs_addrs->hdr->n_gets++;
				return FALSE;
			}
		}
		t_retry(status);
	}
}
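
gvcst_get() above is an optimistic read: walk the tree without crit, stage the value, and only trust it once t_end() (or tp_hist() under TP) validates that nothing moved underneath; otherwise retry. The toy below shows the same shape in self-contained C11, with a seqlock-style version counter standing in for the block-history validation. Every name in it is illustrative, not a GT.M API, and the unsynchronized memcpy() is a deliberate simplification of what a real implementation would do.

#include <stdatomic.h>
#include <string.h>

/* Toy shared record standing in for a database block. */
struct toy_rec {
	_Atomic unsigned	version;	/* a writer makes this odd while it updates payload */
	char			payload[64];
};

/* Read without a lock, then validate and retry -- the same shape as the
 * t_end()/t_retry() loop in gvcst_get(). */
static void toy_opt_read(struct toy_rec *r, char out[64])
{
	unsigned	v;

	for (;;) {
		v = atomic_load_explicit(&r->version, memory_order_acquire);
		if (v & 1)
			continue;				/* writer in progress: retry */
		memcpy(out, r->payload, sizeof(r->payload));	/* optimistic, lock-free copy */
		atomic_thread_fence(memory_order_acquire);
		if (atomic_load_explicit(&r->version, memory_order_relaxed) == v)
			return;					/* validated: the copy is a consistent snapshot */
		/* else the record changed underneath us -- discard and retry, like t_retry() */
	}
}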
int main(int argc, char **argv)
{
	int ret, r2;

	runcmd_init();
	t_set_colors(0);
	t_start("exec output comparison");
	{
		int i;
		char *out = calloc(1, BUF_SIZE);
		for (i = 0; cases[i].input != NULL; i++) {
			memset(out, 0, BUF_SIZE);
			int pfd[2] = {-1, -1}, pfderr[2] = {-1, -1};
			/* We need a stub iobregarg since runcmd_open()'s prototype
			 * declares that argument with the non-null attribute. */
			int stub_iobregarg = 0;
			int fd;
			char *cmd;
			asprintf(&cmd, ECHO_COMMAND " -n %s", cases[i].input);
			fd = runcmd_open(cmd, pfd, pfderr, NULL, stub_iobreg, &stub_iobregarg);
			free(cmd);
			read(pfd[0], out, BUF_SIZE);
			ok_str(cases[i].output, out, "Echoing a command should give expected output");
			close(pfd[0]);
			close(pfderr[0]);
			close(fd);
		}
		free(out);
	}
	ret = t_end();
	t_reset();
	t_start("anomaly detection");
	{
		int i;
		for (i = 0; anomaly[i].cmd; i++) {
			int out_argc;
			char *out_argv[256];
			int result = runcmd_cmd2strv(anomaly[i].cmd, &out_argc, out_argv);
			ok_int(result, anomaly[i].ret, anomaly[i].cmd);
			if (out_argv[0]) free(out_argv[0]);
		}
	}
	r2 = t_end();
	ret = r2 ? r2 : ret;
	t_reset();
	t_start("argument splitting");
	{
		int i;
		for (i = 0; parse_case[i].cmd; i++) {
			int x, out_argc;
			char *out_argv[256];
			int result = runcmd_cmd2strv(parse_case[i].cmd, &out_argc, out_argv);
			/*out_argv[out_argc] = NULL;*/ /* This must be NULL-terminated already. */
			ok_int(result, parse_case[i].ret, parse_case[i].cmd);
			ok_int(out_argc, parse_case[i].argc_exp, parse_case[i].cmd);
			for (x = 0; x < parse_case[i].argc_exp && out_argv[x]; x++) {
				ok_str(parse_case[i].argv_exp[x], out_argv[x], "argv comparison test");
			}
			if (out_argv[0]) free(out_argv[0]);
		}
	}

	r2 = t_end();
	return r2 ? r2 : ret;
}
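
One robustness note on the first test block above: a single read() on pfd[0] may legally return fewer bytes than the child wrote. For short /bin/echo output that rarely matters, but a defensive variant would drain the pipe in a loop, as in this sketch (it assumes only POSIX read() and the BUF_SIZE convention used above):

#include <stddef.h>
#include <unistd.h>

/* Read from fd until EOF, error, or the buffer is full, and keep the result
 * NUL-terminated so it stays usable with ok_str(). */
static ssize_t read_all(int fd, char *buf, size_t bufsize)
{
	size_t	total = 0;
	ssize_t	n;

	while (total < bufsize - 1) {
		n = read(fd, buf + total, bufsize - 1 - total);
		if (n <= 0)
			break;			/* EOF (0) or error (-1) */
		total += (size_t)n;
	}
	buf[total] = '\0';
	return (ssize_t)total;
}

Replacing the bare read(pfd[0], out, BUF_SIZE) with read_all(pfd[0], out, BUF_SIZE) would keep the comparison stable even if the child's output arrives in pieces.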
Example #26
0
int4 mu_size_arsample(mval *gn, uint4 M, boolean_t ar, int seed)
{
	enum cdb_sc		status;
	trans_num		ret_tn;
	int			k, h;
	boolean_t		verify_reads;
	boolean_t		tn_aborted;
	unsigned int		lcl_t_tries;
	double			r[MAX_BT_DEPTH + 1];	/* r[j] is #records in level j block of current traversal */
	stat_t		rstat, ustat;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	inctn_opcode = inctn_invalid_op;
	op_gvname(VARLSTCNT(1) gn);
	if (0 == gv_target->root)
	{	/* Global does not exist (online rollback). Not an error. */
		gtm_putmsg(VARLSTCNT(4) ERR_GBLNOEXIST, 2, gn->str.len, gn->str.addr);
		return EXIT_NRM;
	}
	if (!seed)
		seed = (int4)(time(0) * process_id);
	srand48(seed);

	/* do random traversals until M of them are accepted at level 1 */
	INIT_STATS(rstat);
	for (k = 1; rstat.N[1] < M; k++)
	{
		if (mu_ctrlc_occurred || mu_ctrly_occurred)
			return EXIT_ERR;
		t_begin(ERR_MUSIZEFAIL, 0);
		for (;;)
		{
			CLEAR_VECTOR(r);
			if (cdb_sc_normal != (status = rand_traverse(r)))
			{
				assert(CDB_STAGNATE > t_tries);
				t_retry(status);
				continue;
			}
			gv_target->clue.end = 0;
			gv_target->hist.h[0] = gv_target->hist.h[1];	/* No level 0 block to validate */
			DEBUG_ONLY(lcl_t_tries = t_tries);
			if ((trans_num)0 == (ret_tn = t_end(&gv_target->hist, NULL, TN_NOT_SPECIFIED)))
			{
				ABORT_TRANS_IF_GBL_EXIST_NOMORE(lcl_t_tries, tn_aborted);
				if (tn_aborted)
				{	/* Global does not exist (online rollback). Not an error. */
					gtm_putmsg(VARLSTCNT(4) ERR_GBLNOEXIST, 2, gn->str.len, gn->str.addr);
					return EXIT_NRM;
				}
				continue;
			}
			accum_stats_ar(&rstat, r, ar);
			break;
		}
	}
	finalize_stats_ar(&rstat, ar);

	/* display rstat data */
	/* Showing the error as 2 standard deviations, which is a 95% confidence interval for the mean number of blocks at
	 * each level. */
	util_out_print("!/Number of generated samples = !UL", FLUSH, rstat.n);
	util_out_print("Number of accepted samples = !UL", FLUSH, rstat.N[1]);
	util_out_print("Level          Blocks           2 sigma(+/-)      % Accepted", FLUSH);
	for (h = MAX_BT_DEPTH; (h >= 0) && (rstat.blktot[h] < EPS); h--);	/* skip unused upper levels */
	for ( ; h > 0; h--)
		util_out_print("!5UL !15UL !15UL ~ !3UL% !15UL", FLUSH, h, (int)ROUND(rstat.blktot[h]),
				(int)ROUND(sqrt(rstat.blkerr[h])*2),
				(int)ROUND(sqrt(rstat.blkerr[h])*2/rstat.blktot[h]*100),
				(int)ROUND(100.0*rstat.N[h]/rstat.n)
				);
	util_out_print("!5UL !15UL !15UL ~ !3UL%             N/A", FLUSH, h, (int)ROUND(rstat.blktot[h]),
			(int)ROUND(sqrt(rstat.blkerr[h])*2),
			(int)ROUND(sqrt(rstat.blkerr[h])*2/rstat.blktot[h]*100.0)
			);
	util_out_print("Total !15UL !15UL ~ !3UL%             N/A", FLUSH, (int)ROUND(rstat.B),
			(int)ROUND(sqrt(rstat.error)*2),
			(int)ROUND(sqrt(rstat.error)*2/rstat.B*100.0)
			);

	return EXIT_NRM;
}
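
The report above prints, per level, the estimated block count, a 2-sigma error (treated as a ~95% confidence interval), and that error as a percentage of the estimate. The small helper below spells out that arithmetic from the accumulated blktot (estimate) and blkerr (variance) values; the helper name is hypothetical and nearest-integer rounding stands in for the ROUND() macro used above.

#include <math.h>

/* Derive the three displayed quantities for one level, as in the
 * util_out_print() calls above. */
static void level_report(double blktot, double blkerr,
			 int *blocks, int *two_sigma, int *pct)
{
	double	err2 = 2.0 * sqrt(blkerr);		/* 2 standard deviations */

	*blocks = (int)(blktot + 0.5);
	*two_sigma = (int)(err2 + 0.5);
	*pct = (int)(err2 / blktot * 100.0 + 0.5);	/* error as % of the estimate */
}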
trans_num gvcst_bmp_mark_free(kill_set *ks)
{
	block_id		bit_map, next_bm, *updptr;
	blk_ident		*blk, *blk_top, *nextblk;
	trans_num		ctn, start_db_fmt_tn;
	unsigned int		len;
#	if defined(UNIX) && defined(DEBUG)
	unsigned int		lcl_t_tries;
#	endif
	int4			blk_prev_version;
	srch_hist		alt_hist;
	trans_num		ret_tn = 0;
	boolean_t		visit_blks;
	srch_blk_status		bmphist;
	cache_rec_ptr_t		cr;
	enum db_ver		ondsk_blkver;
	enum cdb_sc		status;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	TREF(in_gvcst_bmp_mark_free) = TRUE;
	assert(inctn_bmp_mark_free_gtm == inctn_opcode || inctn_bmp_mark_free_mu_reorg == inctn_opcode);
	/* Note down the desired_db_format_tn before you start relying on cs_data->fully_upgraded.
	 * If the db is fully_upgraded, take the optimal path that does not need to read each block being freed.
	 * But in order to detect concurrent desired_db_format changes, note down the tn (when the last format change occurred)
	 * 	before the fully_upgraded check	and after having noted down the database current_tn.
	 * If they are the same, then we are guaranteed no concurrent desired_db_format change occurred.
	 * If they are not, then fall through to the non-optimal path where each to-be-killed block has to be visited.
	 * The reason we need to visit every block in case desired_db_format changes is to take care of the case where
	 *	MUPIP REORG DOWNGRADE concurrently changes a block that we are about to free.
	 */
	start_db_fmt_tn = cs_data->desired_db_format_tn;
	visit_blks = (!cs_data->fully_upgraded);	/* Local evaluation */
	assert(!visit_blks || (visit_blks && dba_bg == cs_addrs->hdr->acc_meth)); /* must have blks_to_upgrd == 0 for non-BG */
	assert(!dollar_tlevel); 			/* Should NOT be in TP now */
	blk = &ks->blk[0];
	blk_top = &ks->blk[ks->used];
	if (!visit_blks)
	{	/* Database has been completely upgraded. Free all blocks in one bitmap as part of one transaction. */
		assert(cs_data->db_got_to_v5_once); /* assert all V4 fmt blocks (including RECYCLED) have space for V5 upgrade */
		inctn_detail.blknum_struct.blknum = 0; /* to indicate no adjustment to "blks_to_upgrd" necessary */
		/* If any of the mini-transactions below restarts because of an online rollback, we don't want the application
		 * refresh to happen (like $ZONLNRLBK++ or rts_error(DBROLLEDBACK)). This is because, although we are currently
		 * in non-TP (dollar_tlevel = 0), we could actually be inside a TP transaction that has merely faked
		 * dollar_tlevel. In such a case, we should NOT be issuing a DBROLLEDBACK error as TP transactions are supposed
		 * to just restart in case of an online rollback. So, set the global variable that gtm_onln_rlbk_clnup can check
		 * in order to skip the application refresh while still resetting the clues. The next update will see the cycle
		 * mismatch and take the right action accordingly.
		 */
		for ( ; blk < blk_top;  blk = nextblk)
		{
			if (0 != blk->flag)
			{
				nextblk = blk + 1;
				continue;
			}
			assert(0 < blk->block);
			assert((int4)blk->block < cs_addrs->ti->total_blks);
			bit_map = ROUND_DOWN2((int)blk->block, BLKS_PER_LMAP);
			next_bm = bit_map + BLKS_PER_LMAP;
			CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
			/* Scan for the next local bitmap */
			updptr = (block_id *)update_array_ptr;
			for (nextblk = blk;
				(0 == nextblk->flag) && (nextblk < blk_top) && ((block_id)nextblk->block < next_bm);
				++nextblk)
			{
				assert((block_id)nextblk->block - bit_map);
				*updptr++ = (block_id)nextblk->block - bit_map;
			}
			len = (unsigned int)((char *)nextblk - (char *)blk);
			update_array_ptr = (char *)updptr;
			alt_hist.h[0].blk_num = 0;			/* needed for calls to T_END for bitmaps */
			alt_hist.h[0].blk_target = NULL;		/* need to initialize for calls to T_END */
			/* the following assumes SIZEOF(blk_ident) == SIZEOF(int) */
			assert(SIZEOF(blk_ident) == SIZEOF(int));
			*(int *)update_array_ptr = 0;
			t_begin(ERR_GVKILLFAIL, UPDTRNS_DB_UPDATED_MASK);
			for (;;)
			{
				ctn = cs_addrs->ti->curr_tn;
				/* Need a read fence before reading fields from cs_data as we are reading outside
				 * of crit and relying on this value to detect desired db format state change.
				 */
				SHM_READ_MEMORY_BARRIER;
				if (start_db_fmt_tn != cs_data->desired_db_format_tn)
				{	/* Concurrent db format change has occurred. Need to visit every block to be killed
					 * to determine its block format. Fall through to the non-optimal path below
					 */
					ret_tn = 0;
					break;
				}
				bmphist.blk_num = bit_map;
				if (NULL == (bmphist.buffaddr = t_qread(bmphist.blk_num, (sm_int_ptr_t)&bmphist.cycle,
									&bmphist.cr)))
				{
					t_retry((enum cdb_sc)rdfail_detail);
					continue;
				}
				t_write_map(&bmphist, (uchar_ptr_t)update_array, ctn, -(int4)(nextblk - blk));
				UNIX_ONLY(DEBUG_ONLY(lcl_t_tries = t_tries));
				if ((trans_num)0 == (ret_tn = t_end(&alt_hist, NULL, TN_NOT_SPECIFIED)))
				{
#					ifdef UNIX
					assert((CDB_STAGNATE == t_tries) || (lcl_t_tries == t_tries - 1));
					status = LAST_RESTART_CODE;
					if ((cdb_sc_onln_rlbk1 == status) || (cdb_sc_onln_rlbk2 == status)
						|| TREF(rlbk_during_redo_root))
					{	/* t_end restarted due to online rollback. Discard bitmap free-up and return control
						 * to the application. But, before that reset only_reset_clues_if_onln_rlbk to FALSE
						 */
						TREF(in_gvcst_bmp_mark_free) = FALSE;
						send_msg(VARLSTCNT(6) ERR_IGNBMPMRKFREE, 4, REG_LEN_STR(gv_cur_region),
								DB_LEN_STR(gv_cur_region));
						t_abort(gv_cur_region, cs_addrs);
						return ret_tn; /* actually 0 */
					}
#					endif
					continue;
				}
				break;
			}
			if (0 == ret_tn) /* db format change occurred. Fall through to below for loop to visit each block */
			{
				/* Abort any active transaction to get rid of lingering Non-TP artifacts */
				t_abort(gv_cur_region, cs_addrs);
				break;
			}
		}
	}	/* for all blocks in the kill_set */
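
The inner loop above re-reads desired_db_format_tn behind SHM_READ_MEMORY_BARRIER and compares it with the value saved before the fully_upgraded check; any mismatch abandons the fast path. Below is a self-contained C11 analogue of that "saved generation plus acquire fence" test; the names are illustrative and not GT.M's.

#include <stdatomic.h>
#include <stdbool.h>

/* Toy analogue of desired_db_format_tn: writers bump this generation counter
 * whenever they change the on-disk format. */
static _Atomic unsigned long format_gen;

/* Re-check, behind an acquire fence (the role SHM_READ_MEMORY_BARRIER plays
 * above), that the generation sampled at the start is still current before
 * trusting work done outside crit. */
static bool format_unchanged(unsigned long gen_at_start)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&format_gen, memory_order_relaxed) == gen_at_start;
}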
Example #28
0
int main(int argc, char **argv)
{
	int i, j;
	struct kvvec *kvv, *kvv2, *kvv3;
	struct kvvec_buf *kvvb, *kvvb2;
	struct kvvec k = KVVEC_INITIALIZER;

	t_set_colors(0);

	t_start("key/value vector tests");
	kvv = kvvec_create(1);
	kvv2 = kvvec_create(1);
	kvv3 = kvvec_create(1);
	add_vars(kvv, test_data, 1239819);
	add_vars(kvv, (const char **)argv + 1, argc - 1);

	kvvec_sort(kvv);
	kvvec_foreach(kvv, NULL, walker);

	/* kvvec2buf -> buf2kvvec -> kvvec2buf -> buf2kvvec conversion */
	kvvb = kvvec2buf(kvv, KVSEP, PAIRSEP, OVERALLOC);
	kvv3 = buf2kvvec(kvvb->buf, kvvb->buflen, KVSEP, PAIRSEP, KVVEC_COPY);
	kvvb2 = kvvec2buf(kvv3, KVSEP, PAIRSEP, OVERALLOC);

	buf2kvvec_prealloc(kvv2, kvvb->buf, kvvb->buflen, KVSEP, PAIRSEP, KVVEC_ASSIGN);
	kvvec_foreach(kvv2, kvv, walker);

	kvvb = kvvec2buf(kvv, KVSEP, PAIRSEP, OVERALLOC);

	test(kvv->kv_pairs == kvv2->kv_pairs, "pairs should be identical");

	for (i = 0; i < kvv->kv_pairs; i++) {
		struct key_value *kv1, *kv2;
		kv1 = &kvv->kv[i];
		if (i >= kvv2->kv_pairs) {
			t_fail("missing var %d in kvv2", i);
			printf("[%s=%s] (%d+%d)\n", kv1->key, kv1->value, kv1->key_len, kv1->value_len);
			continue;
		}
		kv2 = &kvv2->kv[i];
		if (!test(!kv_compare(kv1, kv2), "kv pair %d must match", i)) {
			printf("%d failed: [%s=%s] (%d+%d) != [%s=%s (%d+%d)]\n",
				   i,
				   kv1->key, kv1->value, kv1->key_len, kv1->value_len,
				   kv2->key, kv2->value, kv2->key_len, kv2->value_len);
		}
	}

	test(kvvb2->buflen == kvvb->buflen, "buflens must match");
	test(kvvb2->bufsize == kvvb->bufsize, "bufsizes must match");

	if (kvvb2->buflen == kvvb->buflen && kvvb2->bufsize == kvvb->bufsize &&
		!memcmp(kvvb2->buf, kvvb->buf, kvvb->bufsize))
	{
		t_pass("kvvec -> buf -> kvvec conversion works flawlessly");
	} else {
		t_fail("kvvec -> buf -> kvvec conversion failed :'(");
	}

	free(kvvb->buf);
	free(kvvb);
	free(kvvb2->buf);
	free(kvvb2);
	kvvec_destroy(kvv, 1);
	kvvec_destroy(kvv3, KVVEC_FREE_ALL);

	for (j = 0; pair_term_missing[j]; j++) {
		buf2kvvec_prealloc(&k, strdup(pair_term_missing[j]), strlen(pair_term_missing[j]), '=', ';', KVVEC_COPY);
		for (i = 0; i < k.kv_pairs; i++) {
			struct key_value *kv = &k.kv[i];
			test(kv->key_len == kv->value_len, "%d.%d; key_len=%d; value_len=%d (%s = %s)",
				 j, i, kv->key_len, kv->value_len, kv->key, kv->value);
			test(kv->value_len == strlen(kv->value),
				 "%d.%d; kv->value_len(%d) == strlen(%s)(%d)",
				 j, i, kv->value_len, kv->value, (int)strlen(kv->value));
		}
	}

	t_end();
	return 0;
}
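
For readers who only need the happy path, a minimal kvvec round trip using just the calls that appear in the test above might look like the sketch below. It assumes libnagios' kvvec.h, a foreach callback of the int (*)(struct key_value *, void *) shape implied by walker(), and that KVVEC_COPY paired with KVVEC_FREE_ALL is a safe own-and-free combination, as the kvv3 handling above suggests.

#include <stdio.h>
#include <string.h>
#include "kvvec.h"	/* assumed libnagios header providing the API used in the test */

static int print_pair(struct key_value *kv, void *arg)
{
	(void)arg;
	printf("%.*s => %.*s\n", kv->key_len, kv->key, kv->value_len, kv->value);
	return 0;
}

int main(void)
{
	char		buf[] = "host=web1;state=0;output=OK";
	struct kvvec	*kvv = buf2kvvec(buf, (unsigned int)strlen(buf), '=', ';', KVVEC_COPY);

	kvvec_foreach(kvv, NULL, print_pair);
	kvvec_destroy(kvv, KVVEC_FREE_ALL);
	return 0;
}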
Example #29
0
 /* Importance Sampling */
int4 mu_size_impsample(glist *gl_ptr, int4 M, int4 seed)
{
	boolean_t		tn_aborted;
	double			a[MAX_BT_DEPTH + 1];	/* a[j] is # of adjacent block pointers in level j block of cur traversal */
	double			r[MAX_BT_DEPTH + 1];	/* r[j] is #records in level j block of current traversal */
	enum cdb_sc		status;
	int			k, h;
	stat_t			rstat;
	trans_num		ret_tn;
	unsigned int		lcl_t_tries;
	DCL_THREADGBL_ACCESS;

	SETUP_THREADGBL_ACCESS;
	inctn_opcode = inctn_invalid_op;
	/* set gv_target/gv_currkey/gv_cur_region/cs_addrs/cs_data to correspond to <globalname,reg> in gl_ptr */
	DO_OP_GVNAME(gl_ptr);
	if (0 == gv_target->root)
	{       /* Global does not exist (online rollback). Not an error. */
		gtm_putmsg_csa(CSA_ARG(NULL) VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr);
		return EXIT_NRM;
	}
	if (!seed)
		seed = (int4)(time(0) * process_id);
	srand48(seed);
	/* do M random traversals */
	INIT_STATS(rstat);
	for (k = 1; k <= M; k++)
	{
		if (mu_ctrlc_occurred || mu_ctrly_occurred)
			return EXIT_ERR;
		t_begin(ERR_MUSIZEFAIL, 0);
		for (;;)
		{
			CLEAR_VECTOR(r);
			CLEAR_VECTOR(a);
			if (cdb_sc_normal != (status = mu_size_rand_traverse(r, a)))			/* WARNING: assignment */
			{
				assert((CDB_STAGNATE > t_tries) || IS_FINAL_RETRY_CODE(status));
				t_retry(status);
				continue;
			}
			gv_target->clue.end = 0;
			gv_target->hist.h[0] = gv_target->hist.h[1];				/* No level 0 block to validate */
			DEBUG_ONLY(lcl_t_tries = t_tries);
			if ((trans_num)0 == (ret_tn = t_end(&gv_target->hist, NULL, TN_NOT_SPECIFIED)))	/* WARNING: assignment */
			{
				ABORT_TRANS_IF_GBL_EXIST_NOMORE(lcl_t_tries, tn_aborted);
				if (tn_aborted)
				{	/* Global does not exist (online rollback). Not an error. */
					gtm_putmsg_csa(CSA_ARG(NULL)
						VARLSTCNT(4) ERR_GBLNOEXIST, 2, GNAME(gl_ptr).len, GNAME(gl_ptr).addr);
					return EXIT_NRM;
				}
				continue;
			}
			accum_stats_impsmpl(&rstat, r, a);
			break;
		}
	}
	finalize_stats_impsmpl(&rstat);
	/* display rstat data
	 * Showing the error as 2 standard deviations which is a 95% confidence interval for the
	 * mean number of blocks at each level
	 */
	util_out_print("Number of generated samples = !UL", FLUSH, rstat.n);
	util_out_print("Level          Blocks        Adjacent           2 sigma(+/-)", FLUSH);
	for (h = MAX_BT_DEPTH; (0 <= h) && (rstat.blktot[h] < EPS); h--);	/* skip unused upper levels */
	for ( ; h > 0; h--)
		util_out_print("!5UL !15UL !15UL !15UL ~ !3UL%", FLUSH, h, (int)ROUND(rstat.blktot[h]),
				(int)ROUND(mu_int_adj[h]),
				(int)ROUND(sqrt(rstat.blkerr[h]) * 2),
				(int)ROUND(sqrt(rstat.blkerr[h]) * 2 / rstat.blktot[h] * 100.0)
				);
	util_out_print("!5UL !15UL !15UL !15UL ~ !3UL%", FLUSH, h, (int)ROUND(rstat.blktot[h]),
			(int)ROUND(mu_int_adj[h]),
			(int)ROUND(sqrt(rstat.blkerr[h]) * 2),
			(int)ROUND(sqrt(rstat.blkerr[h]) * 2 / rstat.blktot[h] * 100.0)
			);
	util_out_print("Total !15UL !15UL !15UL ~ !3UL%", FLUSH, (int)ROUND(rstat.B),
			(int)ROUND(rstat.AT),
			(int)ROUND(sqrt(rstat.error) * 2),
			(int)ROUND(sqrt(rstat.error) * 2 / rstat.B * 100.0)
			);
	return EXIT_NRM;
}
trans_num gvcst_bmp_mark_free(kill_set *ks)
{
	block_id	bit_map, next_bm, *updptr;
	blk_ident	*blk, *blk_top, *nextblk;
	trans_num	ctn, start_db_fmt_tn;
	unsigned int	len;
	int4		blk_prev_version;
	srch_hist	alt_hist;
	trans_num	ret_tn = 0;
	boolean_t	visit_blks;
	srch_blk_status	bmphist;
	cache_rec_ptr_t	cr;
	enum db_ver	ondsk_blkver;

	error_def(ERR_GVKILLFAIL);

	assert(inctn_bmp_mark_free_gtm == inctn_opcode || inctn_bmp_mark_free_mu_reorg == inctn_opcode);
	/* Note down the desired_db_format_tn before you start relying on cs_data->fully_upgraded.
	 * If the db is fully_upgraded, take the optimal path that does not need to read each block being freed.
	 * But in order to detect concurrent desired_db_format changes, note down the tn (when the last format change occurred)
	 * 	before the fully_upgraded check	and after having noted down the database current_tn.
	 * If they are the same, then we are guaranteed no concurrent desired_db_format change occurred.
	 * If they are not, then fall through to the non-optimal path where each to-be-killed block has to be visited.
	 * The reason we need to visit every block in case desired_db_format changes is to take care of the case where
	 *	MUPIP REORG DOWNGRADE concurrently changes a block that we are about to free.
	 */
	start_db_fmt_tn = cs_data->desired_db_format_tn;
	visit_blks = (!cs_data->fully_upgraded);	/* Local evaluation */
	assert(!visit_blks || (visit_blks && dba_bg == cs_addrs->hdr->acc_meth)); /* must have blks_to_upgrd == 0 for non-BG */
	assert(!dollar_tlevel); 			/* Should NOT be in TP now */
	blk = &ks->blk[0];
	blk_top = &ks->blk[ks->used];
	if (!visit_blks)
	{	/* Database has been completely upgraded. Free all blocks in one bitmap as part of one transaction. */
		assert(cs_data->db_got_to_v5_once); /* assert all V4 fmt blocks (including RECYCLED) have space for V5 upgrade */
		inctn_detail.blknum_struct.blknum = 0; /* to indicate no adjustment to "blks_to_upgrd" necessary */
		for ( ; blk < blk_top;  blk = nextblk)
		{
			if (0 != blk->flag)
			{
				nextblk = blk + 1;
				continue;
			}
			assert(0 < blk->block);
			assert((int4)blk->block < cs_addrs->ti->total_blks);
			bit_map = ROUND_DOWN2((int)blk->block, BLKS_PER_LMAP);
			next_bm = bit_map + BLKS_PER_LMAP;
			CHECK_AND_RESET_UPDATE_ARRAY;	/* reset update_array_ptr to update_array */
			/* Scan for the next local bitmap */
			updptr = (block_id *)update_array_ptr;
			for (nextblk = blk;
				(0 == nextblk->flag) && (nextblk < blk_top) && ((block_id)nextblk->block < next_bm);
				++nextblk)
			{
				assert((block_id)nextblk->block - bit_map);
				*updptr++ = (block_id)nextblk->block - bit_map;
			}
			len = (unsigned int)((char *)nextblk - (char *)blk);
			update_array_ptr = (char *)updptr;
			alt_hist.h[0].blk_num = 0;			/* needed for calls to T_END for bitmaps */
			/* the following assumes SIZEOF(blk_ident) == SIZEOF(int) */
			assert(SIZEOF(blk_ident) == SIZEOF(int));
			*(int *)update_array_ptr = 0;
			t_begin(ERR_GVKILLFAIL, UPDTRNS_DB_UPDATED_MASK);
			for (;;)
			{
				ctn = cs_addrs->ti->curr_tn;
				/* Need a read fence before reading fields from cs_data as we are reading outside
				 * of crit and relying on this value to detect desired db format state change.
				 */
				SHM_READ_MEMORY_BARRIER;
				if (start_db_fmt_tn != cs_data->desired_db_format_tn)
				{	/* Concurrent db format change has occurred. Need to visit every block to be killed
					 * to determine its block format. Fall through to the non-optimal path below
					 */
					ret_tn = 0;
					break;
				}
				bmphist.blk_num = bit_map;
				if (NULL == (bmphist.buffaddr = t_qread(bmphist.blk_num, (sm_int_ptr_t)&bmphist.cycle,
									&bmphist.cr)))
				{
					t_retry((enum cdb_sc)rdfail_detail);
					continue;
				}
				t_write_map(&bmphist, (uchar_ptr_t)update_array, ctn, -(int4)(nextblk - blk));
				if ((trans_num)0 == (ret_tn = t_end(&alt_hist, NULL, TN_NOT_SPECIFIED)))
					continue;
				break;
			}
			if (0 == ret_tn) /* db format change occurred. Fall through to below for loop to visit each block */
				break;
		}
	}	/* for all blocks in the kill_set */